/linux-master/drivers/infiniband/hw/irdma/
uk.c
    62    u32 wqe_idx;                                              [local]
    68    wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
    69    wqe = qp->sq_base[wqe_idx].elem;
    71    qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;
    92    * @qp_wqe_idx: wqe_idx
    97    u32 wqe_idx;                                              [local]
    100   wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
    101   sq = qp->sq_base + wqe_idx;
    102   if (wqe_idx)
    149   * @wqe_idx
    154   irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx, u16 quanta, u32 total_size, struct irdma_post_sq_info *info)   [argument]
    206   irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)   [argument]
    238   u32 i, wqe_idx;                                           [local]
    336   u32 wqe_idx;                                              [local]
    413   u32 i, wqe_idx, total_size = 0, byte_off;                 [local]
    660   u32 wqe_idx;                                              [local]
    728   u32 wqe_idx;                                              [local]
    801   u32 wqe_idx;                                              [local]
    844   u32 wqe_idx, i, byte_off;                                 [local]
    977   u32 wqe_idx;                                              [local]
    1535  u32 wqe_idx;                                              [local]
    [all...]
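The uk.c hits above show the basic SQ pattern: the current ring head becomes wqe_idx, which indexes both the WQE array (sq_base) and a parallel tracking array (sq_wrtrk_array) that remembers the wr_id and the number of quanta the WQE occupies. A minimal sketch of that pattern follows, with simplified types and hypothetical names (struct simple_qp, get_send_wqe), not the irdma definitions:

    /* Illustrative sketch only -- simplified ring/WQE types, not the irdma
     * driver's structures.  The ring head is handed back as wqe_idx, and the
     * same index keys the per-slot wr_id/quanta bookkeeping.
     */
    #include <stdint.h>

    #define WQE_SIZE   32          /* one quantum, hypothetical size */
    #define MIN_QUANTA 1

    struct sq_track { uint64_t wrid; uint16_t quanta; };

    struct sq_ring { uint32_t head, tail, size; };

    struct simple_qp {
            struct sq_ring ring;
            uint8_t (*sq_base)[WQE_SIZE];   /* WQE slots */
            struct sq_track *wrtrk;         /* one tracking entry per slot */
    };

    static void *get_send_wqe(struct simple_qp *qp, uint64_t wrid, uint32_t *wqe_idx)
    {
            *wqe_idx = qp->ring.head;                 /* ring head -> slot index */
            qp->wrtrk[*wqe_idx].wrid = wrid;          /* found again at completion */
            qp->wrtrk[*wqe_idx].quanta = MIN_QUANTA;  /* slots this WQE uses */
            qp->ring.head = (qp->ring.head + 1) % qp->ring.size;
            return qp->sq_base[*wqe_idx];             /* caller builds the WQE here */
    }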
puda.c
    15    struct irdma_puda_buf *buf, u32 wqe_idx);
    75    * @wqe_idx: wqe index to use
    79    static void irdma_puda_post_recvbuf(struct irdma_puda_rsrc *rsrc, u32 wqe_idx,   [argument]
    89    qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
    90    wqe = qp->qp_uk.rq_base[wqe_idx].elem;
    189   * @wqe_idx: wqe index for caller
    191   irdma_puda_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)   [argument]
    192   u32 *wqe_idx)
    196   *wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
    197   if (!*wqe_idx)
    203   return qp->sq_base[*wqe_idx]
    430   u32 wqe_idx;                                              [local]
    1120  irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp, struct irdma_puda_buf *buf, u32 wqe_idx)   [argument]
    1684  u32 wqe_idx = ieq->compl_rxwqe_idx;                       [local]
    [all...]
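puda.c posts receive buffers by index: the buffer pointer is stashed in rq_wrid_array[wqe_idx] and the RQ entry at the same index is filled in. A small illustrative sketch, assuming made-up types (struct simple_rq, struct rx_buf) rather than the puda structures:

    /* Minimal sketch, not the puda code: "posting" a receive buffer means
     * recording its pointer at wqe_idx and describing it in the RQ entry at
     * the same index, so the completion path can recover it by index.
     */
    #include <stdint.h>

    struct rx_buf   { uint64_t dma_addr; uint32_t len; };
    struct rq_entry { uint64_t addr; uint32_t len; uint32_t flags; };

    struct simple_rq {
            uintptr_t *rq_wrid;        /* one slot per RQ entry */
            struct rq_entry *rq_wqes;
    };

    static void post_recvbuf(struct simple_rq *rq, uint32_t wqe_idx, struct rx_buf *buf)
    {
            rq->rq_wrid[wqe_idx] = (uintptr_t)buf;   /* looked up at completion time */
            rq->rq_wqes[wqe_idx].addr = buf->dma_addr;
            rq->rq_wqes[wqe_idx].len  = buf->len;
    }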
user.h
    398   __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
    401   __le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx);
utils.c
    537   u32 i, pending_work, wqe_idx;                             [local]
    540   wqe_idx = IRDMA_RING_CURRENT_TAIL(cqp->sc_cqp.sq_ring);
    543   cqp->scratch_array[wqe_idx];
    546   wqe_idx = (wqe_idx + 1) % IRDMA_RING_SIZE(cqp->sc_cqp.sq_ring);
    2454  u32 wqe_idx;                                              [local]
    2471  wqe_idx = sq_ring->tail;
    2474  cmpl->cpi.wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
    2475  sw_wqe = qp->sq_base[wqe_idx].elem;
    2515  wqe_idx
    [all...]
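utils.c walks whatever is still outstanding: start at the ring tail, pick up the per-slot scratch/wrid that was saved at post time, and step forward modulo the ring size. A generic sketch of that walk, assuming head and tail are kept in [0, size) and the pending count is derived from them; the names and the pending calculation are illustrative, not the irdma CQP code:

    /* Sketch of "visit every posted-but-uncompleted slot from tail to head",
     * wrapping with a modulo as in the snippets above.
     */
    #include <stdint.h>

    struct ring { uint32_t head, tail, size; };

    static uint32_t ring_pending(const struct ring *r)
    {
            /* head and tail both live in [0, size), so bias by size before the modulo */
            return (r->head + r->size - r->tail) % r->size;
    }

    static void drain_pending(struct ring *r, uint64_t *scratch)
    {
            uint32_t idx = r->tail;

            for (uint32_t i = 0; i < ring_pending(r); i++) {
                    uint64_t cookie = scratch[idx];  /* per-slot context saved at post time */
                    (void)cookie;                    /* e.g. complete or cancel the request */
                    idx = (idx + 1) % r->size;
            }
    }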
puda.h
    38    u32 wqe_idx;                                              [member in struct irdma_puda_cmpl_info]
type.h
    949   u16 wqe_idx;                                              [member in struct irdma_aeqe_info]
    1481  u32 *wqe_idx);
    1490  u32 wqe_idx;                                              [local]
    1492  return irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
ctrl.c
    1297  u32 wqe_idx;                                              [local]
    1311  wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx,
    1316  irdma_clr_wqes(&qp->qp_uk, wqe_idx);
    1319  "MR: wr_id[%llxh] wqe_idx[%04d] location[%p]\n",
    1320  info->wr_id, wqe_idx,
    1321  &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);
    3258  * @wqe_idx: WQE index of CQP SQ
    3260  irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch, u32 *wqe_idx)   [argument]
    3261  u32 *wqe_idx)
    3273  IRDMA_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
    3278  if (!*wqe_idx)
    3351  u32 wqe_idx;                                              [local]
    4422  u32 wqe_idx;                                              [local]
    [all...]
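ctrl.c obtains the next CQP SQ slot by advancing the ring head (IRDMA_ATOMIC_RING_MOVE_HEAD) and handing the previous head back as wqe_idx, failing when the ring is full. A non-atomic sketch of the same idea; the full test and the error code are assumptions for illustration, not the irdma macro:

    /* Sketch: advance the producer head, return the old head as wqe_idx,
     * or refuse when the next head would collide with the consumer tail.
     */
    #include <errno.h>
    #include <stdint.h>

    struct ring { uint32_t head, tail, size; };

    static int ring_move_head(struct ring *r, uint32_t *wqe_idx)
    {
            uint32_t next = (r->head + 1) % r->size;

            if (next == r->tail)
                    return -ENOMEM;     /* ring full: leave head untouched */

            *wqe_idx = r->head;         /* caller builds its WQE at this slot */
            r->head = next;
            return 0;
    }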
hw.c
    399   ctx_info->roce_info->err_rq_idx = info->wqe_idx;
    409   ctx_info->iwarp_info->err_rq_idx = info->wqe_idx;
/linux-master/drivers/infiniband/hw/erdma/
erdma_cq.c
    117   u16 wqe_idx, depth;                                       [local]
    132   wqe_idx = be32_to_cpu(cqe->qe_idx);
    148   wqe_hdr = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
    152   wqe_idx + 1;
    157   wc->wr_id = id_table[wqe_idx & (depth - 1)];
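erdma_cq.c recovers the posted wr_id on completion by masking the CQE's queue-entry index with (depth - 1), which works because the queue depth is a power of two and the index reported in the CQE keeps counting up across wraps. A one-liner sketch with illustrative names:

    /* Sketch of the completion-side lookup: hardware reports a monotonically
     * increasing entry index; masking by (depth - 1) maps it back to the slot
     * where the wr_id was saved at post time.
     */
    #include <stdint.h>

    static uint64_t wr_id_from_cqe(const uint64_t *id_table, uint32_t cqe_qe_idx,
                                   uint32_t depth /* power of two */)
    {
            return id_table[cqe_qe_idx & (depth - 1)];
    }

The same mask-by-depth addressing is what get_queue_entry applies in the line-148 hit to find the WQE header itself.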
erdma_qp.c
    207   fill_inline_data(struct erdma_qp *qp, const struct ib_send_wr *send_wr, u16 wqe_idx, u32 sgl_offset, __le32 *length_field)   [argument]
    208   const struct ib_send_wr *send_wr, u16 wqe_idx,
    215   wqe_idx += (sgl_offset >> SQEBB_SHIFT);
    217   data = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx, qp->attrs.sq_size,
    238   wqe_idx += (sgl_offset >> SQEBB_SHIFT);
    241   data = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
    254   fill_sgl(struct erdma_qp *qp, const struct ib_send_wr *send_wr, u16 wqe_idx, u32 sgl_offset, __le32 *length_field)   [argument]
    255   u16 wqe_idx, u32 sgl_offset, __le32 *length_field)
    268   wqe_idx += (sgl_offset >> SQEBB_SHIFT);
    270   sgl = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
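erdma_qp.c lets a WQE's inline data or SGL spill across fixed-size SQE basic blocks: a byte offset inside the WQE is converted to whole blocks (sgl_offset >> SQEBB_SHIFT) and added to wqe_idx, and get_queue_entry wraps the result by the power-of-two queue size. A sketch under those assumptions, using a hypothetical 32-byte block size and helper names of my own:

    /* Sketch of addressing a multi-block WQE: convert the byte offset to
     * extra block slots, wrap the slot index by the queue depth, then add
     * the remaining offset inside the block.
     */
    #include <stdint.h>

    #define BLK_SHIFT 5                     /* 32-byte basic blocks, hypothetical */
    #define BLK_SIZE  (1u << BLK_SHIFT)

    static void *queue_entry(void *qbuf, uint32_t idx, uint32_t depth /* power of two */)
    {
            return (uint8_t *)qbuf + ((idx & (depth - 1)) << BLK_SHIFT);
    }

    static void *wqe_payload(void *sq_buf, uint32_t wqe_idx, uint32_t depth,
                             uint32_t byte_off_in_wqe)
    {
            wqe_idx += byte_off_in_wqe >> BLK_SHIFT;            /* whole blocks to skip */
            return (uint8_t *)queue_entry(sq_buf, wqe_idx, depth) +
                   (byte_off_in_wqe & (BLK_SIZE - 1));          /* offset inside the block */
    }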
/linux-master/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.c
    1594  u32 qpn = 0, wqe_idx = 0;                                 [local]
    1627  wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
    1630  BUG_ON(wqe_idx < 1);
    1633  ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
    2399  ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc, u32 wqe_idx)   [argument]
    2400  u32 wqe_idx)
    2406  hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
    2408  ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
    2561  u32 wqe_idx;                                              [local]
    2572  wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
    2574  if (tail != wqe_idx)
    2630  u32 wqe_idx;                                              [local]
    [all...]
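ocrdma_verbs.c decodes wqe_idx from packed CQE dwords with a shift and mask (and treats SRQ indices as 1-based, hence the BUG_ON and the wqe_idx - 1 bit toggle). A tiny sketch of the decode step; the shift and mask values below are made up for illustration, not the ocrdma field layout:

    /* Sketch: extract an index field from a packed CQE dword that has
     * already been converted from little-endian to CPU order.
     */
    #include <stdint.h>

    #define CQE_WQE_IDX_SHIFT 16
    #define CQE_WQE_IDX_MASK  0xffffu

    static uint32_t cqe_wqe_idx(uint32_t buftag_qpn /* cpu-endian dword */)
    {
            return (buftag_qpn >> CQE_WQE_IDX_SHIFT) & CQE_WQE_IDX_MASK;
    }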
/linux-master/drivers/infiniband/hw/hns/ |
hns_roce_hw_v2.c
    698   unsigned int wqe_idx;                                     [local]
    721   wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
    731   wqe = hns_roce_get_send_wqe(qp, wqe_idx);
    732   qp->sq.wrid[wqe_idx] = wr->wr_id;
    804   fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr, u32 wqe_idx, u32 max_sge)   [argument]
    805   u32 wqe_idx, u32 max_sge)
    809   wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
    820   u32 wqe_idx, nreq, max_sge;                               [local]
    850   wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
    851   fill_rq_wqe(hr_qp, wr, wqe_idx, max_sge);
    852   hr_qp->rq.wrid[wqe_idx]
    916   get_srq_wqe_idx(struct hns_roce_srq *srq, u32 *wqe_idx)   [argument]
    930   fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx)   [argument]
    964   u32 wqe_idx;                                              [local]
    3892  u16 wqe_idx;                                              [local]
    [all...]
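hns_roce_hw_v2.c shows the classic power-of-two ring: the n-th request in a single post call lands at (head + nreq) & (wqe_cnt - 1), and the wr_id is recorded at the same slot for the completion path. A sketch of that posting loop with simplified types, not the hns driver structures:

    /* Sketch: build several WQEs relative to the current head, then publish
     * the new head once at the end (a doorbell write would follow).
     */
    #include <stdint.h>

    struct wq {
            uint32_t head;        /* producer index, grows monotonically */
            uint32_t wqe_cnt;     /* power of two */
            uint64_t *wrid;       /* wr_id per slot, read back on completion */
    };

    static void post_many(struct wq *q, const uint64_t *wr_ids, uint32_t n)
    {
            for (uint32_t nreq = 0; nreq < n; nreq++) {
                    uint32_t wqe_idx = (q->head + nreq) & (q->wqe_cnt - 1);
                    q->wrid[wqe_idx] = wr_ids[nreq];   /* plus: build the WQE at wqe_idx */
            }
            q->head += n;   /* publish all new WQEs at once */
    }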
/linux-master/drivers/infiniband/hw/bnxt_re/
qplib_fp.c
    1797  u32 wqe_idx;                                              [local]
    1820  swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
    2016  bnxt_qplib_swq_mod_start(sq, wqe_idx);
    2054  u32 wqe_idx;                                              [local]
    2073  swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
    2102  base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
    2104  bnxt_qplib_swq_mod_start(rq, wqe_idx);
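qplib_fp.c pairs a software queue entry with its wqe_idx and, for the RQ, writes the index into the hardware request itself (wr_id[0]) so the completion can point back at the software bookkeeping before bnxt_qplib_swq_mod_start marks the slot in flight. A rough sketch of that scheme; the structures and helpers below are illustrative assumptions, not the bnxt_re API:

    /* Sketch: stash the software slot index in an opaque field of the
     * hardware request; the completion echoes it back, which locates the
     * software entry and its wr_id.
     */
    #include <stdint.h>

    struct swq_ent { uint64_t wr_id; uint8_t in_flight; };
    struct hw_req  { uint32_t opaque[2]; /* echoed back in the completion */ };

    struct swq {
            struct swq_ent *ents;
            uint32_t prod, depth;     /* depth is a power of two */
    };

    static struct swq_ent *get_swqe(struct swq *q, uint32_t *wqe_idx)
    {
            *wqe_idx = q->prod & (q->depth - 1);
            return &q->ents[*wqe_idx];
    }

    static void post_recv(struct swq *q, struct hw_req *req, uint64_t wr_id)
    {
            uint32_t wqe_idx;
            struct swq_ent *swq = get_swqe(q, &wqe_idx);

            swq->wr_id = wr_id;
            req->opaque[0] = wqe_idx;   /* completion will hand this index back */
            swq->in_flight = 1;         /* slot now owned by hardware */
            q->prod++;
    }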