/linux-master/drivers/infiniband/sw/rxe/
rxe_comp.c:
  141   struct rxe_send_wqe *wqe;  [local]
  146   wqe = queue_head(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT);
  147   *wqe_p = wqe;
  150   if (!wqe || wqe->state == wqe_state_posted)
  154   if (wqe->state == wqe_state_done)
  158   if (wqe->state == wqe_state_error)
  172   check_psn(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe)  [argument]
  181   diff = psn_compare(pkt->psn, wqe->last_psn);
  183   if (wqe ...
  215   check_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe)  [argument]
  354   do_read(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe)  [argument]
  374   do_atomic(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe)  [argument]
  393   make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_cqe *cqe)  [argument]
  440   do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)  [argument]
  499   complete_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe)  [argument]
  523   complete_wqe(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe)  [argument]
  557   flush_send_wqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe)  [argument]
  587   struct rxe_send_wqe *wqe;  [local]
  641   struct rxe_send_wqe *wqe = NULL;  [local]
  [all...]

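The first excerpt (lines 141-158) is the rxe completer peeking at the head of the send queue and dispatching on WQE state. A minimal sketch of that lookup follows; the COMPST_* return codes are reconstructed from memory of the completer's state machine rather than copied from the file, so treat them as assumptions.

static int get_wqe(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
		   struct rxe_send_wqe **wqe_p)
{
	struct rxe_send_wqe *wqe;

	/* non-destructively peek at the head of the send queue */
	wqe = queue_head(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT);
	*wqe_p = wqe;

	/* no WQE, or the requester has not started on it yet */
	if (!wqe || wqe->state == wqe_state_posted)
		return pkt ? COMPST_DONE : COMPST_EXIT;

	/* WQE is complete and needs no ack */
	if (wqe->state == wqe_state_done)
		return COMPST_COMP_WQE;

	/* WQE has failed */
	if (wqe->state == wqe_state_error)
		return COMPST_ERROR;

	/* a WQE is in flight; check the response PSN against it */
	return pkt ? COMPST_CHECK_PSN : COMPST_EXIT;
}
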
rxe_req.c:
  14    static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe, ...
  17    retry_first_write_send(struct rxe_qp *qp, struct rxe_send_wqe *wqe, int npsn)  [argument]
  23    int to_send = (wqe->dma.resid > qp->mtu) ? qp->mtu : wqe->dma.resid;
  26    qp->req.opcode = next_opcode(qp, wqe, wqe->wr.opcode);
  29    if (wqe->wr.send_flags & IB_SEND_INLINE) {
  30    wqe->dma.resid -= to_send;
  31    wqe->dma.sge_offset += to_send;
  33    advance_dma_data(&wqe ...
  40    struct rxe_send_wqe *wqe;  [local]
  121   struct rxe_send_wqe *wqe;  [local]
  177   struct rxe_send_wqe *wqe;  [local]
  206   rxe_wqe_is_fenced(struct rxe_qp *qp, struct rxe_send_wqe *wqe)  [argument]
  354   next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe, u32 opcode)  [argument]
  384   check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)  [argument]
  414   init_req_packet(struct rxe_qp *qp, struct rxe_av *av, struct rxe_send_wqe *wqe, int opcode, u32 payload, struct rxe_pkt_info *pkt)  [argument]
  498   finish_packet(struct rxe_qp *qp, struct rxe_av *av, struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 payload)  [argument]
  541   update_wqe_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt)  [argument]
  555   update_wqe_psn(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt, u32 payload)  [argument]
  593   rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)  [argument]
  644   struct rxe_send_wqe *wqe;  [local]
  [all...]

rxe_mw.c:
  50    static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, ...  [argument]
  83    if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) {
  117   if (unlikely(wqe->wr.wr.mw.length > mr->ibmr.length)) {
  123   if (unlikely((wqe->wr.wr.mw.addr < mr->ibmr.iova) ||
  124   ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) > ...
  135   static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, ...  [argument]
  138   u32 key = wqe->wr.wr.mw.rkey & 0xff;
  143   mw->addr = wqe->wr.wr.mw.addr;
  144   mw->length = wqe ...
  164   rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)  [argument]
  [all...]

/linux-master/drivers/infiniband/hw/irdma/ |
uda.c:
  23    __le64 *wqe;  [local]
  26    wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  27    if (!wqe)
  30    set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr) << 16);
  41    set_64bit_val(wqe, 40, ...
  44    set_64bit_val(wqe, 32, ...
  48    set_64bit_val(wqe, 56, ...
  51    set_64bit_val(wqe, 48, ...
  55    set_64bit_val(wqe, 32, ...
  58    set_64bit_val(wqe, 4 ...
  119   __le64 *wqe;  [local]
  [all...]

uk.c:
  9     * irdma_set_fragment - set fragment in wqe
  10    * @wqe: wqe for setting fragment
  13    * @valid: The wqe valid
  15    static void irdma_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge, ...  [argument]
  19    set_64bit_val(wqe, offset, ...
  21    set_64bit_val(wqe, offset + 8, ...
  26    set_64bit_val(wqe, offset, 0);
  27    set_64bit_val(wqe, offset + 8, ...
  33    * irdma_set_fragment_gen_1 - set fragment in wqe
  39    irdma_set_fragment_gen_1(__le64 *wqe, u32 offset, struct ib_sge *sge, u8 valid)  [argument]
  61    __le64 *wqe;  [local]
  158   __le64 *wqe;  [local]
  208   __le64 *wqe;  [local]
  236   __le64 *wqe;  [local]
  335   __le64 *wqe;  [local]
  410   __le64 *wqe;  [local]
  501   irdma_set_mw_bind_wqe_gen_1(__le64 *wqe, struct irdma_bind_window *op_info)  [argument]
  518   irdma_copy_inline_data_gen_1(u8 *wqe, struct ib_sge *sge_list, u32 num_sges, u8 polarity)  [argument]
  563   irdma_set_mw_bind_wqe(__le64 *wqe, struct irdma_bind_window *op_info)  [argument]
  580   irdma_copy_inline_data(u8 *wqe, struct ib_sge *sge_list, u32 num_sges, u8 polarity)  [argument]
  657   __le64 *wqe;  [local]
  725   __le64 *wqe;  [local]
  798   __le64 *wqe;  [local]
  846   __le64 *wqe;  [local]
  1533  __le64 *wqe;  [local]
  [all...]

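Nearly every irdma match above funnels through set_64bit_val(), which stores one little-endian 64-bit word of a WQE at a byte offset. A sketch of the helper as I recall it from the driver's osdep header; verify against the tree before relying on it:

#include <linux/types.h>
#include <asm/byteorder.h>

static inline void set_64bit_val(__le64 *wqe_words, u32 byte_index, u64 val)
{
	/* callers pass byte offsets (0, 8, 16, 24, ...); convert to a
	 * 64-bit word index and store little-endian */
	wqe_words[byte_index >> 3] = cpu_to_le64(val);
}
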
ctrl.c:
  176   * irdma_sc_add_arp_cache_entry - cqp wqe add arp cache entry
  186   __le64 *wqe;  [local]
  189   wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
  190   if (!wqe)
  192   set_64bit_val(wqe, 8, info->reach_max);
  193   set_64bit_val(wqe, 16, ether_addr_to_u64(info->mac_addr));
  202   set_64bit_val(wqe, 24, hdr);
  205   16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
  222   __le64 *wqe;  [local]
  225   wqe ...
  Further matches, all local declarations of "__le64 *wqe;", at lines 256, 304, 452, 502, 572, 764, 799, 837, 1059, 1123, 1215, 1256, 1296, 1366, 1403, 1445, 2051, 2098, 2134, 2177, 2220, 2294, 2333, 2373, 2412, 2442, 2519, 2592, 2660, 3263 (initialized to NULL), 3460, 3518, 3579, 3669, 3767, 3932, 3971, 4261, 4418, 4541 and 4637.
  [all...]

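The ctrl.c matches repeat one construction pattern: reserve the next CQP send WQE, fill the operand words, then write the header word (offset 24) last, behind a DMA barrier, so the hardware's valid bit flips only after the rest of the WQE is visible. A skeleton of that pattern; the opcode constant IRDMA_CQP_OP_EXAMPLE is hypothetical and the header field macros are recalled from memory, not taken from the excerpt:

static int irdma_sc_cqp_op_sketch(struct irdma_sc_cqp *cqp, u64 scratch,
				  u64 operand)
{
	__le64 *wqe;
	u64 hdr;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOMEM;

	/* operation-specific operands first */
	set_64bit_val(wqe, 8, operand);

	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_EXAMPLE) |
	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);

	dma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	return 0;
}
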
/linux-master/drivers/infiniband/sw/rdmavt/ |
trace_tx.h:
  45    "[%s] wqe %p wr_id %llx send_flags %x qpn %x qpt %u psn %x lpsn %x ssn %x length %u opcode 0x%.2x,%s size %u avail %u head %u last %u pid %u num_sge %u wr_num_sge %u"
  49    TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, int wr_num_sge),
  50    TP_ARGS(qp, wqe, wr_num_sge),
  54    __field(struct rvt_swqe *, wqe)
  73    __entry->wqe = wqe;
  74    __entry->wr_id = wqe->wr.wr_id;
  77    __entry->psn = wqe->psn;
  78    __entry->lpsn = wqe->lpsn;
  79    __entry->length = wqe ...
  [all...]

rc.c:
  155   * rvt_restart_sge - rewind the sge state for a wqe
  157   * @wqe: the wqe to rewind
  158   * @len: the data length from the start of the wqe in bytes
  162   u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len)  [argument]
  164   ss->sge = wqe->sg_list[0];
  165   ss->sg_list = wqe->sg_list + 1;
  166   ss->num_sge = wqe->wr.num_sge;
  167   ss->total_len = wqe->length;
  169   return wqe ...
  [all...]

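rvt_restart_sge() only rewinds; computing how far to rewind is the caller's job. The hfi1 and qib wrappers that appear later in this listing derive the byte offset from the PSN delta: a retransmit restarting three packets into a request with a 4096-byte path MTU skips 3 * 4096 = 12288 bytes. hfi1's wrapper (see rc.h below; qib's differs only in masking the PSN arithmetic):

static inline u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
			      u32 psn, u32 pmtu)
{
	u32 len;

	/* bytes already covered by packets before the restart point */
	len = delta_psn(psn, wqe->psn) * pmtu;
	return rvt_restart_sge(ss, wqe, len);
}
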
/linux-master/drivers/infiniband/sw/siw/ |
siw_qp_tx.c:
  51    struct siw_wqe *wqe = &c_tx->wqe_active;  [local]
  52    struct siw_sge *sge = &wqe->sqe.sge[0];
  55    if (bytes > MAX_HDR_INLINE || wqe->sqe.num_sge != 1)
  61    if (tx_flags(wqe) & SIW_WQE_INLINE) {
  62    memcpy(paddr, &wqe->sqe.sge[1], bytes);
  64    struct siw_mem *mem = wqe->mem[0];
  119   struct siw_wqe *wqe = &c_tx->wqe_active;  [local]
  123   switch (tx_type(wqe)) {
  135   c_tx->pkt.rreq.sink_stag = htonl(wqe->sqe.sge[0].lkey);
  137   cpu_to_be64(wqe ...
  432   struct siw_wqe *wqe = &c_tx->wqe_active;  [local]
  700   siw_prepare_fpdu(struct siw_qp *qp, struct siw_wqe *wqe)  [argument]
  758   siw_check_sgl_tx(struct ib_pd *pd, struct siw_wqe *wqe, enum ib_access_flags perms)  [argument]
  788   siw_qp_sq_proc_tx(struct siw_qp *qp, struct siw_wqe *wqe)  [argument]
  973   siw_qp_sq_proc_local(struct siw_qp *qp, struct siw_wqe *wqe)  [argument]
  1020  struct siw_wqe *wqe = tx_wqe(qp);  [local]
  [all...]

siw_qp.c:
  265   struct siw_wqe *wqe = tx_wqe(qp);  [local]
  271   if (unlikely(wqe->wr_status != SIW_WR_IDLE)) {
  275   memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
  277   wqe->wr_status = SIW_WR_QUEUED;
  278   wqe->sqe.flags = 0;
  279   wqe->sqe.num_sge = 1;
  280   wqe->sqe.sge[0].length = 0;
  281   wqe->sqe.sge[0].laddr = 0;
  282   wqe ...
  465   struct siw_wqe *wqe = tx_wqe(qp);  [local]
  887   struct siw_wqe *wqe = tx_wqe(qp);  [local]
  983   struct siw_wqe *wqe = tx_wqe(qp);  [local]
  1194  struct siw_wqe *wqe = tx_wqe(qp);  [local]
  1269  struct siw_wqe *wqe = &qp->rx_untagged.wqe_active;  [local]
  [all...]

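The siw_qp.c excerpt (lines 265-282) shows how siw claims the QP's single in-progress tx WQE: the slot may only be taken when idle, and it is scrubbed and marked queued before the SQE fields are filled in. A condensed sketch of that sequence, with the -EBUSY error path an assumption rather than the driver's exact return:

static int siw_activate_tx_sketch(struct siw_qp *qp)
{
	struct siw_wqe *wqe = tx_wqe(qp);	/* the QP's active tx slot */

	if (unlikely(wqe->wr_status != SIW_WR_IDLE))
		return -EBUSY;		/* previous WQE still in flight */

	/* scrub stale memory references, then mark the slot taken */
	memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
	wqe->wr_status = SIW_WR_QUEUED;

	wqe->sqe.flags = 0;
	wqe->sqe.num_sge = 1;
	wqe->sqe.sge[0].length = 0;
	wqe->sqe.sge[0].laddr = 0;

	return 0;
}
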
siw_qp_rx.c:
  169   struct siw_wqe *wqe = &frx->wqe_active;  [local]
  176   srx->ddp_stag = wqe->sqe.sge[0].lkey;
  177   srx->ddp_to = wqe->sqe.sge[0].laddr;
  204   (wqe->processed + srx->fpdu_part_rem != wqe->bytes))) {
  207   wqe->processed + srx->fpdu_part_rem, wqe->bytes);
  281   struct siw_wqe *wqe = &frx->wqe_active;  [local]
  301   if (unlikely(ddp_mo != wqe->processed)) {
  303   qp_id(rx_qp(srx)), ddp_mo, wqe ...
  334   struct siw_wqe *wqe = NULL;  [local]
  439   struct siw_wqe *wqe;  [local]
  565   struct siw_wqe *wqe = rx_wqe(frx);  [local]
  742   struct siw_wqe *wqe = NULL;  [local]
  786   struct siw_wqe *wqe = rx_wqe(frx);  [local]
  1202  struct siw_wqe *wqe = rx_wqe(qp->rx_fpdu);  [local]
  [all...]

/linux-master/drivers/infiniband/hw/hfi1/ |
rc.h:
  35    static inline u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, ...  [argument]
  40    len = delta_psn(psn, wqe->psn) * pmtu;
  41    return rvt_restart_sge(ss, wqe, len);
  56    struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, struct rvt_swqe *wqe, ...

tid_rdma.h:
  214   void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
  219   * @wqe: the send wqe
  221   static inline void trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)  [argument]
  223   if (!wqe->priv)
  225   __trdma_clean_swqe(qp, wqe);
  244   u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe, ...
  247   u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe, ...
  258   void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe, ...
  261   bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe);
  264   hfi1_setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe)  [argument]
  [all...]

rc.c:
  394   struct rvt_swqe *wqe;  [local]
  449   wqe = rvt_get_swqe_ptr(qp, qp->s_last);
  450   hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ? ...
  469   wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
  495   if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
  497   (wqe->wr.opcode != IB_WR_TID_RDMA_READ ||
  506   if (wqe->wr.opcode == IB_WR_REG_MR ||
  507   wqe->wr.opcode == IB_WR_LOCAL_INV) {
  517   if (!(wqe->wr.send_flags & ...
  521   wqe ...
  1417  update_num_rd_atomic(struct rvt_qp *qp, u32 psn, struct rvt_swqe *wqe)  [argument]
  1461  struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);  [local]
  1567  struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);  [local]
  1637  struct rvt_swqe *wqe;  [local]
  1697  struct rvt_swqe *wqe;  [local]
  1824  do_rc_completion(struct rvt_qp *qp, struct rvt_swqe *wqe, struct hfi1_ibport *ibp)  [argument]
  1970  struct rvt_swqe *wqe;  [local]
  2266  struct rvt_swqe *wqe;  [local]
  2310  struct rvt_swqe *wqe;  [local]
  [all...]

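rvt_get_swqe_ptr(), used by every rdmavt consumer in this listing (hfi1 and qib alike), cannot be a plain array index because each send WQE is followed by a variable-length SGE array sized by the QP's s_max_sge. From memory of include/rdma/rdmavt_qp.h it looks roughly like this; confirm against the header:

static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
						unsigned int n)
{
	/* stride = fixed WQE header plus the per-QP SGE array */
	return (struct rvt_swqe *)((char *)qp->s_wq +
				   (sizeof(struct rvt_swqe) +
				    qp->s_max_sge *
				    sizeof(struct rvt_sge)) * n);
}
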
uc.c:
  26    struct rvt_swqe *wqe;  [local]
  49    wqe = rvt_get_swqe_ptr(qp, qp->s_last);
  50    rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
  72    wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
  88    if (wqe->wr.opcode == IB_WR_REG_MR ||
  89    wqe->wr.opcode == IB_WR_LOCAL_INV) {
  97    if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
  99    qp, wqe->wr.ex.invalidate_rkey);
  102   rvt_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR ...
  111   qp->s_psn = wqe ...
  [all...]

trace_rc.h:
  80    struct rvt_swqe *wqe),
  81    TP_ARGS(qp, aeth, psn, wqe),
  96    __entry->opcode = wqe->wr.opcode;
  97    __entry->spsn = wqe->psn;
  98    __entry->lpsn = wqe->lpsn;
  115   struct rvt_swqe *wqe),
  116   TP_ARGS(qp, aeth, psn, wqe)

ud.c:
  224   static void hfi1_make_bth_deth(struct rvt_qp *qp, struct rvt_swqe *wqe, ...  [argument]
  232   if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
  233   ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
  239   if (wqe->wr.send_flags & IB_SEND_SOLICITED)
  243   *pkey = hfi1_get_pkey(ibp, rvt_get_swqe_pkey_index(wqe));
  249   ohdr->bth[1] = cpu_to_be32(rvt_get_swqe_remote_qpn(wqe));
  250   ohdr->bth[2] = cpu_to_be32(mask_psn(wqe->psn));
  256   cpu_to_be32((int)rvt_get_swqe_remote_qkey(wqe) < 0 ? qp->qkey : rvt_get_swqe_remote_qkey(wqe));
  261   hfi1_make_ud_req_9B(struct rvt_qp *qp, struct hfi1_pkt_state *ps, struct rvt_swqe *wqe)  [argument]
  332   hfi1_make_ud_req_16B(struct rvt_qp *qp, struct hfi1_pkt_state *ps, struct rvt_swqe *wqe)  [argument]
  441   struct rvt_swqe *wqe;  [local]
  [all...]

/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ |
ktls_txrx.c:
  74    mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe, ...  [argument]
  80    struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
  81    struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
  86    #define STATIC_PARAMS_DS_CNT DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS)
  97    fill_static_params(&wqe->params, crypto_info, key_id, resync_tcp_sn);
  117   mlx5e_ktls_build_progress_params(struct mlx5e_set_tls_progress_params_wqe *wqe, ...  [argument]
  123   struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
  128   #define PROGRESS_PARAMS_DS_CNT DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS)
  136   fill_progress_params(&wqe->params, tis_tir_num, next_record_tcp_sn);

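The two DS_CNT macros above size the kTLS parameter WQEs in mlx5's native unit: a send WQE's length is expressed in 16-byte data segments (DS), rounded up, and the count is encoded into the control segment. A small worked example; the 16-byte constant is stated here as an assumption about MLX5_SEND_WQE_DS:

#include <linux/kernel.h>	/* DIV_ROUND_UP */

#define SEND_WQE_DS	16u	/* assumed 16-byte DS unit, as in mlx5 */

static inline u8 wqe_ds_count(size_t wqe_size)
{
	/* e.g. a 48-byte params WQE -> 3 DS; 50 bytes rounds up to 4 */
	return DIV_ROUND_UP(wqe_size, SEND_WQE_DS);
}
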
/linux-master/drivers/infiniband/hw/mlx5/ |
umr.c:
  227   mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe, struct mlx5r_umr_wqe *wqe, bool with_data)  [argument]
  259   mlx5r_memcpy_send_wqe(&qp->sq, &cur_edge, &seg, &size, wqe, wqe_size);
  288   mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey, struct mlx5r_umr_wqe *wqe, bool with_data)  [argument]
  295   err = umr_check_mkey_mask(dev, be64_to_cpu(wqe->ctrl_seg.mkey_mask));
  316   err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context.cqe, wqe, ...
  361   struct mlx5r_umr_wqe wqe = {};  [local]
  366   wqe.ctrl_seg.mkey_mask |= get_umr_update_pd_mask();
  367   wqe.ctrl_seg.mkey_mask |= get_umr_disable_mr_mask();
  368   wqe.ctrl_seg.flags |= MLX5_UMR_INLINE;
  370   MLX5_SET(mkc, &wqe ...
  401   struct mlx5r_umr_wqe wqe = {};  [local]
  573   mlx5r_umr_final_update_xlt(struct mlx5_ib_dev *dev, struct mlx5r_umr_wqe *wqe, struct mlx5_ib_mr *mr, struct ib_sge *sg, unsigned int flags)  [argument]
  615   struct mlx5r_umr_wqe wqe = {};  [local]
  698   struct mlx5r_umr_wqe wqe = {};  [local]
  [all...]

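The wqe = {} locals above all follow one pattern: build a UMR WQE on the stack, OR the needed mask bits into the control segment, and hand it to the synchronous post-and-wait helper. A sketch reconstructed from the visible fragment at lines 361-370; the mkey-free MLX5_SET and the mr->mmkey.key argument are filled in from memory and should be checked against the file:

static int umr_revoke_mr_sketch(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5r_umr_wqe wqe = {};

	wqe.ctrl_seg.mkey_mask |= get_umr_update_pd_mask();
	wqe.ctrl_seg.mkey_mask |= get_umr_disable_mr_mask();
	wqe.ctrl_seg.flags |= MLX5_UMR_INLINE;

	/* mark the mkey free so the HCA rejects further access */
	MLX5_SET(mkc, &wqe.mkey_seg, free, 1);

	return mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, false);
}
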
/linux-master/drivers/infiniband/hw/qib/ |
qib_uc.c:
  53    struct rvt_swqe *wqe;  [local]
  71    wqe = rvt_get_swqe_ptr(qp, qp->s_last);
  72    rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
  85    wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
  98    qp->s_psn = wqe->psn;
  99    qp->s_sge.sge = wqe->sg_list[0];
  100   qp->s_sge.sg_list = wqe->sg_list + 1;
  101   qp->s_sge.num_sge = wqe->wr.num_sge;
  102   qp->s_sge.total_len = wqe->length;
  103   len = wqe ...
  [all...]

qib_rc.c:
  42    static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, ...  [argument]
  47    len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
  48    return rvt_restart_sge(ss, wqe, len);
  222   struct rvt_swqe *wqe;  [local]
  252   wqe = rvt_get_swqe_ptr(qp, qp->s_last);
  253   rvt_send_complete(qp, wqe, qp->s_last != qp->s_acked ? ...
  276   wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
  297   if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
  303   qp->s_psn = wqe->psn;
  310   len = wqe ...
  739   struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);  [local]
  823   struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);  [local]
  859   struct rvt_swqe *wqe;  [local]
  885   struct rvt_swqe *wqe;  [local]
  951   do_rc_completion(struct rvt_qp *qp, struct rvt_swqe *wqe, struct qib_ibport *ibp)  [argument]
  1012  struct rvt_swqe *wqe;  [local]
  1227  struct rvt_swqe *wqe;  [local]
  1277  struct rvt_swqe *wqe;  [local]
  [all...]

qib_ud.c:
  238   struct rvt_swqe *wqe;  [local]
  258   wqe = rvt_get_swqe_ptr(qp, qp->s_last);
  259   rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
  267   wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
  275   ah_attr = rvt_get_swqe_ah_attr(wqe);
  300   qib_ud_loopback(qp, wqe);
  303   rvt_send_complete(qp, wqe, IB_WC_SUCCESS);
  309   extra_bytes = -wqe->length & 3;
  310   nwords = (wqe->length + extra_bytes) >> 2;
  314   qp->s_cur_size = wqe ...
  [all...]

/linux-master/drivers/infiniband/hw/cxgb4/ |
qp.c:
  489   static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe, ...  [argument]
  501   wqe->send.sendop_pkd = cpu_to_be32( ...
  504   wqe->send.sendop_pkd = cpu_to_be32( ...
  506   wqe->send.stag_inv = 0;
  510   wqe->send.sendop_pkd = cpu_to_be32( ...
  513   wqe->send.sendop_pkd = cpu_to_be32( ...
  515   wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
  521   wqe->send.r3 = 0;
  522   wqe->send.r4 = 0;
  527   ret = build_immd(sq, wqe ...
  556   build_rdma_write(struct t4_sq *sq, union t4_wr *wqe, const struct ib_send_wr *wr, u8 *len16)  [argument]
  660   build_rdma_read(union t4_wr *wqe, const struct ib_send_wr *wr, u8 *len16)  [argument]
  697   union t4_wr *wqe;  [local]
  759   build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe, const struct ib_recv_wr *wr, u8 *len16)  [argument]
  774   build_srq_recv(union t4_recv_wr *wqe, const struct ib_recv_wr *wr, u8 *len16)  [argument]
  820   build_memreg(struct t4_sq *sq, union t4_wr *wqe, const struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16, bool dsgl_supported)  [argument]
  884   build_inv_stag(union t4_wr *wqe, const struct ib_send_wr *wr, u8 *len16)  [argument]
  1086  union t4_wr *wqe = NULL;  [local]
  1266  union t4_recv_wr *wqe = NULL;  [local]
  1341  defer_srq_wr(struct t4_srq *srq, union t4_recv_wr *wqe, u64 wr_id, u8 len16)  [argument]
  1360  union t4_recv_wr *wqe, lwqe;  [local]
  1563  struct fw_ri_wr *wqe;  [local]
  1695  struct fw_ri_wr *wqe;  [local]
  1751  struct fw_ri_wr *wqe;  [local]
  2645  c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16)  [argument]
  [all...]

/linux-master/drivers/infiniband/hw/mthca/ |
mthca_srq.c:
  92    static inline int *wqe_to_link(void *wqe)  [argument]
  94    return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
  158   void *wqe;  [local]
  185   next = wqe = get_wqe(srq, i);
  188   *wqe_to_link(wqe) = i + 1;
  191   *wqe_to_link(wqe) = -1;
  195   for (scatter = wqe + sizeof (struct mthca_next_seg);
  196   (void *) scatter < wqe + (1 << srq->wqe_shift);
  495   void *wqe;  [local]
  504   wqe ...
  588   void *wqe;  [local]
  [all...]

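wqe_to_link() above is the interesting trick: while an SRQ WQE sits on the free list, the imm field of its leading next-segment is reused to hold the index of the next free WQE, with -1 as the terminator, so the free list costs no extra memory. A self-contained model of the same idea, using hypothetical stand-in types rather than mthca's:

#include <stddef.h>

struct next_seg {
	unsigned int nda_op;
	unsigned int ee_nds;
	int imm;	/* reused as free-list link while the WQE is unused */
};

static inline int *wqe_to_link(void *wqe)
{
	return (int *)((char *)wqe + offsetof(struct next_seg, imm));
}

static void srq_init_free_list(void *buf, int nwqe, size_t wqe_size)
{
	int i;

	for (i = 0; i < nwqe; ++i)	/* each entry points at the next */
		*wqe_to_link((char *)buf + i * wqe_size) =
			(i < nwqe - 1) ? i + 1 : -1;	/* -1 ends the list */
}
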
/linux-master/drivers/scsi/lpfc/ |
lpfc_nvmet.c:
  80    union lpfc_wqe128 *wqe;  [local]
  83    wqe = &lpfc_tsend_cmd_template;
  84    memset(wqe, 0, sizeof(union lpfc_wqe128));
  97    bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
  98    bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
  99    bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
  100   bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
  101   bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
  108   bf_set(wqe_xchg, &wqe->fcp_tsend.wqe_com, LPFC_NVME_XCHG);
  109   bf_set(wqe_dbde, &wqe ...
  1488  union lpfc_wqe128 *wqe;  [local]
  2584  union lpfc_wqe128 *wqe;  [local]
  2710  union lpfc_wqe128 *wqe;  [local]
  [all...]

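The lpfc_nvmet.c matches are template initialization: a union lpfc_wqe128 is zeroed once at attach time and its command fields are packed with bf_set(), lpfc's token-pasting bitfield macro. As best I recall it expands along these lines (each field name carries generated _WORD, _SHIFT and _MASK tokens); treat this as an approximation of the real macro in lpfc_hw4.h, not a verbatim copy:

/* read-modify-write the named bitfield within its WQE word */
#define bf_set(name, ptr, value)					\
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
			       ((ptr)->name##_WORD &			\
				~(name##_MASK << name##_SHIFT))))
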