Searched refs:wqe (Results 1 - 25 of 42) sorted by relevance

/freebsd-current/contrib/ofed/libcxgb4/
qp.c 45 static void copy_wr_to_sq(struct t4_wq *wq, union t4_wr *wqe, u8 len16) argument
51 src = &wqe->flits[0];
81 static void copy_wr_to_rq(struct t4_wq *wq, union t4_recv_wr *wqe, u8 len16) argument
87 src = &wqe->flits[0];
158 static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe, argument
168 wqe->send.sendop_pkd = htobe32(
171 wqe->send.sendop_pkd = htobe32(
173 wqe->send.stag_inv = 0;
174 wqe->send.r3 = 0;
175 wqe
207 build_rdma_write(struct t4_sq *sq, union t4_wr *wqe, struct ibv_send_wr *wr, u8 *len16) argument
248 build_rdma_read(union t4_wr *wqe, struct ibv_send_wr *wr, u8 *len16) argument
275 build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe, struct ibv_recv_wr *wr, u8 *len16) argument
317 union t4_wr *wqe, lwqe; local
422 union t4_recv_wr *wqe, lwqe; local
[all...]
t4.h 142 static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid, argument
145 wqe->send.opcode = (u8)opcode;
146 wqe->send.flags = flags;
147 wqe->send.wrid = wrid;
148 wqe->send.r1[0] = 0;
149 wqe->send.r1[1] = 0;
150 wqe->send.r1[2] = 0;
151 wqe->send.len16 = len16;
458 static void copy_wqe_to_udb(volatile u32 *udb_offset, void *wqe) argument
463 src = (u64 *)wqe;
476 t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t4, u8 len16, union t4_wr *wqe) argument
525 t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t4, u8 len16, union t4_recv_wr *wqe) argument
[all...]
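
The t4.h hits above show the fixed header every libcxgb4 send WR begins with: opcode, flags, work-request id, zeroed reserved bytes, and the WR length in 16-byte units. A minimal compilable sketch of that pattern, modeling only the fields visible in the snippet (the widths and the union t4_wr layout around them are assumptions):

#include <stdint.h>

/* Models the send header from the t4.h hits; only the fields the
 * snippet shows are reproduced, widths are assumptions. */
struct t4_send_hdr {
        uint8_t  opcode;
        uint8_t  flags;
        uint16_t wrid;     /* caller's work-request id, echoed in the CQE */
        uint8_t  r1[3];    /* reserved: must be zeroed */
        uint8_t  len16;    /* total WR length in 16-byte units */
};

static inline void
init_wr_hdr(struct t4_send_hdr *hdr, uint16_t wrid, uint8_t opcode,
    uint8_t flags, uint8_t len16)
{
        hdr->opcode = opcode;
        hdr->flags = flags;
        hdr->wrid = wrid;
        hdr->r1[0] = 0;
        hdr->r1[1] = 0;
        hdr->r1[2] = 0;
        hdr->len16 = len16;
}
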
/freebsd-current/sys/dev/irdma/
irdma_uda.c 54 __le64 *wqe; local
57 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
58 if (!wqe)
61 set_64bit_val(wqe, IRDMA_BYTE_0,
74 set_64bit_val(wqe, IRDMA_BYTE_40,
77 set_64bit_val(wqe, IRDMA_BYTE_32,
81 set_64bit_val(wqe, IRDMA_BYTE_56,
84 set_64bit_val(wqe, IRDMA_BYTE_48,
88 set_64bit_val(wqe, IRDMA_BYTE_32,
91 set_64bit_val(wqe, IRDMA_BYTE_4
154 __le64 *wqe; local
[all...]
irdma_uk.c 41 * irdma_set_fragment - set fragment in wqe
42 * @wqe: wqe for setting fragment
45 * @valid: The wqe valid
48 irdma_set_fragment(__le64 * wqe, u32 offset, struct ib_sge *sge, argument
52 set_64bit_val(wqe, offset,
54 set_64bit_val(wqe, offset + IRDMA_BYTE_8,
59 set_64bit_val(wqe, offset, 0);
60 set_64bit_val(wqe, offset + IRDMA_BYTE_8,
66 * irdma_set_fragment_gen_1 - set fragment in wqe
73 irdma_set_fragment_gen_1(__le64 * wqe, u32 offset, struct ib_sge *sge, u8 valid) argument
105 __le64 *wqe; local
136 __le64 *wqe; local
204 irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 * wqe, u16 quanta, u32 wqe_idx, bool post_sq) argument
234 __le64 *wqe; local
295 __le64 *wqe; local
324 __le64 *wqe; local
427 __le64 *wqe; local
511 __le64 *wqe; local
606 irdma_copy_inline_data_gen_1(u8 *wqe, struct ib_sge *sge_list, u32 num_sges, u8 polarity) argument
653 irdma_copy_inline_data(u8 *wqe, struct ib_sge *sge_list, u32 num_sges, u8 polarity) argument
730 __le64 *wqe; local
801 __le64 *wqe; local
876 __le64 *wqe; local
927 __le64 *wqe; local
1104 __le64 *cqe, *wqe; local
[all...]
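
The irdma hits build WQEs as sequences of little-endian quadwords written through set_64bit_val(). Below, a self-contained sketch of that helper plus the two-quadword SGE fragment from the irdma_set_fragment hits, including the NULL-sge branch; the bit positions packed into the second quadword are placeholders, not the real IRDMAQPSQ_* field layout:

#include <stdint.h>
#include <string.h>
#include <sys/endian.h>         /* htole64(); <endian.h> on Linux */

struct sge {
        uint64_t addr;
        uint32_t length;
        uint32_t lkey;
};

/* Store one little-endian quadword at a byte offset into the WQE. */
static inline void
set_64bit_val(uint8_t *wqe, uint32_t offset, uint64_t val)
{
        uint64_t le = htole64(val);

        memcpy(wqe + offset, &le, sizeof(le));
}

/* Two-quadword fragment as in the irdma_set_fragment hits: the buffer
 * address first, then length/lkey plus the valid bit.  The shifts
 * below are placeholders, not the hardware bit layout. */
static void
set_fragment(uint8_t *wqe, uint32_t offset, const struct sge *sge,
    uint8_t valid)
{
        if (sge != NULL) {
                set_64bit_val(wqe, offset, sge->addr);
                set_64bit_val(wqe, offset + 8,
                    ((uint64_t)valid << 63) |
                    ((uint64_t)sge->lkey << 32) |
                    sge->length);
        } else {
                /* NULL sge: zero the address, still stamp validity */
                set_64bit_val(wqe, offset, 0);
                set_64bit_val(wqe, offset + 8, (uint64_t)valid << 63);
        }
}
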
irdma_ctrl.c 230 * irdma_sc_add_arp_cache_entry - cqp wqe add arp cache entry
241 __le64 *wqe; local
244 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
245 if (!wqe)
247 set_64bit_val(wqe, IRDMA_BYTE_8, info->reach_max);
249 set_64bit_val(wqe, IRDMA_BYTE_16, irdma_mac_to_u64(info->mac_addr));
258 set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
260 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "ARP_CACHE_ENTRY WQE", wqe,
279 __le64 *wqe; local
282 wqe
313 __le64 *wqe; local
361 __le64 *wqe; local
513 __le64 *wqe; local
565 __le64 *wqe; local
637 __le64 *wqe; local
833 __le64 *wqe; local
868 __le64 *wqe; local
907 __le64 *wqe; local
1132 __le64 *wqe; local
1197 __le64 *wqe; local
1287 __le64 *wqe; local
1329 __le64 *wqe; local
1370 __le64 *wqe; local
1443 __le64 *wqe; local
1490 __le64 *wqe; local
1533 __le64 *wqe; local
2106 __le64 *wqe; local
2150 __le64 *wqe; local
2187 __le64 *wqe; local
2231 __le64 *wqe; local
2275 __le64 *wqe; local
2349 __le64 *wqe; local
2389 __le64 *wqe; local
2430 __le64 *wqe; local
2470 __le64 *wqe; local
2501 __le64 *wqe; local
2584 __le64 *wqe; local
2660 __le64 *wqe; local
2730 __le64 *wqe; local
3352 __le64 *wqe = NULL; local
3564 __le64 *wqe; local
3616 __le64 *wqe; local
3679 __le64 *wqe; local
3770 __le64 *wqe; local
3870 __le64 *wqe; local
4039 __le64 *wqe; local
4078 __le64 *wqe; local
4387 __le64 *wqe; local
4543 __le64 *wqe; local
4667 __le64 *wqe; local
4755 __le64 *wqe; local
[all...]
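
Nearly every irdma_ctrl.c hit repeats one control-path shape: claim the next CQP send WQE (returning early when the ring is full), fill the body quadwords, then write the header quadword last so its valid bit is published only after the body. A toy ring illustrating that ordering; the depth, offsets, and header encoding are invented for the example, and a C11 release fence stands in for the driver's write barrier:

#include <stdint.h>
#include <string.h>
#include <stdatomic.h>
#include <sys/endian.h>

#define CQP_DEPTH  64           /* ring entries; power of two (toy value) */
#define CQP_WQE_SZ 32           /* bytes per CQP WQE (toy value) */

struct toy_cqp {
        uint8_t  ring[CQP_DEPTH][CQP_WQE_SZ];
        uint32_t head, tail;    /* free-running producer/consumer */
};

static inline void
set_64bit_val(uint8_t *wqe, uint32_t off, uint64_t val)
{
        uint64_t le = htole64(val);

        memcpy(wqe + off, &le, sizeof(le));
}

/* Claim the next free send WQE, or NULL when the ring is full --
 * the "if (!wqe) return" check every hit above performs. */
static uint8_t *
cqp_get_next_send_wqe(struct toy_cqp *cqp)
{
        if (cqp->head - cqp->tail == CQP_DEPTH)
                return (NULL);
        return (cqp->ring[cqp->head++ & (CQP_DEPTH - 1)]);
}

/* Fill body quadwords first, then publish the header quadword last so
 * its valid bit flips only after the body is globally visible. */
static int
post_cqp_op(struct toy_cqp *cqp, uint64_t payload, uint64_t hdr)
{
        uint8_t *wqe = cqp_get_next_send_wqe(cqp);

        if (wqe == NULL)
                return (-1);
        set_64bit_val(wqe, 8, payload);            /* body */
        atomic_thread_fence(memory_order_release); /* order body vs. header */
        set_64bit_val(wqe, 24, hdr);               /* header + valid bit */
        return (0);
}
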
irdma_puda.c 111 * irdma_puda_post_recvbuf - set wqe for rcv buffer
113 * @wqe_idx: wqe index to use
121 __le64 *wqe; local
128 wqe = qp->qp_uk.rq_base[wqe_idx].elem;
130 get_64bit_val(wqe, IRDMA_BYTE_24, &offset24);
134 set_64bit_val(wqe, IRDMA_BYTE_16, 0);
135 set_64bit_val(wqe, 0, buf->mem.pa);
137 set_64bit_val(wqe, IRDMA_BYTE_8,
140 set_64bit_val(wqe, IRDMA_BYTE_8,
146 set_64bit_val(wqe, IRDMA_BYTE_2
466 __le64 *wqe; local
648 __le64 *wqe; local
761 __le64 *wqe; local
1221 __le64 *wqe; local
[all...]
/freebsd-current/contrib/ofed/libirdma/
irdma_uk.c 41 * irdma_set_fragment - set fragment in wqe
42 * @wqe: wqe for setting fragment
45 * @valid: The wqe valid
48 irdma_set_fragment(__le64 * wqe, u32 offset, struct ibv_sge *sge, argument
52 set_64bit_val(wqe, offset,
54 set_64bit_val(wqe, offset + IRDMA_BYTE_8,
59 set_64bit_val(wqe, offset, 0);
60 set_64bit_val(wqe, offset + IRDMA_BYTE_8,
66 * irdma_set_fragment_gen_1 - set fragment in wqe
73 irdma_set_fragment_gen_1(__le64 * wqe, u32 offset, struct ibv_sge *sge, u8 valid) argument
105 __le64 *wqe; local
136 __le64 *wqe; local
204 irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 * wqe, u16 quanta, u32 wqe_idx, bool post_sq) argument
234 __le64 *wqe; local
295 __le64 *wqe; local
324 __le64 *wqe; local
427 __le64 *wqe; local
511 __le64 *wqe; local
604 irdma_set_mw_bind_wqe_gen_1(__le64 * wqe, struct irdma_bind_window *op_info) argument
622 irdma_copy_inline_data_gen_1(u8 *wqe, struct ibv_sge *sge_list, u32 num_sges, u8 polarity) argument
667 irdma_set_mw_bind_wqe(__le64 * wqe, struct irdma_bind_window *op_info) argument
685 irdma_copy_inline_data(u8 *wqe, struct ibv_sge *sge_list, u32 num_sges, u8 polarity) argument
762 __le64 *wqe; local
833 __le64 *wqe; local
908 __le64 *wqe; local
958 __le64 *wqe; local
1011 __le64 *wqe; local
[all...]
irdma_user.h 324 void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct ibv_sge *sge,
326 void (*iw_set_mw_bind_wqe)(__le64 *wqe,
390 bool push_mode:1; /* whether the last post wqe was pushed */
467 void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
/freebsd-current/sys/dev/cxgbe/iw_cxgbe/
qp.c 405 static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe, argument
417 wqe->send.sendop_pkd = cpu_to_be32(
420 wqe->send.sendop_pkd = cpu_to_be32(
422 wqe->send.stag_inv = 0;
426 wqe->send.sendop_pkd = cpu_to_be32(
429 wqe->send.sendop_pkd = cpu_to_be32(
431 wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
437 wqe->send.r3 = 0;
438 wqe->send.r4 = 0;
443 ret = build_immd(sq, wqe
472 build_rdma_write(struct t4_sq *sq, union t4_wr *wqe, const struct ib_send_wr *wr, u8 *len16) argument
515 build_rdma_read(union t4_wr *wqe, const struct ib_send_wr *wr, u8 *len16) argument
545 build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe, const struct ib_recv_wr *wr, u8 *len16) argument
560 build_inv_stag(union t4_wr *wqe, const struct ib_send_wr *wr, u8 *len16) argument
703 build_memreg(struct t4_sq *sq, union t4_wr *wqe, const struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16, bool dsgl_supported) argument
779 union t4_wr *wqe = NULL; local
916 union t4_recv_wr *wqe = NULL; local
1115 struct fw_ri_wr *wqe; local
1245 struct fw_ri_wr *wqe; local
1338 struct fw_ri_wr *wqe; local
[all...]
t4.h 131 static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid, argument
134 wqe->send.opcode = (u8)opcode;
135 wqe->send.flags = flags;
136 wqe->send.wrid = wrid;
137 wqe->send.r1[0] = 0;
138 wqe->send.r1[1] = 0;
139 wqe->send.r1[2] = 0;
140 wqe->send.len16 = len16;
482 t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe, u8 wc) argument
487 if (wc && inc == 1 && wq->sq.bar2_qid == 0 && wqe) {
507 t4_ring_rq_db(struct t4_wq *wq, u16 inc, union t4_recv_wr *wqe, u8 wc) argument
[all...]
/freebsd-current/sys/dev/mlx5/mlx5_en/
mlx5_en_tx.c 57 struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi); local
59 memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));
61 wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
62 wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
64 wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
66 wqe->ctrl.fm_ce_se = 0;
69 memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));
352 mlx5e_get_vxlan_header_size(const struct mbuf *mb, struct mlx5e_tx_wqe *wqe, argument
396 wqe
550 struct mlx5_wqe_dump_seg *wqe; local
689 struct mlx5e_tx_wqe *wqe; local
[all...]
mlx5_en_hw_tls_rx.c 141 struct mlx5e_tx_umr_wqe *wqe; local
150 wqe = mlx5_wq_cyc_get_wqe(&iq->wq, pi);
152 memset(wqe, 0, sizeof(*wqe));
154 wqe->ctrl.opmod_idx_opcode = cpu_to_be32((iq->pc << 8) |
156 wqe->ctrl.qpn_ds = cpu_to_be32((iq->sqn << 8) | ds_cnt);
157 wqe->ctrl.imm = cpu_to_be32(ptag->tirn << 8);
158 wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE | MLX5_FENCE_MODE_INITIATOR_SMALL;
161 wqe->umr.flags = 0x80; /* inline data */
162 wqe
214 struct mlx5e_tx_psv_wqe *wqe; local
335 struct mlx5e_get_tls_progress_params_wqe *wqe; local
[all...]
mlx5_en_hw_tls.c 501 struct mlx5e_tx_umr_wqe *wqe; local
505 wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
507 memset(wqe, 0, sizeof(*wqe));
509 wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) |
511 wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
512 wqe->ctrl.imm = cpu_to_be32(ptag->tisn << 8);
515 wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE | MLX5_FENCE_MODE_INITIATOR_SMALL;
517 wqe->ctrl.fm_ce_se = MLX5_FENCE_MODE_INITIATOR_SMALL;
520 wqe
546 struct mlx5e_tx_psv_wqe *wqe; local
580 struct mlx5e_tx_wqe *wqe; local
[all...]
mlx5_en_rx.c 34 struct mlx5e_rx_wqe *wqe, u16 ix
81 wqe->data[0].addr = cpu_to_be64(segs[0].ds_addr);
82 wqe->data[0].byte_count = cpu_to_be32(segs[0].ds_len |
85 wqe->data[i].addr = cpu_to_be64(segs[i].ds_addr);
86 wqe->data[i].byte_count = cpu_to_be32(segs[i].ds_len);
89 wqe->data[i].addr = 0;
90 wqe->data[i].byte_count = 0;
112 struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, rq->wq.head); local
114 if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, rq->wq.head))) {
118 mlx5_wq_ll_push(&rq->wq, be16_to_cpu(wqe
33 mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) argument
505 struct mlx5e_rx_wqe *wqe; local
[all...]
mlx5_en_iq.c 107 struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&iq->wq, pi); local
111 memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));
113 wqe->ctrl.opmod_idx_opcode = cpu_to_be32((iq->pc << 8) | MLX5_OPCODE_NOP);
114 wqe->ctrl.qpn_ds = cpu_to_be32((iq->sqn << 8) | ds_cnt);
115 wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
118 memcpy(iq->doorbell.d32, &wqe->ctrl, sizeof(iq->doorbell.d32));
447 /* Ensure wqe is visible to device before updating doorbell record */
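
All the mlx5_en hits start from the same 16-byte control segment: zero it, encode the producer counter and opcode into opmod_idx_opcode and the SQ number and descriptor count into qpn_ds, choose completion flags, and keep the leading dwords for the later doorbell write (ordered by the barrier the mlx5_en_iq.c comment above refers to). A sketch of the NOP case; field names follow the snippets but the segment layout is an assumption, not copied from the mlx5 headers:

#include <stdint.h>
#include <string.h>
#include <sys/endian.h>

#define MLX5_OPCODE_NOP         0x00
#define MLX5_WQE_CTRL_CQ_UPDATE 0x08

/* 16-byte control segment; layout assumed for illustration. */
struct wqe_ctrl_seg {
        uint32_t opmod_idx_opcode;   /* BE: (producer counter << 8) | opcode */
        uint32_t qpn_ds;             /* BE: (sq number << 8) | ds count */
        uint8_t  signature;
        uint8_t  rsvd[2];
        uint8_t  fm_ce_se;           /* fence/completion/solicited flags */
        uint32_t imm;
};

/* Build a NOP WQE as in the mlx5e_post_nop hits: zero the ctrl
 * segment, encode pc and opcode, and keep the first two dwords for
 * the later doorbell write. */
static void
post_nop(struct wqe_ctrl_seg *ctrl, uint16_t pc, uint32_t sqn,
    uint8_t ds_cnt, uint32_t doorbell[2])
{
        memset(ctrl, 0, sizeof(*ctrl));
        ctrl->opmod_idx_opcode = htobe32(((uint32_t)pc << 8) | MLX5_OPCODE_NOP);
        ctrl->qpn_ds = htobe32((sqn << 8) | ds_cnt);
        ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;    /* request a CQE */
        memcpy(doorbell, ctrl, sizeof(uint32_t) * 2);
}
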
/freebsd-current/sys/dev/mthca/
mthca_srq.c 92 static inline int *wqe_to_link(void *wqe) argument
94 return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
152 void *wqe; local
179 next = wqe = get_wqe(srq, i);
182 *wqe_to_link(wqe) = i + 1;
185 *wqe_to_link(wqe) = -1;
189 for (scatter = wqe + sizeof (struct mthca_next_seg);
190 (void *) scatter < wqe + (1 << srq->wqe_shift);
489 void *wqe; local
498 wqe
588 void *wqe; local
[all...]
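
The mthca_srq.c hits show a free list threaded through the WQEs themselves: while an entry is not posted, its immediate-data slot is dead space, so wqe_to_link() stores the next free index there, with -1 terminating the list. A compilable miniature of that scheme (sizes and the next_seg layout are reduced stand-ins):

#include <stdint.h>
#include <stddef.h>

/* Minimal stand-in for the segment header the mthca hits index into. */
struct next_seg {
        uint32_t nda_op;
        uint32_t ee_nds;
        uint32_t flags;
        int32_t  imm;   /* unused on a free SRQ WQE; reused as a link */
};

#define SRQ_MAX   16
#define WQE_SHIFT 6            /* 64-byte WQEs in this toy */

static uint8_t srq_buf[SRQ_MAX << WQE_SHIFT];

static inline void *
get_wqe(int i)
{
        return (srq_buf + ((size_t)i << WQE_SHIFT));
}

/* As in wqe_to_link(): the free-list link lives inside the WQE, in
 * the immediate-data slot that is dead while the entry is unposted. */
static inline int32_t *
wqe_to_link(void *wqe)
{
        return ((int32_t *)((uint8_t *)wqe + offsetof(struct next_seg, imm)));
}

/* Thread all WQEs into a free list: each links to its successor and
 * the last is terminated with -1, mirroring the init loop above. */
static void
srq_init_free_list(void)
{
        int i;

        for (i = 0; i < SRQ_MAX; ++i)
                *wqe_to_link(get_wqe(i)) = (i < SRQ_MAX - 1) ? i + 1 : -1;
}
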
mthca_qp.c 1613 void *wqe; local
1649 wqe = get_send_wqe(qp, ind);
1651 qp->sq.last = wqe;
1653 ((struct mthca_next_seg *) wqe)->nda_op = 0;
1654 ((struct mthca_next_seg *) wqe)->ee_nds = 0;
1655 ((struct mthca_next_seg *) wqe)->flags =
1663 ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;
1665 wqe += sizeof (struct mthca_next_seg);
1673 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
1675 wqe
1830 void *wqe; local
1928 void *wqe; local
2172 void *wqe; local
[all...]
mthca_cq.c 126 __be32 wqe; member in struct:mthca_cqe
140 __be32 wqe; member in struct:mthca_err_cqe
312 mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));
388 be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe),
477 cqe->wqe = new_wqe;
511 be32_to_cpu(cqe->wqe));
540 wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset)
546 u32 wqe = be32_to_cpu(cqe->wqe); local
548 wqe_index = wqe >> sr
552 s32 wqe; local
[all...]
/freebsd-current/contrib/ofed/libmlx4/
qp.c 44 #include "wqe.h"
76 uint32_t *wqe = get_send_wqe(qp, n); local
78 int ds = (((struct mlx4_wqe_ctrl_seg *)wqe)->fence_size & 0x3f) << 2;
81 wqe[i] = 0xffffffff;
218 void *wqe; local
252 ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
268 wqe += sizeof *ctrl;
280 set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
282 wqe += sizeof (struct mlx4_wqe_raddr_seg);
284 set_atomic_seg(wqe, w
[all...]
/freebsd-current/sys/dev/bnxt/bnxt_re/
ib_verbs.c 538 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe; local
544 memset(wqe, 0, sizeof(*wqe));
545 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
546 wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
547 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
548 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
549 wqe->bind.zero_based = false;
550 wqe->bind.parent_l_key = ib_mr->lkey;
551 wqe
570 struct bnxt_qplib_swqe wqe; local
1379 struct bnxt_qplib_swqe wqe = {}; local
2907 bnxt_re_build_raw_send(const struct ib_send_wr *wr, struct bnxt_qplib_swqe *wqe) argument
2924 bnxt_re_build_qp1_send(struct bnxt_re_qp *qp, const struct ib_send_wr *wr, struct bnxt_qplib_swqe *wqe, int payload_size) argument
3016 bnxt_re_build_gsi_send(struct bnxt_re_qp *qp, const struct ib_send_wr *wr, struct bnxt_qplib_swqe *wqe) argument
3048 bnxt_re_build_qp1_recv(struct bnxt_re_qp *qp, const struct ib_recv_wr *wr, struct bnxt_qplib_swqe *wqe) argument
3154 bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp, const struct ib_recv_wr *wr, struct bnxt_qplib_swqe *wqe) argument
3203 bnxt_re_build_send_wqe(struct bnxt_re_qp *qp, const struct ib_send_wr *wr, struct bnxt_qplib_swqe *wqe) argument
3244 bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr, struct bnxt_qplib_swqe *wqe) argument
3276 bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr, struct bnxt_qplib_swqe *wqe) argument
3303 bnxt_re_build_inv_wqe(const struct ib_send_wr *wr, struct bnxt_qplib_swqe *wqe) argument
3318 bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr, struct bnxt_qplib_swqe *wqe) argument
3377 bnxt_re_set_sg_list(const struct ib_send_wr *wr, struct bnxt_qplib_swqe *wqe) argument
3403 struct bnxt_qplib_swqe wqe; local
3441 bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe *wqe) argument
3458 struct bnxt_qplib_swqe wqe; local
3558 struct bnxt_qplib_swqe wqe; local
3590 bnxt_re_build_gsi_recv(struct bnxt_re_qp *qp, const struct ib_recv_wr *wr, struct bnxt_qplib_swqe *wqe) argument
3610 struct bnxt_qplib_swqe wqe; local
[all...]
qplib_fp.c 766 struct bnxt_qplib_swqe *wqe)
790 i < wqe->num_sge; i++, hw_sge++) {
791 hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
792 hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
793 hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
795 srqe->wqe_type = wqe->type;
796 srqe->flags = wqe->flags;
797 srqe->wqe_size = wqe->num_sge +
799 if (!wqe->num_sge)
802 srq->swq[next].wr_id = wqe
765 bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq, struct bnxt_qplib_swqe *wqe) argument
1726 bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp, struct bnxt_qplib_swqe *wqe, struct bnxt_qplib_swq *swq) argument
1754 bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp, struct bnxt_qplib_swqe *wqe, struct bnxt_qplib_swq *swq) argument
1791 _calc_ilsize(struct bnxt_qplib_swqe *wqe) argument
1801 bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp, struct bnxt_qplib_swqe *wqe, u32 *sw_prod) argument
1866 _calculate_wqe_byte(struct bnxt_qplib_qp *qp, struct bnxt_qplib_swqe *wqe, u16 *wqe_byte) argument
1928 bnxt_qplib_post_send(struct bnxt_qplib_qp *qp, struct bnxt_qplib_swqe *wqe) argument
2295 bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp, struct bnxt_qplib_swqe *wqe) argument
[all...]
qplib_fp.h 568 struct bnxt_qplib_swqe *wqe);
583 struct bnxt_qplib_swqe *wqe);
586 struct bnxt_qplib_swqe *wqe);
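
The bnxt_re hits translate generic work requests into hardware swqes by copying the scatter list into device SGEs and folding the SGE count into the WQE size, as in the bnxt_qplib_post_srq_recv hit above. A reduced sketch of that copy loop; the struct layouts are illustrative, not the bnxt wire format:

#include <stdint.h>
#include <sys/endian.h>

#define MAX_SGE 6

struct sw_sge { uint64_t addr; uint32_t lkey; uint32_t size; };

struct hw_sge {                 /* little-endian on the wire */
        uint64_t va_or_pa;
        uint32_t l_key;
        uint32_t size;
};

/* Copy the caller's scatter list into the hardware SGE array, as the
 * bnxt_qplib_post_srq_recv hit does; returns the SGE count that the
 * driver folds into srqe->wqe_size. */
static int
fill_hw_sges(struct hw_sge *dst, const struct sw_sge *src, int num_sge)
{
        int i;

        if (num_sge > MAX_SGE)
                return (-1);
        for (i = 0; i < num_sge; i++) {
                dst[i].va_or_pa = htole64(src[i].addr);
                dst[i].l_key = htole32(src[i].lkey);
                dst[i].size = htole32(src[i].size);
        }
        return (num_sge);
}
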
/freebsd-current/sys/ofed/include/rdma/
rdmavt_qp.h 491 * @wqe - the send wqe
494 * a wqe relative reserved operation use.
498 struct rvt_swqe *wqe)
500 wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
507 * @wqe - the send wqe
521 struct rvt_swqe *wqe)
523 if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
524 wqe
496 rvt_qp_wqe_reserve( struct rvt_qp *qp, struct rvt_swqe *wqe) argument
519 rvt_qp_wqe_unreserve( struct rvt_qp *qp, struct rvt_swqe *wqe) argument
[all...]
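
The rdmavt hits account for reserved queue slots with a flag carried in the WQE itself: reserving sets RVT_SEND_RESERVE_USED and bumps a counter, and completion returns the slot only when the flag says this WQE took one. A sketch using C11 atomics in place of the kernel's atomic_t; the flag's bit value is assumed:

#include <stdint.h>
#include <stdatomic.h>

#define RVT_SEND_RESERVE_USED (1u << 31)   /* flag bit; value assumed */

struct toy_swqe { uint32_t send_flags; };
struct toy_qp   { atomic_uint s_reserved_used; };

/* Mark the send WQE as consuming a reserved slot, after
 * rvt_qp_wqe_reserve() in the hits above. */
static inline void
qp_wqe_reserve(struct toy_qp *qp, struct toy_swqe *wqe)
{
        wqe->send_flags |= RVT_SEND_RESERVE_USED;
        atomic_fetch_add(&qp->s_reserved_used, 1);
}

/* On completion, return the slot only if this WQE actually took one;
 * the flag in the WQE is the record of that. */
static inline void
qp_wqe_unreserve(struct toy_qp *qp, struct toy_swqe *wqe)
{
        if (wqe->send_flags & RVT_SEND_RESERVE_USED) {
                wqe->send_flags &= ~RVT_SEND_RESERVE_USED;
                atomic_fetch_sub(&qp->s_reserved_used, 1);
        }
}
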
/freebsd-current/sys/dev/mlx4/mlx4_ib/
mlx4_ib_qp.c 218 __be32 *wqe; local
233 wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1));
234 *wqe = stamp;
240 wqe = buf + i;
241 *wqe = cpu_to_be32(0xffffffff);
250 void *wqe; local
253 ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
257 struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl;
266 inl = wqe + s;
2299 void *wqe, unsigne
2297 build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len) argument
2427 build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len) argument
2806 build_tunnel_header(const struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len) argument
2889 build_lso_seg(struct mlx4_wqe_lso_seg *wqe, const struct ib_ud_wr *wr, struct mlx4_ib_qp *qp, unsigned *lso_seg_len, __be32 *lso_hdr_sz, __be32 *blh) argument
2924 add_zero_len_inline(void *wqe) argument
2935 void *wqe; local
3132 build_tunnel_header(ud_wr(wr), wqe, &seglen); local
3147 build_tunnel_header(ud_wr(wr), wqe, &seglen); local
[all...]
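
Both mlx4 hits (libmlx4's 0xffffffff writes and mlx4_ib's stamp_send_wqe) overwrite released send WQEs with a sentinel so the hardware's ownership scan cannot mistake stale contents for a valid descriptor; the libmlx4 hit also shows the size decode, (fence_size & 0x3f) << 2, from 16-byte units to dwords. A minimal sketch of that stamping:

#include <stdint.h>

/* Stamp a released send WQE with an all-ones sentinel, after the
 * stamp_send_wqe()/clear-WQE loops in the hits above. */
static void
stamp_send_wqe(uint32_t *wqe, uint8_t fence_size)
{
        /* low 6 bits of fence_size = WQE size in 16-byte units;
         * << 2 converts that to 32-bit words, as in the libmlx4 hit */
        int ds = (fence_size & 0x3f) << 2;
        int i;

        for (i = 0; i < ds; i++)
                wqe[i] = 0xffffffff;   /* never a valid descriptor */
}
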
/freebsd-current/contrib/ofed/libmlx5/
qp.c 43 #include "wqe.h"
270 void *wqe, int *sz,
282 seg = wqe;
283 wqe += sizeof *seg;
293 if (unlikely(wqe + len > qend)) {
294 copy = qend - wqe;
295 memcpy(wqe, addr, copy);
298 wqe = mlx5_get_send_wqe(qp, 0);
300 memcpy(wqe, addr, len);
301 wqe
269 set_data_inl_seg(struct mlx5_qp *qp, struct ibv_send_wr *wr, void *wqe, int *sz, struct mlx5_sg_copy_ptr *sg_copy_ptr) argument
[all...]
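
The libmlx5 set_data_inl_seg() hit shows how inline payload is copied into the circular send queue: when the write would run past qend, copy what fits, wrap to the queue start, and continue. The same shape as a self-contained function over a flat buffer:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define SQ_BUF_SZ 4096          /* toy send-queue buffer size */

static uint8_t sq_buf[SQ_BUF_SZ];

/* Copy inline payload into the ring, wrapping to the start of the
 * queue when the write would run past the end -- the qend check,
 * partial memcpy, and wrap from the set_data_inl_seg hit. */
static uint8_t *
copy_inline(uint8_t *wqe, const void *addr, size_t len)
{
        uint8_t *qend = sq_buf + SQ_BUF_SZ;

        if (wqe + len > qend) {
                size_t copy = qend - wqe;

                memcpy(wqe, addr, copy);
                addr = (const uint8_t *)addr + copy;
                len -= copy;
                wqe = sq_buf;            /* wrap to the first WQE */
        }
        memcpy(wqe, addr, len);
        return (wqe + len);
}
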

Completed in 342 milliseconds
