Searched refs:wqe (Results 1 - 25 of 28) sorted by relevance


/freebsd-12-stable/contrib/ofed/libcxgb4/
qp.c
45 static void copy_wr_to_sq(struct t4_wq *wq, union t4_wr *wqe, u8 len16) argument
51 src = &wqe->flits[0];
81 static void copy_wr_to_rq(struct t4_wq *wq, union t4_recv_wr *wqe, u8 len16) argument
87 src = &wqe->flits[0];
158 static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe, argument
168 wqe->send.sendop_pkd = htobe32(
171 wqe->send.sendop_pkd = htobe32(
173 wqe->send.stag_inv = 0;
174 wqe->send.r3 = 0;
175 wqe
207 build_rdma_write(struct t4_sq *sq, union t4_wr *wqe, struct ibv_send_wr *wr, u8 *len16) argument
248 build_rdma_read(union t4_wr *wqe, struct ibv_send_wr *wr, u8 *len16) argument
275 build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe, struct ibv_recv_wr *wr, u8 *len16) argument
317 union t4_wr *wqe, lwqe; local
422 union t4_recv_wr *wqe, lwqe; local
[all...]
t4.h
142 static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid, argument
145 wqe->send.opcode = (u8)opcode;
146 wqe->send.flags = flags;
147 wqe->send.wrid = wrid;
148 wqe->send.r1[0] = 0;
149 wqe->send.r1[1] = 0;
150 wqe->send.r1[2] = 0;
151 wqe->send.len16 = len16;
458 static void copy_wqe_to_udb(volatile u32 *udb_offset, void *wqe) argument
463 src = (u64 *)wqe;
476 t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t4, u8 len16, union t4_wr *wqe) argument
525 t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t4, u8 len16, union t4_recv_wr *wqe) argument
[all...]
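
The two t4.h hits above show init_wr_hdr() in full: every T4 send WR begins with an opcode/flags/wrid header, three zeroed reserved bytes, and its length in 16-byte units. A minimal compilable sketch of that pattern, with a stand-in struct in place of the real union t4_wr layout:

    #include <stdint.h>

    /* Stand-in for the header fields of union t4_wr's send member;
     * the real layout is defined in t4.h. */
    struct t4_wr_hdr_sketch {
        uint8_t  opcode;
        uint8_t  flags;
        uint16_t wrid;
        uint8_t  r1[3];   /* reserved, always cleared */
        uint8_t  len16;   /* total WR length in 16-byte units */
    };

    static inline void init_wr_hdr_sketch(struct t4_wr_hdr_sketch *h,
                                          uint16_t wrid, uint8_t opcode,
                                          uint8_t flags, uint8_t len16)
    {
        h->opcode = opcode;
        h->flags  = flags;
        h->wrid   = wrid;
        h->r1[0] = h->r1[1] = h->r1[2] = 0;
        h->len16  = len16;
    }
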
/freebsd-12-stable/sys/dev/cxgbe/iw_cxgbe/
qp.c
407 static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe, argument
419 wqe->send.sendop_pkd = cpu_to_be32(
422 wqe->send.sendop_pkd = cpu_to_be32(
424 wqe->send.stag_inv = 0;
428 wqe->send.sendop_pkd = cpu_to_be32(
431 wqe->send.sendop_pkd = cpu_to_be32(
433 wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
439 wqe->send.r3 = 0;
440 wqe->send.r4 = 0;
445 ret = build_immd(sq, wqe
474 build_rdma_write(struct t4_sq *sq, union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) argument
517 build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) argument
547 build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe, struct ib_recv_wr *wr, u8 *len16) argument
562 build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) argument
707 build_memreg(struct t4_sq *sq, union t4_wr *wqe, struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16, bool dsgl_supported) argument
783 union t4_wr *wqe = NULL; local
920 union t4_recv_wr *wqe = NULL; local
1119 struct fw_ri_wr *wqe; local
1249 struct fw_ri_wr *wqe; local
1342 struct fw_ri_wr *wqe; local
[all...]
t4.h
136 static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid, argument
139 wqe->send.opcode = (u8)opcode;
140 wqe->send.flags = flags;
141 wqe->send.wrid = wrid;
142 wqe->send.r1[0] = 0;
143 wqe->send.r1[1] = 0;
144 wqe->send.r1[2] = 0;
145 wqe->send.len16 = len16;
487 t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe, u8 wc) argument
492 if (wc && inc == 1 && wq->sq.bar2_qid == 0 && wqe) {
512 t4_ring_rq_db(struct t4_wq *wq, u16 inc, union t4_recv_wr *wqe, u8 wc) argument
[all...]
/freebsd-12-stable/sys/dev/mlx5/mlx5_en/
mlx5_en_tx.c
47 struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi); local
49 memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));
51 wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
52 wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
54 wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
56 wqe->ctrl.fm_ce_se = 0;
59 memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));
311 struct mlx5e_tx_wqe *wqe; local
342 wqe
[all...]
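
The mlx5_en_tx.c hit shows how a NOP WQE is built: the control segment packs the producer counter and opcode into one big-endian word and the SQ number plus data-segment count into another, with fm_ce_se selecting whether a completion is generated. A hedged sketch of that packing; the struct layout and constants below are stand-ins, not the driver's definitions:

    #include <stdint.h>
    #include <string.h>
    #include <sys/endian.h>   /* htobe32(); use <endian.h> on Linux */

    #define OPCODE_NOP      0x00        /* stand-in for MLX5_OPCODE_NOP */
    #define CTRL_CQ_UPDATE  (2 << 2)    /* stand-in for MLX5_WQE_CTRL_CQ_UPDATE */

    struct ctrl_seg_sketch {
        uint32_t opmod_idx_opcode;  /* be32: (pc << 8) | opcode */
        uint32_t qpn_ds;            /* be32: (sqn << 8) | ds_cnt */
        uint8_t  rsvd[3];
        uint8_t  fm_ce_se;          /* fence/completion/solicited flags */
        uint32_t imm;
    };

    static void build_nop_ctrl(struct ctrl_seg_sketch *cs, uint16_t pc,
                               uint32_t sqn, uint8_t ds_cnt, int want_cqe)
    {
        memset(cs, 0, sizeof(*cs));
        cs->opmod_idx_opcode = htobe32(((uint32_t)pc << 8) | OPCODE_NOP);
        cs->qpn_ds           = htobe32((sqn << 8) | ds_cnt);
        cs->fm_ce_se         = want_cqe ? CTRL_CQ_UPDATE : 0;
    }
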
mlx5_en_rx.c
33 struct mlx5e_rx_wqe *wqe, u16 ix
80 wqe->data[0].addr = cpu_to_be64(segs[0].ds_addr);
81 wqe->data[0].byte_count = cpu_to_be32(segs[0].ds_len |
84 wqe->data[i].addr = cpu_to_be64(segs[i].ds_addr);
85 wqe->data[i].byte_count = cpu_to_be32(segs[i].ds_len);
88 wqe->data[i].addr = 0;
89 wqe->data[i].byte_count = 0;
111 struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, rq->wq.head); local
113 if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, rq->wq.head))) {
117 mlx5_wq_ll_push(&rq->wq, be16_to_cpu(wqe
32 mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) argument
427 struct mlx5e_rx_wqe *wqe; local
[all...]
en.h
1110 mlx5e_tx_notify_hw(struct mlx5e_sq *sq, u32 *wqe, int bf_sz) argument
1114 /* ensure wqe is visible to device before updating doorbell record */
1126 __iowrite64_copy(sq->uar.bf_map + ofst, wqe, bf_sz);
1132 mlx5_write64(wqe, sq->uar.map + ofst,
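Finished...
The comment in mlx5e_tx_notify_hw() states the key ordering rule: the WQE must be visible before the doorbell record is updated, and the record before the UAR write that makes the device fetch it. A generic sketch of that two-fence pattern, using C11 fences as stand-ins for the kernel's wmb()/MMIO primitives:

    #include <stdatomic.h>
    #include <stdint.h>

    static void notify_hw_sketch(volatile uint32_t *db_record, /* host memory */
                                 volatile uint64_t *uar_reg,   /* mapped MMIO */
                                 uint32_t pc, uint64_t ctrl_word)
    {
        /* WQE contents were written by the caller; make them visible
         * before the doorbell record is updated. */
        atomic_thread_fence(memory_order_release);
        *db_record = pc;

        /* Keep the record update ahead of the MMIO doorbell write that
         * actually triggers the HCA to fetch the WQE. */
        atomic_thread_fence(memory_order_seq_cst);
        *uar_reg = ctrl_word;
    }
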
/freebsd-12-stable/sys/dev/mthca/
mthca_srq.c
90 static inline int *wqe_to_link(void *wqe) argument
92 return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
151 void *wqe; local
178 next = wqe = get_wqe(srq, i);
181 *wqe_to_link(wqe) = i + 1;
184 *wqe_to_link(wqe) = -1;
188 for (scatter = wqe + sizeof (struct mthca_next_seg);
189 (void *) scatter < wqe + (1 << srq->wqe_shift);
487 void *wqe; local
496 wqe
586 void *wqe; local
[all...]
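
The mthca_srq.c hits show a classic embedded free list: while an SRQ WQE is unposted, wqe_to_link() reuses the imm slot of its next-segment to hold the index of the next free WQE, with -1 terminating the chain. A self-contained sketch of the same trick over a flat buffer:

    #include <stddef.h>

    #define WQE_SHIFT 6    /* example: 64-byte WQE stride */
    #define NWQE      16

    static char srq_buf[NWQE << WQE_SHIFT];

    static void *get_wqe(int i) { return srq_buf + ((size_t)i << WQE_SHIFT); }

    /* Stand-in for wqe_to_link(): borrow an int-sized slot inside the WQE
     * (mthca uses offsetof(struct mthca_next_seg, imm)). */
    static int *wqe_to_link(void *wqe) { return (int *)wqe; }

    static void init_free_list(void)
    {
        for (int i = 0; i < NWQE - 1; ++i)
            *wqe_to_link(get_wqe(i)) = i + 1;  /* chain each WQE to the next */
        *wqe_to_link(get_wqe(NWQE - 1)) = -1;  /* tail of the free list */
    }
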
mthca_qp.c
1607 void *wqe; local
1643 wqe = get_send_wqe(qp, ind);
1645 qp->sq.last = wqe;
1647 ((struct mthca_next_seg *) wqe)->nda_op = 0;
1648 ((struct mthca_next_seg *) wqe)->ee_nds = 0;
1649 ((struct mthca_next_seg *) wqe)->flags =
1657 ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;
1659 wqe += sizeof (struct mthca_next_seg);
1667 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
1669 wqe
1824 void *wqe; local
1922 void *wqe; local
2166 void *wqe; local
[all...]
mthca_cq.c
126 __be32 wqe; member in struct:mthca_cqe
140 __be32 wqe; member in struct:mthca_err_cqe
312 mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));
388 be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe),
477 cqe->wqe = new_wqe;
511 be32_to_cpu(cqe->wqe));
540 wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset)
546 u32 wqe = be32_to_cpu(cqe->wqe); local
548 wqe_index = wqe >> sr
552 s32 wqe; local
[all...]
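
In the mthca_cq.c hits, a CQE identifies the completed WQE by byte offset, so recovering the queue index means subtracting the queue's base offset and shifting by the log2 WQE stride. A worked sketch:

    #include <stdint.h>

    /* cqe_wqe: byte offset reported in the CQE (already byte-swapped);
     * base:    send_wqe_offset for the SQ (0 for a receive queue);
     * shift:   log2 of the WQE stride. */
    static uint32_t cqe_to_wqe_index(uint32_t cqe_wqe, uint32_t base,
                                     uint8_t shift)
    {
        return (cqe_wqe - base) >> shift;
    }

    /* Example: offset 0x1240, base 0x1000, 64-byte stride (shift 6) -> 9. */
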
/freebsd-12-stable/contrib/ofed/libmlx4/
qp.c
44 #include "wqe.h"
76 uint32_t *wqe = get_send_wqe(qp, n); local
78 int ds = (((struct mlx4_wqe_ctrl_seg *)wqe)->fence_size & 0x3f) << 2;
81 wqe[i] = 0xffffffff;
218 void *wqe; local
252 ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
268 wqe += sizeof *ctrl;
280 set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
282 wqe += sizeof (struct mlx4_wqe_raddr_seg);
284 set_atomic_seg(wqe, w
[all...]
/freebsd-12-stable/sys/contrib/octeon-sdk/
cvmx-raid.h
105 uint64_t wqe : 1; /**< Indicates whether RAD submits a work queue entry or writes an L2/DRAM byte to member in struct:__anon11396::__anon11397
cvmx-ipd.c
63 #include <asm/octeon/cvmx-wqe.h>
83 #include "cvmx-wqe.h"
cvmx-pcie.c
76 #include <asm/octeon/cvmx-wqe.h>
85 #include "cvmx-wqe.h"
/freebsd-12-stable/sys/ofed/include/rdma/
rdmavt_qp.h
493 * @wqe - the send wqe
496 * a wqe relative reserved operation use.
500 struct rvt_swqe *wqe)
502 wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
509 * @wqe - the send wqe
523 struct rvt_swqe *wqe)
525 if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
526 wqe
498 rvt_qp_wqe_reserve( struct rvt_qp *qp, struct rvt_swqe *wqe) argument
521 rvt_qp_wqe_unreserve( struct rvt_qp *qp, struct rvt_swqe *wqe) argument
[all...]
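
The rdmavt_qp.h hits show the reserve/unreserve pairing: rvt_qp_wqe_reserve() tags a send WQE with RVT_SEND_RESERVE_USED, and the unreserve path tests that flag before releasing the slot (the hit is truncated at that point). A sketch of the pattern; the reserved-slot counter below is a hypothetical stand-in for whatever bookkeeping the QP actually keeps:

    #include <stdint.h>

    #define SEND_RESERVE_USED (1u << 31)  /* stand-in for RVT_SEND_RESERVE_USED */

    struct swqe_sketch { uint32_t send_flags; };
    struct qp_sketch   { uint32_t reserved_used; };  /* hypothetical counter */

    static void wqe_reserve(struct qp_sketch *qp, struct swqe_sketch *wqe)
    {
        wqe->send_flags |= SEND_RESERVE_USED;
        qp->reserved_used++;
    }

    static void wqe_unreserve(struct qp_sketch *qp, struct swqe_sketch *wqe)
    {
        if (wqe->send_flags & SEND_RESERVE_USED) { /* only if reserved */
            wqe->send_flags &= ~SEND_RESERVE_USED;
            qp->reserved_used--;
        }
    }
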
rdma_vt.h
331 int (*check_send_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe);
/freebsd-12-stable/sys/dev/mlx4/mlx4_ib/
mlx4_ib_qp.c
217 __be32 *wqe; local
232 wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1));
233 *wqe = stamp;
239 wqe = buf + i;
240 *wqe = cpu_to_be32(0xffffffff);
249 void *wqe; local
252 ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
256 struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl;
265 inl = wqe + s;
2299 void *wqe, unsigne
2297 build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len) argument
2427 build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len) argument
2806 build_tunnel_header(struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len) argument
2889 build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_ud_wr *wr, struct mlx4_ib_qp *qp, unsigned *lso_seg_len, __be32 *lso_hdr_sz, __be32 *blh) argument
2924 add_zero_len_inline(void *wqe) argument
2935 void *wqe; local
3132 build_tunnel_header(ud_wr(wr), wqe, &seglen); local
3147 build_tunnel_header(ud_wr(wr), wqe, &seglen); local
[all...]
/freebsd-12-stable/contrib/ofed/libmlx5/
qp.c
43 #include "wqe.h"
270 void *wqe, int *sz,
282 seg = wqe;
283 wqe += sizeof *seg;
293 if (unlikely(wqe + len > qend)) {
294 copy = qend - wqe;
295 memcpy(wqe, addr, copy);
298 wqe = mlx5_get_send_wqe(qp, 0);
300 memcpy(wqe, addr, len);
301 wqe
269 set_data_inl_seg(struct mlx5_qp *qp, struct ibv_send_wr *wr, void *wqe, int *sz, struct mlx5_sg_copy_ptr *sg_copy_ptr) argument
[all...]
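
The libmlx5 set_data_inl_seg() hit shows the ring wrap-around handled inline: if the payload would run past qend, copy up to the end, jump back to the first WQE slot, and copy the remainder. A self-contained sketch of that split copy:

    #include <string.h>
    #include <stddef.h>

    /* Copy len bytes into a ring [ring_start, ring_end), wrapping once if
     * the write would run past the end; *posp tracks the write position. */
    static void ring_copy(char *ring_start, char *ring_end, char **posp,
                          const void *data, size_t len)
    {
        char *pos = *posp;

        if (pos + len > ring_end) {
            size_t head = (size_t)(ring_end - pos);
            memcpy(pos, data, head);           /* fill up to the end */
            data = (const char *)data + head;
            len -= head;
            pos = ring_start;                  /* wrap to the queue start */
        }
        memcpy(pos, data, len);
        *posp = pos + len;
    }
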
mlx5.h
754 static inline uint8_t calc_sig(void *wqe, int size) argument
757 uint8_t *p = wqe;
/freebsd-12-stable/sys/dev/ocs_fc/
ocs_hw.c
174 * If target wqe timeouts are enabled,
175 * remove from active wqe list.
1170 /* shutdown target wqe timer */
1325 /* shutdown target wqe timer */
3590 * @param wqe Pointer to WQ entry.
3597 _hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe) argument
3607 sli4_generic_wqe_t *genwqe = (void*)wqe->wqebuf;
3615 queue_rc = _sli_queue_write(&wq->hw->sli, wq->queue, wqe->wqebuf);
3621 ocs_queue_history_wq(&wq->hw->q_hist, (void *) wqe->wqebuf, wq->queue->id, queue_rc);
3634 * @param wqe Pointe
3641 hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe) argument
3692 ocs_hw_wqe_t *wqe; local
4356 ocs_hw_wqe_t *wqe; local
[all...]
ocs_hw.h
543 * @brief HW wqe object
546 uint32_t abort_wqe_submit_needed:1, /**< set if abort wqe needs to be submitted */
568 ocs_hw_wqe_t wqe; /**< Work queue object, with link for pending */ member in struct:ocs_hw_io_s
925 uint8_t emulate_tgt_wqe_timeout; /** Enable driver target wqe timeouts */
1080 uint32_t in_active_wqe_timer:1, /**< TRUE if currently in active wqe timer handler */
1081 active_wqe_timer_shutdown:1, /** TRUE if wqe timer is to be shutdown */
1372 extern int32_t hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe);
1391 ocs_hw_wqe_t wqe; /**> WQE buffer object (may be queued on WQ pending list) */ member in struct:__anon14821
ocs_utils.c
946 static uint32_t ocs_q_hist_get_wqe_mask(sli4_generic_wqe_t *wqe) argument
950 if (ocs_q_hist_wqe_masks[i].command == wqe->command) {
/freebsd-12-stable/sys/dev/mlx5/mlx5_ib/
mlx5_ib_qp.c
402 mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
2990 if (unlikely(copysz < left)) { /* the last wqe in the queue */
3282 static u8 calc_sig(void *wqe, int size) argument
3284 u8 *p = wqe;
3294 static u8 wq_sig(void *wqe) argument
3296 return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
3300 void *wqe, int *sz)
3310 seg = wqe;
3311 wqe
3299 set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr, void *wqe, int *sz) argument
[all...]
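
The mlx5_ib wq_sig() hit shows where the signature length comes from: byte 8 of the WQE holds the DS count in its low 6 bits, in 16-byte units, hence the "& 0x3f) << 4". The fold itself is truncated in the hit; the XOR-and-invert below is an assumption based on the mlx5 sources, not taken verbatim from this file:

    #include <stdint.h>

    static uint8_t calc_sig_sketch(const void *wqe, int size)
    {
        const uint8_t *p = wqe;
        uint8_t res = 0;

        for (int i = 0; i < size; ++i)
            res ^= p[i];      /* XOR fold over the whole WQE */
        return ~res;          /* assumed final inversion */
    }

    static uint8_t wq_sig_sketch(const void *wqe)
    {
        /* length = (ds count at byte 8, low 6 bits) * 16 bytes */
        return calc_sig_sketch(wqe, (((const uint8_t *)wqe)[8] & 0x3f) << 4);
    }
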
/freebsd-12-stable/sys/dev/mlx5/
qp.h
448 } wqe; member in union:mlx5_pagefault::__anon14169
/freebsd-12-stable/sys/dev/mlx5/mlx5_fpga/
mlx5fpga_conn.c
131 static void mlx5_fpga_conn_notify_hw(struct mlx5_fpga_conn *conn, void *wqe) argument
133 /* ensure wqe is visible to device before updating doorbell record */
138 mlx5_write64(wqe, conn->fdev->conn_res.uar->map + MLX5_BF_OFFSET, NULL);
899 /* Allow for one cqe per rx/tx wqe, plus one cqe for the next wqe,

Completed in 457 milliseconds
