Lines matching refs:wr (references to the struct ibv_send_wr / ibv_recv_wr parameter wr)

121 static void set_bind_seg(struct mlx4_wqe_bind_seg *bseg, struct ibv_send_wr *wr)
123 int acc = wr->bind_mw.bind_info.mw_access_flags;
133 if (((struct ibv_mw *)(wr->bind_mw.mw))->type == IBV_MW_TYPE_2)
138 bseg->new_rkey = htobe32(wr->bind_mw.rkey);
139 bseg->lkey = htobe32(wr->bind_mw.bind_info.mr->lkey);
140 bseg->addr = htobe64((uint64_t) wr->bind_mw.bind_info.addr);
141 bseg->length = htobe64(wr->bind_mw.bind_info.length);
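The set_bind_seg() matches above show the provider unpacking wr->bind_mw: the MW access flags, the MW type, the new rkey, and the backing MR's lkey/addr/length. For context, a caller fills those same fields on an IBV_WR_BIND_MW work request roughly as sketched below; the helper name and the qp/mw/mr arguments are illustrative, not taken from the listing.

#include <infiniband/verbs.h>
#include <stdint.h>
#include <string.h>

/* Illustrative helper: post a type-2 memory-window bind through the normal
 * send path; these are the fields set_bind_seg() above copies into the WQE. */
static int post_mw_bind(struct ibv_qp *qp, struct ibv_mw *mw,
                        struct ibv_mr *mr, void *addr, size_t len)
{
        struct ibv_send_wr wr, *bad_wr;

        memset(&wr, 0, sizeof(wr));
        wr.opcode = IBV_WR_BIND_MW;
        wr.send_flags = IBV_SEND_SIGNALED;
        wr.bind_mw.mw = mw;                       /* type checked in set_bind_seg() */
        wr.bind_mw.rkey = ibv_inc_rkey(mw->rkey); /* -> bseg->new_rkey */
        wr.bind_mw.bind_info.mr = mr;             /* -> bseg->lkey */
        wr.bind_mw.bind_info.addr = (uint64_t)(uintptr_t)addr;
        wr.bind_mw.bind_info.length = len;
        wr.bind_mw.bind_info.mw_access_flags = IBV_ACCESS_REMOTE_READ |
                                               IBV_ACCESS_REMOTE_WRITE;

        return ibv_post_send(qp, &wr, &bad_wr);
}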
163 static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ibv_send_wr *wr)
165 if (wr->opcode == IBV_WR_ATOMIC_CMP_AND_SWP) {
166 aseg->swap_add = htobe64(wr->wr.atomic.swap);
167 aseg->compare = htobe64(wr->wr.atomic.compare_add);
169 aseg->swap_add = htobe64(wr->wr.atomic.compare_add);
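set_atomic_seg() shows the operand mapping for atomics: for IBV_WR_ATOMIC_CMP_AND_SWP, wr.atomic.compare_add becomes the compare value and wr.atomic.swap the swap value; for fetch-and-add only compare_add is used. A minimal caller-side sketch (the helper name and arguments are illustrative):

#include <infiniband/verbs.h>
#include <stdint.h>
#include <string.h>

/* Illustrative helper: 64-bit compare-and-swap on a remote address. */
static int post_cmp_swap(struct ibv_qp *qp, struct ibv_mr *result_mr,
                         uint64_t *result_buf, uint64_t remote_addr,
                         uint32_t rkey, uint64_t expected, uint64_t desired)
{
        struct ibv_sge sge = {
                .addr = (uint64_t)(uintptr_t)result_buf, /* old value lands here */
                .length = sizeof(*result_buf),
                .lkey = result_mr->lkey,
        };
        struct ibv_send_wr wr, *bad_wr;

        memset(&wr, 0, sizeof(wr));
        wr.opcode = IBV_WR_ATOMIC_CMP_AND_SWP;
        wr.send_flags = IBV_SEND_SIGNALED;
        wr.sg_list = &sge;
        wr.num_sge = 1;
        wr.wr.atomic.remote_addr = remote_addr; /* -> set_raddr_seg() */
        wr.wr.atomic.rkey = rkey;
        wr.wr.atomic.compare_add = expected;    /* -> aseg->compare */
        wr.wr.atomic.swap = desired;            /* -> aseg->swap_add */

        return ibv_post_send(qp, &wr, &bad_wr);
}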
176 struct ibv_send_wr *wr)
178 memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
179 dseg->dqpn = htobe32(wr->wr.ud.remote_qpn);
180 dseg->qkey = htobe32(wr->wr.ud.remote_qkey);
181 dseg->vlan = htobe16(to_mah(wr->wr.ud.ah)->vlan);
182 memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->mac, 6);
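set_datagram_seg() copies the UD address vector out of the AH plus the remote QPN and Q_Key, along with the MAC/VLAN cached in the mlx4 AH for RoCE. A caller fills the matching ibv_send_wr fields roughly like this (illustrative helper; the ah/qp/mr arguments are assumed to exist):

#include <infiniband/verbs.h>
#include <stdint.h>
#include <string.h>

/* Illustrative helper: post one UD send; ah/remote_qpn/remote_qkey are the
 * fields set_datagram_seg() above copies into the WQE. */
static int post_ud_send(struct ibv_qp *qp, struct ibv_ah *ah,
                        uint32_t remote_qpn, uint32_t remote_qkey,
                        struct ibv_mr *mr, void *buf, uint32_t len)
{
        struct ibv_sge sge = {
                .addr = (uint64_t)(uintptr_t)buf,
                .length = len,
                .lkey = mr->lkey,
        };
        struct ibv_send_wr wr, *bad_wr;

        memset(&wr, 0, sizeof(wr));
        wr.wr_id = 1;
        wr.opcode = IBV_WR_SEND;
        wr.send_flags = IBV_SEND_SIGNALED;
        wr.sg_list = &sge;
        wr.num_sge = 1;
        wr.wr.ud.ah = ah;                     /* -> dseg->av, mac, vlan */
        wr.wr.ud.remote_qpn = remote_qpn;     /* -> dseg->dqpn */
        wr.wr.ud.remote_qkey = remote_qkey;   /* -> dseg->qkey */

        return ibv_post_send(qp, &wr, &bad_wr);
}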
213 int mlx4_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
233 for (nreq = 0; wr; ++nreq, wr = wr->next) {
236 *bad_wr = wr;
240 if (wr->num_sge > qp->sq.max_gs) {
242 *bad_wr = wr;
246 if (wr->opcode >= sizeof mlx4_ib_opcode / sizeof mlx4_ib_opcode[0]) {
248 *bad_wr = wr;
253 qp->sq.wrid[ind & (qp->sq.wqe_cnt - 1)] = wr->wr_id;
256 (wr->send_flags & IBV_SEND_SIGNALED ?
258 (wr->send_flags & IBV_SEND_SOLICITED ?
262 if (wr->opcode == IBV_WR_SEND_WITH_IMM ||
263 wr->opcode == IBV_WR_RDMA_WRITE_WITH_IMM)
264 ctrl->imm = wr->imm_data;
273 ctrl->srcrb_flags |= MLX4_REMOTE_SRQN_FLAGS(wr);
277 switch (wr->opcode) {
280 set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
281 wr->wr.atomic.rkey);
284 set_atomic_seg(wqe, wr);
296 if (!wr->num_sge)
298 set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
299 wr->wr.rdma.rkey);
307 set_local_inv_seg(wqe, wr->imm_data);
316 set_bind_seg(wqe, wr);
323 ctrl->imm = htobe32(wr->imm_data);
333 set_datagram_seg(wqe, wr);
337 if (wr->send_flags & IBV_SEND_IP_CSUM) {
340 *bad_wr = wr;
352 if (wr->send_flags & IBV_SEND_IP_CSUM) {
355 *bad_wr = wr;
367 if (wr->send_flags & IBV_SEND_INLINE && wr->num_sge) {
382 for (i = 0; i < wr->num_sge; ++i) {
383 addr = (void *) (uintptr_t) wr->sg_list[i].addr;
384 len = wr->sg_list[i].length;
390 *bad_wr = wr;
437 for (i = wr->num_sge - 1; i >= 0 ; --i)
438 set_data_seg(seg + i, wr->sg_list + i);
440 size += wr->num_sge * (sizeof *seg / 16);
443 ctrl->fence_size = (wr->send_flags & IBV_SEND_FENCE ?
453 ctrl->owner_opcode = htobe32(mlx4_ib_opcode[wr->opcode]) |
461 if (wr->next)
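Taken together, the mlx4_post_send() matches trace the per-WR loop: check queue space, num_sge and opcode, save wr_id, set the signaled/solicited flags and any immediate data, emit the opcode-specific segment (raddr/atomic/bind/datagram), copy the scatter/gather entries in reverse order, and finally write the owner/opcode word. A minimal caller that chains two WRs through wr->next and uses bad_wr to find a rejected entry might look like the following sketch (names and sizes are illustrative):

#include <infiniband/verbs.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative caller: chain two RDMA-write WRs; on failure, bad_wr points
 * at the first request the provider's post_send loop rejected. */
static int post_two_writes(struct ibv_qp *qp, struct ibv_mr *mr,
                           void *buf, uint64_t raddr, uint32_t rkey)
{
        struct ibv_sge sge[2];
        struct ibv_send_wr wr[2], *bad_wr = NULL;
        int i, ret;

        memset(wr, 0, sizeof(wr));
        for (i = 0; i < 2; i++) {
                sge[i].addr = (uint64_t)(uintptr_t)buf + i * 4096;
                sge[i].length = 4096;
                sge[i].lkey = mr->lkey;

                wr[i].wr_id = i;
                wr[i].opcode = IBV_WR_RDMA_WRITE;
                wr[i].sg_list = &sge[i];
                wr[i].num_sge = 1;
                wr[i].wr.rdma.remote_addr = raddr + i * 4096;
                wr[i].wr.rdma.rkey = rkey;
        }
        wr[0].next = &wr[1];
        wr[1].send_flags = IBV_SEND_SIGNALED; /* only the last WR generates a CQE */

        ret = ibv_post_send(qp, &wr[0], &bad_wr); /* rdma-core returns an errno value */
        if (ret)
                fprintf(stderr, "post_send failed at wr_id %llu: %s\n",
                        bad_wr ? (unsigned long long)bad_wr->wr_id : 0ULL,
                        strerror(ret));
        return ret;
}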
512 int mlx4_post_recv(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
528 for (nreq = 0; wr; ++nreq, wr = wr->next) {
531 *bad_wr = wr;
535 if (wr->num_sge > qp->rq.max_gs) {
537 *bad_wr = wr;
543 for (i = 0; i < wr->num_sge; ++i)
544 __set_data_seg(scat + i, wr->sg_list + i);
552 qp->rq.wrid[ind] = wr->wr_id;
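The mlx4_post_recv() matches mirror the send path: per WR it checks queue space and num_sge, copies each sg entry into the receive WQE's scatter list, and records wr_id for completion reporting. A caller-side sketch (illustrative helper; qp/mr/buf are assumed to exist):

#include <infiniband/verbs.h>
#include <stdint.h>
#include <string.h>

/* Illustrative helper: post a single receive buffer; sg_list/num_sge and
 * wr_id are what mlx4_post_recv() above copies into the RQ. */
static int post_one_recv(struct ibv_qp *qp, struct ibv_mr *mr,
                         void *buf, uint32_t len, uint64_t wr_id)
{
        struct ibv_sge sge = {
                .addr = (uint64_t)(uintptr_t)buf,
                .length = len,
                .lkey = mr->lkey,
        };
        struct ibv_recv_wr wr, *bad_wr;

        memset(&wr, 0, sizeof(wr));
        wr.wr_id = wr_id;   /* reported back in the completion's wr_id */
        wr.sg_list = &sge;
        wr.num_sge = 1;

        return ibv_post_recv(qp, &wr, &bad_wr);
}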