Lines matching refs:wr (work-request handling in the mlx4_ib QP send/receive path)

2298 				  struct ib_ud_wr *wr,
2305 struct mlx4_ib_ah *ah = to_mah(wr->ah);
2313 if (wr->wr.opcode != IB_WR_SEND)
2318 for (i = 0; i < wr->wr.num_sge; ++i)
2319 send_size += wr->wr.sg_list[i].length;
2344 sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
2348 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
2427 static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
2434 struct mlx4_ib_ah *ah = to_mah(wr->ah);
2450 for (i = 0; i < wr->wr.num_sge; ++i)
2451 send_size += wr->wr.sg_list[i].length;
2571 switch (wr->wr.opcode) {
2579 sqp->ud_header.immediate_data = wr->wr.ex.imm_data;
2620 sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
2624 ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey);
2626 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
2628 sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
2629 sqp->qkey : wr->remote_qkey);
2718 struct ib_reg_wr *wr)
2720 struct mlx4_ib_mr *mr = to_mmr(wr->mr);
2722 fseg->flags = convert_access(wr->access);
2723 fseg->mem_key = cpu_to_be32(wr->key);
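
For reference, the mr/key/access fields that set_reg_seg() translates are filled in by the verbs consumer through struct ib_reg_wr. A minimal consumer-side sketch, not taken from this driver (function name and parameters are illustrative; it assumes the MR's pages were already mapped with ib_map_mr_sg()):

#include <rdma/ib_verbs.h>

/* Sketch: post a fast-registration WR so the MR's rkey becomes usable. */
static int post_reg_mr(struct ib_qp *qp, struct ib_mr *mr, int access)
{
	struct ib_reg_wr reg = {};
	struct ib_send_wr *bad_wr;

	reg.wr.opcode     = IB_WR_REG_MR;
	reg.wr.send_flags = IB_SEND_SIGNALED;
	reg.mr            = mr;		/* to_mmr(wr->mr) in the driver */
	reg.key           = mr->rkey;	/* fseg->mem_key */
	reg.access        = access;	/* convert_access(wr->access) */

	return ib_post_send(qp, &reg.wr, &bad_wr);
}
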
2748 struct ib_atomic_wr *wr)
2750 if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
2751 aseg->swap_add = cpu_to_be64(wr->swap);
2752 aseg->compare = cpu_to_be64(wr->compare_add);
2753 } else if (wr->wr.opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
2754 aseg->swap_add = cpu_to_be64(wr->compare_add);
2755 aseg->compare = cpu_to_be64(wr->compare_add_mask);
2757 aseg->swap_add = cpu_to_be64(wr->compare_add);
2764 struct ib_atomic_wr *wr)
2766 aseg->swap_add = cpu_to_be64(wr->swap);
2767 aseg->swap_add_mask = cpu_to_be64(wr->swap_mask);
2768 aseg->compare = cpu_to_be64(wr->compare_add);
2769 aseg->compare_mask = cpu_to_be64(wr->compare_add_mask);
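
Both atomic segment builders read their operands from struct ib_atomic_wr as built by the caller. A minimal sketch of the matching consumer-side compare-and-swap WR, not taken from this driver (the address/lkey/rkey parameters are illustrative):

#include <rdma/ib_verbs.h>

/* Sketch: 64-bit compare-and-swap; the old remote value lands in the local buffer. */
static int post_cmp_swap(struct ib_qp *qp, u64 local_addr, u32 lkey,
			 u64 remote_addr, u32 rkey, u64 expect, u64 newval)
{
	struct ib_sge sge = { .addr = local_addr, .length = sizeof(u64), .lkey = lkey };
	struct ib_atomic_wr atomic = {};
	struct ib_send_wr *bad_wr;

	atomic.wr.opcode     = IB_WR_ATOMIC_CMP_AND_SWP;
	atomic.wr.send_flags = IB_SEND_SIGNALED;
	atomic.wr.sg_list    = &sge;
	atomic.wr.num_sge    = 1;
	atomic.remote_addr   = remote_addr;	/* set_raddr_seg() */
	atomic.rkey          = rkey;
	atomic.compare_add   = expect;		/* aseg->compare */
	atomic.swap          = newval;		/* aseg->swap_add */

	return ib_post_send(qp, &atomic.wr, &bad_wr);
}
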
2773 struct ib_ud_wr *wr)
2775 memcpy(dseg->av, &to_mah(wr->ah)->av, sizeof (struct mlx4_av));
2776 dseg->dqpn = cpu_to_be32(wr->remote_qpn);
2777 dseg->qkey = cpu_to_be32(wr->remote_qkey);
2778 dseg->vlan = to_mah(wr->ah)->av.eth.vlan;
2779 memcpy(dseg->mac, to_mah(wr->ah)->av.eth.mac, 6);
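
set_datagram_seg() needs only the address handle, destination QPN and QKey from the WR. A minimal consumer-side UD send sketch, not taken from this driver (function name and parameters are illustrative):

#include <rdma/ib_verbs.h>

/* Sketch: a plain UD send whose ah/remote_qpn/remote_qkey feed set_datagram_seg(). */
static int post_ud_send(struct ib_qp *qp, struct ib_ah *ah,
			u32 remote_qpn, u32 remote_qkey,
			u64 dma_addr, u32 len, u32 lkey)
{
	struct ib_sge sge = { .addr = dma_addr, .length = len, .lkey = lkey };
	struct ib_ud_wr ud = {};
	struct ib_send_wr *bad_wr;

	ud.wr.opcode     = IB_WR_SEND;
	ud.wr.send_flags = IB_SEND_SIGNALED;
	ud.wr.sg_list    = &sge;
	ud.wr.num_sge    = 1;
	ud.ah            = ah;		/* to_mah(wr->ah)->av is copied into dseg->av */
	ud.remote_qpn    = remote_qpn;	/* dseg->dqpn */
	ud.remote_qkey   = remote_qkey;	/* dseg->qkey */

	return ib_post_send(qp, &ud.wr, &bad_wr);
}
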
2784 struct ib_ud_wr *wr,
2787 union mlx4_ext_av *av = &to_mah(wr->ah)->av;
2806 static void build_tunnel_header(struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len)
2810 struct mlx4_ib_ah *ah = to_mah(wr->ah);
2815 hdr.remote_qpn = cpu_to_be32(wr->remote_qpn);
2816 hdr.pkey_index = cpu_to_be16(wr->pkey_index);
2817 hdr.qkey = cpu_to_be32(wr->remote_qkey);
2889 static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_ud_wr *wr,
2893 unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16);
2899 wr->wr.num_sge > qp->sq.max_gs - (halign >> 4)))
2902 memcpy(wqe->header, wr->header, wr->hlen);
2904 *lso_hdr_sz = cpu_to_be32(wr->mss << 16 | wr->hlen);
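
build_lso_seg() copies a consumer-supplied header blob into the WQE and encodes mss/hlen for the HCA. A hedged sketch of how an IPoIB-style consumer might fill those ib_ud_wr fields, not taken from this driver (names are illustrative; the payload SGEs are assumed to cover only the data after the copied headers):

#include <rdma/ib_verbs.h>

/* Sketch: an IB_WR_LSO UD send; header/hlen/mss map onto build_lso_seg(). */
static int post_lso_send(struct ib_qp *qp, struct ib_ah *ah, u32 dqpn, u32 qkey,
			 void *hdr, int hlen, int mss,
			 struct ib_sge *payload, int num_sge)
{
	struct ib_ud_wr ud = {};
	struct ib_send_wr *bad_wr;

	ud.wr.opcode     = IB_WR_LSO;
	ud.wr.send_flags = IB_SEND_SIGNALED | IB_SEND_IP_CSUM;
	ud.wr.sg_list    = payload;	/* payload only; headers are carried via ud.header */
	ud.wr.num_sge    = num_sge;
	ud.ah            = ah;
	ud.remote_qpn    = dqpn;
	ud.remote_qkey   = qkey;
	ud.header        = hdr;		/* memcpy(wqe->header, wr->header, wr->hlen) */
	ud.hlen          = hlen;
	ud.mss           = mss;		/* *lso_hdr_sz = cpu_to_be32(mss << 16 | hlen) */

	return ib_post_send(qp, &ud.wr, &bad_wr);
}
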
2909 static __be32 send_ieth(struct ib_send_wr *wr)
2911 switch (wr->opcode) {
2914 return wr->ex.imm_data;
2917 return cpu_to_be32(wr->ex.invalidate_rkey);
2931 int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2956 struct mlx4_ib_ah *ah = to_mah(ud_wr(wr)->ah);
2978 *bad_wr = wr;
2985 for (nreq = 0; wr; ++nreq, wr = wr->next) {
2991 *bad_wr = wr;
2995 if (unlikely(wr->num_sge > qp->sq.max_gs)) {
2997 *bad_wr = wr;
3002 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;
3005 (wr->send_flags & IB_SEND_SIGNALED ?
3007 (wr->send_flags & IB_SEND_SOLICITED ?
3009 ((wr->send_flags & IB_SEND_IP_CSUM) ?
3014 ctrl->imm = send_ieth(wr);
3022 switch (wr->opcode) {
3026 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
3027 atomic_wr(wr)->rkey);
3030 set_atomic_seg(wqe, atomic_wr(wr));
3039 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
3040 atomic_wr(wr)->rkey);
3043 set_masked_atomic_seg(wqe, atomic_wr(wr));
3054 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
3055 rdma_wr(wr)->rkey);
3063 set_local_inv_seg(wqe, wr->ex.invalidate_rkey);
3071 set_reg_seg(wqe, reg_wr(wr));
3083 err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr),
3086 *bad_wr = wr;
3095 set_datagram_seg(wqe, ud_wr(wr));
3102 set_datagram_seg(wqe, ud_wr(wr));
3106 if (wr->opcode == IB_WR_LSO) {
3107 err = build_lso_seg(wqe, ud_wr(wr), qp, &seglen,
3110 *bad_wr = wr;
3120 err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr),
3123 *bad_wr = wr;
3132 build_tunnel_header(ud_wr(wr), wqe, &seglen);
3143 ud_wr(wr),
3147 build_tunnel_header(ud_wr(wr), wqe, &seglen);
3154 err = build_mlx_header(to_msqp(qp), ud_wr(wr), ctrl,
3157 *bad_wr = wr;
3176 dseg += wr->num_sge - 1;
3177 size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);
3188 for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
3189 set_data_seg(dseg, wr->sg_list + i);
3199 ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
3209 if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) {
3210 *bad_wr = wr;
3215 ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
3230 if (wr->next) {
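
The posting loop above walks wr->next and reports the first request it could not post through *bad_wr. A minimal sketch of a consumer chaining two RDMA WRITEs and checking bad_wr, not taken from this driver (names are illustrative; it assumes the QP was created with IB_SIGNAL_REQ_WR so only the last WR requests a CQE):

#include <linux/printk.h>
#include <rdma/ib_verbs.h>

/* Sketch: chain two RDMA WRITE WRs; on error, *bad_wr is the first unposted WR. */
static int post_two_writes(struct ib_qp *qp, struct ib_sge *sg0, struct ib_sge *sg1,
			   u64 raddr0, u64 raddr1, u32 rkey)
{
	struct ib_rdma_wr w[2] = {};
	struct ib_send_wr *bad_wr;
	int ret;

	w[0].wr.wr_id   = 0;
	w[0].wr.opcode  = IB_WR_RDMA_WRITE;
	w[0].wr.sg_list = sg0;
	w[0].wr.num_sge = 1;
	w[0].remote_addr = raddr0;
	w[0].rkey        = rkey;
	w[0].wr.next     = &w[1].wr;	/* the driver loop follows wr->next */

	w[1].wr.wr_id      = 1;
	w[1].wr.opcode     = IB_WR_RDMA_WRITE;
	w[1].wr.send_flags = IB_SEND_SIGNALED;	/* only the last WR generates a CQE */
	w[1].wr.sg_list    = sg1;
	w[1].wr.num_sge    = 1;
	w[1].remote_addr   = raddr1;
	w[1].rkey          = rkey;

	ret = ib_post_send(qp, &w[0].wr, &bad_wr);
	if (ret)
		pr_err("post_send failed at wr_id %llu\n",
		       (unsigned long long)bad_wr->wr_id);
	return ret;
}
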
3266 int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
3284 *bad_wr = wr;
3291 for (nreq = 0; wr; ++nreq, wr = wr->next) {
3294 *bad_wr = wr;
3298 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
3300 *bad_wr = wr;
3315 scat->lkey = cpu_to_be32(wr->sg_list->lkey);
3321 for (i = 0; i < wr->num_sge; ++i)
3322 __set_data_seg(scat + i, wr->sg_list + i);
3330 qp->rq.wrid[ind] = wr->wr_id;
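
mlx4_ib_post_recv() consumes only wr_id, num_sge and sg_list from each request. A minimal sketch of refilling the receive queue with one buffer, not taken from this driver (parameters are illustrative):

#include <rdma/ib_verbs.h>

/* Sketch: post a single receive buffer; wr_id is echoed back in the completion. */
static int post_recv_buf(struct ib_qp *qp, u64 dma_addr, u32 len, u32 lkey, u64 wr_id)
{
	struct ib_sge sge = { .addr = dma_addr, .length = len, .lkey = lkey };
	struct ib_recv_wr rwr = {};
	struct ib_recv_wr *bad_wr;

	rwr.wr_id   = wr_id;	/* stored in qp->rq.wrid[ind] above */
	rwr.sg_list = &sge;
	rwr.num_sge = 1;

	return ib_post_recv(qp, &rwr, &bad_wr);
}
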