Lines Matching refs:wr

1874 				const struct ib_send_wr *wr)
1878 struct ocrdma_ah *ah = get_ocrdma_ah(ud_wr(wr)->ah);
1880 ud_hdr->rsvd_dest_qpn = ud_wr(wr)->remote_qpn;
1884 ud_hdr->qkey = ud_wr(wr)->remote_qkey;
1921 const struct ib_send_wr *wr, u32 wqe_size)
1926 if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
1927 hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
1935 for (i = 0; i < wr->num_sge; i++) {
1937 (void *)(unsigned long)wr->sg_list[i].addr,
1938 wr->sg_list[i].length);
1939 dpp_addr += wr->sg_list[i].length;
1947 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
1948 if (wr->num_sge)
1949 wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
1959 const struct ib_send_wr *wr)
1965 ocrdma_build_ud_hdr(qp, hdr, wr);
1972 return ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
1976 const struct ib_send_wr *wr)
1983 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
1986 ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
1987 ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
1988 ext_rw->lrkey = rdma_wr(wr)->rkey;
1994 const struct ib_send_wr *wr)
1998 u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
2001 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
2006 ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
2007 ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
2008 ext_rw->lrkey = rdma_wr(wr)->rkey;
2024 const struct ib_reg_wr *wr)
2028 struct ocrdma_mr *mr = get_ocrdma_mr(wr->mr);
2039 if (wr->access & IB_ACCESS_LOCAL_WRITE)
2041 if (wr->access & IB_ACCESS_REMOTE_WRITE)
2043 if (wr->access & IB_ACCESS_REMOTE_READ)
2045 hdr->lkey = wr->key;
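
The builder matches above (ocrdma_build_ud_hdr, ocrdma_build_inline_sges, ocrdma_build_send, ocrdma_build_write, ocrdma_build_read, ocrdma_build_reg) share one shape: size the WQE from wr->num_sge, copy wr->sg_list into the hardware SGE layout, and for RDMA opcodes take the remote address and rkey from the rdma_wr() container. The sketch below shows only that generic pattern, assuming nothing beyond <rdma/ib_verbs.h>; struct example_sge and the example_* helpers are hypothetical stand-ins, not the driver's own types.

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

/* Hypothetical hardware SGE layout, split into 32-bit address halves. */
struct example_sge {
	u32 addr_hi;
	u32 addr_lo;
	u32 lrkey;
	u32 len;
};

/* Copy the consumer's scatter/gather list into hardware SGEs and
 * return the total payload length, mirroring the build_sges calls. */
static u32 example_build_sges(struct example_sge *sge,
			      const struct ib_send_wr *wr)
{
	u32 total_len = 0;
	int i;

	for (i = 0; i < wr->num_sge; i++) {
		sge[i].addr_hi = upper_32_bits(wr->sg_list[i].addr);
		sge[i].addr_lo = lower_32_bits(wr->sg_list[i].addr);
		sge[i].lrkey = wr->sg_list[i].lkey;
		sge[i].len = wr->sg_list[i].length;
		total_len += wr->sg_list[i].length;
	}
	return total_len;
}

/* For RDMA READ/WRITE the remote buffer comes from the rdma_wr()
 * container, as in the ext_rw assignments at lines 1986-1988 and
 * 2006-2008 above. */
static void example_set_remote(struct example_sge *ext_rw,
			       const struct ib_send_wr *wr)
{
	ext_rw->addr_lo = lower_32_bits(rdma_wr(wr)->remote_addr);
	ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
	ext_rw->lrkey = rdma_wr(wr)->rkey;
	ext_rw->len = 0;
}
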
2085 int ocrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
2096 *bad_wr = wr;
2100 while (wr) {
2102 (wr->opcode != IB_WR_SEND &&
2103 wr->opcode != IB_WR_SEND_WITH_IMM)) {
2104 *bad_wr = wr;
2109 wr->num_sge > qp->sq.max_sges) {
2110 *bad_wr = wr;
2116 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2118 if (wr->send_flags & IB_SEND_FENCE)
2121 if (wr->send_flags & IB_SEND_SOLICITED)
2125 switch (wr->opcode) {
2128 hdr->immdt = ntohl(wr->ex.imm_data);
2132 ocrdma_build_send(qp, hdr, wr);
2137 hdr->lkey = wr->ex.invalidate_rkey;
2138 status = ocrdma_build_send(qp, hdr, wr);
2142 hdr->immdt = ntohl(wr->ex.imm_data);
2146 status = ocrdma_build_write(qp, hdr, wr);
2149 ocrdma_build_read(qp, hdr, wr);
2157 hdr->lkey = wr->ex.invalidate_rkey;
2160 status = ocrdma_build_reg(qp, hdr, reg_wr(wr));
2167 *bad_wr = wr;
2170 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2174 qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
2182 /* update pointer, counter for next wr */
2184 wr = wr->next;
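
ocrdma_post_send (line 2085 onward) is the dispatcher that drives those builders: it walks the chained work requests, validates wr->num_sge against the send queue, switches on wr->opcode, records wr->wr_id for completion reporting, and hands back the offending request through *bad_wr on the first failure. A minimal sketch of that loop shape, with hypothetical example_* helpers in place of the driver's queue bookkeeping:

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

static bool example_queue_full(struct ib_qp *ibqp)
{
	return false;	/* a real provider checks its ring indices here */
}

static int example_build_wqe(struct ib_qp *ibqp, const struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
	case IB_WR_RDMA_READ:
		return 0;	/* copy wr->sg_list into the hardware WQE here */
	default:
		return -EINVAL;	/* opcode not handled by this sketch */
	}
}

static int example_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			     const struct ib_send_wr **bad_wr)
{
	int status = 0;

	while (wr) {
		if (example_queue_full(ibqp)) {
			*bad_wr = wr;	/* report the first WR not posted */
			status = -ENOMEM;
			break;
		}
		status = example_build_wqe(ibqp, wr);
		if (status) {
			*bad_wr = wr;
			break;
		}
		/* wr->wr_id would be stored per slot so the matching
		 * completion can be reported back to the consumer. */
		wr = wr->next;	/* advance along the chained list */
	}
	return status;
}
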
2198 const struct ib_recv_wr *wr, u16 tag)
2202 if (wr->num_sge)
2203 wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
2214 ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
2218 int ocrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
2229 *bad_wr = wr;
2232 while (wr) {
2234 wr->num_sge > qp->rq.max_sges) {
2235 *bad_wr = wr;
2240 ocrdma_build_rqe(rqe, wr, 0);
2242 qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
2249 /* update pointer, counter for next wr */
2251 wr = wr->next;
2288 int ocrdma_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
2300 while (wr) {
2302 wr->num_sge > srq->rq.max_sges) {
2304 *bad_wr = wr;
2309 ocrdma_build_rqe(rqe, wr, tag);
2311 srq->rqe_wr_id_tbl[tag] = wr->wr_id;
2316 /* update pointer, counter for next wr */
2318 wr = wr->next;
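
The receive side (ocrdma_build_rqe, ocrdma_post_recv, ocrdma_post_srq_recv) repeats the same walk for const struct ib_recv_wr chains: check num_sge against the queue's limit, copy sg_list into an RQE, and remember wr_id so the completion path can return it. Another hedged sketch, with struct example_rq standing in for the driver's receive-queue state:

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

/* Hypothetical receive-queue bookkeeping; entries is a power of two. */
struct example_rq {
	u32 max_sges;	/* most SGEs a single RQE can carry */
	u32 head;	/* producer index into wr_id_tbl */
	u32 entries;	/* number of RQE slots */
	u64 *wr_id_tbl;	/* wr_id per posted slot, indexed by head */
};

static int example_post_recv(struct example_rq *rq,
			     const struct ib_recv_wr *wr,
			     const struct ib_recv_wr **bad_wr)
{
	while (wr) {
		if (wr->num_sge > rq->max_sges) {
			*bad_wr = wr;	/* too many SGEs for one RQE */
			return -EINVAL;
		}
		/* A real provider copies wr->sg_list[0..num_sge-1] into
		 * the RQE at rq->head here (ocrdma additionally tags SRQ
		 * entries so completions can find the right slot). */
		rq->wr_id_tbl[rq->head] = wr->wr_id;
		rq->head = (rq->head + 1) & (rq->entries - 1);
		wr = wr->next;	/* next chained receive WR */
	}
	return 0;
}
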