Lines Matching refs:wqe

141 struct rxe_send_wqe *wqe;
146 wqe = queue_head(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT);
147 *wqe_p = wqe;
150 if (!wqe || wqe->state == wqe_state_posted)
154 if (wqe->state == wqe_state_done)
158 if (wqe->state == wqe_state_error)
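
(The fragments in this listing line up with the completer path of the soft-RoCE driver, drivers/infiniband/sw/rxe/rxe_comp.c.) Lines 141-158 are from get_wqe(): the completer peeks at the oldest entry of the send queue and branches on its state. A minimal stand-alone sketch of that dispatch, assuming simplified types; only the wqe_state_* values are taken from the fragments, everything else is illustrative:

/* wqe states named in the fragments above */
enum wqe_state { wqe_state_posted, wqe_state_pending,
		 wqe_state_done, wqe_state_error };

struct send_wqe { enum wqe_state state; };

/* illustrative outcomes of the completer's first step */
enum first_step { STEP_WAIT, STEP_COMPLETE, STEP_ERROR, STEP_CONTINUE };

static enum first_step classify_head(struct send_wqe *wqe)
{
	/* queue empty or request not sent yet: nothing to complete */
	if (!wqe || wqe->state == wqe_state_posted)
		return STEP_WAIT;

	/* every response for this wqe has already arrived */
	if (wqe->state == wqe_state_done)
		return STEP_COMPLETE;

	/* an error was recorded earlier; report it now */
	if (wqe->state == wqe_state_error)
		return STEP_ERROR;

	/* still pending: keep examining the incoming packet */
	return STEP_CONTINUE;
}
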
174 struct rxe_send_wqe *wqe)
181 diff = psn_compare(pkt->psn, wqe->last_psn);
183 if (wqe->state == wqe_state_pending) {
184 if (wqe->mask & WR_ATOMIC_OR_READ_MASK)
200 if (pkt->psn == wqe->last_psn)
208 } else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) {
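
Lines 174-208 are from check_psn(): the response PSN is compared against wqe->last_psn to decide whether the packet is stale, lands exactly on the end of the request, or runs past it (a positive difference on a pending read/atomic wqe gets special handling). PSNs are 24-bit sequence numbers that wrap, so the comparison has to be done modulo 2^24. A self-contained sketch of such a wrap-aware compare; the driver's psn_compare() may be implemented differently:

#include <stdint.h>

#define BTH_PSN_MASK 0x00ffffff		/* PSNs occupy 24 bits */

/*
 * Wrap-aware comparison of two 24-bit PSNs in the style of serial
 * number arithmetic: >0 if a is "after" b, <0 if "before", 0 if equal,
 * treating anything less than half the 24-bit space as forward distance.
 */
static int psn_compare(uint32_t psn_a, uint32_t psn_b)
{
	uint32_t diff = (psn_a - psn_b) & BTH_PSN_MASK;

	if (diff == 0)
		return 0;
	return diff < (1u << 23) ? 1 : -1;
}
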
217 struct rxe_send_wqe *wqe)
243 if ((pkt->psn == wqe->first_psn &&
246 (wqe->first_psn == wqe->last_psn &&
268 if (wqe->wr.opcode == IB_WR_ATOMIC_WRITE)
275 if (wqe->wr.opcode != IB_WR_RDMA_READ &&
276 wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV &&
277 wqe->wr.opcode != IB_WR_FLUSH) {
278 wqe->status = IB_WC_FATAL_ERR;
290 if (wqe->wr.opcode != IB_WR_ATOMIC_CMP_AND_SWP &&
291 wqe->wr.opcode != IB_WR_ATOMIC_FETCH_AND_ADD)
325 wqe->status = IB_WC_REM_INV_REQ_ERR;
329 wqe->status = IB_WC_REM_ACCESS_ERR;
333 wqe->status = IB_WC_REM_OP_ERR;
338 wqe->status = IB_WC_REM_OP_ERR;
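
Lines 217-338 are from check_ack(): the kind of response received has to match the outstanding request's opcode (e.g. lines 275-291), and a NAK syndrome is translated into the work-completion status that will be reported for the wqe. A condensed sketch of that translation; the NAK class names are illustrative stand-ins for the driver's AETH syndrome decoding, while the statuses mirror the IB_WC_* values visible above:

/* local stand-ins for the kernel's enum ib_wc_status values in the listing */
enum wc_status {
	WC_SUCCESS,
	WC_REM_INV_REQ_ERR,	/* IB_WC_REM_INV_REQ_ERR */
	WC_REM_ACCESS_ERR,	/* IB_WC_REM_ACCESS_ERR  */
	WC_REM_OP_ERR,		/* IB_WC_REM_OP_ERR      */
};

/* illustrative NAK classes decoded from the ACK's extended header */
enum nak_class { NAK_INVALID_REQUEST, NAK_REMOTE_ACCESS, NAK_REMOTE_OP, NAK_OTHER };

static enum wc_status nak_to_wc_status(enum nak_class nak)
{
	switch (nak) {
	case NAK_INVALID_REQUEST:
		return WC_REM_INV_REQ_ERR;
	case NAK_REMOTE_ACCESS:
		return WC_REM_ACCESS_ERR;
	case NAK_REMOTE_OP:
		return WC_REM_OP_ERR;
	default:
		/* unrecognised NAK codes are reported as a remote op error */
		return WC_REM_OP_ERR;
	}
}
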
356 struct rxe_send_wqe *wqe)
361 &wqe->dma, payload_addr(pkt),
364 wqe->status = IB_WC_LOC_PROT_ERR;
368 if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
376 struct rxe_send_wqe *wqe)
383 &wqe->dma, &atomic_orig,
386 wqe->status = IB_WC_LOC_PROT_ERR;
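
do_read() (lines 356-368) and do_atomic() (lines 376-386) both push response data into the request's DMA state through copy_data(): read responses copy payload and shrink the remaining residual (dma.resid), atomic acks copy the 8-byte original value, and a copy failure turns into IB_WC_LOC_PROT_ERR. A toy model of the residual bookkeeping, assuming a flat destination buffer (the real copy_data() walks an SGE list and memory regions):

#include <stdint.h>
#include <string.h>

/* toy DMA state: one flat buffer plus a residual byte count */
struct dma_info {
	uint8_t	*buf;		/* where response data lands */
	size_t	 cur;		/* bytes copied so far */
	size_t	 resid;		/* bytes still expected */
};

/* simplified copy_data(): 0 on success, -1 on a bounds error */
static int copy_data(struct dma_info *dma, const void *payload, size_t len)
{
	if (len > dma->resid)
		return -1;	/* the driver would report IB_WC_LOC_PROT_ERR */

	memcpy(dma->buf + dma->cur, payload, len);
	dma->cur += len;
	dma->resid -= len;
	return 0;
}

/* a read is finished once resid hits zero on the last response packet */
static int read_done(const struct dma_info *dma, int is_last_pkt)
{
	return dma->resid == 0 && is_last_pkt;
}
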
393 static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
402 wc->wr_id = wqe->wr.wr_id;
403 wc->status = wqe->status;
406 uwc->wr_id = wqe->wr.wr_id;
407 uwc->status = wqe->status;
411 if (wqe->status == IB_WC_SUCCESS) {
413 wc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
414 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
415 wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
417 wc->byte_len = wqe->dma.length;
419 uwc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
420 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
421 wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
423 uwc->byte_len = wqe->dma.length;
426 if (wqe->status != IB_WC_WR_FLUSH_ERR)
428 wqe->status);
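
make_send_cqe() (lines 393-428) turns the finished wqe into a completion entry: wr_id and status are always filled in, while the opcode (via wr_to_wc_opcode()), a with-immediate flag for the *_WITH_IMM opcodes, and byte_len are only filled in on success; the driver writes either a kernel struct ib_wc or a userspace struct ib_uverbs_wc depending on who owns the QP. A compressed model using one simplified CQE layout in place of the two real ones:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define STATUS_SUCCESS	0	/* stands in for IB_WC_SUCCESS */
#define FLAG_WITH_IMM	0x1	/* stands in for the immediate-data flag */

/* simplified completion entry (the driver fills ib_wc or ib_uverbs_wc) */
struct cqe {
	uint64_t wr_id;
	int	 status;
	int	 opcode;
	int	 flags;
	uint32_t byte_len;
};

/* the parts of the wqe that the completion is built from */
struct send_wqe {
	uint64_t wr_id;
	int	 status;
	int	 wc_opcode;	/* wr_to_wc_opcode(wr.opcode) in the driver */
	bool	 with_imm;	/* RDMA_WRITE_WITH_IMM or SEND_WITH_IMM */
	uint32_t dma_length;	/* wqe->dma.length in the driver */
};

static void make_send_cqe(const struct send_wqe *wqe, struct cqe *cqe)
{
	memset(cqe, 0, sizeof(*cqe));

	/* identity and outcome are reported even for failed requests */
	cqe->wr_id = wqe->wr_id;
	cqe->status = wqe->status;

	/* the rest is only meaningful when the request succeeded */
	if (wqe->status == STATUS_SUCCESS) {
		cqe->opcode = wqe->wc_opcode;
		if (wqe->with_imm)
			cqe->flags |= FLAG_WITH_IMM;
		cqe->byte_len = wqe->dma_length;
	}
}
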
440 static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
448 (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
449 wqe->status != IB_WC_SUCCESS);
452 make_send_cqe(qp, wqe, &cqe);
459 if (wqe->wr.opcode == IB_WR_SEND ||
460 wqe->wr.opcode == IB_WR_SEND_WITH_IMM ||
461 wqe->wr.opcode == IB_WR_SEND_WITH_INV)
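
do_complete() (lines 440-461) only posts a completion when one is wanted: when the QP signals every work request, when this particular request carried IB_SEND_SIGNALED, or when it failed; only then does it build the CQE via make_send_cqe() and queue it. The predicate is small enough to show directly (the enum and flag below are local stand-ins for ib_sig_type and IB_SEND_SIGNALED):

#include <stdbool.h>

enum sig_type { SIGNAL_ALL_WR, SIGNAL_REQ_WR };	/* mirrors ib_sig_type */

#define SEND_SIGNALED	0x1	/* stands in for IB_SEND_SIGNALED */

/* post a CQE if the QP is all-signaled, the WR asked for one, or it failed */
static bool need_send_completion(enum sig_type sq_sig_type,
				 unsigned int send_flags, bool failed)
{
	return sq_sig_type == SIGNAL_ALL_WR ||
	       (send_flags & SEND_SIGNALED) ||
	       failed;
}
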
501 struct rxe_send_wqe *wqe)
503 if (wqe->has_rd_atomic) {
504 wqe->has_rd_atomic = 0;
515 do_complete(qp, wqe);
525 struct rxe_send_wqe *wqe)
527 if (pkt && wqe->state == wqe_state_pending) {
528 if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) {
529 qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK;
539 do_complete(qp, wqe);
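
complete_ack() (lines 501-515) gives back the read/atomic resource the request had reserved (has_rd_atomic) before completing the wqe, and complete_wqe() (lines 525-539) first records that everything up to the request's last PSN has been accounted for. That bookkeeping is a masked increment, since PSNs wrap at 24 bits; a minimal illustration with simplified field names:

#include <stdint.h>

#define BTH_PSN_MASK 0x00ffffff

struct comp_info { uint32_t psn; };	/* completer's "acknowledged up to" PSN */

/*
 * Once a pending wqe's last packet has been acknowledged (and, in the
 * driver, a psn_compare() check confirms last_psn is not behind comp.psn),
 * move the completer PSN just past the request, wrapping at 24 bits.
 */
static void advance_comp_psn(struct comp_info *comp, uint32_t last_psn)
{
	comp->psn = (last_psn + 1) & BTH_PSN_MASK;
}
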
556 /* complete send wqe with flush error */
557 static int flush_send_wqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
565 uwc->wr_id = wqe->wr.wr_id;
569 wc->wr_id = wqe->wr.wr_id;
582 * if unable to complete a wqe, i.e. cq is full, stop
587 struct rxe_send_wqe *wqe;
595 while ((wqe = queue_head(q, q->type))) {
597 err = flush_send_wqe(qp, wqe);
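
When the QP can no longer make forward progress (it has been flushed into the error state, for example), the wqes still sitting in the send queue are drained (lines 556-597): each is completed with IB_WC_WR_FLUSH_ERR, and, per the comment at line 582, the loop stops if a completion cannot be posted because the CQ is full. A toy ring-buffer version of that drain loop; the driver uses its own queue_head()/queue_advance_consumer() helpers and a notify flag:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define SQ_DEPTH 8u

struct send_wqe { uint64_t wr_id; };

/* toy send queue: a ring of wqes between a consumer and a producer index */
struct send_queue {
	struct send_wqe wqe[SQ_DEPTH];
	unsigned int head;	/* consumer */
	unsigned int tail;	/* producer */
};

static struct send_wqe *queue_head(struct send_queue *q)
{
	return q->head == q->tail ? NULL : &q->wqe[q->head % SQ_DEPTH];
}

/* post a flush-error CQE for this wqe; false if the CQ has no room left */
static bool flush_send_wqe(struct send_wqe *wqe)
{
	(void)wqe;	/* the driver fills a CQE with IB_WC_WR_FLUSH_ERR here */
	return true;
}

/* drain the queue, stopping early if a completion could not be posted */
static void flush_send_queue(struct send_queue *q, bool notify)
{
	struct send_wqe *wqe;

	while ((wqe = queue_head(q))) {
		if (notify && !flush_send_wqe(wqe))
			break;
		q->head++;	/* queue_advance_consumer() in the driver */
	}
}
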
641 struct rxe_send_wqe *wqe = NULL;
687 state = get_wqe(qp, pkt, &wqe);
691 state = check_psn(qp, pkt, wqe);
695 state = check_ack(qp, pkt, wqe);
699 state = do_read(qp, pkt, wqe);
703 state = do_atomic(qp, pkt, wqe);
707 if (wqe->state == wqe_state_pending &&
708 wqe->last_psn == pkt->psn)
715 state = complete_ack(qp, pkt, wqe);
719 state = complete_wqe(qp, pkt, wqe);
743 if (qp->comp.timeout_retry && wqe) {
761 if (!wqe || (wqe->state == wqe_state_posted))
794 wqe->status = IB_WC_RETRY_EXC_ERR;
818 wqe->status = IB_WC_RNR_RETRY_EXC_ERR;
824 WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
825 do_complete(qp, wqe);
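
The remainder of the listing (lines 641-825) is the completer's main loop, rxe_completer(): a state machine that fetches the oldest wqe, checks the response PSN and the ACK/NAK, copies read or atomic response data, and completes the wqe; when transport or RNR retries are exhausted, the wqe is finished with IB_WC_RETRY_EXC_ERR or IB_WC_RNR_RETRY_EXC_ERR through do_complete(). A skeleton of that dispatch loop: the state names are illustrative (the driver's own enum uses COMPST_* identifiers), the helper prototypes are stand-ins for the static functions whose calls appear above, and error/retry states are elided:

#include <stddef.h>

struct rxe_qp;			/* opaque here; defined by the driver */
struct rxe_pkt_info;
struct rxe_send_wqe;

/* illustrative completer states */
enum comp_state {
	COMP_GET_WQE, COMP_CHECK_PSN, COMP_CHECK_ACK,
	COMP_READ, COMP_ATOMIC, COMP_COMP_ACK, COMP_COMP_WQE,
	COMP_EXIT,
};

/* stand-in prototypes for the helpers called in the listing */
enum comp_state get_wqe(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
			struct rxe_send_wqe **wqe_p);
enum comp_state check_psn(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
			  struct rxe_send_wqe *wqe);
enum comp_state check_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
			  struct rxe_send_wqe *wqe);
enum comp_state do_read(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
			struct rxe_send_wqe *wqe);
enum comp_state do_atomic(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
			  struct rxe_send_wqe *wqe);
enum comp_state complete_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
			     struct rxe_send_wqe *wqe);
enum comp_state complete_wqe(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
			     struct rxe_send_wqe *wqe);

/* skeleton of the dispatch loop inside rxe_completer() */
static void completer_loop(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	struct rxe_send_wqe *wqe = NULL;
	enum comp_state state = COMP_GET_WQE;

	while (state != COMP_EXIT) {
		switch (state) {
		case COMP_GET_WQE:
			state = get_wqe(qp, pkt, &wqe);
			break;
		case COMP_CHECK_PSN:
			state = check_psn(qp, pkt, wqe);
			break;
		case COMP_CHECK_ACK:
			state = check_ack(qp, pkt, wqe);
			break;
		case COMP_READ:
			state = do_read(qp, pkt, wqe);
			break;
		case COMP_ATOMIC:
			state = do_atomic(qp, pkt, wqe);
			break;
		case COMP_COMP_ACK:
			state = complete_ack(qp, pkt, wqe);
			break;
		case COMP_COMP_WQE:
			state = complete_wqe(qp, pkt, wqe);
			break;
		default:
			/* timeout, retry and error handling elided */
			state = COMP_EXIT;
			break;
		}
	}
}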