Lines matching refs: qp (rxe completer; apparently drivers/infiniband/sw/rxe/rxe_comp.c)

117 struct rxe_qp *qp = from_timer(qp, t, retrans_timer);
120 rxe_dbg_qp(qp, "retransmit timer fired\n");
122 spin_lock_irqsave(&qp->state_lock, flags);
123 if (qp->valid) {
124 qp->comp.timeout = 1;
125 rxe_sched_task(&qp->comp.task);
127 spin_unlock_irqrestore(&qp->state_lock, flags);
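The fragments at file lines 117-127 look like the completer's retransmit timer callback. A possible reconstruction; the function name, the local variable and the braces are filled in here as assumptions:

void retransmit_timer(struct timer_list *t)
{
	/* recover the owning QP from the embedded timer */
	struct rxe_qp *qp = from_timer(qp, t, retrans_timer);
	unsigned long flags;

	rxe_dbg_qp(qp, "retransmit timer fired\n");

	spin_lock_irqsave(&qp->state_lock, flags);
	if (qp->valid) {
		/* flag the timeout and let the completer task handle it */
		qp->comp.timeout = 1;
		rxe_sched_task(&qp->comp.task);
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);
}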
130 void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
134 skb_queue_tail(&qp->resp_pkts, skb);
136 must_sched = skb_queue_len(&qp->resp_pkts) > 1;
141 rxe_sched_task(&qp->comp.task);
143 rxe_run_task(&qp->comp.task);
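File lines 130-143 are consistent with the routine that queues an incoming response packet for the completer and decides whether to run the completer inline or defer it to the work task. A sketch of that shape; the branch structure and the skipped lines are assumptions:

void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
	int must_sched;

	skb_queue_tail(&qp->resp_pkts, skb);

	/* defer to the work task if packets are already backed up */
	must_sched = skb_queue_len(&qp->resp_pkts) > 1;

	/* (intervening lines did not match "qp" and are not shown in the listing) */

	if (must_sched)
		rxe_sched_task(&qp->comp.task);
	else
		rxe_run_task(&qp->comp.task);
}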
146 static inline enum comp_state get_wqe(struct rxe_qp *qp,
155 wqe = queue_head(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT);
174 static inline void reset_retry_counters(struct rxe_qp *qp)
176 qp->comp.retry_cnt = qp->attr.retry_cnt;
177 qp->comp.rnr_retry = qp->attr.rnr_retry;
178 qp->comp.started_retry = 0;
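File lines 174-178 give reset_retry_counters() almost in full; only the braces are added here:

static inline void reset_retry_counters(struct rxe_qp *qp)
{
	/* reload the retry budgets from the QP attributes */
	qp->comp.retry_cnt = qp->attr.retry_cnt;
	qp->comp.rnr_retry = qp->attr.rnr_retry;
	qp->comp.started_retry = 0;
}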
181 static inline enum comp_state check_psn(struct rxe_qp *qp,
196 reset_retry_counters(qp);
204 diff = psn_compare(pkt->psn, qp->comp.psn);
212 (qp->comp.opcode == IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST ||
213 qp->comp.opcode == IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE))
224 static inline enum comp_state check_ack(struct rxe_qp *qp,
230 struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
233 switch (qp->comp.opcode) {
290 reset_retry_counters(qp);
302 reset_retry_counters(qp);
309 reset_retry_counters(qp);
322 if (psn_compare(pkt->psn, qp->comp.psn) > 0) {
325 qp->comp.psn = pkt->psn;
326 if (qp->req.wait_psn) {
327 qp->req.wait_psn = 0;
328 rxe_sched_task(&qp->req.task);
346 rxe_dbg_qp(qp, "unexpected nak %x\n", syn);
357 rxe_dbg_qp(qp, "unexpected opcode\n");
363 static inline enum comp_state do_read(struct rxe_qp *qp,
369 ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
383 static inline enum comp_state do_atomic(struct rxe_qp *qp,
391 ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
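do_read() and do_atomic() (file lines 363-391) both copy the response payload into the requester's local buffers via copy_data() with IB_ACCESS_LOCAL_WRITE. A hedged sketch of do_read(); everything after the copy_data() call, including the COMPST_* return values, is an assumption based on the surrounding completer state machine:

static inline enum comp_state do_read(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt,
				      struct rxe_send_wqe *wqe)
{
	int ret;

	/* copy the RDMA READ response payload into the WQE's local DMA state */
	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
			&wqe->dma, payload_addr(pkt),
			payload_size(pkt), RXE_TO_MR_OBJ);
	if (ret) {
		wqe->status = IB_WC_LOC_PROT_ERR;
		return COMPST_ERROR;
	}

	/* assumed: complete once the whole read response has arrived */
	if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
		return COMPST_COMP_ACK;

	return COMPST_UPDATE_COMP;
}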
402 static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
410 if (!qp->is_user) {
413 wc->qp = &qp->ibqp;
417 uwc->qp_num = qp->ibqp.qp_num;
421 if (!qp->is_user) {
436 rxe_err_qp(qp, "non-flush error status = %d\n",
449 static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
451 struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
456 post = ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
461 make_send_cqe(qp, wqe, &cqe);
463 queue_advance_consumer(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT);
466 rxe_cq_post(qp->scq, &cqe, 0);
477 if (qp->req.wait_fence) {
478 qp->req.wait_fence = 0;
479 rxe_sched_task(&qp->req.task);
483 static void comp_check_sq_drain_done(struct rxe_qp *qp)
487 spin_lock_irqsave(&qp->state_lock, flags);
488 if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
489 if (qp->attr.sq_draining && qp->comp.psn == qp->req.psn) {
490 qp->attr.sq_draining = 0;
491 spin_unlock_irqrestore(&qp->state_lock, flags);
493 if (qp->ibqp.event_handler) {
496 ev.device = qp->ibqp.device;
497 ev.element.qp = &qp->ibqp;
499 qp->ibqp.event_handler(&ev,
500 qp->ibqp.qp_context);
505 spin_unlock_irqrestore(&qp->state_lock, flags);
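comp_check_sq_drain_done() (file lines 483-505) clears sq_draining and raises the SQ-drained event once the completer PSN has caught up with the requester PSN. A reconstruction; the local struct ib_event, the IB_EVENT_SQ_DRAINED value and the early return are filled in as assumptions:

static void comp_check_sq_drain_done(struct rxe_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
		if (qp->attr.sq_draining && qp->comp.psn == qp->req.psn) {
			qp->attr.sq_draining = 0;
			spin_unlock_irqrestore(&qp->state_lock, flags);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;	/* assumed */
				qp->ibqp.event_handler(&ev,
						       qp->ibqp.qp_context);
			}
			return;
		}
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);
}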
508 static inline enum comp_state complete_ack(struct rxe_qp *qp,
514 atomic_inc(&qp->req.rd_atomic);
515 if (qp->req.need_rd_atomic) {
516 qp->comp.timeout_retry = 0;
517 qp->req.need_rd_atomic = 0;
518 rxe_sched_task(&qp->req.task);
522 comp_check_sq_drain_done(qp);
524 do_complete(qp, wqe);
526 if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
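complete_ack() (file lines 508-526) returns an outstanding read/atomic credit, wakes the requester if it was blocked waiting for that credit, checks the drain condition, and completes the WQE. A sketch; the has_rd_atomic guard and the COMPST_* return values are assumptions:

static inline enum comp_state complete_ack(struct rxe_qp *qp,
					   struct rxe_pkt_info *pkt,
					   struct rxe_send_wqe *wqe)
{
	if (wqe->has_rd_atomic) {		/* assumed guard */
		wqe->has_rd_atomic = 0;
		atomic_inc(&qp->req.rd_atomic);
		if (qp->req.need_rd_atomic) {
			qp->comp.timeout_retry = 0;
			qp->req.need_rd_atomic = 0;
			rxe_sched_task(&qp->req.task);
		}
	}

	comp_check_sq_drain_done(qp);

	do_complete(qp, wqe);

	if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
		return COMPST_UPDATE_COMP;	/* assumed */
	else
		return COMPST_DONE;		/* assumed */
}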
532 static inline enum comp_state complete_wqe(struct rxe_qp *qp,
537 if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) {
538 qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK;
539 qp->comp.opcode = -1;
542 if (qp->req.wait_psn) {
543 qp->req.wait_psn = 0;
544 rxe_sched_task(&qp->req.task);
548 do_complete(qp, wqe);
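complete_wqe() (file lines 532-548) advances the completion PSN past the WQE's last PSN, resets the tracked opcode, wakes the requester if it was waiting on that PSN, then completes the WQE. A sketch; the outer pkt/wqe-state guard and the return value are assumptions:

static inline enum comp_state complete_wqe(struct rxe_qp *qp,
					   struct rxe_pkt_info *pkt,
					   struct rxe_send_wqe *wqe)
{
	if (pkt && wqe->state == wqe_state_pending) {	/* assumed guard */
		if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) {
			qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK;
			qp->comp.opcode = -1;
		}

		if (qp->req.wait_psn) {
			qp->req.wait_psn = 0;
			rxe_sched_task(&qp->req.task);
		}
	}

	do_complete(qp, wqe);

	return COMPST_GET_WQE;	/* assumed next state */
}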
554 static void drain_resp_pkts(struct rxe_qp *qp)
558 while ((skb = skb_dequeue(&qp->resp_pkts))) {
559 rxe_put(qp);
561 ib_device_put(qp->ibqp.device);
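drain_resp_pkts() (file lines 554-561) drops every queued response packet and releases the references taken when each packet was queued. A reconstruction; the kfree_skb() call is assumed:

static void drain_resp_pkts(struct rxe_qp *qp)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&qp->resp_pkts))) {
		rxe_put(qp);			/* drop the per-packet QP ref */
		kfree_skb(skb);			/* assumed: free the packet itself */
		ib_device_put(qp->ibqp.device);	/* drop the per-packet device ref */
	}
}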
566 static int flush_send_wqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
573 if (qp->is_user) {
576 uwc->qp_num = qp->ibqp.qp_num;
580 wc->qp = &qp->ibqp;
583 err = rxe_cq_post(qp->scq, &cqe, 0);
585 rxe_dbg_cq(qp->scq, "post cq failed, err = %d\n", err);
594 static void flush_send_queue(struct rxe_qp *qp, bool notify)
597 struct rxe_queue *q = qp->sq.queue;
601 if (!qp->sq.queue)
606 err = flush_send_wqe(qp, wqe);
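flush_send_wqe() and flush_send_queue() (file lines 566-606) generate IB_WC_WR_FLUSH_ERR completions for the remaining send WQEs when the QP is reset or moves to the error state. A sketch of both; the rxe_cqe union members, the flush status value and the consumer-advance loop are assumptions:

static int flush_send_wqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	struct rxe_cqe cqe = {};
	struct ib_wc *wc = &cqe.ibwc;		/* assumed union members */
	struct ib_uverbs_wc *uwc = &cqe.uibwc;
	int err;

	if (qp->is_user) {
		uwc->wr_id = wqe->wr.wr_id;
		uwc->status = IB_WC_WR_FLUSH_ERR;
		uwc->qp_num = qp->ibqp.qp_num;
	} else {
		wc->wr_id = wqe->wr.wr_id;
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->qp = &qp->ibqp;
	}

	err = rxe_cq_post(qp->scq, &cqe, 0);
	if (err)
		rxe_dbg_cq(qp->scq, "post cq failed, err = %d\n", err);

	return err;
}

static void flush_send_queue(struct rxe_qp *qp, bool notify)
{
	struct rxe_send_wqe *wqe;
	struct rxe_queue *q = qp->sq.queue;
	int err;

	/* send queue was never created; nothing to flush */
	if (!qp->sq.queue)
		return;

	while ((wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT))) {
		if (notify) {
			err = flush_send_wqe(qp, wqe);
			if (err)
				notify = 0;	/* stop posting once the CQ rejects */
		}
		queue_advance_consumer(q, QUEUE_TYPE_FROM_CLIENT);
	}
}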
617 struct rxe_qp *qp = pkt->qp;
618 struct ib_device *dev = qp->ibqp.device;
621 rxe_put(qp);
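File lines 617-621 drop the packet's QP and device references once a packet has been consumed, which matches a small free-packet helper. A sketch; the function name, the PKT_TO_SKB() helper and the kfree_skb() call are assumptions:

static void free_pkt(struct rxe_pkt_info *pkt)		/* name assumed */
{
	struct sk_buff *skb = PKT_TO_SKB(pkt);		/* assumed helper */
	struct rxe_qp *qp = pkt->qp;
	struct ib_device *dev = qp->ibqp.device;

	kfree_skb(skb);
	rxe_put(qp);
	ib_device_put(dev);
}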
633 static void reset_retry_timer(struct rxe_qp *qp)
637 if (qp_type(qp) == IB_QPT_RC && qp->qp_timeout_jiffies) {
638 spin_lock_irqsave(&qp->state_lock, flags);
639 if (qp_state(qp) >= IB_QPS_RTS &&
640 psn_compare(qp->req.psn, qp->comp.psn) > 0)
641 mod_timer(&qp->retrans_timer,
642 jiffies + qp->qp_timeout_jiffies);
643 spin_unlock_irqrestore(&qp->state_lock, flags);
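reset_retry_timer() (file lines 633-643) re-arms the retransmit timer for RC QPs that still have unacknowledged PSNs outstanding. A reconstruction; only the braces and the local variable are filled in:

static void reset_retry_timer(struct rxe_qp *qp)
{
	unsigned long flags;

	if (qp_type(qp) == IB_QPT_RC && qp->qp_timeout_jiffies) {
		spin_lock_irqsave(&qp->state_lock, flags);
		/* only re-arm while RTS/SQD and requests are still outstanding */
		if (qp_state(qp) >= IB_QPS_RTS &&
		    psn_compare(qp->req.psn, qp->comp.psn) > 0)
			mod_timer(&qp->retrans_timer,
				  jiffies + qp->qp_timeout_jiffies);
		spin_unlock_irqrestore(&qp->state_lock, flags);
	}
}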
647 int rxe_completer(struct rxe_qp *qp)
649 struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
657 spin_lock_irqsave(&qp->state_lock, flags);
658 if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
659 qp_state(qp) == IB_QPS_RESET) {
660 bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR);
662 drain_resp_pkts(qp);
663 flush_send_queue(qp, notify);
664 spin_unlock_irqrestore(&qp->state_lock, flags);
667 spin_unlock_irqrestore(&qp->state_lock, flags);
669 if (qp->comp.timeout) {
670 qp->comp.timeout_retry = 1;
671 qp->comp.timeout = 0;
673 qp->comp.timeout_retry = 0;
676 if (qp->req.need_retry)
682 rxe_dbg_qp(qp, "state = %s\n", comp_state_name[state]);
685 skb = skb_dequeue(&qp->resp_pkts);
688 qp->comp.timeout_retry = 0;
694 state = get_wqe(qp, pkt, &wqe);
698 state = check_psn(qp, pkt, wqe);
702 state = check_ack(qp, pkt, wqe);
706 state = do_read(qp, pkt, wqe);
710 state = do_atomic(qp, pkt, wqe);
722 state = complete_ack(qp, pkt, wqe);
726 state = complete_wqe(qp, pkt, wqe);
731 qp->comp.opcode = -1;
733 qp->comp.opcode = pkt->opcode;
735 if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
736 qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
738 if (qp->req.wait_psn) {
739 qp->req.wait_psn = 0;
740 rxe_sched_task(&qp->req.task);
750 if (qp->comp.timeout_retry && wqe) {
755 reset_retry_timer(qp);
774 if (qp->comp.started_retry &&
775 !qp->comp.timeout_retry)
778 if (qp->comp.retry_cnt > 0) {
779 if (qp->comp.retry_cnt != 7)
780 qp->comp.retry_cnt--;
786 if (psn_compare(qp->req.psn,
787 qp->comp.psn) > 0) {
793 qp->req.need_retry = 1;
794 qp->comp.started_retry = 1;
795 rxe_sched_task(&qp->req.task);
808 if (qp->comp.rnr_retry > 0) {
809 if (qp->comp.rnr_retry != 7)
810 qp->comp.rnr_retry--;
815 qp->req.wait_for_rnr_timer = 1;
816 rxe_dbg_qp(qp, "set rnr nak timer\n");
818 mod_timer(&qp->rnr_nak_timer,
832 do_complete(qp, wqe);
833 rxe_qp_error(qp);
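rxe_completer() (file lines 647-833) is the completer state machine itself; the listing only shows its references to qp, so the retry and RNR handling cannot be reconstructed from these fragments alone. The entry path at file lines 657-667 is nearly complete, though: if the QP is invalid, in ERROR or in RESET, the response queue is drained and the send queue is flushed (posting flush completions only in the ERROR case) before the task exits. A sketch of that entry path; the exit label is an assumption:

	/* inside rxe_completer(qp), before the per-packet state machine */
	spin_lock_irqsave(&qp->state_lock, flags);
	if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
	    qp_state(qp) == IB_QPS_RESET) {
		bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR);

		drain_resp_pkts(qp);
		flush_send_queue(qp, notify);
		spin_unlock_irqrestore(&qp->state_lock, flags);
		goto exit;	/* assumed label */
	}
	spin_unlock_irqrestore(&qp->state_lock, flags);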