Lines Matching refs:qp

8 #include "qp.h"
114 static void hfi1_init_trdma_req(struct rvt_qp *qp,
116 static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx);
118 static void hfi1_add_tid_reap_timer(struct rvt_qp *qp);
119 static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp);
120 static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp);
121 static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp);
123 static int make_tid_rdma_ack(struct rvt_qp *qp,
126 static void hfi1_do_tid_send(struct rvt_qp *qp);
130 struct rvt_qp *qp, u32 psn, int diff, bool fecn);
143 static void tid_rdma_schedule_ack(struct rvt_qp *qp)
145 struct hfi1_qp_priv *priv = qp->priv;
148 hfi1_schedule_tid_send(qp);
151 static void tid_rdma_trigger_ack(struct rvt_qp *qp)
153 validate_r_tid_ack(qp->priv);
154 tid_rdma_schedule_ack(qp);
160 (((u64)p->qp & TID_OPFN_QP_CTXT_MASK) <<
162 ((((u64)p->qp >> 16) & TID_OPFN_QP_KDETH_MASK) <<
185 p->qp =
193 void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p)
195 struct hfi1_qp_priv *priv = qp->priv;
197 p->qp = (RVT_KDETH_QP_PREFIX << 16) | priv->rcd->ctxt;
202 p->timeout = qp->timeout;
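The encode/init fragments above fold the local QP/KDETH identity into a single 64-bit OPFN value exchanged with the peer. Below is a minimal stand-alone sketch of that kind of packing; the masks and shifts are placeholders for illustration, not the driver's actual TID_OPFN_* layout.

#include <stdint.h>

#define SK_QP_CTXT_MASK    0xffULL
#define SK_QP_CTXT_SHIFT   56
#define SK_QP_KDETH_MASK   0xffULL
#define SK_QP_KDETH_SHIFT  48

/* Pack the context (low bits of p->qp) and the KDETH prefix (bits 16+)
 * into one 64-bit OPFN payload, mirroring the shape of the encode above. */
static uint64_t opfn_pack_sketch(uint32_t qp)
{
	return (((uint64_t)qp & SK_QP_CTXT_MASK) << SK_QP_CTXT_SHIFT) |
	       ((((uint64_t)qp >> 16) & SK_QP_KDETH_MASK) << SK_QP_KDETH_SHIFT);
}

/* Recover the same two fields from the packed value. */
static uint32_t opfn_unpack_qp_sketch(uint64_t data)
{
	return (uint32_t)(((data >> SK_QP_CTXT_SHIFT) & SK_QP_CTXT_MASK) |
			  (((data >> SK_QP_KDETH_SHIFT) & SK_QP_KDETH_MASK) << 16));
}
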
206 bool tid_rdma_conn_req(struct rvt_qp *qp, u64 *data)
208 struct hfi1_qp_priv *priv = qp->priv;
214 bool tid_rdma_conn_reply(struct rvt_qp *qp, u64 data)
216 struct hfi1_qp_priv *priv = qp->priv;
246 trace_hfi1_opfn_param(qp, 0, &priv->tid_rdma.local);
247 trace_hfi1_opfn_param(qp, 1, remote);
257 priv->pkts_ps = (u16)rvt_div_mtu(qp, remote->max_len);
269 bool tid_rdma_conn_resp(struct rvt_qp *qp, u64 *data)
273 ret = tid_rdma_conn_reply(qp, *data);
281 (void)tid_rdma_conn_req(qp, data);
285 void tid_rdma_conn_error(struct rvt_qp *qp)
287 struct hfi1_qp_priv *priv = qp->priv;
311 * qp_to_rcd - determine the receive context used by a qp
313 * @qp: the qp
316 * with a qp's qpn.
321 struct rvt_qp *qp)
331 if (qp->ibqp.qp_num == 0)
334 ctxt = hfi1_get_qp_map(dd, qp->ibqp.qp_num >> dd->qos_shift);
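qp_to_rcd() above resolves the receive context for a QP from its QP number: QP 0 maps to context 0, everything else strips the QoS bits and goes through a per-device map (hfi1_get_qp_map). A minimal sketch of that mapping, with an assumed map size and illustrative names:

#include <stdint.h>

struct dev_sketch {
	unsigned int qos_shift;		/* QoS bits stripped before the lookup */
	uint8_t qp_map[256];		/* assumed map size */
};

static unsigned int qpn_to_ctxt_sketch(const struct dev_sketch *dd, uint32_t qpn)
{
	if (qpn == 0)			/* QP0 maps to the first context */
		return 0;
	return dd->qp_map[(qpn >> dd->qos_shift) & 0xff];
}
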
338 int hfi1_qp_priv_init(struct rvt_dev_info *rdi, struct rvt_qp *qp,
341 struct hfi1_qp_priv *qpriv = qp->priv;
344 qpriv->rcd = qp_to_rcd(rdi, qp);
376 for (i = 0; i < qp->s_size; i++) {
378 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);
385 hfi1_init_trdma_req(qp, &priv->tid_req);
397 hfi1_init_trdma_req(qp, &priv->tid_req);
398 priv->tid_req.e.ack = &qp->s_ack_queue[i];
406 qp->s_ack_queue[i].priv = priv;
413 void hfi1_qp_priv_tid_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
415 struct hfi1_qp_priv *qpriv = qp->priv;
419 if (qp->ibqp.qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) {
420 for (i = 0; i < qp->s_size; i++) {
421 wqe = rvt_get_swqe_ptr(qp, i);
426 struct hfi1_ack_priv *priv = qp->s_ack_queue[i].priv;
431 qp->s_ack_queue[i].priv = NULL;
444 * routines: the qp s_lock and the exp_lock.
447 * the send engine, the qp s_lock is already held.
455 * Any qp in the wait list will have the qp reference count held
456 * to hold the qp in memory.
489 * @qp: the head of the qp being processed
493 * list is the indicated qp.
495 * Must hold the qp s_lock and the exp_lock.
500 * 2. The indicated qp is at the head of the list and the
501 * HFI1_S_WAIT_TID_SPACE bit is set in qp->s_flags.
505 struct tid_queue *queue, struct rvt_qp *qp)
506 __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
511 lockdep_assert_held(&qp->s_lock);
514 if (!fqp || (fqp == qp && (qp->s_flags & HFI1_S_WAIT_TID_SPACE)))
521 * dequeue_tid_waiter - dequeue the qp from the list
524 * @qp: the qp to remove from the wait list
526 * This routine removes the indicated qp from the
532 * Must hold the qp s_lock and the rcd exp_lock.
538 struct tid_queue *queue, struct rvt_qp *qp)
539 __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
541 struct hfi1_qp_priv *priv = qp->priv;
543 lockdep_assert_held(&qp->s_lock);
548 qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE;
550 rvt_put_qp(qp);
557 * @qp: the qp
559 * The qp is inserted at the tail of the rcd
562 * Must hold the qp s_lock and the exp_lock.
565 struct tid_queue *queue, struct rvt_qp *qp)
566 __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
568 struct hfi1_qp_priv *priv = qp->priv;
570 lockdep_assert_held(&qp->s_lock);
573 qp->s_flags |= HFI1_S_WAIT_TID_SPACE;
577 trace_hfi1_qpsleep(qp, HFI1_S_WAIT_TID_SPACE);
578 rvt_get_qp(qp);
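The wait-queue fragments above (kernel_tid_waiters, dequeue_tid_waiter, queue_qp_for_tid_wait) share one pattern: on enqueue set HFI1_S_WAIT_TID_SPACE and take a qp reference so the qp stays in memory while queued; on dequeue clear the flag and drop the reference; and only the qp at the head of the queue is allowed to make progress. A simplified, self-contained sketch of that pattern follows; the types and names are stand-ins, not the driver's.

#include <stdbool.h>

#define WAIT_TID_SPACE 0x1u

struct waiting_qp {
	unsigned int s_flags;
	int refcount;
	struct waiting_qp *next;
};

struct tid_wait_queue {
	struct waiting_qp *head, *tail;
};

/* kernel_tid_waiters(): proceed only when the queue is empty, or this
 * qp is first in line and actually waiting for TID space. */
static bool may_proceed(const struct tid_wait_queue *q,
			const struct waiting_qp *qp)
{
	return !q->head ||
	       (q->head == qp && (qp->s_flags & WAIT_TID_SPACE));
}

/* queue_qp_for_tid_wait(): flag, reference, tail insertion. */
static void queue_for_tid_wait(struct tid_wait_queue *q, struct waiting_qp *qp)
{
	qp->s_flags |= WAIT_TID_SPACE;
	qp->refcount++;				/* rvt_get_qp() in the driver */
	qp->next = NULL;
	if (q->tail)
		q->tail->next = qp;
	else
		q->head = qp;
	q->tail = qp;
}

/* dequeue_tid_waiter(): reverse all three steps.  For brevity only head
 * removal is handled, which is the case gated by may_proceed(). */
static void dequeue_tid_waiter_sketch(struct tid_wait_queue *q,
				      struct waiting_qp *qp)
{
	if (q->head != qp)
		return;
	q->head = qp->next;
	if (!q->head)
		q->tail = NULL;
	qp->s_flags &= ~WAIT_TID_SPACE;
	qp->refcount--;				/* rvt_put_qp() in the driver */
}
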
584 * @qp: the qp
586 * This is a private entrance to schedule the qp
587 * assuming the caller is holding the qp->s_lock.
589 static void __trigger_tid_waiter(struct rvt_qp *qp)
590 __must_hold(&qp->s_lock)
592 lockdep_assert_held(&qp->s_lock);
593 if (!(qp->s_flags & HFI1_S_WAIT_TID_SPACE))
595 trace_hfi1_qpwakeup(qp, HFI1_S_WAIT_TID_SPACE);
596 hfi1_schedule_send(qp);
600 * tid_rdma_schedule_tid_wakeup - schedule wakeup for a qp
601 * @qp: the qp
603 * trigger a schedule for a waiting qp in a deadlock
604 * safe manner. The qp reference is held prior
607 * If the qp trigger was already scheduled (!rval)
611 static void tid_rdma_schedule_tid_wakeup(struct rvt_qp *qp)
619 if (!qp)
622 priv = qp->priv;
623 ibp = to_iport(qp->ibqp.device, qp->port_num);
625 dd = dd_from_ibdev(qp->ibqp.device);
633 rvt_put_qp(qp);
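tid_rdma_schedule_tid_wakeup() above defers the wakeup to a work item so it is deadlock safe; per the comment, the caller's qp reference is dropped when the trigger was already scheduled, otherwise the queued work item consumes it later. A small sketch of that reference hand-off, with a stand-in for the driver's queue_work() call:

#include <stdbool.h>

struct wake_qp {
	int refcount;
	bool trigger_pending;
};

/* Stand-in for queue_work(): returns true only if newly queued. */
static bool queue_trigger_work(struct wake_qp *qp)
{
	if (qp->trigger_pending)
		return false;		/* already scheduled */
	qp->trigger_pending = true;	/* the work item will run later */
	return true;
}

static void schedule_tid_wakeup_sketch(struct wake_qp *qp)
{
	if (!qp)
		return;
	if (!queue_trigger_work(qp))
		qp->refcount--;		/* rvt_put_qp(): nothing new was queued */
}
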
640 * Complete the off-qp trigger processing by directly
647 struct rvt_qp *qp;
651 qp = priv->owner;
652 spin_lock_irq(&qp->s_lock);
653 if (qp->s_flags & HFI1_S_WAIT_TID_SPACE) {
654 spin_unlock_irq(&qp->s_lock);
657 spin_unlock_irq(&qp->s_lock);
659 rvt_put_qp(qp);
665 * This is called when resetting a qp to
669 static void _tid_rdma_flush_wait(struct rvt_qp *qp, struct tid_queue *queue)
670 __must_hold(&qp->s_lock)
674 if (!qp)
676 lockdep_assert_held(&qp->s_lock);
677 priv = qp->priv;
678 qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE;
682 qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE;
684 rvt_put_qp(qp);
689 void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp)
690 __must_hold(&qp->s_lock)
692 struct hfi1_qp_priv *priv = qp->priv;
694 _tid_rdma_flush_wait(qp, &priv->rcd->flow_queue);
695 _tid_rdma_flush_wait(qp, &priv->rcd->rarr_queue);
780 int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp)
782 struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
793 if (kernel_tid_waiters(rcd, &rcd->flow_queue, qp))
807 dequeue_tid_waiter(rcd, &rcd->flow_queue, qp);
815 queue_qp_for_tid_wait(rcd, &rcd->flow_queue, qp);
820 void hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp)
822 struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
840 if (fqp == qp) {
898 trace_hfi1_tid_flow_page(flow->req->qp, flow, 0, 0, 0, vaddr);
901 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 0, 0,
935 trace_hfi1_tid_pageset(flow->req->qp, setcount,
1034 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 0, v0);
1037 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 1, v1);
1183 trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head,
1192 if (flow->req->qp->pmtu == enum_to_mtu(OPA_MTU_4096))
1220 trace_hfi1_tid_node_add(flow->req->qp, s, flow->tnode_cnt - 1,
1293 trace_hfi1_msg_alloc_tids(flow->req->qp, " insufficient tids: needed ",
1308 u32 pmtu_pg = flow->req->qp->pmtu >> PAGE_SHIFT;
1347 flow->req->qp, flow->tidcnt - 1,
1416 trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head, flow);
1441 * function uses qp, rcd and seg_len members of @req. In the absence of errors,
1452 * For the queuing, caller must hold the flow->req->qp s_lock from the send
1463 __must_hold(&req->qp->s_lock)
1467 struct hfi1_qp_priv *qpriv = req->qp->priv;
1472 lockdep_assert_held(&req->qp->s_lock);
1490 hfi1_wait_kmem(flow->req->qp);
1495 if (kernel_tid_waiters(rcd, &rcd->rarr_queue, flow->req->qp))
1528 dequeue_tid_waiter(rcd, &rcd->rarr_queue, flow->req->qp);
1537 queue_qp_for_tid_wait(rcd, &rcd->rarr_queue, flow->req->qp);
1554 __must_hold(&req->qp->s_lock)
1562 lockdep_assert_held(&req->qp->s_lock);
1582 if (fqp == req->qp) {
1597 __must_hold(&req->qp->s_lock)
1618 * @qp: the queue pair
1621 void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
1654 static void hfi1_init_trdma_req(struct rvt_qp *qp,
1657 struct hfi1_qp_priv *qpriv = qp->priv;
1669 req->qp = qp;
1709 struct rvt_qp *qp = req->qp;
1710 struct hfi1_qp_priv *qpriv = qp->priv;
1719 trace_hfi1_tid_flow_build_read_pkt(qp, req->flow_idx, flow);
1758 cpu_to_be32(qpriv->tid_rdma.local.qp |
1762 rreq->verbs_qp = cpu_to_be32(qp->remote_qpn);
1764 *bth1 |= remote->qp;
1771 qp->s_state = TID_OP(READ_REQ);
1775 qp->s_num_rd_atomic++;
1787 u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
1790 __must_hold(&qp->s_lock)
1792 struct hfi1_qp_priv *qpriv = qp->priv;
1798 u32 npkts = rvt_div_round_up_mtu(qp, *len);
1800 trace_hfi1_tid_req_build_read_req(qp, 0, wqe->wr.opcode, wqe->psn,
1811 hfi1_kern_clear_hw_flow(req->rcd, qp);
1829 restart_sge(&qp->s_sge, wqe, req->s_next_psn,
1830 qp->pmtu);
1845 if (hfi1_kern_setup_hw_flow(qpriv->rcd, qp))
1852 if (hfi1_kern_exp_rcv_setup(req, &qp->s_sge, &last)) {
1889 static int tid_rdma_rcv_read_request(struct rvt_qp *qp,
1895 struct hfi1_qp_priv *qpriv = qp->priv;
1916 flow->npkts = rvt_div_round_up_mtu(qp, len);
1918 trace_hfi1_tid_entry_rcv_read_req(qp, i,
1954 trace_hfi1_tid_flow_rcv_read_req(qp, req->setup_head, flow);
1980 trace_hfi1_tid_req_rcv_read_req(qp, 0, e->opcode, e->psn, e->lpsn,
1987 struct rvt_qp *qp, u32 psn, int diff)
1989 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
1990 struct hfi1_ctxtdata *rcd = ((struct hfi1_qp_priv *)qp->priv)->rcd;
1991 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
1992 struct hfi1_qp_priv *qpriv = qp->priv;
1999 trace_hfi1_rsp_tid_rcv_error(qp, psn);
2000 trace_hfi1_tid_rdma_rcv_err(qp, 0, psn, diff);
2003 if (!qp->r_nak_state) {
2005 qp->r_nak_state = IB_NAK_PSN_ERROR;
2006 qp->r_ack_psn = qp->r_psn;
2007 rc_defered_ack(rcd, qp);
2014 spin_lock_irqsave(&qp->s_lock, flags);
2015 e = find_prev_entry(qp, psn, &prev, NULL, &old_req);
2022 trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn, e->lpsn, req);
2045 qp->r_len = len;
2046 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
2057 * more (qp->s_tail_ack_queue is moved back, see below).
2062 if (tid_rdma_rcv_read_request(qp, e, packet, ohdr, bth0, psn,
2068 * qp->s_tail_ack_queue and qp->r_head_ack_queue);
2086 * qp->s_tail_ack_queue and qp->r_head_ack_queue).
2096 if (i == qp->r_head_ack_queue)
2098 e = &qp->s_ack_queue[i];
2115 * If there is no more allocated segment, just schedule the qp
2158 if (i == qp->r_head_ack_queue)
2160 e = &qp->s_ack_queue[i];
2162 trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn,
2183 if (qp->s_acked_ack_queue == qp->s_tail_ack_queue)
2184 qp->s_acked_ack_queue = prev;
2185 qp->s_tail_ack_queue = prev;
2187 * Since the qp->s_tail_ack_queue is modified, the
2188 * qp->s_ack_state must be changed to re-initialize
2189 * qp->s_ack_rdma_sge; Otherwise, we will end up in
2192 qp->s_ack_state = OP(ACKNOWLEDGE);
2199 qp->s_nak_state = 0;
2201 qp->r_psn = e->lpsn + 1;
2202 hfi1_tid_write_alloc_resources(qp, true);
2205 qp->r_state = e->opcode;
2206 qp->r_nak_state = 0;
2207 qp->s_flags |= RVT_S_RESP_PENDING;
2208 hfi1_schedule_send(qp);
2210 spin_unlock_irqrestore(&qp->s_lock, flags);
2226 * 3. Set the qp->s_ack_state.
2231 struct rvt_qp *qp = packet->qp;
2232 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
2237 struct hfi1_qp_priv *qpriv = qp->priv;
2249 fecn = process_ecn(qp, packet);
2251 trace_hfi1_rsp_rcv_tid_read_req(qp, psn);
2253 if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
2254 rvt_comm_est(qp);
2256 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
2266 diff = delta_psn(psn, qp->r_psn);
2268 tid_rdma_rcv_err(packet, ohdr, qp, psn, diff, fecn);
2273 next = qp->r_head_ack_queue + 1;
2274 if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
2276 spin_lock_irqsave(&qp->s_lock, flags);
2277 if (unlikely(next == qp->s_tail_ack_queue)) {
2278 if (!qp->s_ack_queue[next].sent) {
2282 update_ack_queue(qp, next);
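The r_head_ack_queue handling just above advances a circular index over rvt_size_atomic() + 1 ack-queue slots. The wrap rule, written out as a tiny illustrative helper:

/* Next slot index when the ack queue has size_atomic + 1 usable slots. */
static unsigned int next_ack_slot(unsigned int cur, unsigned int size_atomic)
{
	unsigned int next = cur + 1;

	return next > size_atomic ? 0 : next;
}
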
2284 e = &qp->s_ack_queue[qp->r_head_ack_queue];
2288 qp->r_len = len;
2290 if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr,
2295 if (tid_rdma_rcv_read_request(qp, e, packet, ohdr, bth0, psn, vaddr,
2299 qp->r_state = e->opcode;
2300 qp->r_nak_state = 0;
2306 qp->r_msn++;
2307 qp->r_psn += e->lpsn - e->psn + 1;
2309 qp->r_head_ack_queue = next;
2313 * queue, qpriv->r_tid_alloc follows qp->r_head_ack_queue. It is ok to
2317 qpriv->r_tid_alloc = qp->r_head_ack_queue;
2320 qp->s_flags |= RVT_S_RESP_PENDING;
2322 qp->s_flags |= RVT_S_ECN;
2323 hfi1_schedule_send(qp);
2325 spin_unlock_irqrestore(&qp->s_lock, flags);
2329 spin_unlock_irqrestore(&qp->s_lock, flags);
2331 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2332 qp->r_nak_state = nack_state;
2333 qp->r_ack_psn = qp->r_psn;
2335 rc_defered_ack(rcd, qp);
2338 spin_unlock_irqrestore(&qp->s_lock, flags);
2339 rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
2340 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
2341 qp->r_ack_psn = qp->r_psn;
2344 u32 hfi1_build_tid_rdma_read_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
2350 struct hfi1_qp_priv *qpriv = qp->priv;
2360 *len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset);
2365 trace_hfi1_tid_entry_build_read_resp(qp, flow->tid_idx, tidentry);
2366 trace_hfi1_tid_flow_build_read_resp(qp, req->clear_tail, flow);
2382 resp->verbs_qp = cpu_to_be32(qp->remote_qpn);
2385 resp->aeth = rvt_compute_aeth(qp);
2415 find_tid_request(struct rvt_qp *qp, u32 psn, enum ib_wr_opcode opcode)
2416 __must_hold(&qp->s_lock)
2422 end = qp->s_cur + 1;
2423 if (end == qp->s_size)
2425 for (i = qp->s_acked; i != end;) {
2426 wqe = rvt_get_swqe_ptr(qp, i);
2433 if (++i == qp->s_size)
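find_tid_request() above scans the send queue circularly from s_acked through s_cur for the WQE whose PSN range covers the incoming PSN and whose opcode matches. A self-contained sketch of that search, with simplified types and plain compares in place of the driver's wraparound-safe cmp_psn():

#include <stdint.h>
#include <stddef.h>

struct swqe_sketch {
	uint32_t psn;
	uint32_t lpsn;
	int opcode;
};

static struct swqe_sketch *find_req_sketch(struct swqe_sketch *ring,
					   uint32_t s_acked, uint32_t s_cur,
					   uint32_t s_size,
					   uint32_t psn, int opcode)
{
	uint32_t end = (s_cur + 1 == s_size) ? 0 : s_cur + 1;
	uint32_t i = s_acked;

	while (i != end) {
		struct swqe_sketch *wqe = &ring[i];

		if (psn >= wqe->psn && psn <= wqe->lpsn)
			/* PSN belongs to this WQE; only return it if the
			 * opcode also matches, as the driver does. */
			return wqe->opcode == opcode ? wqe : NULL;
		if (++i == s_size)
			i = 0;		/* wrap the circular send queue */
	}
	return NULL;
}
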
2452 struct rvt_qp *qp = packet->qp;
2453 struct hfi1_qp_priv *priv = qp->priv;
2462 trace_hfi1_sender_rcv_tid_read_resp(qp);
2463 fecn = process_ecn(qp, packet);
2468 spin_lock_irqsave(&qp->s_lock, flags);
2470 req = find_tid_request(qp, ipsn, IB_WR_TID_RDMA_READ);
2497 u32 pmtu = qp->pmtu;
2504 rvt_copy_sge(qp, &ss, packet->payload, pmtu, false,
2515 qp->s_num_rd_atomic--;
2516 if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
2517 !qp->s_num_rd_atomic) {
2518 qp->s_flags &= ~(RVT_S_WAIT_FENCE |
2520 hfi1_schedule_send(qp);
2522 if (qp->s_flags & RVT_S_WAIT_RDMAR) {
2523 qp->s_flags &= ~(RVT_S_WAIT_RDMAR | RVT_S_WAIT_ACK);
2524 hfi1_schedule_send(qp);
2527 trace_hfi1_ack(qp, ipsn);
2528 trace_hfi1_tid_req_rcv_read_resp(qp, 0, req->e.swqe->wr.opcode,
2531 trace_hfi1_tid_flow_rcv_read_resp(qp, req->clear_tail, flow);
2536 if (!do_rc_ack(qp, aeth, ipsn, opcode, 0, rcd))
2553 hfi1_kern_clear_hw_flow(priv->rcd, qp);
2559 hfi1_schedule_send(qp);
2566 * state. However, if the wqe queue is empty (qp->s_acked == qp->s_tail
2567 * == qp->s_head), it would be unsafe to complete the wqe pointed by
2568 * qp->s_acked here. Putting the qp into error state will safely flush
2571 if (qp->s_last == qp->s_acked)
2572 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
2575 spin_unlock_irqrestore(&qp->s_lock, flags);
2578 void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp)
2579 __must_hold(&qp->s_lock)
2581 u32 n = qp->s_acked;
2584 struct hfi1_qp_priv *priv = qp->priv;
2586 lockdep_assert_held(&qp->s_lock);
2588 while (n != qp->s_tail) {
2589 wqe = rvt_get_swqe_ptr(qp, n);
2595 if (++n == qp->s_size)
2599 hfi1_kern_clear_hw_flow(priv->rcd, qp);
2604 struct rvt_qp *qp = packet->qp;
2609 spin_lock(&qp->s_lock);
2619 hfi1_restart_rc(qp, qp->s_last_psn + 1, 1);
2620 hfi1_schedule_send(qp);
2624 spin_unlock(&qp->s_lock);
2630 struct rvt_qp *qp, struct rvt_swqe *wqe)
2636 qp->r_flags |= RVT_R_RDMAR_SEQ;
2639 hfi1_restart_rc(qp, flow->flow_state.ib_spsn, 0);
2640 if (list_empty(&qp->rspwait)) {
2641 qp->r_flags |= RVT_R_RSP_SEND;
2642 rvt_get_qp(qp);
2643 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2653 * The caller must hold the packet->qp->r_lock and the rcu_read_lock.
2658 __must_hold(&packet->qp->r_lock) __must_hold(RCU)
2667 struct rvt_qp *qp = packet->qp;
2668 struct hfi1_qp_priv *priv = qp->priv;
2673 lockdep_assert_held(&qp->r_lock);
2674 trace_hfi1_rsp_read_kdeth_eflags(qp, ibpsn);
2675 trace_hfi1_sender_read_kdeth_eflags(qp);
2676 trace_hfi1_tid_read_sender_kdeth_eflags(qp, 0);
2677 spin_lock(&qp->s_lock);
2679 if (cmp_psn(ibpsn, qp->s_last_psn) < 0 ||
2680 cmp_psn(ibpsn, qp->s_psn) > 0)
2689 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
2690 ibp = to_iport(qp->ibqp.device, qp->port_num);
2704 if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
2705 qp->r_flags |= RVT_R_RDMAR_SEQ;
2707 restart_tid_rdma_read_req(rcd, qp,
2710 hfi1_restart_rc(qp, qp->s_last_psn + 1,
2712 if (list_empty(&qp->rspwait)) {
2713 qp->r_flags |= RVT_R_RSP_SEND;
2714 rvt_get_qp(qp);
2716 &qp->rspwait,
2728 wqe = do_rc_completion(qp, wqe, ibp);
2729 if (qp->s_acked == qp->s_tail)
2733 if (qp->s_acked == qp->s_tail)
2741 trace_hfi1_tid_req_read_kdeth_eflags(qp, 0, wqe->wr.opcode, wqe->psn,
2757 trace_hfi1_tid_flow_read_kdeth_eflags(qp,
2772 if (qp->r_flags & RVT_R_RDMAR_SEQ)
2773 qp->r_flags &=
2789 if (qp->r_flags & RVT_R_RDMAR_SEQ)
2790 qp->r_flags &=
2806 if (!(qp->r_flags & RVT_R_RDMAR_SEQ))
2807 restart_tid_rdma_read_req(rcd, qp,
2841 spin_unlock(&qp->s_lock);
2860 struct rvt_qp *qp;
2888 qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
2889 if (!qp)
2892 packet->qp = qp;
2895 spin_lock_irqsave(&qp->r_lock, flags);
2896 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
2930 * qp->s_tail_ack_queue points to the rvt_ack_entry currently being
2934 spin_lock(&qp->s_lock);
2935 qpriv = qp->priv;
2939 e = &qp->s_ack_queue[qpriv->r_tid_tail];
2946 trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn);
2947 trace_hfi1_rsp_handle_kdeth_eflags(qp, psn);
2948 trace_hfi1_tid_write_rsp_handle_kdeth_eflags(qp);
2949 trace_hfi1_tid_req_handle_kdeth_eflags(qp, 0, e->opcode, e->psn,
2951 trace_hfi1_tid_flow_handle_kdeth_eflags(qp, req->clear_tail, flow);
3022 spin_unlock(&qp->s_lock);
3024 spin_unlock_irqrestore(&qp->r_lock, flags);
3035 tid_rdma_trigger_ack(qp);
3046 void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
3051 struct hfi1_qp_priv *qpriv = qp->priv;
3057 *bth2 = mask_psn(qp->s_psn);
3061 qp, "!!!!!! Could not find flow to restart: bth2 ",
3063 trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode,
3081 trace_hfi1_tid_flow_restart_req(qp, fidx, flow);
3095 tidnpkts = rvt_div_round_up_mtu(qp, tidlen);
3099 npkts * qp->pmtu);
3100 flow->tid_offset += npkts * qp->pmtu;
3131 trace_hfi1_tid_flow_restart_req(qp, fidx, flow);
3132 trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode, wqe->psn,
3151 i = (++i == qp->s_size ? 0 : i);
3152 wqe = rvt_get_swqe_ptr(qp, i);
3163 void hfi1_qp_kern_exp_rcv_clear_all(struct rvt_qp *qp)
3166 struct hfi1_qp_priv *qpriv = qp->priv;
3169 if (qp->ibqp.qp_type != IB_QPT_RC || !HFI1_CAP_IS_KSET(TID_RDMA))
3178 hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
3180 for (i = qp->s_acked; i != qp->s_head;) {
3181 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);
3183 if (++i == qp->s_size)
3194 for (i = qp->s_acked_ack_queue; i != qp->r_head_ack_queue;) {
3195 struct rvt_ack_entry *e = &qp->s_ack_queue[i];
3197 if (++i == rvt_max_atomic(ib_to_rvt(qp->ibqp.device)))
3210 bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe)
3213 struct hfi1_qp_priv *priv = qp->priv;
3217 s_prev = (qp->s_cur == 0 ? qp->s_size : qp->s_cur) - 1;
3218 prev = rvt_get_swqe_ptr(qp, s_prev);
3245 if (qp->s_acked != qp->s_cur)
3268 static inline bool hfi1_check_sge_align(struct rvt_qp *qp,
3274 trace_hfi1_sge_check_align(qp, i, sge);
3282 void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
3284 struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
3291 if ((rdma_ah_get_dlid(&qp->remote_ah_attr) & ~((1 << ppd->lmc) - 1)) ==
3307 if (hfi1_check_sge_align(qp, &wqe->sg_list[0],
3339 wqe->lpsn += rvt_div_round_up_mtu(qp, wqe->length) - 1;
3356 trace_hfi1_tid_req_setup_tid_wqe(qp, 1, wqe->wr.opcode,
3366 u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
3370 struct hfi1_qp_priv *qpriv = qp->priv;
3390 ohdr->u.tid_rdma.w_req.verbs_qp = cpu_to_be32(qp->remote_qpn);
3392 *bth1 |= remote->qp;
3393 qp->s_state = TID_OP(WRITE_REQ);
3394 qp->s_flags |= HFI1_S_WAIT_TID_RESP;
3402 static u32 hfi1_compute_tid_rdma_flow_wt(struct rvt_qp *qp)
3412 return (MAX_TID_FLOW_PSN * qp->pmtu) >> TID_RDMA_SEGMENT_SHIFT;
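For a rough sense of what this computes: the "flow weight" is how many TID RDMA segments the flow's PSN space can cover at the current pmtu. Using assumed values that are not shown in this listing, if MAX_TID_FLOW_PSN were 2048, the pmtu 4096 bytes, and TID_RDMA_SEGMENT_SHIFT 18 (256 KiB segments), the result would be (2048 * 4096) >> 18 = 32.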
3422 * @qp: points to rvt_qp context.
3426 static u32 hfi1_compute_tid_rnr_timeout(struct rvt_qp *qp, u32 to_seg)
3428 struct hfi1_qp_priv *qpriv = qp->priv;
3462 * [request: qp->s_tail_ack_queue, segment:req->cur_seg]
3464 static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
3467 struct hfi1_qp_priv *qpriv = qp->priv;
3475 lockdep_assert_held(&qp->s_lock);
3478 trace_hfi1_rsp_tid_write_alloc_res(qp, 0);
3479 trace_hfi1_tid_write_rsp_alloc_res(qp);
3482 * scheduled to avoid messing up qp->r_psn: the RNR NAK will
3487 * RNR NAK packet, it will restart with qp->s_last_psn + 1,
3488 * which does not match qp->r_psn and will be dropped.
3490 * put the qp into error state.
3500 hfi1_kern_clear_hw_flow(rcd, qp);
3506 e = &qp->s_ack_queue[qpriv->r_tid_alloc];
3510 trace_hfi1_tid_req_write_alloc_res(qp, 0, e->opcode, e->psn,
3526 hfi1_kern_clear_hw_flow(rcd, qp);
3533 ret = hfi1_kern_setup_hw_flow(qpriv->rcd, qp);
3535 to_seg = hfi1_compute_tid_rdma_flow_wt(qp) *
3542 npkts = rvt_div_round_up_mtu(qp, req->seg_len);
3564 tid_rdma_trigger_ack(qp);
3581 rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
3590 if (ret == -EAGAIN && intr_ctx && !qp->r_nak_state)
3596 lockdep_assert_held(&qp->r_lock);
3599 qp->r_nak_state = hfi1_compute_tid_rnr_timeout(qp, to_seg) | IB_RNR_NAK;
3602 qp->r_psn = e->psn + req->alloc_seg;
3603 qp->r_ack_psn = qp->r_psn;
3609 qp->r_head_ack_queue = qpriv->r_tid_alloc + 1;
3610 if (qp->r_head_ack_queue > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
3611 qp->r_head_ack_queue = 0;
3612 qpriv->r_tid_head = qp->r_head_ack_queue;
3615 * hfi1_send_rc_ack() but must be set here before dropping qp->s_lock
3618 qp->s_nak_state = qp->r_nak_state;
3619 qp->s_ack_psn = qp->r_ack_psn;
3622 * have modified qp->s_ack_psn here.
3624 qp->s_flags &= ~(RVT_S_ACK_PENDING);
3626 trace_hfi1_rsp_tid_write_alloc_res(qp, qp->r_psn);
3629 * has actually been sent. qp->s_flags RVT_S_ACK_PENDING bit cannot be
3630 * used for this because qp->s_lock is dropped before calling
3641 rc_defered_ack(rcd, qp);
3655 * 3. Set the qp->s_ack_state as per the state diagram in the design doc.
3660 struct rvt_qp *qp = packet->qp;
3661 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
3666 struct hfi1_qp_priv *qpriv = qp->priv;
3678 fecn = process_ecn(qp, packet);
3680 trace_hfi1_rsp_rcv_tid_write_req(qp, psn);
3682 if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
3683 rvt_comm_est(qp);
3685 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
3693 diff = delta_psn(psn, qp->r_psn);
3695 tid_rdma_rcv_err(packet, ohdr, qp, psn, diff, fecn);
3705 qp->r_head_ack_queue = qp->r_head_ack_queue ?
3706 qp->r_head_ack_queue - 1 :
3707 rvt_size_atomic(ib_to_rvt(qp->ibqp.device));
3710 next = qp->r_head_ack_queue + 1;
3711 if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
3713 spin_lock_irqsave(&qp->s_lock, flags);
3714 if (unlikely(next == qp->s_acked_ack_queue)) {
3715 if (!qp->s_ack_queue[next].sent)
3717 update_ack_queue(qp, next);
3719 e = &qp->s_ack_queue[qp->r_head_ack_queue];
3724 qp->r_nak_state = 0;
3725 qp->s_nak_state = 0;
3727 qp->r_psn = e->lpsn + 1;
3739 qp->r_len = len;
3746 if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr,
3750 qp->r_psn += num_segs - 1;
3754 e->lpsn = qp->r_psn;
3775 qp->r_state = e->opcode;
3776 qp->r_nak_state = 0;
3782 qp->r_msn++;
3783 qp->r_psn++;
3785 trace_hfi1_tid_req_rcv_write_req(qp, 0, e->opcode, e->psn, e->lpsn,
3789 qpriv->r_tid_tail = qp->r_head_ack_queue;
3793 e = &qp->s_ack_queue[qpriv->r_tid_tail];
3799 qpriv->r_tid_ack = qp->r_head_ack_queue;
3800 qpriv->r_tid_tail = qp->r_head_ack_queue;
3804 qp->r_head_ack_queue = next;
3805 qpriv->r_tid_head = qp->r_head_ack_queue;
3807 hfi1_tid_write_alloc_resources(qp, true);
3808 trace_hfi1_tid_write_rsp_rcv_req(qp);
3811 qp->s_flags |= RVT_S_RESP_PENDING;
3813 qp->s_flags |= RVT_S_ECN;
3814 hfi1_schedule_send(qp);
3816 spin_unlock_irqrestore(&qp->s_lock, flags);
3820 spin_unlock_irqrestore(&qp->s_lock, flags);
3822 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
3823 qp->r_nak_state = IB_NAK_INVALID_REQUEST;
3824 qp->r_ack_psn = qp->r_psn;
3826 rc_defered_ack(rcd, qp);
3829 spin_unlock_irqrestore(&qp->s_lock, flags);
3830 rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
3831 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
3832 qp->r_ack_psn = qp->r_psn;
3835 u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
3842 struct hfi1_qp_priv *qpriv = qp->priv;
3848 trace_hfi1_tid_req_build_write_resp(qp, 0, e->opcode, e->psn, e->lpsn,
3850 trace_hfi1_tid_write_rsp_build_resp(qp);
3851 trace_hfi1_rsp_build_tid_write_resp(qp, bth2);
3859 hfi1_tid_write_alloc_resources(qp, false);
3873 trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow);
3875 hfi1_add_tid_reap_timer(qp);
3880 trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow);
3885 hfi1_mod_tid_reap_timer(qp);
3918 ohdr->u.tid_rdma.w_rsp.aeth = rvt_compute_aeth(qp);
3925 cpu_to_be32(qpriv->tid_rdma.local.qp |
3929 ohdr->u.tid_rdma.w_rsp.verbs_qp = cpu_to_be32(qp->remote_qpn);
3930 *bth1 = remote->qp;
3938 static void hfi1_add_tid_reap_timer(struct rvt_qp *qp)
3940 struct hfi1_qp_priv *qpriv = qp->priv;
3942 lockdep_assert_held(&qp->s_lock);
3951 static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp)
3953 struct hfi1_qp_priv *qpriv = qp->priv;
3955 lockdep_assert_held(&qp->s_lock);
3961 static int hfi1_stop_tid_reap_timer(struct rvt_qp *qp)
3963 struct hfi1_qp_priv *qpriv = qp->priv;
3966 lockdep_assert_held(&qp->s_lock);
3974 void hfi1_del_tid_reap_timer(struct rvt_qp *qp)
3976 struct hfi1_qp_priv *qpriv = qp->priv;
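The four helpers above (add/mod/stop/del of the TID reap timer) follow the usual kernel timer idiom: arm only when not already pending, push the deadline out on activity, report on stop whether the timer was still pending, and tear down synchronously. A rough sketch against the generic timer API; the field and flag names are illustrative, and the timer is assumed to have been initialized with timer_setup() elsewhere.

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/types.h>

struct reap_timer_state {
	struct timer_list timer;	/* initialized with timer_setup() */
	unsigned long timeout_jiffies;
	bool armed;			/* analogous to a "timer running" flag */
};

static void reap_timer_add(struct reap_timer_state *s)
{
	if (!s->armed) {		/* arm only if not already running */
		s->armed = true;
		s->timer.expires = jiffies + s->timeout_jiffies;
		add_timer(&s->timer);
	}
}

static void reap_timer_mod(struct reap_timer_state *s)
{
	s->armed = true;		/* push the deadline out again */
	mod_timer(&s->timer, jiffies + s->timeout_jiffies);
}

static int reap_timer_stop(struct reap_timer_state *s)
{
	int rval = 0;

	if (s->armed) {
		rval = del_timer(&s->timer);	/* nonzero if it was pending */
		s->armed = false;
	}
	return rval;
}

static void reap_timer_del(struct reap_timer_state *s)
{
	del_timer_sync(&s->timer);	/* also waits for a running handler */
	s->armed = false;
}
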
3985 struct rvt_qp *qp = qpriv->owner;
3986 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
3990 spin_lock_irqsave(&qp->r_lock, flags);
3991 spin_lock(&qp->s_lock);
3993 dd_dev_warn(dd_from_ibdev(qp->ibqp.device), "[QP%u] %s %d\n",
3994 qp->ibqp.qp_num, __func__, __LINE__);
3996 qp, "resource timeout = ",
3998 hfi1_stop_tid_reap_timer(qp);
4003 hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
4006 ack_to_tid_req(&qp->s_ack_queue[i]);
4010 spin_unlock(&qp->s_lock);
4011 if (qp->ibqp.event_handler) {
4014 ev.device = qp->ibqp.device;
4015 ev.element.qp = &qp->ibqp;
4017 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
4019 rvt_rc_error(qp, IB_WC_RESP_TIMEOUT_ERR);
4022 spin_unlock(&qp->s_lock);
4024 spin_unlock_irqrestore(&qp->r_lock, flags);
4037 * 5. Set qp->s_state
4041 struct rvt_qp *qp = packet->qp;
4042 struct hfi1_qp_priv *qpriv = qp->priv;
4052 fecn = process_ecn(qp, packet);
4057 spin_lock_irqsave(&qp->s_lock, flags);
4060 if (cmp_psn(psn, qp->s_next_psn) >= 0)
4064 if (unlikely(cmp_psn(psn, qp->s_last_psn) <= 0))
4067 if (unlikely(qp->s_acked == qp->s_tail))
4075 if (qp->r_flags & RVT_R_RDMAR_SEQ) {
4076 if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
4078 qp->r_flags &= ~RVT_R_RDMAR_SEQ;
4081 wqe = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur);
4101 if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
4104 trace_hfi1_ack(qp, psn);
4122 flow->npkts = rvt_div_round_up_mtu(qp, flow->length);
4133 trace_hfi1_tid_flow_rcv_write_resp(qp, req->setup_head, flow);
4136 trace_hfi1_tid_write_sender_rcv_resp(qp, 0);
4143 qp, i, flow->tid_entry[i]);
4155 trace_hfi1_tid_req_rcv_write_resp(qp, 0, wqe->wr.opcode, wqe->psn,
4181 if (i == qp->s_size)
4183 wqe = rvt_get_swqe_ptr(qp, i);
4191 qp->s_flags &= ~HFI1_S_WAIT_TID_RESP;
4192 hfi1_schedule_tid_send(qp);
4198 rvt_error_qp(qp, status);
4201 qp->s_flags |= RVT_S_ECN;
4202 spin_unlock_irqrestore(&qp->s_lock, flags);
4212 struct rvt_qp *qp = req->qp;
4213 struct hfi1_qp_priv *qpriv = qp->priv;
4221 hfi1_trdma_send_complete(qp, wqe, IB_WC_REM_INV_RD_REQ_ERR);
4222 rvt_error_qp(qp, IB_WC_REM_INV_RD_REQ_ERR);
4225 *len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset);
4230 trace_hfi1_tid_entry_build_write_data(qp, flow->tid_idx, tidentry);
4231 trace_hfi1_tid_flow_build_write_data(qp, req->clear_tail, flow);
4243 wd->verbs_qp = cpu_to_be32(qp->remote_qpn);
4254 rvt_div_round_up_mtu(qp, req->seg_len) >
4271 struct rvt_qp *qp = packet->qp;
4272 struct hfi1_qp_priv *priv = qp->priv;
4278 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
4284 fecn = process_ecn(qp, packet);
4292 spin_lock_irqsave(&qp->s_lock, flags);
4293 e = &qp->s_ack_queue[priv->r_tid_tail];
4318 u32 pmtu = qp->pmtu;
4338 rvt_copy_sge(qp, &ss, packet->payload, pmtu, false,
4360 trace_hfi1_rsp_rcv_tid_write_data(qp, psn);
4361 trace_hfi1_tid_req_rcv_write_data(qp, 0, e->opcode, e->psn, e->lpsn,
4363 trace_hfi1_tid_write_rsp_rcv_data(qp);
4373 e = &qp->s_ack_queue[next];
4378 if (++qp->s_acked_ack_queue > rvt_size_atomic(&dev->rdi))
4379 qp->s_acked_ack_queue = 0;
4382 hfi1_tid_write_alloc_resources(qp, true);
4389 qp->s_tail_ack_queue != qp->r_head_ack_queue) {
4390 qp->s_flags |= RVT_S_RESP_PENDING;
4391 hfi1_schedule_send(qp);
4397 hfi1_mod_tid_reap_timer(req->qp);
4399 hfi1_stop_tid_reap_timer(req->qp);
4403 tid_rdma_schedule_ack(qp);
4407 qp->s_flags |= RVT_S_ECN;
4408 spin_unlock_irqrestore(&qp->s_lock, flags);
4415 tid_rdma_trigger_ack(qp);
4426 u32 hfi1_build_tid_rdma_write_ack(struct rvt_qp *qp, struct rvt_ack_entry *e,
4430 struct hfi1_qp_priv *qpriv = qp->priv;
4439 ohdr->u.tid_rdma.ack.verbs_qp = cpu_to_be32(qp->remote_qpn);
4440 *bth1 = remote->qp;
4446 ohdr->u.tid_rdma.ack.aeth = rvt_compute_aeth(qp);
4450 cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
4455 ohdr->u.tid_rdma.ack.aeth = rvt_compute_aeth(qp);
4459 cpu_to_be32(qpriv->tid_rdma.local.qp |
4500 struct rvt_qp *qp = packet->qp;
4501 struct hfi1_qp_priv *qpriv = qp->priv;
4509 trace_hfi1_tid_write_sender_rcv_tid_ack(qp, 0);
4510 process_ecn(qp, packet);
4516 spin_lock_irqsave(&qp->s_lock, flags);
4517 trace_hfi1_rcv_tid_ack(qp, aeth, psn, req_psn, resync_psn);
4520 if ((qp->s_flags & HFI1_S_WAIT_HALT) &&
4534 if (unlikely(qp->s_acked == qp->s_tail))
4537 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
4543 trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
4546 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
4560 trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
4564 wqe = do_rc_completion(qp, wqe,
4565 to_iport(qp->ibqp.device,
4566 qp->port_num));
4567 trace_hfi1_sender_rcv_tid_ack(qp);
4569 if (qp->s_acked == qp->s_tail)
4576 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
4579 trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
4589 hfi1_mod_tid_retry_timer(qp);
4591 hfi1_stop_tid_retry_timer(qp);
4592 hfi1_schedule_send(qp);
4598 hfi1_stop_tid_retry_timer(qp);
4600 qp->s_flags &= ~HFI1_S_WAIT_HALT;
4608 hfi1_schedule_send(qp);
4610 if ((qp->s_acked == qpriv->s_tid_tail &&
4612 qp->s_acked == qp->s_tail) {
4635 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
4664 last_acked = qp->s_acked;
4696 trace_hfi1_tid_flow_rcv_tid_ack(qp,
4702 if (last_acked == qp->s_size)
4704 wqe = rvt_get_swqe_ptr(qp, last_acked);
4708 qpriv->s_tid_tail = qp->s_acked;
4710 hfi1_schedule_tid_send(qp);
4713 qpriv->s_retry = qp->s_retry_cnt;
4717 hfi1_stop_tid_retry_timer(qp);
4727 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail,
4731 qpriv->s_tid_tail = qp->s_acked;
4733 qpriv->s_retry = qp->s_retry_cnt;
4734 hfi1_schedule_tid_send(qp);
4747 spin_unlock_irqrestore(&qp->s_lock, flags);
4750 void hfi1_add_tid_retry_timer(struct rvt_qp *qp)
4752 struct hfi1_qp_priv *priv = qp->priv;
4753 struct ib_qp *ibqp = &qp->ibqp;
4756 lockdep_assert_held(&qp->s_lock);
4765 static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp)
4767 struct hfi1_qp_priv *priv = qp->priv;
4768 struct ib_qp *ibqp = &qp->ibqp;
4771 lockdep_assert_held(&qp->s_lock);
4777 static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp)
4779 struct hfi1_qp_priv *priv = qp->priv;
4782 lockdep_assert_held(&qp->s_lock);
4790 void hfi1_del_tid_retry_timer(struct rvt_qp *qp)
4792 struct hfi1_qp_priv *priv = qp->priv;
4801 struct rvt_qp *qp = priv->owner;
4806 spin_lock_irqsave(&qp->r_lock, flags);
4807 spin_lock(&qp->s_lock);
4808 trace_hfi1_tid_write_sender_retry_timeout(qp, 0);
4810 hfi1_stop_tid_retry_timer(qp);
4813 qp,
4817 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
4818 hfi1_trdma_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
4819 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
4821 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
4824 qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req);
4833 qp->s_flags |= HFI1_S_WAIT_HALT;
4836 hfi1_schedule_tid_send(qp);
4839 spin_unlock(&qp->s_lock);
4840 spin_unlock_irqrestore(&qp->r_lock, flags);
4843 u32 hfi1_build_tid_rdma_resync(struct rvt_qp *qp, struct rvt_swqe *wqe,
4847 struct hfi1_qp_priv *qpriv = qp->priv;
4856 ohdr->u.tid_rdma.ack.verbs_qp = cpu_to_be32(qp->remote_qpn);
4857 *bth1 = remote->qp;
4872 struct rvt_qp *qp = packet->qp;
4873 struct hfi1_qp_priv *qpriv = qp->priv;
4875 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
4884 fecn = process_ecn(qp, packet);
4888 spin_lock_irqsave(&qp->s_lock, flags);
4921 trace_hfi1_tid_write_rsp_rcv_resync(qp);
4933 e = &qp->s_ack_queue[idx];
4936 trace_hfi1_tid_req_rcv_resync(qp, 0, e->opcode, e->psn,
4960 trace_hfi1_tid_flow_rcv_resync(qp, flow_idx,
4964 if (idx == qp->s_tail_ack_queue)
4972 tid_rdma_trigger_ack(qp);
4975 qp->s_flags |= RVT_S_ECN;
4976 spin_unlock_irqrestore(&qp->s_lock, flags);
4983 static void update_tid_tail(struct rvt_qp *qp)
4984 __must_hold(&qp->s_lock)
4986 struct hfi1_qp_priv *priv = qp->priv;
4990 lockdep_assert_held(&qp->s_lock);
4995 if (i == qp->s_size)
5000 wqe = rvt_get_swqe_ptr(qp, i);
5008 int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
5009 __must_hold(&qp->s_lock)
5011 struct hfi1_qp_priv *priv = qp->priv;
5015 struct rvt_sge_state *ss = &qp->s_sge;
5016 struct rvt_ack_entry *e = &qp->s_ack_queue[qp->s_tail_ack_queue];
5021 lockdep_assert_held(&qp->s_lock);
5022 trace_hfi1_tid_write_sender_make_tid_pkt(qp, 0);
5029 !(qp->s_flags & (RVT_S_BUSY | RVT_S_WAIT_ACK |
5032 !(qp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT_IO)))) {
5037 if (ps->s_txreq || hfi1_make_rc_req(qp, ps)) {
5043 ps->s_txreq = get_txreq(ps->dev, qp);
5050 make_tid_rdma_ack(qp, ohdr, ps))
5059 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK))
5068 wqe = rvt_get_swqe_ptr(qp, priv->s_tid_tail);
5070 trace_hfi1_tid_req_make_tid_pkt(qp, 0, wqe->wr.opcode, wqe->psn,
5081 hfi1_tid_rdma_restart_req(qp, wqe, &bth2);
5099 trace_hfi1_sender_make_tid_pkt(qp);
5100 trace_hfi1_tid_write_sender_make_tid_pkt(qp, 0);
5101 wqe = rvt_get_swqe_ptr(qp, priv->s_tid_tail);
5108 trace_hfi1_tid_req_make_tid_pkt(qp, 0, wqe->wr.opcode,
5120 qp->s_flags |= HFI1_S_WAIT_TID_RESP;
5126 update_tid_tail(qp);
5134 trace_hfi1_sender_make_tid_pkt(qp);
5136 wqe = rvt_get_swqe_ptr(qp, priv->s_tid_cur);
5140 wqe = rvt_get_swqe_ptr(qp,
5141 (!priv->s_tid_cur ? qp->s_size :
5145 hwords += hfi1_build_tid_rdma_resync(qp, wqe, ohdr, &bth1,
5162 qp->s_len -= len;
5167 hfi1_make_ruc_header(qp, ohdr, (opcode << 24), bth1, bth2,
5186 static int make_tid_rdma_ack(struct rvt_qp *qp,
5191 struct hfi1_qp_priv *qpriv = qp->priv;
5192 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
5200 trace_hfi1_tid_write_rsp_make_tid_ack(qp);
5202 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
5208 e = &qp->s_ack_queue[qpriv->r_tid_ack];
5227 e = &qp->s_ack_queue[qpriv->r_tid_ack];
5231 trace_hfi1_rsp_make_tid_ack(qp, e->psn);
5232 trace_hfi1_tid_req_make_tid_ack(qp, 0, e->opcode, e->psn, e->lpsn,
5270 if (qp->s_ack_queue[next].opcode != TID_OP(WRITE_REQ))
5272 nreq = ack_to_tid_req(&qp->s_ack_queue[next]);
5277 e = &qp->s_ack_queue[qpriv->r_tid_ack];
5298 e = &qp->s_ack_queue[qpriv->r_tid_ack];
5305 trace_hfi1_tid_write_rsp_make_tid_ack(qp);
5306 trace_hfi1_tid_req_make_tid_ack(qp, 0, e->opcode, e->psn, e->lpsn,
5308 hwords += hfi1_build_tid_rdma_write_ack(qp, e, ohdr, flow, &bth1,
5316 hfi1_make_ruc_header(qp, ohdr, (TID_OP(ACK) << 24), bth1, bth2, middle,
5330 static int hfi1_send_tid_ok(struct rvt_qp *qp)
5332 struct hfi1_qp_priv *priv = qp->priv;
5335 qp->s_flags & HFI1_S_ANY_WAIT_IO) &&
5338 !(qp->s_flags & HFI1_S_ANY_TID_WAIT_SEND));
5344 struct rvt_qp *qp = iowait_to_qp(w->iow);
5346 hfi1_do_tid_send(qp);
5349 static void hfi1_do_tid_send(struct rvt_qp *qp)
5352 struct hfi1_qp_priv *priv = qp->priv;
5354 ps.dev = to_idev(qp->ibqp.device);
5355 ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
5359 ps.timeout_int = qp->timeout_jiffies / 8;
5361 trace_hfi1_rc_do_tid_send(qp, false);
5362 spin_lock_irqsave(&qp->s_lock, ps.flags);
5365 if (!hfi1_send_tid_ok(qp)) {
5366 if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
5368 spin_unlock_irqrestore(&qp->s_lock, ps.flags);
5385 qp->s_flags |= RVT_S_BUSY;
5388 spin_unlock_irqrestore(&qp->s_lock, ps.flags);
5394 if (hfi1_verbs_send(qp, &ps))
5398 if (hfi1_schedule_send_yield(qp, &ps, true))
5401 spin_lock_irqsave(&qp->s_lock, ps.flags);
5403 qp->s_flags &= ~RVT_S_BUSY;
5408 hfi1_schedule_send(qp);
5411 } while (hfi1_make_tid_rdma_pkt(qp, &ps));
5413 spin_unlock_irqrestore(&qp->s_lock, ps.flags);
5416 static bool _hfi1_schedule_tid_send(struct rvt_qp *qp)
5418 struct hfi1_qp_priv *priv = qp->priv;
5420 to_iport(qp->ibqp.device, qp->port_num);
5435 * @qp: the QP
5437 * This schedules qp progress on the TID RDMA state machine. Caller
5447 bool hfi1_schedule_tid_send(struct rvt_qp *qp)
5449 lockdep_assert_held(&qp->s_lock);
5450 if (hfi1_send_tid_ok(qp)) {
5452 * The following call returns true if the qp is not on the
5453 * queue and false if the qp is already on the queue before
5454 * this call. Either way, the qp will be on the queue when the
5457 _hfi1_schedule_tid_send(qp);
5460 if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
5461 iowait_set_flag(&((struct hfi1_qp_priv *)qp->priv)->s_iowait,
5466 bool hfi1_tid_rdma_ack_interlock(struct rvt_qp *qp, struct rvt_ack_entry *e)
5470 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
5471 struct hfi1_qp_priv *priv = qp->priv;
5474 s_prev = qp->s_tail_ack_queue == 0 ? rvt_size_atomic(&dev->rdi) :
5475 (qp->s_tail_ack_queue - 1);
5476 prev = &qp->s_ack_queue[s_prev];
5504 struct rvt_qp *qp, u32 psn, int diff, bool fecn)
5508 tid_rdma_rcv_error(packet, ohdr, qp, psn, diff);
5510 spin_lock_irqsave(&qp->s_lock, flags);
5511 qp->s_flags |= RVT_S_ECN;
5512 spin_unlock_irqrestore(&qp->s_lock, flags);