Lines Matching defs:flow

37 /* Maximum number of packets within a flow generation. */
134 struct tid_rdma_flow *flow,
529 * This should be done after the hardware flow and
700 * kern_reserve_flow - allocate a hardware flow
702 * @last: the index of the preferred flow. Use RXE_NUM_TID_FLOWS to
706 * flow for use in receiving KDETH data packets. If a preferred flow is
707 * specified the function will attempt to reserve that flow again, if
721 /* Attempt to reserve the preferred flow index */
788 /* The QP already has an allocated flow */
802 /* Generation received in a RESYNC overrides default flow generation */
868 * @flow: overall info for a TID RDMA segment
881 static u32 tid_rdma_find_phys_blocks_4k(struct tid_rdma_flow *flow,
898 trace_hfi1_tid_flow_page(flow->req->qp, flow, 0, 0, 0, vaddr);
901 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 0, 0,
935 trace_hfi1_tid_pageset(flow->req->qp, setcount,
999 * @flow: overall info for a TID RDMA segment
1020 static u32 tid_rdma_find_phys_blocks_8k(struct tid_rdma_flow *flow,
1034 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 0, v0);
1037 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 1, v1);
1087 static u32 kern_find_pages(struct tid_rdma_flow *flow,
1091 struct tid_rdma_request *req = flow->req;
1093 u32 length = flow->req->seg_len;
1117 flow->length = flow->req->seg_len - length;
1122 static void dma_unmap_flow(struct tid_rdma_flow *flow)
1128 dd = flow->req->rcd->dd;
1129 for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets;
1141 static int dma_map_flow(struct tid_rdma_flow *flow, struct page **pages)
1144 struct hfi1_devdata *dd = flow->req->rcd->dd;
1147 for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets;
1157 dma_unmap_flow(flow);
1166 static inline bool dma_mapped(struct tid_rdma_flow *flow)
1168 return !!flow->pagesets[0].mapped;
1173 * segment. All segments are of length flow->req->seg_len.
1175 static int kern_get_phys_blocks(struct tid_rdma_flow *flow,
1182 if (flow->npagesets) {
1183 trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head,
1184 flow);
1185 if (!dma_mapped(flow))
1186 return dma_map_flow(flow, pages);
1190 npages = kern_find_pages(flow, pages, ss, last);
1192 if (flow->req->qp->pmtu == enum_to_mtu(OPA_MTU_4096))
1193 flow->npagesets =
1194 tid_rdma_find_phys_blocks_4k(flow, pages, npages,
1195 flow->pagesets);
1197 flow->npagesets =
1198 tid_rdma_find_phys_blocks_8k(flow, pages, npages,
1199 flow->pagesets);
1201 return dma_map_flow(flow, pages);
1204 static inline void kern_add_tid_node(struct tid_rdma_flow *flow,
1208 struct kern_tid_node *node = &flow->tnode[flow->tnode_cnt++];
1210 WARN_ON_ONCE(flow->tnode_cnt >=
1220 trace_hfi1_tid_node_add(flow->req->qp, s, flow->tnode_cnt - 1,
1237 static int kern_alloc_tids(struct tid_rdma_flow *flow)
1239 struct hfi1_ctxtdata *rcd = flow->req->rcd;
1245 flow->tnode_cnt = 0;
1246 ngroups = flow->npagesets / dd->rcv_entries.group_size;
1252 kern_add_tid_node(flow, rcd, "complete groups", group,
1260 if (pageidx >= flow->npagesets)
1266 use = min_t(u32, flow->npagesets - pageidx,
1268 kern_add_tid_node(flow, rcd, "used groups", used, use);
1271 if (pageidx >= flow->npagesets)
1287 use = min_t(u32, flow->npagesets - pageidx, group->size);
1288 kern_add_tid_node(flow, rcd, "complete continue", group, use);
1290 if (pageidx >= flow->npagesets)
1293 trace_hfi1_msg_alloc_tids(flow->req->qp, " insufficient tids: needed ",
1294 (u64)flow->npagesets);
1300 static void kern_program_rcv_group(struct tid_rdma_flow *flow, int grp_num,
1303 struct hfi1_ctxtdata *rcd = flow->req->rcd;
1305 struct kern_tid_node *node = &flow->tnode[grp_num];
1308 u32 pmtu_pg = flow->req->qp->pmtu >> PAGE_SHIFT;
1319 pset = &flow->pagesets[(*pset_idx)++];
1342 flow->tid_entry[flow->tidcnt++] =
1347 flow->req->qp, flow->tidcnt - 1,
1348 flow->tid_entry[flow->tidcnt - 1]);
1351 flow->npkts += (npages + pmtu_pg - 1) >> ilog2(pmtu_pg);
1368 static void kern_unprogram_rcv_group(struct tid_rdma_flow *flow, int grp_num)
1370 struct hfi1_ctxtdata *rcd = flow->req->rcd;
1372 struct kern_tid_node *node = &flow->tnode[grp_num];
1399 struct hfi1_ctxtdata *rcd = flow->req->rcd;
1407 static void kern_program_rcvarray(struct tid_rdma_flow *flow)
1412 flow->npkts = 0;
1413 flow->tidcnt = 0;
1414 for (i = 0; i < flow->tnode_cnt; i++)
1415 kern_program_rcv_group(flow, i, &pset_idx);
1416 trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head, flow);
1420 * hfi1_kern_exp_rcv_setup() - setup TID's and flow for one segment of a
1423 * @req: TID RDMA request for which the segment/flow is being set up
1428 * (1) finds a free flow entry in the flow circular buffer
1437 * (7) It also manages queueing the QP when TID/flow resources are not
1442 * req->flow_idx is the index of the flow which has been prepared in this
1443 * invocation of function call. With flow = &req->flows[req->flow_idx],
1444 * flow->tid_entry contains the TID array which the sender can use for TID RDMA
1445 * sends and flow->npkts contains number of packets required to send the
1452 * For the queuing, caller must hold the flow->req->qp s_lock from the send
1456 * The function returns -EAGAIN if sufficient number of TID/flow resources to
1465 struct tid_rdma_flow *flow = &req->flows[req->setup_head];
1474 * We return error if either (a) we don't have space in the flow
1489 if (kern_get_phys_blocks(flow, qpriv->pages, ss, last)) {
1490 hfi1_wait_kmem(flow->req->qp);
1495 if (kernel_tid_waiters(rcd, &rcd->rarr_queue, flow->req->qp))
1503 if (kern_alloc_tids(flow))
1507 * tidarray and enable the HW flow
1509 kern_program_rcvarray(flow);
1512 * Setup the flow state with relevant information.
1515 * The flow is setup here as this is the most accurate time and place
1516 * to do so. Doing at a later time runs the risk of the flow data in
1519 memset(&flow->flow_state, 0x0, sizeof(flow->flow_state));
1520 flow->idx = qpriv->flow_state.index;
1521 flow->flow_state.generation = qpriv->flow_state.generation;
1522 flow->flow_state.spsn = qpriv->flow_state.psn;
1523 flow->flow_state.lpsn = flow->flow_state.spsn + flow->npkts - 1;
1524 flow->flow_state.r_next_psn =
1525 full_flow_psn(flow, flow->flow_state.spsn);
1526 qpriv->flow_state.psn += flow->npkts;
1528 dequeue_tid_waiter(rcd, &rcd->rarr_queue, flow->req->qp);
1537 queue_qp_for_tid_wait(rcd, &rcd->rarr_queue, flow->req->qp);
1542 static void hfi1_tid_rdma_reset_flow(struct tid_rdma_flow *flow)
1544 flow->npagesets = 0;
1549 * release the flow and TID HW/SW resources for that segment. The segments for a
1556 struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
1563 /* Exit if we have nothing in the flow circular buffer */
1569 for (i = 0; i < flow->tnode_cnt; i++)
1570 kern_unprogram_rcv_group(flow, i);
1572 flow->tnode_cnt = 0;
1577 dma_unmap_flow(flow);
1579 hfi1_tid_rdma_reset_flow(flow);
1607 * hfi1_kern_exp_rcv_free_flows - free previously allocated flow information
1685 struct tid_rdma_flow *flow;
1691 flow = &req->flows[tail];
1692 if (cmp_psn(psn, flow->flow_state.ib_spsn) >= 0 &&
1693 cmp_psn(psn, flow->flow_state.ib_lpsn) <= 0) {
1696 return flow;
1708 struct tid_rdma_flow *flow = &req->flows[req->flow_idx];
1718 *bth2 = mask_psn(flow->flow_state.ib_spsn + flow->pkt);
1719 trace_hfi1_tid_flow_build_read_pkt(qp, req->flow_idx, flow);
1722 req_addr = &flow->tid_entry[flow->tid_idx];
1723 req_len = sizeof(*flow->tid_entry) *
1724 (flow->tidcnt - flow->tid_idx);
1749 req->cur_seg * req->seg_len + flow->sent);
1753 cpu_to_be32((flow->flow_state.generation <<
1755 ((flow->flow_state.spsn + flow->pkt) &
1759 ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) <<
1769 flow->sent += *len;
1794 struct tid_rdma_flow *flow = NULL;
1804 * segments before freeing the flow.
1844 /* Allocate the flow if not yet */
1864 flow = &req->flows[req->flow_idx];
1865 flow->pkt = 0;
1866 flow->tid_idx = 0;
1867 flow->sent = 0;
1869 /* Set the first and last IB PSN for the flow in use.*/
1870 flow->flow_state.ib_spsn = req->s_next_psn;
1871 flow->flow_state.ib_lpsn =
1872 flow->flow_state.ib_spsn + flow->npkts - 1;
1876 req->s_next_psn += flow->npkts;
1897 struct tid_rdma_flow *flow;
1903 flow = &req->flows[req->setup_head];
1907 if (pktlen > sizeof(flow->tid_entry))
1909 memcpy(flow->tid_entry, packet->ebuf, pktlen);
1910 flow->tidcnt = pktlen / sizeof(*flow->tid_entry);
1916 flow->npkts = rvt_div_round_up_mtu(qp, len);
1917 for (i = 0; i < flow->tidcnt; i++) {
1919 flow->tid_entry[i]);
1920 tlen = EXP_TID_GET(flow->tid_entry[i], LEN);
1935 /* Empty the flow array */
1937 flow->pkt = 0;
1938 flow->tid_idx = 0;
1939 flow->tid_offset = 0;
1940 flow->sent = 0;
1941 flow->tid_qpn = be32_to_cpu(ohdr->u.tid_rdma.r_req.tid_flow_qp);
1942 flow->idx = (flow->tid_qpn >> TID_RDMA_DESTQP_FLOW_SHIFT) &
1945 flow->flow_state.generation = flow_psn >> HFI1_KDETH_BTH_SEQ_SHIFT;
1946 flow->flow_state.spsn = flow_psn & HFI1_KDETH_BTH_SEQ_MASK;
1947 flow->length = len;
1949 flow->flow_state.lpsn = flow->flow_state.spsn +
1950 flow->npkts - 1;
1951 flow->flow_state.ib_spsn = psn;
1952 flow->flow_state.ib_lpsn = flow->flow_state.ib_spsn + flow->npkts - 1;
1954 trace_hfi1_tid_flow_rcv_read_req(qp, req->setup_head, flow);
1955 /* Set the initial flow index to the current flow. */
1966 e->lpsn = psn + flow->npkts - 1;
2054 * == false) and the TID flow may be unusable (the
2058 * Consequently, we need to update the TID flow info every time
2351 struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
2352 u32 tidentry = flow->tid_entry[flow->tid_idx];
2360 *len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset);
2361 flow->sent += *len;
2362 next_offset = flow->tid_offset + *len;
2363 last_pkt = (flow->sent >= flow->length);
2365 trace_hfi1_tid_entry_build_read_resp(qp, flow->tid_idx, tidentry);
2366 trace_hfi1_tid_flow_build_read_resp(qp, req->clear_tail, flow);
2380 KDETH_SET(resp->kdeth0, OFFSET, flow->tid_offset / om);
2386 resp->verbs_psn = cpu_to_be32(mask_psn(flow->flow_state.ib_spsn +
2387 flow->pkt));
2390 *bth1 = flow->tid_qpn;
2391 *bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) &
2393 (flow->flow_state.generation <<
2397 /* Advance to next flow */
2402 flow->tid_offset = 0;
2403 flow->tid_idx++;
2405 flow->tid_offset = next_offset;
2448 * 4. Free the TID flow resources.
2456 struct tid_rdma_flow *flow;
2474 flow = &req->flows[req->clear_tail];
2476 if (cmp_psn(ipsn, flow->flow_state.ib_lpsn)) {
2477 update_r_next_psn_fecn(packet, priv, rcd, flow, fecn);
2479 if (cmp_psn(kpsn, flow->flow_state.r_next_psn))
2481 flow->flow_state.r_next_psn = mask_psn(kpsn + 1);
2512 flow->flow_state.r_next_psn = mask_psn(kpsn + 1);
2531 trace_hfi1_tid_flow_rcv_read_resp(qp, req->clear_tail, flow);
2546 * Clear the hw flow under two conditions:
2598 /* Free flow */
2633 struct tid_rdma_flow *flow;
2638 flow = &req->flows[req->clear_tail];
2639 hfi1_restart_rc(qp, flow->flow_state.ib_spsn, 0);
2665 struct tid_rdma_flow *flow;
2751 * After that, the flow is *not* reprogrammed and the
2756 flow = &req->flows[req->clear_tail];
2759 flow);
2762 flow->flow_state.r_next_psn);
2785 fpsn = full_flow_psn(flow,
2786 flow->flow_state.lpsn);
2793 flow->flow_state.r_next_psn =
2799 flow->idx);
2800 flow->flow_state.r_next_psn = last_psn;
2815 * Since the TID flow is able to ride through
2866 struct tid_rdma_flow *flow;
2945 flow = &req->flows[req->clear_tail];
2951 trace_hfi1_tid_flow_handle_kdeth_eflags(qp, req->clear_tail, flow);
2959 flow->flow_state.r_next_psn =
2961 flow->idx);
2963 flow->flow_state.r_next_psn;
2975 flow->flow_state.r_next_psn);
2987 if (psn == full_flow_psn(flow,
2988 flow->flow_state.lpsn))
2990 flow->flow_state.r_next_psn =
2993 flow->flow_state.r_next_psn;
3034 qpriv->s_nak_psn = mask_psn(flow->flow_state.r_next_psn);
3043 * find the proper flow, set the flow index to that flow,
3044 * and reset the flow information.
3050 struct tid_rdma_flow *flow;
3058 flow = find_flow_ib(req, *bth2, &fidx);
3059 if (!flow) {
3061 qp, "!!!!!! Could not find flow to restart: bth2 ",
3070 flow = &req->flows[fidx];
3075 delta_pkts = delta_psn(*bth2, flow->flow_state.ib_spsn);
3078 full_flow_psn(flow,
3079 flow->flow_state.spsn));
3081 trace_hfi1_tid_flow_restart_req(qp, fidx, flow);
3082 diff = delta_pkts + flow->resync_npkts;
3084 flow->sent = 0;
3085 flow->pkt = 0;
3086 flow->tid_idx = 0;
3087 flow->tid_offset = 0;
3089 for (tididx = 0; tididx < flow->tidcnt; tididx++) {
3090 u32 tidentry = flow->tid_entry[tididx], tidlen,
3093 flow->tid_offset = 0;
3097 flow->pkt += npkts;
3098 flow->sent += (npkts == tidnpkts ? tidlen :
3100 flow->tid_offset += npkts * qp->pmtu;
3108 flow->sent, 0);
3110 * Packet PSN is based on flow_state.spsn + flow->pkt. However,
3113 * flow and the SGE has been sufficiently advanced, we have to
3114 * adjust flow->pkt in order to calculate the correct PSN.
3116 flow->pkt -= flow->resync_npkts;
3119 if (flow->tid_offset ==
3120 EXP_TID_GET(flow->tid_entry[tididx], LEN) * PAGE_SIZE) {
3122 flow->tid_offset = 0;
3124 flow->tid_idx = tididx;
3131 trace_hfi1_tid_flow_restart_req(qp, fidx, flow);
3173 * First, clear the flow to help prevent any delayed packets from
3377 * Set the number of flow to be used based on negotiated
3405 * Heuristic for computing the RNR timeout when waiting on the flow
3407 * a flow will be available, we assume that if a QP is at position N in
3408 * the flow queue it has to wait approximately (N + 1) * (number of
3497 /* If all data has been received, clear the flow */
3531 /* Allocate flow if we don't have one */
3586 * Schedule an RNR NAK to be sent if (a) flow or rcv array allocation
3843 struct tid_rdma_flow *flow = NULL;
3852 flow = &req->flows[req->flow_idx];
3873 trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow);
3880 trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow);
3888 flow->flow_state.resp_ib_psn = bth2;
3889 resp_addr = (void *)flow->tid_entry;
3890 resp_len = sizeof(*flow->tid_entry) * flow->tidcnt;
3920 cpu_to_be32((flow->flow_state.generation <<
3922 (flow->flow_state.spsn &
3926 ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) <<
4001 * HW flow and RcvArray resources.
4046 struct tid_rdma_flow *flow;
4106 flow = &req->flows[req->setup_head];
4107 flow->pkt = 0;
4108 flow->tid_idx = 0;
4109 flow->tid_offset = 0;
4110 flow->sent = 0;
4111 flow->resync_npkts = 0;
4112 flow->tid_qpn = be32_to_cpu(ohdr->u.tid_rdma.w_rsp.tid_flow_qp);
4113 flow->idx = (flow->tid_qpn >> TID_RDMA_DESTQP_FLOW_SHIFT) &
4116 flow->flow_state.generation = flow_psn >> HFI1_KDETH_BTH_SEQ_SHIFT;
4117 flow->flow_state.spsn = flow_psn & HFI1_KDETH_BTH_SEQ_MASK;
4118 flow->flow_state.resp_ib_psn = psn;
4119 flow->length = min_t(u32, req->seg_len,
4122 flow->npkts = rvt_div_round_up_mtu(qp, flow->length);
4123 flow->flow_state.lpsn = flow->flow_state.spsn +
4124 flow->npkts - 1;
4127 if (pktlen > sizeof(flow->tid_entry)) {
4131 memcpy(flow->tid_entry, packet->ebuf, pktlen);
4132 flow->tidcnt = pktlen / sizeof(*flow->tid_entry);
4133 trace_hfi1_tid_flow_rcv_write_resp(qp, req->setup_head, flow);
4141 for (i = 0; i < flow->tidcnt; i++) {
4143 qp, i, flow->tid_entry[i]);
4144 if (!EXP_TID_GET(flow->tid_entry[i], LEN)) {
4148 tidlen += EXP_TID_GET(flow->tid_entry[i], LEN);
4150 if (tidlen * PAGE_SIZE < flow->length) {
4159 * flow index to the current flow.
4163 /* Set acked flow index to head index */
4210 struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
4214 u32 tidentry = flow->tid_entry[flow->tid_idx];
4225 *len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset);
4226 flow->sent += *len;
4227 next_offset = flow->tid_offset + *len;
4228 last_pkt = (flow->tid_idx == (flow->tidcnt - 1) &&
4229 next_offset >= tidlen) || (flow->sent >= flow->length);
4230 trace_hfi1_tid_entry_build_write_data(qp, flow->tid_idx, tidentry);
4231 trace_hfi1_tid_flow_build_write_data(qp, req->clear_tail, flow);
4241 KDETH_SET(wd->kdeth0, OFFSET, flow->tid_offset / om);
4246 *bth1 = flow->tid_qpn;
4247 *bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) &
4249 (flow->flow_state.generation <<
4253 if (flow->flow_state.lpsn + 1 +
4261 flow->tid_offset = 0;
4262 flow->tid_idx++;
4264 flow->tid_offset = next_offset;
4277 struct tid_rdma_flow *flow;
4295 flow = &req->flows[req->clear_tail];
4296 if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.lpsn))) {
4297 update_r_next_psn_fecn(packet, priv, rcd, flow, fecn);
4299 if (cmp_psn(psn, flow->flow_state.r_next_psn))
4302 flow->flow_state.r_next_psn = mask_psn(psn + 1);
4324 full_flow_psn(flow, flow->flow_state.spsn)) *
4346 flow->flow_state.r_next_psn = mask_psn(psn + 1);
4349 rcd->flows[flow->idx].psn = psn & HFI1_KDETH_BTH_SEQ_MASK;
4354 * Release the flow if one of the following conditions has been met:
4405 priv->r_next_psn_kdeth = flow->flow_state.r_next_psn;
4414 priv->s_nak_psn = flow->flow_state.r_next_psn;
4433 struct tid_rdma_flow *flow = &req->flows[iflow];
4454 *bth2 = full_flow_psn(flow, flow->flow_state.lpsn);
4460 ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) <<
4466 cpu_to_be32(flow->flow_state.resp_ib_psn);
4504 struct tid_rdma_flow *flow;
4545 flow = &req->flows[req->acked_tail];
4546 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
4549 if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0 ||
4550 cmp_psn(req_psn, flow->flow_state.resp_ib_psn) < 0)
4554 full_flow_psn(flow, flow->flow_state.lpsn)) >= 0 &&
4559 req->r_last_acked = flow->flow_state.resp_ib_psn;
4575 flow = &req->flows[req->acked_tail];
4576 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
4637 flow = &req->flows[req->acked_tail];
4642 * default number of packets. flow->resync_npkts is used
4647 fpsn = full_flow_psn(flow, flow->flow_state.spsn);
4650 * If resync_psn points to the last flow PSN for a
4655 if (flow->flow_state.generation !=
4658 flow->resync_npkts +=
4675 flow = &rptr->flows[fidx];
4676 gen = flow->flow_state.generation;
4678 flow->flow_state.spsn !=
4681 lpsn = flow->flow_state.lpsn;
4682 lpsn = full_flow_psn(flow, lpsn);
4683 flow->npkts =
4687 flow->flow_state.generation =
4689 flow->flow_state.spsn = spsn;
4690 flow->flow_state.lpsn =
4691 flow->flow_state.spsn +
4692 flow->npkts - 1;
4693 flow->pkt = 0;
4694 spsn += flow->npkts;
4695 resync_psn += flow->npkts;
4698 flow);
4723 flow = &req->flows[req->acked_tail];
4724 flpsn = full_flow_psn(flow, flow->flow_state.lpsn);
4728 flow);
4850 struct tid_rdma_flow *flow = &req->flows[fidx];
4860 generation = kern_flow_generation_next(flow->flow_state.generation);
4878 struct tid_rdma_flow *flow;
4906 * If we don't have a flow, save the generation so it can be
4907 * applied when a new flow is allocated
4911 /* Reprogram the QP flow with new generation */
4918 * sync point and the flow has/will be reprogrammed
4924 * Reset all TID flow information with the new generation.
4947 flow = &req->flows[flow_idx];
4948 lpsn = full_flow_psn(flow,
4949 flow->flow_state.lpsn);
4950 next = flow->flow_state.r_next_psn;
4951 flow->npkts = delta_psn(lpsn, next - 1);
4952 flow->flow_state.generation = fs->generation;
4953 flow->flow_state.spsn = fs->psn;
4954 flow->flow_state.lpsn =
4955 flow->flow_state.spsn + flow->npkts - 1;
4956 flow->flow_state.r_next_psn =
4957 full_flow_psn(flow,
4958 flow->flow_state.spsn);
4959 fs->psn += flow->npkts;
4961 flow);
5114 /* move pointer to next flow */
5197 u16 flow;
5245 * into the flow array is used. The distance between it
5261 flow = CIRC_PREV(req->acked_tail, MAX_FLOWS);
5289 full_flow_psn(&req->flows[flow],
5290 req->flows[flow].flow_state.lpsn)) > 0))) {
5300 flow = req->acked_tail;
5308 hwords += hfi1_build_tid_rdma_write_ack(qp, e, ohdr, flow, &bth1,
5496 * progress is to read the HW flow state.
5519 struct tid_rdma_flow *flow,
5530 flow->flow_state.r_next_psn =
5531 read_r_next_psn(dd, rcd->ctxt, flow->idx);
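
The matches above repeatedly build a flow's packet PSNs by pairing a generation number with a short per-flow sequence: full_flow_psn(), HFI1_KDETH_BTH_SEQ_SHIFT/MASK, and the recurring "lpsn = spsn + npkts - 1" arithmetic all come from the hfi1 TID RDMA code these lines are drawn from. The standalone sketch below is not driver code; it only illustrates that composition under the assumption that the low 11 bits of a 24-bit KDETH PSN carry the per-flow sequence and the bits above carry the generation. The names SEQ_SHIFT, PSN_MASK, and sketch_full_flow_psn, and the example values, are illustrative only.

/*
 * Standalone sketch (not driver code): how a flow's generation and an
 * 11-bit sequence combine into one KDETH PSN, mirroring the
 * full_flow_psn()/spsn/lpsn arithmetic seen in the matches above.
 * The shift of 11 and the 24-bit PSN width are assumptions made for
 * this illustration, not values taken from the listing.
 */
#include <stdio.h>

#define SEQ_SHIFT 11u
#define SEQ_MASK  ((1u << SEQ_SHIFT) - 1)
#define PSN_MASK  0xffffffu                  /* assumed 24-bit PSN space */

static unsigned int sketch_full_flow_psn(unsigned int generation,
					 unsigned int seq)
{
	/* generation in the upper bits, per-flow sequence in the low 11 */
	return ((generation << SEQ_SHIFT) | (seq & SEQ_MASK)) & PSN_MASK;
}

int main(void)
{
	unsigned int generation = 3, spsn = 5, npkts = 8;
	unsigned int lpsn = spsn + npkts - 1;  /* "lpsn = spsn + npkts - 1" */

	printf("first PSN 0x%06x, last PSN 0x%06x\n",
	       sketch_full_flow_psn(generation, spsn),
	       sketch_full_flow_psn(generation, lpsn));
	return 0;
}

Under these assumptions, bumping the generation shifts every PSN of a flow at once, which matches the RESYNC and restart handling in the listing, where a new generation goes hand in hand with recomputed spsn/lpsn values.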
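
The listing also walks a small per-request flows[] ring with several indices (setup_head, flow_idx, clear_tail, acked_tail) and wraps one of them with CIRC_PREV(..., MAX_FLOWS); the excerpted comments describe segments being set up and cleared in FIFO order through this "flow circular buffer". The sketch below shows only that head/tail discipline on a power-of-two ring; the type and function names, the size of 4, and the single-field flow struct are hypothetical and not taken from the driver, which keeps additional indices between head and tail.

/*
 * Minimal sketch of a power-of-two circular buffer of per-segment "flows",
 * illustrating the setup_head / clear_tail FIFO discipline described in the
 * comments above.  All names and the size of 4 are hypothetical.
 */
#include <assert.h>
#include <stdio.h>

#define NUM_FLOWS 4u                          /* must be a power of two */
#define CIRC_NEXT(i) (((i) + 1) & (NUM_FLOWS - 1))

struct sketch_flow {
	unsigned int npkts;                   /* packets covering the segment */
};

struct sketch_req {
	struct sketch_flow flows[NUM_FLOWS];
	unsigned int setup_head;              /* next slot to set up */
	unsigned int clear_tail;              /* oldest slot still in use */
};

/* claim the slot at setup_head for a new segment, FIFO order */
static struct sketch_flow *sketch_setup(struct sketch_req *req,
					unsigned int npkts)
{
	struct sketch_flow *flow = &req->flows[req->setup_head];

	flow->npkts = npkts;
	req->setup_head = CIRC_NEXT(req->setup_head);
	return flow;
}

/* release the oldest segment's slot at clear_tail */
static void sketch_clear(struct sketch_req *req)
{
	assert(req->clear_tail != req->setup_head);  /* ring not empty */
	req->flows[req->clear_tail].npkts = 0;
	req->clear_tail = CIRC_NEXT(req->clear_tail);
}

int main(void)
{
	struct sketch_req req = { .setup_head = 0, .clear_tail = 0 };

	sketch_setup(&req, 8);
	sketch_setup(&req, 6);
	sketch_clear(&req);
	printf("head %u, tail %u\n", req.setup_head, req.clear_tail);
	return 0;
}

Keeping the ring size a power of two lets the wraparound be a simple mask, which is the property the CIRC_*-style index arithmetic in the listing relies on.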