t4_cpl_io.c revision 300895
1237263Snp/*-
2292736Snp * Copyright (c) 2012, 2015 Chelsio Communications, Inc.
3237263Snp * All rights reserved.
4237263Snp * Written by: Navdeep Parhar <np@FreeBSD.org>
5237263Snp *
6237263Snp * Redistribution and use in source and binary forms, with or without
7237263Snp * modification, are permitted provided that the following conditions
8237263Snp * are met:
9237263Snp * 1. Redistributions of source code must retain the above copyright
10237263Snp *    notice, this list of conditions and the following disclaimer.
11237263Snp * 2. Redistributions in binary form must reproduce the above copyright
12237263Snp *    notice, this list of conditions and the following disclaimer in the
13237263Snp *    documentation and/or other materials provided with the distribution.
14237263Snp *
15237263Snp * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16237263Snp * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17237263Snp * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18237263Snp * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19237263Snp * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20237263Snp * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21237263Snp * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22237263Snp * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23237263Snp * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24237263Snp * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25237263Snp * SUCH DAMAGE.
26237263Snp */
27237263Snp
28237263Snp#include <sys/cdefs.h>
29237263Snp__FBSDID("$FreeBSD: head/sys/dev/cxgbe/tom/t4_cpl_io.c 300895 2016-05-28 00:38:17Z np $");
30237263Snp
31237263Snp#include "opt_inet.h"
32237263Snp
33237263Snp#ifdef TCP_OFFLOAD
34237263Snp#include <sys/param.h>
35237263Snp#include <sys/types.h>
36237263Snp#include <sys/kernel.h>
37237263Snp#include <sys/ktr.h>
38237263Snp#include <sys/module.h>
39237263Snp#include <sys/protosw.h>
40237263Snp#include <sys/domain.h>
41237263Snp#include <sys/socket.h>
42237263Snp#include <sys/socketvar.h>
43237263Snp#include <sys/sglist.h>
44237263Snp#include <netinet/in.h>
45237263Snp#include <netinet/in_pcb.h>
46237263Snp#include <netinet/ip.h>
47276574Snp#include <netinet/ip6.h>
48237263Snp#define TCPSTATES
49237263Snp#include <netinet/tcp_fsm.h>
50237263Snp#include <netinet/tcp_seq.h>
51294869Sglebius#include <netinet/tcp_var.h>
52237263Snp#include <netinet/toecore.h>
53237263Snp
54237263Snp#include "common/common.h"
55237263Snp#include "common/t4_msg.h"
56237263Snp#include "common/t4_regs.h"
57239344Snp#include "common/t4_tcb.h"
58237263Snp#include "tom/t4_tom_l2t.h"
59237263Snp#include "tom/t4_tom.h"
60237263Snp
61237263SnpVNET_DECLARE(int, tcp_do_autosndbuf);
62237263Snp#define V_tcp_do_autosndbuf VNET(tcp_do_autosndbuf)
63237263SnpVNET_DECLARE(int, tcp_autosndbuf_inc);
64237263Snp#define V_tcp_autosndbuf_inc VNET(tcp_autosndbuf_inc)
65237263SnpVNET_DECLARE(int, tcp_autosndbuf_max);
66237263Snp#define V_tcp_autosndbuf_max VNET(tcp_autosndbuf_max)
67237263SnpVNET_DECLARE(int, tcp_do_autorcvbuf);
68237263Snp#define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf)
69237263SnpVNET_DECLARE(int, tcp_autorcvbuf_inc);
70237263Snp#define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc)
71237263SnpVNET_DECLARE(int, tcp_autorcvbuf_max);
72237263Snp#define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max)
73237263Snp
/*
 * Send a FW_FLOWC_WR to the firmware to program the tx flow parameters for
 * this tid.  This must be the first work request sent on the tid's offload
 * tx queue (asserted via TPF_FLOWC_WR_SENT).  ftxp carries the connection's
 * TCP state to hand to the firmware; a NULL ftxp programs a minimal set of
 * parameters with placeholder sndbuf/MSS values instead.
 */
void
send_flowc_wr(struct toepcb *toep, struct flowc_tx_params *ftxp)
{
	struct wrqe *wr;
	struct fw_flowc_wr *flowc;
	unsigned int nparams = ftxp ? 8 : 6, flowclen;	/* # of mnemonic/value pairs */
	struct vi_info *vi = toep->vi;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	unsigned int pfvf = G_FW_VIID_PFN(vi->viid) << S_FW_VIID_PFN;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];

	KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT),
	    ("%s: flowc for tid %u sent already", __func__, toep->tid));

	flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);

	wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	flowc = wrtod(wr);
	memset(flowc, 0, wr->wr_len);

	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
	    V_FW_FLOWC_WR_NPARAMS(nparams));
	flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
	    V_FW_WR_FLOWID(toep->tid));

	/* Parameters common to both flavors of the WR. */
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htobe32(pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htobe32(pi->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htobe32(pi->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htobe32(toep->ofld_rxq->iq.abs_id);
	if (ftxp) {
		/* sndbuf is capped by the tom's configured limit. */
		uint32_t sndbuf = min(ftxp->snd_space, sc->tt.sndbuf);

		flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
		flowc->mnemval[4].val = htobe32(ftxp->snd_nxt);
		flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
		flowc->mnemval[5].val = htobe32(ftxp->rcv_nxt);
		flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
		flowc->mnemval[6].val = htobe32(sndbuf);
		flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
		flowc->mnemval[7].val = htobe32(ftxp->mss);

		CTR6(KTR_CXGBE,
		    "%s: tid %u, mss %u, sndbuf %u, snd_nxt 0x%x, rcv_nxt 0x%x",
		    __func__, toep->tid, ftxp->mss, sndbuf, ftxp->snd_nxt,
		    ftxp->rcv_nxt);
	} else {
		/* No TCP state available; use small placeholder values. */
		flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDBUF;
		flowc->mnemval[4].val = htobe32(512);
		flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_MSS;
		flowc->mnemval[5].val = htobe32(512);

		CTR2(KTR_CXGBE, "%s: tid %u", __func__, toep->tid);
	}

	/* Charge the WR's credits and one tx descriptor to the connection. */
	txsd->tx_credits = howmany(flowclen, 16);
	txsd->plen = 0;
	KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
	    ("%s: not enough credits (%d)", __func__, toep->tx_credits));
	toep->tx_credits -= txsd->tx_credits;
	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
		toep->txsd_pidx = 0;
	toep->txsd_avail--;

	toep->flags |= TPF_FLOWC_WR_SENT;
        t4_wrq_tx(sc, wr);
}
149237263Snp
/*
 * Abort the connection by sending a CPL_ABORT_REQ (which results in a RST to
 * the peer).  A no-op if an abort is already in progress.  snd_nxt is used as
 * the abort's sequence number when the inp has been dropped, because tp is
 * not safe to read in that case.  Caller must hold the inp write lock.
 */
void
send_reset(struct adapter *sc, struct toepcb *toep, uint32_t snd_nxt)
{
	struct wrqe *wr;
	struct cpl_abort_req *req;
	int tid = toep->tid;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);	/* don't use if INP_DROPPED */

	INP_WLOCK_ASSERT(inp);

	CTR6(KTR_CXGBE, "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x%s",
	    __func__, toep->tid,
	    inp->inp_flags & INP_DROPPED ? "inp dropped" :
	    tcpstates[tp->t_state],
	    toep->flags, inp->inp_flags,
	    toep->flags & TPF_ABORT_SHUTDOWN ?
	    " (abort already in progress)" : "");

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		return;	/* abort already in progress */

	toep->flags |= TPF_ABORT_SHUTDOWN;

	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %d.", __func__, tid));

	wr = alloc_wrqe(sizeof(*req), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, tid);
	if (inp->inp_flags & INP_DROPPED)
		req->rsvd0 = htobe32(snd_nxt);
	else
		req->rsvd0 = htobe32(tp->snd_nxt);
	/* NOTE(review): rsvd1 appears to flag "no data ever sent" — confirm
	 * against the CPL_ABORT_REQ description in the firmware docs. */
	req->rsvd1 = !(toep->flags & TPF_TX_DATA_SENT);
	req->cmd = CPL_ABORT_SEND_RST;

	/*
	 * XXX: What's the correct way to tell that the inp hasn't been detached
	 * from its socket?  Should I even be flushing the snd buffer here?
	 */
	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
		struct socket *so = inp->inp_socket;

		if (so != NULL)	/* because I'm not sure.  See comment above */
			sbflush(&so->so_snd);
	}

	t4_l2t_send(sc, wr, toep->l2te);
}
205237263Snp
/*
 * Called when a connection is established to translate the TCP options
 * reported by HW to FreeBSD's native format.  opt is the hardware-reported
 * option word; the G_TCPOPT_* accessors pick apart its fields.
 */
static void
assign_rxopt(struct tcpcb *tp, unsigned int opt)
{
	struct toepcb *toep = tp->t_toe;
	struct inpcb *inp = tp->t_inpcb;
	struct adapter *sc = td_adapter(toep->td);
	int n;	/* fixed IP + TCP header overhead, subtracted from the MTU */

	INP_LOCK_ASSERT(inp);

	if (inp->inp_inc.inc_flags & INC_ISIPV6)
		n = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	else
		n = sizeof(struct ip) + sizeof(struct tcphdr);
	/* HW reports an index into the adapter's MTU table, not an MSS. */
	tp->t_maxseg = sc->params.mtus[G_TCPOPT_MSS(opt)] - n;

	CTR4(KTR_CXGBE, "%s: tid %d, mtu_idx %u (%u)", __func__, toep->tid,
	    G_TCPOPT_MSS(opt), sc->params.mtus[G_TCPOPT_MSS(opt)]);

	if (G_TCPOPT_TSTAMP(opt)) {
		tp->t_flags |= TF_RCVD_TSTMP;	/* timestamps ok */
		tp->ts_recent = 0;		/* hmmm */
		tp->ts_recent_age = tcp_ts_getticks();
	}

	if (G_TCPOPT_SACK(opt))
		tp->t_flags |= TF_SACK_PERMIT;	/* should already be set */
	else
		tp->t_flags &= ~TF_SACK_PERMIT;	/* sack disallowed by peer */

	if (G_TCPOPT_WSCALE_OK(opt))
		tp->t_flags |= TF_RCVD_SCALE;

	/* Doing window scaling? */
	if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
	    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
		tp->rcv_scale = tp->request_r_scale;
		tp->snd_scale = G_TCPOPT_SND_WSCALE(opt);
	}
}
250237263Snp
/*
 * Completes some final bits of initialization for just established connections
 * and changes their state to TCPS_ESTABLISHED.
 *
 * The ISNs are from after the exchange of SYNs.  i.e., the true ISN + 1.
 */
void
make_established(struct toepcb *toep, uint32_t snd_isn, uint32_t rcv_isn,
    uint16_t opt)
{
	struct inpcb *inp = toep->inp;
	struct socket *so = inp->inp_socket;
	struct tcpcb *tp = intotcpcb(inp);
	long bufsize;
	uint32_t iss = be32toh(snd_isn) - 1;	/* true ISS */
	uint32_t irs = be32toh(rcv_isn) - 1;	/* true IRS */
	uint16_t tcpopt = be16toh(opt);
	struct flowc_tx_params ftxp;

	CURVNET_SET(so->so_vnet);
	INP_WLOCK_ASSERT(inp);
	KASSERT(tp->t_state == TCPS_SYN_SENT ||
	    tp->t_state == TCPS_SYN_RECEIVED,
	    ("%s: TCP state %s", __func__, tcpstates[tp->t_state]));

	CTR4(KTR_CXGBE, "%s: tid %d, toep %p, inp %p",
	    __func__, toep->tid, toep, inp);

	tp->t_state = TCPS_ESTABLISHED;
	tp->t_starttime = ticks;
	TCPSTAT_INC(tcps_connects);

	/* Receive-side sequence state; window starts at the credits in hw. */
	tp->irs = irs;
	tcp_rcvseqinit(tp);
	tp->rcv_wnd = toep->rx_credits << 10;
	tp->rcv_adv += tp->rcv_wnd;
	tp->last_ack_sent = tp->rcv_nxt;

	/*
	 * If we were unable to send all rx credits via opt0, save the remainder
	 * in rx_credits so that they can be handed over with the next credit
	 * update.
	 */
	SOCKBUF_LOCK(&so->so_rcv);
	bufsize = select_rcv_wnd(so);
	SOCKBUF_UNLOCK(&so->so_rcv);
	toep->rx_credits = bufsize - tp->rcv_wnd;

	/* Send-side sequence state. */
	tp->iss = iss;
	tcp_sendseqinit(tp);
	tp->snd_una = iss + 1;
	tp->snd_nxt = iss + 1;
	tp->snd_max = iss + 1;

	/* Import the TCP options the hardware negotiated. */
	assign_rxopt(tp, tcpopt);

	SOCKBUF_LOCK(&so->so_snd);
	if (so->so_snd.sb_flags & SB_AUTOSIZE && V_tcp_do_autosndbuf)
		bufsize = V_tcp_autosndbuf_max;
	else
		bufsize = sbspace(&so->so_snd);
	SOCKBUF_UNLOCK(&so->so_snd);

	/* Program the firmware with the connection's tx parameters. */
	ftxp.snd_nxt = tp->snd_nxt;
	ftxp.rcv_nxt = tp->rcv_nxt;
	ftxp.snd_space = bufsize;
	ftxp.mss = tp->t_maxseg;
	send_flowc_wr(toep, &ftxp);

	soisconnected(so);
	CURVNET_RESTORE();
}
323237263Snp
324237263Snpstatic int
325239344Snpsend_rx_credits(struct adapter *sc, struct toepcb *toep, int credits)
326237263Snp{
327237263Snp	struct wrqe *wr;
328237263Snp	struct cpl_rx_data_ack *req;
329237263Snp	uint32_t dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);
330237263Snp
331239344Snp	KASSERT(credits >= 0, ("%s: %d credits", __func__, credits));
332239344Snp
333237263Snp	wr = alloc_wrqe(sizeof(*req), toep->ctrlq);
334237263Snp	if (wr == NULL)
335237263Snp		return (0);
336237263Snp	req = wrtod(wr);
337237263Snp
338237263Snp	INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid);
339237263Snp	req->credit_dack = htobe32(dack | V_RX_CREDITS(credits));
340237263Snp
341237263Snp	t4_wrq_tx(sc, wr);
342237263Snp	return (credits);
343237263Snp}
344237263Snp
/*
 * Account for data the application has consumed from the receive sockbuf and
 * return the accumulated rx credits to the hardware once they are worth
 * sending.  Caller holds the inp write lock and the so_rcv sockbuf lock.
 */
void
t4_rcvd_locked(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_rcv;
	struct toepcb *toep = tp->t_toe;
	int credits;

	INP_WLOCK_ASSERT(inp);

	SOCKBUF_LOCK_ASSERT(sb);
	KASSERT(toep->sb_cc >= sbused(sb),
	    ("%s: sb %p has more data (%d) than last time (%d).",
	    __func__, sb, sbused(sb), toep->sb_cc));

	/*
	 * Bytes that have left the sockbuf since the last check were consumed
	 * by the application; they become credits owed to the hardware.
	 */
	toep->rx_credits += toep->sb_cc - sbused(sb);
	toep->sb_cc = sbused(sb);

	/*
	 * Return credits only when it's worthwhile: the window has shrunk,
	 * a large batch has accumulated, or the sockbuf is nearly drained.
	 */
	if (toep->rx_credits > 0 &&
	    (tp->rcv_wnd <= 32 * 1024 || toep->rx_credits >= 64 * 1024 ||
	    (toep->rx_credits >= 16 * 1024 && tp->rcv_wnd <= 128 * 1024) ||
	    toep->sb_cc + tp->rcv_wnd < sb->sb_lowat)) {

		credits = send_rx_credits(sc, toep, toep->rx_credits);
		toep->rx_credits -= credits;
		tp->rcv_wnd += credits;
		tp->rcv_adv += credits;
	}
}
376299210Sjhb
377299210Sjhbvoid
378299210Sjhbt4_rcvd(struct toedev *tod, struct tcpcb *tp)
379299210Sjhb{
380299210Sjhb	struct inpcb *inp = tp->t_inpcb;
381299210Sjhb	struct socket *so = inp->inp_socket;
382299210Sjhb	struct sockbuf *sb = &so->so_rcv;
383299210Sjhb
384299210Sjhb	SOCKBUF_LOCK(sb);
385299210Sjhb	t4_rcvd_locked(tod, tp);
386280878Snp	SOCKBUF_UNLOCK(sb);
387237263Snp}
388237263Snp
/*
 * Close a connection by sending a CPL_CLOSE_CON_REQ message.  A no-op (and
 * still returns 0) if a FIN has already been sent on the tid.
 */
static int
close_conn(struct adapter *sc, struct toepcb *toep)
{
	struct wrqe *wr;
	struct cpl_close_con_req *req;
	unsigned int tid = toep->tid;

	CTR3(KTR_CXGBE, "%s: tid %u%s", __func__, toep->tid,
	    toep->flags & TPF_FIN_SENT ? ", IGNORED" : "");

	if (toep->flags & TPF_FIN_SENT)
		return (0);

	/* The flowc WR must always precede any other WR on the tid. */
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, tid));

	wr = alloc_wrqe(sizeof(*req), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

        req->wr.wr_hi = htonl(V_FW_WR_OP(FW_TP_WR) |
	    V_FW_WR_IMMDLEN(sizeof(*req) - sizeof(req->wr)));
	req->wr.wr_mid = htonl(V_FW_WR_LEN16(howmany(sizeof(*req), 16)) |
	    V_FW_WR_FLOWID(tid));
        req->wr.wr_lo = cpu_to_be64(0);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = 0;

	/* FIN is now on the wire; clear any pending request to send one. */
	toep->flags |= TPF_FIN_SENT;
	toep->flags &= ~TPF_SEND_FIN;
	t4_l2t_send(sc, wr, toep->l2te);

	return (0);
}
429237263Snp
430237263Snp#define MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16)
431237263Snp#define MIN_OFLD_TX_CREDITS (howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16))
432237263Snp
433237263Snp/* Maximum amount of immediate data we could stuff in a WR */
434237263Snpstatic inline int
435237263Snpmax_imm_payload(int tx_credits)
436237263Snp{
437237263Snp	const int n = 2;	/* Use only up to 2 desc for imm. data WR */
438237263Snp
439237263Snp	KASSERT(tx_credits >= 0 &&
440237263Snp		tx_credits <= MAX_OFLD_TX_CREDITS,
441237263Snp		("%s: %d credits", __func__, tx_credits));
442237263Snp
443237263Snp	if (tx_credits < MIN_OFLD_TX_CREDITS)
444237263Snp		return (0);
445237263Snp
446237263Snp	if (tx_credits >= (n * EQ_ESIZE) / 16)
447237263Snp		return ((n * EQ_ESIZE) - sizeof(struct fw_ofld_tx_data_wr));
448237263Snp	else
449237263Snp		return (tx_credits * 16 - sizeof(struct fw_ofld_tx_data_wr));
450237263Snp}
451237263Snp
452237263Snp/* Maximum number of SGL entries we could stuff in a WR */
453237263Snpstatic inline int
454237263Snpmax_dsgl_nsegs(int tx_credits)
455237263Snp{
456237263Snp	int nseg = 1;	/* ulptx_sgl has room for 1, rest ulp_tx_sge_pair */
457237263Snp	int sge_pair_credits = tx_credits - MIN_OFLD_TX_CREDITS;
458237263Snp
459237263Snp	KASSERT(tx_credits >= 0 &&
460237263Snp		tx_credits <= MAX_OFLD_TX_CREDITS,
461237263Snp		("%s: %d credits", __func__, tx_credits));
462237263Snp
463237263Snp	if (tx_credits < MIN_OFLD_TX_CREDITS)
464237263Snp		return (0);
465237263Snp
466237263Snp	nseg += 2 * (sge_pair_credits * 16 / 24);
467237263Snp	if ((sge_pair_credits * 16) % 24 == 16)
468237263Snp		nseg++;
469237263Snp
470237263Snp	return (nseg);
471237263Snp}
472237263Snp
/*
 * Fill in the header of an offload tx data work request.  immdlen is the
 * number of bytes of immediate data that follow the header (0 for DSGL tx),
 * plen the total payload length, credits the WR size in 16B units.  shove
 * and ulp_submode are passed through to the firmware flags.  When txalign
 * is enabled, LSO/payload-alignment hints are chosen from the payload size
 * and port type.
 */
static inline void
write_tx_wr(void *dst, struct toepcb *toep, unsigned int immdlen,
    unsigned int plen, uint8_t credits, int shove, int ulp_submode, int txalign)
{
	struct fw_ofld_tx_data_wr *txwr = dst;

	txwr->op_to_immdlen = htobe32(V_WR_OP(FW_OFLD_TX_DATA_WR) |
	    V_FW_WR_IMMDLEN(immdlen));
	txwr->flowid_len16 = htobe32(V_FW_WR_FLOWID(toep->tid) |
	    V_FW_WR_LEN16(credits));
	txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(toep->ulp_mode) |
	    V_TX_ULP_SUBMODE(ulp_submode) | V_TX_URG(0) | V_TX_SHOVE(shove));
	txwr->plen = htobe32(plen);

	if (txalign > 0) {
		struct tcpcb *tp = intotcpcb(toep->inp);

		/* Small payloads and 10G ports: disable LSO entirely. */
		if (plen < 2 * tp->t_maxseg || is_10G_port(toep->vi->pi))
			txwr->lsodisable_to_flags |=
			    htobe32(F_FW_OFLD_TX_DATA_WR_LSODISABLE);
		else
			/* Align payload; shove too unless Nagle is off. */
			txwr->lsodisable_to_flags |=
			    htobe32(F_FW_OFLD_TX_DATA_WR_ALIGNPLD |
				(tp->t_flags & TF_NODELAY ? 0 :
				F_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE));
	}
}
500237263Snp
/*
 * Generate a DSGL from a starting mbuf.  The total number of segments and the
 * maximum segments in any one mbuf are provided.  Walks the chain [start,
 * stop) and writes a ulptx_sgl at dst: the first segment goes in len0/addr0
 * of the header, subsequent segments are packed two per ulptx_sge_pair.
 */
static void
write_tx_sgl(void *dst, struct mbuf *start, struct mbuf *stop, int nsegs, int n)
{
	struct mbuf *m;
	struct ulptx_sgl *usgl = dst;
	int i, j, rc;
	struct sglist sg;
	struct sglist_seg segs[n];	/* scratch space for one mbuf's segs */

	KASSERT(nsegs > 0, ("%s: nsegs 0", __func__));

	sglist_init(&sg, n, segs);
	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(nsegs));

	/* i == -1 selects the header's len0/addr0 slot for the 1st segment. */
	i = -1;
	for (m = start; m != stop; m = m->m_next) {
		rc = sglist_append(&sg, mtod(m, void *), m->m_len);
		if (__predict_false(rc != 0))
			panic("%s: sglist_append %d", __func__, rc);

		for (j = 0; j < sg.sg_nseg; i++, j++) {
			if (i < 0) {
				usgl->len0 = htobe32(segs[j].ss_len);
				usgl->addr0 = htobe64(segs[j].ss_paddr);
			} else {
				/* Even i fills slot 0, odd i slot 1 of a pair. */
				usgl->sge[i / 2].len[i & 1] =
				    htobe32(segs[j].ss_len);
				usgl->sge[i / 2].addr[i & 1] =
				    htobe64(segs[j].ss_paddr);
			}
#ifdef INVARIANTS
			nsegs--;
#endif
		}
		sglist_reset(&sg);	/* reuse scratch space for next mbuf */
	}
	/* Zero the unused half of a trailing, half-filled sge pair. */
	if (i & 1)
		usgl->sge[i / 2].len[1] = htobe32(0);
	KASSERT(nsegs == 0, ("%s: nsegs %d, start %p, stop %p",
	    __func__, nsegs, start, stop));
}
547237263Snp
548237263Snp/*
549237263Snp * Max number of SGL entries an offload tx work request can have.  This is 41
550237263Snp * (1 + 40) for a full 512B work request.
551237263Snp * fw_ofld_tx_data_wr(16B) + ulptx_sgl(16B, 1) + ulptx_sge_pair(480B, 40)
552237263Snp */
553237263Snp#define OFLD_SGL_LEN (41)
554237263Snp
/*
 * Send data and/or a FIN to the peer.
 *
 * The socket's so_snd buffer consists of a stream of data starting with sb_mb
 * and linked together with m_next.  sb_sndptr, if set, is the last mbuf that
 * was transmitted.
 *
 * drop indicates the number of bytes that should be dropped from the head of
 * the send buffer.  It is an optimization that lets do_fw4_ack avoid creating
 * contention on the send buffer lock (before this change it used to do
 * sowwakeup and then t4_push_frames right after that when recovering from tx
 * stalls).  When drop is set this function MUST drop the bytes and wake up any
 * writers.
 */
void
t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
{
	struct mbuf *sndptr, *m, *sb_sndptr;
	struct fw_ofld_tx_data_wr *txwr;
	struct wrqe *wr;
	u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_snd;
	int tx_credits, shove, compl, space, sowwakeup;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];

	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));

	KASSERT(toep->ulp_mode == ULP_MODE_NONE ||
	    toep->ulp_mode == ULP_MODE_TCPDDP ||
	    toep->ulp_mode == ULP_MODE_RDMA,
	    ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));

	/* Connection is being torn down; nothing to transmit. */
	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
		return;

	/*
	 * This function doesn't resume by itself.  Someone else must clear the
	 * flag and call this function.
	 */
	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
		KASSERT(drop == 0,
		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
		return;
	}

	/* One work request per iteration, until the sendbuf is drained. */
	do {
		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
		max_imm = max_imm_payload(tx_credits);
		max_nsegs = max_dsgl_nsegs(tx_credits);

		SOCKBUF_LOCK(sb);
		sowwakeup = drop;
		if (drop) {
			sbdrop_locked(sb, drop);
			drop = 0;
		}
		sb_sndptr = sb->sb_sndptr;
		sndptr = sb_sndptr ? sb_sndptr->m_next : sb->sb_mb;
		plen = 0;
		nsegs = 0;
		max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
		/* Gather as many unsent mbufs as fit in one WR. */
		for (m = sndptr; m != NULL; m = m->m_next) {
			int n = sglist_count(mtod(m, void *), m->m_len);

			nsegs += n;
			plen += m->m_len;

			/* This mbuf sent us _over_ the nsegs limit, back out */
			if (plen > max_imm && nsegs > max_nsegs) {
				nsegs -= n;
				plen -= m->m_len;
				if (plen == 0) {
					/* Too few credits */
					toep->flags |= TPF_TX_SUSPENDED;
					if (sowwakeup)
						sowwakeup_locked(so);
					else
						SOCKBUF_UNLOCK(sb);
					SOCKBUF_UNLOCK_ASSERT(sb);
					return;
				}
				break;
			}

			if (max_nsegs_1mbuf < n)
				max_nsegs_1mbuf = n;
			sb_sndptr = m;	/* new sb->sb_sndptr if all goes well */

			/* This mbuf put us right at the max_nsegs limit */
			if (plen > max_imm && nsegs == max_nsegs) {
				m = m->m_next;
				break;
			}
		}

		space = sbspace(sb);

		/* Ask for a fw completion if the sendbuf is filling up. */
		if (space <= sb->sb_hiwat * 3 / 8 &&
		    toep->plen_nocompl + plen >= sb->sb_hiwat / 4)
			compl = 1;
		else
			compl = 0;

		/* Grow an autosizing sendbuf that is nearly full. */
		if (sb->sb_flags & SB_AUTOSIZE &&
		    V_tcp_do_autosndbuf &&
		    sb->sb_hiwat < V_tcp_autosndbuf_max &&
		    space < sb->sb_hiwat / 8) {
			int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc,
			    V_tcp_autosndbuf_max);

			if (!sbreserve_locked(sb, newsize, so, NULL))
				sb->sb_flags &= ~SB_AUTOSIZE;
			else
				sowwakeup = 1;	/* room available */
		}
		if (sowwakeup)
			sowwakeup_locked(so);	/* releases the sockbuf lock */
		else
			SOCKBUF_UNLOCK(sb);
		SOCKBUF_UNLOCK_ASSERT(sb);

		/* nothing to send */
		if (plen == 0) {
			KASSERT(m == NULL,
			    ("%s: nothing to send, but m != NULL", __func__));
			break;
		}

		if (__predict_false(toep->flags & TPF_FIN_SENT))
			panic("%s: excess tx.", __func__);

		/* Shove (push) only when the sendbuf was fully consumed. */
		shove = m == NULL && !(tp->t_flags & TF_MORETOCOME);
		if (plen <= max_imm) {

			/* Immediate data tx */

			wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16),
					toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr->wr_len, 16);
			write_tx_wr(txwr, toep, plen, plen, credits, shove, 0,
			    sc->tt.tx_align);
			m_copydata(sndptr, 0, plen, (void *)(txwr + 1));
			nsegs = 0;
		} else {
			int wr_len;

			/* DSGL tx */

			wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) +
			    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
			wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr_len, 16);
			write_tx_wr(txwr, toep, 0, plen, credits, shove, 0,
			    sc->tt.tx_align);
			write_tx_sgl(txwr + 1, sndptr, m, nsegs,
			    max_nsegs_1mbuf);
			/* Zero-pad the WR out to a 16B boundary. */
			if (wr_len & 0xf) {
				uint64_t *pad = (uint64_t *)
				    ((uintptr_t)txwr + wr_len);
				*pad = 0;
			}
		}

		KASSERT(toep->tx_credits >= credits,
			("%s: not enough credits", __func__));

		/* Credit accounting; request a completion if overdue. */
		toep->tx_credits -= credits;
		toep->tx_nocompl += credits;
		toep->plen_nocompl += plen;
		if (toep->tx_credits <= toep->tx_total * 3 / 8 &&
		    toep->tx_nocompl >= toep->tx_total / 4)
			compl = 1;

		if (compl || toep->ulp_mode == ULP_MODE_RDMA) {
			txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL);
			toep->tx_nocompl = 0;
			toep->plen_nocompl = 0;
		}

		tp->snd_nxt += plen;
		tp->snd_max += plen;

		/* Record the last mbuf handed to the hardware. */
		SOCKBUF_LOCK(sb);
		KASSERT(sb_sndptr, ("%s: sb_sndptr is NULL", __func__));
		sb->sb_sndptr = sb_sndptr;
		SOCKBUF_UNLOCK(sb);

		toep->flags |= TPF_TX_DATA_SENT;
		if (toep->tx_credits < MIN_OFLD_TX_CREDITS)
			toep->flags |= TPF_TX_SUSPENDED;

		/* Consume a tx descriptor for this WR. */
		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
		txsd->plen = plen;
		txsd->tx_credits = credits;
		txsd++;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
			toep->txsd_pidx = 0;
			txsd = &toep->txsd[0];
		}
		toep->txsd_avail--;

		t4_l2t_send(sc, wr, toep->l2te);
	} while (m != NULL);

	/* Send a FIN if requested, but only if there's no more data to send */
	if (m == NULL && toep->flags & TPF_SEND_FIN)
		close_conn(sc, toep);
}
780237263Snp
781292736Snpstatic inline void
782292736Snprqdrop_locked(struct mbufq *q, int plen)
783292736Snp{
784292736Snp	struct mbuf *m;
785292736Snp
786292736Snp	while (plen > 0) {
787292736Snp		m = mbufq_dequeue(q);
788292736Snp
789292736Snp		/* Too many credits. */
790292736Snp		MPASS(m != NULL);
791292736Snp		M_ASSERTPKTHDR(m);
792292736Snp
793292736Snp		/* Partial credits. */
794292736Snp		MPASS(plen >= m->m_pkthdr.len);
795292736Snp
796292736Snp		plen -= m->m_pkthdr.len;
797292736Snp		m_freem(m);
798292736Snp	}
799292736Snp}
800292736Snp
/*
 * Transmit queued iSCSI PDUs for 'toep'.  Each PDU goes out in its own
 * FW_OFLD_TX_DATA_WR: as immediate data when it fits, via a DSGL
 * otherwise.  Sent PDUs are parked on ulp_pdu_reclaimq until their tx
 * credits return; 'drop' is the number of payload bytes already
 * acknowledged, reclaimed from that queue before sending anything new.
 *
 * Called with the inp lock held.  Tx suspends itself (TPF_TX_SUSPENDED)
 * when a PDU exceeds the available DSGL limit or a WR can't be allocated.
 */
void
t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop)
{
	struct mbuf *sndptr, *m;
	struct fw_ofld_tx_data_wr *txwr;
	struct wrqe *wr;
	u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf;
	u_int adjusted_plen, ulp_submode;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	int tx_credits, shove;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
	struct mbufq *pduq = &toep->ulp_pduq;
	/* Extra TCP sequence space per ulp_submode (digests, see below). */
	static const u_int ulp_extra_len[] = {0, 4, 4, 8};

	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));
	KASSERT(toep->ulp_mode == ULP_MODE_ISCSI,
	    ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));

	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
		return;

	/*
	 * This function doesn't resume by itself.  Someone else must clear the
	 * flag and call this function.
	 */
	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
		KASSERT(drop == 0,
		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
		return;
	}

	if (drop)
		rqdrop_locked(&toep->ulp_pdu_reclaimq, drop);

	while ((sndptr = mbufq_first(pduq)) != NULL) {
		M_ASSERTPKTHDR(sndptr);

		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
		max_imm = max_imm_payload(tx_credits);
		max_nsegs = max_dsgl_nsegs(tx_credits);

		plen = 0;
		nsegs = 0;
		max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
		for (m = sndptr; m != NULL; m = m->m_next) {
			int n = sglist_count(mtod(m, void *), m->m_len);

			nsegs += n;
			plen += m->m_len;

			/*
			 * This mbuf would send us _over_ the nsegs limit.
			 * Suspend tx because the PDU can't be sent out.
			 */
			if (plen > max_imm && nsegs > max_nsegs) {
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}

			if (max_nsegs_1mbuf < n)
				max_nsegs_1mbuf = n;
		}

		if (__predict_false(toep->flags & TPF_FIN_SENT))
			panic("%s: excess tx.", __func__);

		/*
		 * We have a PDU to send.  All of it goes out in one WR so 'm'
		 * is NULL.  A PDU's length is always a multiple of 4.
		 */
		MPASS(m == NULL);
		MPASS((plen & 3) == 0);
		MPASS(sndptr->m_pkthdr.len == plen);

		shove = !(tp->t_flags & TF_MORETOCOME);
		ulp_submode = mbuf_ulp_submode(sndptr);
		MPASS(ulp_submode < nitems(ulp_extra_len));

		/*
		 * plen doesn't include header and data digests, which are
		 * generated and inserted in the right places by the TOE, but
		 * they do occupy TCP sequence space and need to be accounted
		 * for.
		 */
		adjusted_plen = plen + ulp_extra_len[ulp_submode];
		if (plen <= max_imm) {

			/* Immediate data tx */

			wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16),
					toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr->wr_len, 16);
			write_tx_wr(txwr, toep, plen, adjusted_plen, credits,
			    shove, ulp_submode, sc->tt.tx_align);
			m_copydata(sndptr, 0, plen, (void *)(txwr + 1));
			nsegs = 0;
		} else {
			int wr_len;

			/* DSGL tx */
			wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) +
			    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
			wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
			if (wr == NULL) {
				/* XXX: how will we recover from this? */
				toep->flags |= TPF_TX_SUSPENDED;
				return;
			}
			txwr = wrtod(wr);
			credits = howmany(wr_len, 16);
			write_tx_wr(txwr, toep, 0, adjusted_plen, credits,
			    shove, ulp_submode, sc->tt.tx_align);
			write_tx_sgl(txwr + 1, sndptr, m, nsegs,
			    max_nsegs_1mbuf);
			/* Zero-pad the WR out to a 16-byte boundary. */
			if (wr_len & 0xf) {
				uint64_t *pad = (uint64_t *)
				    ((uintptr_t)txwr + wr_len);
				*pad = 0;
			}
		}

		KASSERT(toep->tx_credits >= credits,
			("%s: not enough credits", __func__));

		/* Park the sent PDU until its tx credits are returned. */
		m = mbufq_dequeue(pduq);
		MPASS(m == sndptr);
		mbufq_enqueue(&toep->ulp_pdu_reclaimq, m);

		toep->tx_credits -= credits;
		toep->tx_nocompl += credits;
		toep->plen_nocompl += plen;
		/* Request a completion periodically so credits flow back. */
		if (toep->tx_credits <= toep->tx_total * 3 / 8 &&
		    toep->tx_nocompl >= toep->tx_total / 4) {
			txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL);
			toep->tx_nocompl = 0;
			toep->plen_nocompl = 0;
		}

		tp->snd_nxt += adjusted_plen;
		tp->snd_max += adjusted_plen;

		toep->flags |= TPF_TX_DATA_SENT;
		if (toep->tx_credits < MIN_OFLD_TX_CREDITS)
			toep->flags |= TPF_TX_SUSPENDED;

		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
		txsd->plen = plen;
		txsd->tx_credits = credits;
		txsd++;
		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
			toep->txsd_pidx = 0;
			txsd = &toep->txsd[0];
		}
		toep->txsd_avail--;

		t4_l2t_send(sc, wr, toep->l2te);
	}

	/* Send a FIN if requested, but only if there are no more PDUs to send */
	if (mbufq_first(pduq) == NULL && toep->flags & TPF_SEND_FIN)
		close_conn(sc, toep);
}
972269076Snp
973237263Snpint
974237263Snpt4_tod_output(struct toedev *tod, struct tcpcb *tp)
975237263Snp{
976237263Snp	struct adapter *sc = tod->tod_softc;
977237263Snp#ifdef INVARIANTS
978237263Snp	struct inpcb *inp = tp->t_inpcb;
979237263Snp#endif
980237263Snp	struct toepcb *toep = tp->t_toe;
981237263Snp
982237263Snp	INP_WLOCK_ASSERT(inp);
983237263Snp	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
984237263Snp	    ("%s: inp %p dropped.", __func__, inp));
985237263Snp	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
986237263Snp
987292736Snp	if (toep->ulp_mode == ULP_MODE_ISCSI)
988292736Snp		t4_push_pdus(sc, toep, 0);
989292736Snp	else
990292736Snp		t4_push_frames(sc, toep, 0);
991237263Snp
992237263Snp	return (0);
993237263Snp}
994237263Snp
995237263Snpint
996237263Snpt4_send_fin(struct toedev *tod, struct tcpcb *tp)
997237263Snp{
998237263Snp	struct adapter *sc = tod->tod_softc;
999237263Snp#ifdef INVARIANTS
1000237263Snp	struct inpcb *inp = tp->t_inpcb;
1001237263Snp#endif
1002237263Snp	struct toepcb *toep = tp->t_toe;
1003237263Snp
1004237263Snp	INP_WLOCK_ASSERT(inp);
1005237263Snp	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
1006237263Snp	    ("%s: inp %p dropped.", __func__, inp));
1007237263Snp	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
1008237263Snp
1009239514Snp	toep->flags |= TPF_SEND_FIN;
1010269076Snp	if (tp->t_state >= TCPS_ESTABLISHED) {
1011269076Snp		if (toep->ulp_mode == ULP_MODE_ISCSI)
1012292736Snp			t4_push_pdus(sc, toep, 0);
1013269076Snp		else
1014269076Snp			t4_push_frames(sc, toep, 0);
1015269076Snp	}
1016237263Snp
1017237263Snp	return (0);
1018237263Snp}
1019237263Snp
1020237263Snpint
1021237263Snpt4_send_rst(struct toedev *tod, struct tcpcb *tp)
1022237263Snp{
1023237263Snp	struct adapter *sc = tod->tod_softc;
1024237263Snp#if defined(INVARIANTS)
1025237263Snp	struct inpcb *inp = tp->t_inpcb;
1026237263Snp#endif
1027237263Snp	struct toepcb *toep = tp->t_toe;
1028237263Snp
1029237263Snp	INP_WLOCK_ASSERT(inp);
1030237263Snp	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
1031237263Snp	    ("%s: inp %p dropped.", __func__, inp));
1032237263Snp	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
1033237263Snp
1034237263Snp	/* hmmmm */
1035239514Snp	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
1036237263Snp	    ("%s: flowc for tid %u [%s] not sent already",
1037237263Snp	    __func__, toep->tid, tcpstates[tp->t_state]));
1038237263Snp
1039237263Snp	send_reset(sc, toep, 0);
1040237263Snp	return (0);
1041237263Snp}
1042237263Snp
/*
 * Peer has sent us a FIN (CPL_PEER_CLOSE).  Advance rcv_nxt past the
 * FIN, shut down the receive side of the socket, and walk the TCP state
 * machine accordingly.  Acquires the tcbinfo read lock and the inp lock.
 */
static int
do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_peer_close *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = NULL;
	struct socket *so;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_PEER_CLOSE,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	/* Embryonic connection: nothing to do beyond sanity checks. */
	if (__predict_false(toep->flags & TPF_SYNQE)) {
#ifdef INVARIANTS
		struct synq_entry *synqe = (void *)toep;

		INP_WLOCK(synqe->lctx->inp);
		if (synqe->flags & TPF_SYNQE_HAS_L2TE) {
			KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN,
			    ("%s: listen socket closed but tid %u not aborted.",
			    __func__, tid));
		} else {
			/*
			 * do_pass_accept_req is still running and will
			 * eventually take care of this tid.
			 */
		}
		INP_WUNLOCK(synqe->lctx->inp);
#endif
		CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid,
		    toep, toep->flags);
		return (0);
	}

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	INP_INFO_RLOCK(&V_tcbinfo);
	INP_WLOCK(inp);
	tp = intotcpcb(inp);

	CTR5(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x, inp %p", __func__,
	    tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags, inp);

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		goto done;

	tp->rcv_nxt++;	/* FIN */

	so = inp->inp_socket;
	/* Flush any DDP buffers still active before closing the rcv side. */
	if (toep->ulp_mode == ULP_MODE_TCPDDP) {
		DDP_LOCK(toep);
		if (__predict_false(toep->ddp_flags &
		    (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)))
			handle_ddp_close(toep, tp, cpl->rcv_nxt);
		DDP_UNLOCK(toep);
	}
	socantrcvmore(so);

	/* RDMA connections don't track rcv_nxt in the tcpcb this way. */
	if (toep->ulp_mode != ULP_MODE_RDMA) {
		KASSERT(tp->rcv_nxt == be32toh(cpl->rcv_nxt),
	    		("%s: rcv_nxt mismatch: %u %u", __func__, tp->rcv_nxt,
	    		be32toh(cpl->rcv_nxt)));
	}

	switch (tp->t_state) {
	case TCPS_SYN_RECEIVED:
		tp->t_starttime = ticks;
		/* FALLTHROUGH */

	case TCPS_ESTABLISHED:
		tp->t_state = TCPS_CLOSE_WAIT;
		break;

	case TCPS_FIN_WAIT_1:
		tp->t_state = TCPS_CLOSING;
		break;

	case TCPS_FIN_WAIT_2:
		/* Both directions closed: enter TIME_WAIT and tear down. */
		tcp_twstart(tp);
		INP_UNLOCK_ASSERT(inp);	 /* safe, we have a ref on the inp */
		INP_INFO_RUNLOCK(&V_tcbinfo);

		INP_WLOCK(inp);
		final_cpl_received(toep);
		return (0);

	default:
		log(LOG_ERR, "%s: TID %u received CPL_PEER_CLOSE in state %d\n",
		    __func__, tid, tp->t_state);
	}
done:
	INP_WUNLOCK(inp);
	INP_INFO_RUNLOCK(&V_tcbinfo);
	return (0);
}
1147237263Snp
/*
 * Peer has ACK'd our FIN (CPL_CLOSE_CON_RPL).  Update snd_una and walk
 * the TCP state machine.  The CLOSING and LAST_ACK paths end the
 * connection and release the offload state via final_cpl_received().
 */
static int
do_close_con_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_close_con_rpl *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_CLOSE_CON_RPL,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	INP_INFO_RLOCK(&V_tcbinfo);
	INP_WLOCK(inp);
	tp = intotcpcb(inp);

	CTR4(KTR_CXGBE, "%s: tid %u (%s), toep_flags 0x%x",
	    __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags);

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		goto done;

	so = inp->inp_socket;
	tp->snd_una = be32toh(cpl->snd_nxt) - 1;	/* exclude FIN */

	switch (tp->t_state) {
	case TCPS_CLOSING:	/* see TCPS_FIN_WAIT_2 in do_peer_close too */
		tcp_twstart(tp);
release:
		INP_UNLOCK_ASSERT(inp);	/* safe, we have a ref on the  inp */
		INP_INFO_RUNLOCK(&V_tcbinfo);

		INP_WLOCK(inp);
		final_cpl_received(toep);	/* no more CPLs expected */

		return (0);
	case TCPS_LAST_ACK:
		if (tcp_close(tp))
			INP_WUNLOCK(inp);
		goto release;

	case TCPS_FIN_WAIT_1:
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
			soisdisconnected(so);
		tp->t_state = TCPS_FIN_WAIT_2;
		break;

	default:
		log(LOG_ERR,
		    "%s: TID %u received CPL_CLOSE_CON_RPL in state %s\n",
		    __func__, tid, tcpstates[tp->t_state]);
	}
done:
	INP_WUNLOCK(inp);
	INP_INFO_RUNLOCK(&V_tcbinfo);
	return (0);
}
1216237263Snp
1217237263Snpvoid
1218237263Snpsend_abort_rpl(struct adapter *sc, struct sge_wrq *ofld_txq, int tid,
1219237263Snp    int rst_status)
1220237263Snp{
1221237263Snp	struct wrqe *wr;
1222237263Snp	struct cpl_abort_rpl *cpl;
1223237263Snp
1224237263Snp	wr = alloc_wrqe(sizeof(*cpl), ofld_txq);
1225237263Snp	if (wr == NULL) {
1226237263Snp		/* XXX */
1227237263Snp		panic("%s: allocation failure.", __func__);
1228237263Snp	}
1229237263Snp	cpl = wrtod(wr);
1230237263Snp
1231237263Snp	INIT_TP_WR_MIT_CPL(cpl, CPL_ABORT_RPL, tid);
1232237263Snp	cpl->cmd = rst_status;
1233237263Snp
1234237263Snp	t4_wrq_tx(sc, wr);
1235237263Snp}
1236237263Snp
1237237263Snpstatic int
1238237263Snpabort_status_to_errno(struct tcpcb *tp, unsigned int abort_reason)
1239237263Snp{
1240237263Snp	switch (abort_reason) {
1241237263Snp	case CPL_ERR_BAD_SYN:
1242237263Snp	case CPL_ERR_CONN_RESET:
1243237263Snp		return (tp->t_state == TCPS_CLOSE_WAIT ? EPIPE : ECONNRESET);
1244237263Snp	case CPL_ERR_XMIT_TIMEDOUT:
1245237263Snp	case CPL_ERR_PERSIST_TIMEDOUT:
1246237263Snp	case CPL_ERR_FINWAIT2_TIMEDOUT:
1247237263Snp	case CPL_ERR_KEEPALIVE_TIMEDOUT:
1248237263Snp		return (ETIMEDOUT);
1249237263Snp	default:
1250237263Snp		return (EIO);
1251237263Snp	}
1252237263Snp}
1253237263Snp
/*
 * TCP RST from the peer, timeout, or some other such critical error
 * (CPL_ABORT_REQ_RSS).  Tears the connection down unless an abort we
 * initiated is already in flight, and always replies to the chip with a
 * CPL_ABORT_RPL.
 */
static int
do_abort_req(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_req_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct sge_wrq *ofld_txq = toep->ofld_txq;
	struct inpcb *inp;
	struct tcpcb *tp;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_ABORT_REQ_RSS,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	/* Aborts for embryonic connections are handled by the synq code. */
	if (toep->flags & TPF_SYNQE)
		return (do_abort_req_synqe(iq, rss, m));

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	if (negative_advice(cpl->status)) {
		CTR4(KTR_CXGBE, "%s: negative advice %d for tid %d (0x%x)",
		    __func__, cpl->status, tid, toep->flags);
		return (0);	/* Ignore negative advice */
	}

	inp = toep->inp;
	INP_INFO_RLOCK(&V_tcbinfo);	/* for tcp_close */
	INP_WLOCK(inp);

	tp = intotcpcb(inp);

	CTR6(KTR_CXGBE,
	    "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x, status %d",
	    __func__, tid, tp ? tcpstates[tp->t_state] : "no tp", toep->flags,
	    inp->inp_flags, cpl->status);

	/*
	 * If we'd initiated an abort earlier the reply to it is responsible for
	 * cleaning up resources.  Otherwise we tear everything down right here
	 * right now.  We owe the T4 a CPL_ABORT_RPL no matter what.
	 */
	if (toep->flags & TPF_ABORT_SHUTDOWN) {
		INP_WUNLOCK(inp);
		goto done;
	}
	toep->flags |= TPF_ABORT_SHUTDOWN;

	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
		struct socket *so = inp->inp_socket;

		if (so != NULL)
			so_error_set(so, abort_status_to_errno(tp,
			    cpl->status));
		/* tcp_close() returns NULL when it drops the inp lock. */
		tp = tcp_close(tp);
		if (tp == NULL)
			INP_WLOCK(inp);	/* re-acquire */
	}

	final_cpl_received(toep);
done:
	INP_INFO_RUNLOCK(&V_tcbinfo);
	send_abort_rpl(sc, ofld_txq, tid, CPL_ABORT_NO_RST);
	return (0);
}
1325237263Snp
/*
 * Reply to the CPL_ABORT_REQ (send_reset).  This is the last CPL for
 * the tid, so the offload state is released here.
 */
static int
do_abort_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
#ifdef INVARIANTS
	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
#endif

	KASSERT(opcode == CPL_ABORT_RPL_RSS,
	    ("%s: unexpected opcode 0x%x", __func__, opcode));
	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	/* Aborts for embryonic connections are handled by the synq code. */
	if (toep->flags & TPF_SYNQE)
		return (do_abort_rpl_synqe(iq, rss, m));

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	CTR5(KTR_CXGBE, "%s: tid %u, toep %p, inp %p, status %d",
	    __func__, tid, toep, inp, cpl->status);

	/* This reply only arrives in response to our own abort request. */
	KASSERT(toep->flags & TPF_ABORT_SHUTDOWN,
	    ("%s: wasn't expecting abort reply", __func__));

	INP_WLOCK(inp);
	final_cpl_received(toep);

	return (0);
}
1361237263Snp
/*
 * Inbound payload for an offloaded connection (CPL_RX_DATA).  Appends
 * the data to the socket buffer, maintains the receive window and rx
 * credits, and handles DDP mode transitions/indicates when the
 * connection is in ULP_MODE_TCPDDP.
 */
static int
do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rx_data *cpl = mtod(m, const void *);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp;
	struct socket *so;
	struct sockbuf *sb;
	int len;
	uint32_t ddp_placed = 0;

	/* Embryonic connection: drop the payload after sanity checks. */
	if (__predict_false(toep->flags & TPF_SYNQE)) {
#ifdef INVARIANTS
		struct synq_entry *synqe = (void *)toep;

		INP_WLOCK(synqe->lctx->inp);
		if (synqe->flags & TPF_SYNQE_HAS_L2TE) {
			KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN,
			    ("%s: listen socket closed but tid %u not aborted.",
			    __func__, tid));
		} else {
			/*
			 * do_pass_accept_req is still running and will
			 * eventually take care of this tid.
			 */
		}
		INP_WUNLOCK(synqe->lctx->inp);
#endif
		CTR4(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x)", __func__, tid,
		    toep, toep->flags);
		m_freem(m);
		return (0);
	}

	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));

	/* strip off CPL header */
	m_adj(m, sizeof(*cpl));
	len = m->m_pkthdr.len;

	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
		    __func__, tid, len, inp->inp_flags);
		INP_WUNLOCK(inp);
		m_freem(m);
		return (0);
	}

	tp = intotcpcb(inp);

	/* A sequence jump means the chip placed some data directly (DDP). */
	if (__predict_false(tp->rcv_nxt != be32toh(cpl->seq)))
		ddp_placed = be32toh(cpl->seq) - tp->rcv_nxt;

	tp->rcv_nxt += len;
	/* Non-RDMA modes must never be pushed past the advertised window. */
	if (tp->rcv_wnd < len) {
		KASSERT(toep->ulp_mode != ULP_MODE_RDMA,
				("%s: negative window size", __func__));
	}

	tp->rcv_wnd -= len;
	tp->t_rcvtime = ticks;

	/* DDP lock is taken before the sockbuf lock (lock order). */
	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		DDP_LOCK(toep);
	so = inp_inpcbtosocket(inp);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);

	/* Data after the receive side was shut down: reset the connection. */
	if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) {
		CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)",
		    __func__, tid, len);
		m_freem(m);
		SOCKBUF_UNLOCK(sb);
		if (toep->ulp_mode == ULP_MODE_TCPDDP)
			DDP_UNLOCK(toep);
		INP_WUNLOCK(inp);

		/* Re-acquire with the tcbinfo lock held, for tcp_drop(). */
		INP_INFO_RLOCK(&V_tcbinfo);
		INP_WLOCK(inp);
		tp = tcp_drop(tp, ECONNRESET);
		if (tp)
			INP_WUNLOCK(inp);
		INP_INFO_RUNLOCK(&V_tcbinfo);

		return (0);
	}

	/* receive buffer autosize */
	CURVNET_SET(so->so_vnet);
	if (sb->sb_flags & SB_AUTOSIZE &&
	    V_tcp_do_autorcvbuf &&
	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
	    len > (sbspace(sb) / 8 * 7)) {
		unsigned int hiwat = sb->sb_hiwat;
		unsigned int newsize = min(hiwat + V_tcp_autorcvbuf_inc,
		    V_tcp_autorcvbuf_max);

		if (!sbreserve_locked(sb, newsize, so, NULL))
			sb->sb_flags &= ~SB_AUTOSIZE;
		else
			toep->rx_credits += newsize - hiwat;
	}

	if (toep->ddp_waiting_count != 0 || toep->ddp_active_count != 0)
		CTR3(KTR_CXGBE, "%s: tid %u, non-ddp rx (%d bytes)", __func__,
		    tid, len);

	if (toep->ulp_mode == ULP_MODE_TCPDDP) {
		/* Did the chip's DDP on/off state diverge from ours? */
		int changed = !(toep->ddp_flags & DDP_ON) ^ cpl->ddp_off;

		if (changed) {
			if (toep->ddp_flags & DDP_SC_REQ)
				toep->ddp_flags ^= DDP_ON | DDP_SC_REQ;
			else {
				KASSERT(cpl->ddp_off == 1,
				    ("%s: DDP switched on by itself.",
				    __func__));

				/* Fell out of DDP mode */
				toep->ddp_flags &= ~DDP_ON;
				CTR1(KTR_CXGBE, "%s: fell out of DDP mode",
				    __func__);

				insert_ddp_data(toep, ddp_placed);
			}
		}

		if (toep->ddp_flags & DDP_ON) {
			/*
			 * CPL_RX_DATA with DDP on can only be an indicate.
			 * Start posting queued AIO requests via DDP.  The
			 * payload that arrived in this indicate is appended
			 * to the socket buffer as usual.
			 */
			handle_ddp_indicate(toep);
		}
	}

	/* Credit back whatever the application consumed since last time. */
	KASSERT(toep->sb_cc >= sbused(sb),
	    ("%s: sb %p has more data (%d) than last time (%d).",
	    __func__, sb, sbused(sb), toep->sb_cc));
	toep->rx_credits += toep->sb_cc - sbused(sb);
	sbappendstream_locked(sb, m, 0);
	toep->sb_cc = sbused(sb);
	if (toep->rx_credits > 0 && toep->sb_cc + tp->rcv_wnd < sb->sb_lowat) {
		int credits;

		credits = send_rx_credits(sc, toep, toep->rx_credits);
		toep->rx_credits -= credits;
		tp->rcv_wnd += credits;
		tp->rcv_adv += credits;
	}

	if (toep->ddp_waiting_count > 0 && sbavail(sb) != 0) {
		CTR2(KTR_CXGBE, "%s: tid %u queueing AIO task", __func__,
		    tid);
		ddp_queue_toep(toep);
	}
	sorwakeup_locked(so);
	SOCKBUF_UNLOCK_ASSERT(sb);
	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		DDP_UNLOCK(toep);

	INP_WUNLOCK(inp);
	CURVNET_RESTORE();
	return (0);
}
1533237263Snp
/*
 * Shift/mask/set/get accessors (usual S_/M_/V_/G_ style) for the fields
 * of CPL_FW4_ACK messages used by do_fw4_ack() below: opcode, flow id
 * (tid), credits returned, and the "snd_una is valid" bit.
 */
#define S_CPL_FW4_ACK_OPCODE    24
#define M_CPL_FW4_ACK_OPCODE    0xff
#define V_CPL_FW4_ACK_OPCODE(x) ((x) << S_CPL_FW4_ACK_OPCODE)
#define G_CPL_FW4_ACK_OPCODE(x) \
    (((x) >> S_CPL_FW4_ACK_OPCODE) & M_CPL_FW4_ACK_OPCODE)

#define S_CPL_FW4_ACK_FLOWID    0
#define M_CPL_FW4_ACK_FLOWID    0xffffff
#define V_CPL_FW4_ACK_FLOWID(x) ((x) << S_CPL_FW4_ACK_FLOWID)
#define G_CPL_FW4_ACK_FLOWID(x) \
    (((x) >> S_CPL_FW4_ACK_FLOWID) & M_CPL_FW4_ACK_FLOWID)

#define S_CPL_FW4_ACK_CR        24
#define M_CPL_FW4_ACK_CR        0xff
#define V_CPL_FW4_ACK_CR(x)     ((x) << S_CPL_FW4_ACK_CR)
#define G_CPL_FW4_ACK_CR(x)     (((x) >> S_CPL_FW4_ACK_CR) & M_CPL_FW4_ACK_CR)

#define S_CPL_FW4_ACK_SEQVAL    0
#define M_CPL_FW4_ACK_SEQVAL    0x1
#define V_CPL_FW4_ACK_SEQVAL(x) ((x) << S_CPL_FW4_ACK_SEQVAL)
#define G_CPL_FW4_ACK_SEQVAL(x) \
    (((x) >> S_CPL_FW4_ACK_SEQVAL) & M_CPL_FW4_ACK_SEQVAL)
#define F_CPL_FW4_ACK_SEQVAL    V_CPL_FW4_ACK_SEQVAL(1U)
1557237263Snp
1558237263Snpstatic int
1559237263Snpdo_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
1560237263Snp{
1561237263Snp	struct adapter *sc = iq->adapter;
1562237263Snp	const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);
1563237263Snp	unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));
1564237263Snp	struct toepcb *toep = lookup_tid(sc, tid);
1565237263Snp	struct inpcb *inp;
1566237263Snp	struct tcpcb *tp;
1567237263Snp	struct socket *so;
1568237263Snp	uint8_t credits = cpl->credits;
1569237263Snp	struct ofld_tx_sdesc *txsd;
1570237263Snp	int plen;
1571237263Snp#ifdef INVARIANTS
1572237263Snp	unsigned int opcode = G_CPL_FW4_ACK_OPCODE(be32toh(OPCODE_TID(cpl)));
1573237263Snp#endif
1574237263Snp
1575237263Snp	/*
1576237263Snp	 * Very unusual case: we'd sent a flowc + abort_req for a synq entry and
1577237263Snp	 * now this comes back carrying the credits for the flowc.
1578237263Snp	 */
1579239514Snp	if (__predict_false(toep->flags & TPF_SYNQE)) {
1580239514Snp		KASSERT(toep->flags & TPF_ABORT_SHUTDOWN,
1581237263Snp		    ("%s: credits for a synq entry %p", __func__, toep));
1582237263Snp		return (0);
1583237263Snp	}
1584237263Snp
1585237263Snp	inp = toep->inp;
1586237263Snp
1587237263Snp	KASSERT(opcode == CPL_FW4_ACK,
1588237263Snp	    ("%s: unexpected opcode 0x%x", __func__, opcode));
1589237263Snp	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
1590237263Snp	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));
1591237263Snp
1592237263Snp	INP_WLOCK(inp);
1593237263Snp
1594239514Snp	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN)) {
1595237263Snp		INP_WUNLOCK(inp);
1596237263Snp		return (0);
1597237263Snp	}
1598237263Snp
1599237263Snp	KASSERT((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0,
1600237263Snp	    ("%s: inp_flags 0x%x", __func__, inp->inp_flags));
1601237263Snp
1602237263Snp	tp = intotcpcb(inp);
1603237263Snp
1604237436Snp	if (cpl->flags & CPL_FW4_ACK_FLAGS_SEQVAL) {
1605237263Snp		tcp_seq snd_una = be32toh(cpl->snd_una);
1606237263Snp
1607237263Snp#ifdef INVARIANTS
1608237263Snp		if (__predict_false(SEQ_LT(snd_una, tp->snd_una))) {
1609237263Snp			log(LOG_ERR,
1610237263Snp			    "%s: unexpected seq# %x for TID %u, snd_una %x\n",
1611237263Snp			    __func__, snd_una, toep->tid, tp->snd_una);
1612237263Snp		}
1613237263Snp#endif
1614237263Snp
1615237263Snp		if (tp->snd_una != snd_una) {
1616237263Snp			tp->snd_una = snd_una;
1617237263Snp			tp->ts_recent_age = tcp_ts_getticks();
1618237263Snp		}
1619237263Snp	}
1620237263Snp
1621237263Snp	so = inp->inp_socket;
1622237263Snp	txsd = &toep->txsd[toep->txsd_cidx];
1623237263Snp	plen = 0;
1624237263Snp	while (credits) {
1625237263Snp		KASSERT(credits >= txsd->tx_credits,
1626237263Snp		    ("%s: too many (or partial) credits", __func__));
1627237263Snp		credits -= txsd->tx_credits;
1628237263Snp		toep->tx_credits += txsd->tx_credits;
1629237263Snp		plen += txsd->plen;
1630237263Snp		txsd++;
1631237263Snp		toep->txsd_avail++;
1632237263Snp		KASSERT(toep->txsd_avail <= toep->txsd_total,
1633237263Snp		    ("%s: txsd avail > total", __func__));
1634237263Snp		if (__predict_false(++toep->txsd_cidx == toep->txsd_total)) {
1635237263Snp			txsd = &toep->txsd[0];
1636237263Snp			toep->txsd_cidx = 0;
1637237263Snp		}
1638237263Snp	}
1639237263Snp
1640255411Snp	if (toep->tx_credits == toep->tx_total) {
1641255411Snp		toep->tx_nocompl = 0;
1642255411Snp		toep->plen_nocompl = 0;
1643255411Snp	}
1644255411Snp
1645255411Snp	if (toep->flags & TPF_TX_SUSPENDED &&
1646255411Snp	    toep->tx_credits >= toep->tx_total / 4) {
1647255411Snp		toep->flags &= ~TPF_TX_SUSPENDED;
1648269076Snp		if (toep->ulp_mode == ULP_MODE_ISCSI)
1649292736Snp			t4_push_pdus(sc, toep, plen);
1650269076Snp		else
1651269076Snp			t4_push_frames(sc, toep, plen);
1652255411Snp	} else if (plen > 0) {
1653237263Snp		struct sockbuf *sb = &so->so_snd;
1654292736Snp		int sbu;
1655237263Snp
1656292736Snp		SOCKBUF_LOCK(sb);
1657292736Snp		sbu = sbused(sb);
1658292736Snp		if (toep->ulp_mode == ULP_MODE_ISCSI) {
1659292736Snp
1660292736Snp			if (__predict_false(sbu > 0)) {
1661292736Snp				/*
1662292736Snp				 * The data trasmitted before the tid's ULP mode
1663292736Snp				 * changed to ISCSI is still in so_snd.
1664292736Snp				 * Incoming credits should account for so_snd
1665292736Snp				 * first.
1666292736Snp				 */
1667292736Snp				sbdrop_locked(sb, min(sbu, plen));
1668292736Snp				plen -= min(sbu, plen);
1669292736Snp			}
1670292736Snp			sowwakeup_locked(so);	/* unlocks so_snd */
1671292736Snp			rqdrop_locked(&toep->ulp_pdu_reclaimq, plen);
1672292736Snp		} else {
1673269076Snp			sbdrop_locked(sb, plen);
1674292736Snp			sowwakeup_locked(so);	/* unlocks so_snd */
1675269076Snp		}
1676292736Snp		SOCKBUF_UNLOCK_ASSERT(sb);
1677237263Snp	}
1678237263Snp
1679237263Snp	INP_WUNLOCK(inp);
1680237263Snp
1681237263Snp	return (0);
1682237263Snp}
1683237263Snp
1684239338Snpstatic int
1685239338Snpdo_set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
1686239338Snp{
1687239338Snp	struct adapter *sc = iq->adapter;
1688239338Snp	const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
1689239338Snp	unsigned int tid = GET_TID(cpl);
1690299210Sjhb	struct toepcb *toep;
1691239338Snp#ifdef INVARIANTS
1692239338Snp	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
1693239338Snp#endif
1694239338Snp
1695239338Snp	KASSERT(opcode == CPL_SET_TCB_RPL,
1696239338Snp	    ("%s: unexpected opcode 0x%x", __func__, opcode));
1697239338Snp	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
1698239338Snp
1699259382Snp	if (is_ftid(sc, tid))
1700239338Snp		return (t4_filter_rpl(iq, rss, m)); /* TCB is a filter */
1701239338Snp
1702299210Sjhb	toep = lookup_tid(sc, tid);
1703299210Sjhb	if (toep->ulp_mode == ULP_MODE_TCPDDP) {
1704299210Sjhb		handle_ddp_tcb_rpl(toep, cpl);
1705299210Sjhb		return (0);
1706299210Sjhb	}
1707299210Sjhb
1708292736Snp	/*
1709292736Snp	 * TOM and/or other ULPs don't request replies for CPL_SET_TCB or
1710292736Snp	 * CPL_SET_TCB_FIELD requests.  This can easily change and when it does
1711292736Snp	 * the dispatch code will go here.
1712292736Snp	 */
1713292736Snp#ifdef INVARIANTS
1714292736Snp	panic("%s: Unexpected CPL_SET_TCB_RPL for tid %u on iq %p", __func__,
1715292736Snp	    tid, iq);
1716292736Snp#else
1717292736Snp	log(LOG_ERR, "%s: Unexpected CPL_SET_TCB_RPL for tid %u on iq %p\n",
1718292736Snp	    __func__, tid, iq);
1719292736Snp#endif
1720269076Snp
1721292736Snp	return (0);
1722239338Snp}
1723239338Snp
1724237263Snpvoid
1725251638Snpt4_set_tcb_field(struct adapter *sc, struct toepcb *toep, int ctrl,
1726251638Snp    uint16_t word, uint64_t mask, uint64_t val)
1727239338Snp{
1728239338Snp	struct wrqe *wr;
1729239338Snp	struct cpl_set_tcb_field *req;
1730239338Snp
1731251638Snp	wr = alloc_wrqe(sizeof(*req), ctrl ? toep->ctrlq : toep->ofld_txq);
1732239338Snp	if (wr == NULL) {
1733239338Snp		/* XXX */
1734239338Snp		panic("%s: allocation failure.", __func__);
1735239338Snp	}
1736239338Snp	req = wrtod(wr);
1737239338Snp
1738239338Snp	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid);
1739239338Snp	req->reply_ctrl = htobe16(V_NO_REPLY(1) |
1740239338Snp	    V_QUEUENO(toep->ofld_rxq->iq.abs_id));
1741239338Snp	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
1742239338Snp	req->mask = htobe64(mask);
1743239338Snp	req->val = htobe64(val);
1744239338Snp
1745239338Snp	t4_wrq_tx(sc, wr);
1746239338Snp}
1747239338Snp
1748239338Snpvoid
1749299210Sjhbt4_set_tcb_field_rpl(struct adapter *sc, struct toepcb *toep, int ctrl,
1750299210Sjhb    uint16_t word, uint64_t mask, uint64_t val, uint8_t cookie)
1751299210Sjhb{
1752299210Sjhb	struct wrqe *wr;
1753299210Sjhb	struct cpl_set_tcb_field *req;
1754299210Sjhb
1755299210Sjhb	KASSERT((cookie & ~M_COOKIE) == 0, ("%s: invalid cookie %#x", __func__,
1756299210Sjhb	    cookie));
1757299210Sjhb	wr = alloc_wrqe(sizeof(*req), ctrl ? toep->ctrlq : toep->ofld_txq);
1758299210Sjhb	if (wr == NULL) {
1759299210Sjhb		/* XXX */
1760299210Sjhb		panic("%s: allocation failure.", __func__);
1761299210Sjhb	}
1762299210Sjhb	req = wrtod(wr);
1763299210Sjhb
1764299210Sjhb	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid);
1765299210Sjhb	req->reply_ctrl = htobe16(V_QUEUENO(toep->ofld_rxq->iq.abs_id));
1766299210Sjhb	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(cookie));
1767299210Sjhb	req->mask = htobe64(mask);
1768299210Sjhb	req->val = htobe64(val);
1769299210Sjhb
1770299210Sjhb	t4_wrq_tx(sc, wr);
1771299210Sjhb}
1772299210Sjhb
1773299210Sjhbvoid
1774237263Snpt4_init_cpl_io_handlers(struct adapter *sc)
1775237263Snp{
1776237263Snp
1777237263Snp	t4_register_cpl_handler(sc, CPL_PEER_CLOSE, do_peer_close);
1778237263Snp	t4_register_cpl_handler(sc, CPL_CLOSE_CON_RPL, do_close_con_rpl);
1779237263Snp	t4_register_cpl_handler(sc, CPL_ABORT_REQ_RSS, do_abort_req);
1780237263Snp	t4_register_cpl_handler(sc, CPL_ABORT_RPL_RSS, do_abort_rpl);
1781237263Snp	t4_register_cpl_handler(sc, CPL_RX_DATA, do_rx_data);
1782237263Snp	t4_register_cpl_handler(sc, CPL_FW4_ACK, do_fw4_ack);
1783239338Snp	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, do_set_tcb_rpl);
1784237263Snp}
1785239338Snp
1786239338Snpvoid
1787239338Snpt4_uninit_cpl_io_handlers(struct adapter *sc)
1788239338Snp{
1789239338Snp
1790239338Snp	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
1791239338Snp}
1792237263Snp#endif
1793