/* t4_connect.c revision 346805 */
1/*-
2 * Copyright (c) 2012 Chelsio Communications, Inc.
3 * All rights reserved.
4 * Written by: Navdeep Parhar <np@FreeBSD.org>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28#include <sys/cdefs.h>
29__FBSDID("$FreeBSD: stable/11/sys/dev/cxgbe/tom/t4_connect.c 346805 2019-04-28 06:51:59Z np $");
30
31#include "opt_inet.h"
32#include "opt_inet6.h"
33
34#ifdef TCP_OFFLOAD
35#include <sys/param.h>
36#include <sys/systm.h>
37#include <sys/kernel.h>
38#include <sys/ktr.h>
39#include <sys/module.h>
40#include <sys/protosw.h>
41#include <sys/domain.h>
42#include <sys/socket.h>
43#include <sys/socketvar.h>
44#include <sys/sysctl.h>
45#include <net/ethernet.h>
46#include <net/if.h>
47#include <net/if_types.h>
48#include <net/if_vlan_var.h>
49#include <net/route.h>
50#include <netinet/in.h>
51#include <netinet/in_pcb.h>
52#include <netinet/ip.h>
53#define TCPSTATES
54#include <netinet/tcp_fsm.h>
55#include <netinet/tcp_var.h>
56#include <netinet/toecore.h>
57#include <netinet/cc/cc.h>
58
59#include "common/common.h"
60#include "common/t4_msg.h"
61#include "common/t4_regs.h"
62#include "common/t4_regs_values.h"
63#include "tom/t4_tom_l2t.h"
64#include "tom/t4_tom.h"
65
66/* atid services */
67static int alloc_atid(struct adapter *, void *);
68static void *lookup_atid(struct adapter *, int);
69static void free_atid(struct adapter *, int);
70
71static int
72alloc_atid(struct adapter *sc, void *ctx)
73{
74	struct tid_info *t = &sc->tids;
75	int atid = -1;
76
77	mtx_lock(&t->atid_lock);
78	if (t->afree) {
79		union aopen_entry *p = t->afree;
80
81		atid = p - t->atid_tab;
82		t->afree = p->next;
83		p->data = ctx;
84		t->atids_in_use++;
85	}
86	mtx_unlock(&t->atid_lock);
87	return (atid);
88}
89
90static void *
91lookup_atid(struct adapter *sc, int atid)
92{
93	struct tid_info *t = &sc->tids;
94
95	return (t->atid_tab[atid].data);
96}
97
98static void
99free_atid(struct adapter *sc, int atid)
100{
101	struct tid_info *t = &sc->tids;
102	union aopen_entry *p = &t->atid_tab[atid];
103
104	mtx_lock(&t->atid_lock);
105	p->next = t->afree;
106	t->afree = p;
107	t->atids_in_use--;
108	mtx_unlock(&t->atid_lock);
109}
110
111/*
112 * Active open succeeded.
113 */
static int
do_act_establish(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	/* The CPL payload immediately follows the rss header. */
	const struct cpl_act_establish *cpl = (const void *)(rss + 1);
	u_int tid = GET_TID(cpl);
	u_int atid = G_TID_TID(ntohl(cpl->tos_atid));
	struct toepcb *toep = lookup_atid(sc, atid);
	struct inpcb *inp = toep->inp;

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == atid, ("%s: toep tid/atid mismatch", __func__));

	CTR3(KTR_CXGBE, "%s: atid %u, tid %u", __func__, atid, tid);
	/* The atid has served its purpose; the connection is tracked by tid
	 * from here on. */
	free_atid(sc, atid);

	CURVNET_SET(toep->vnet);
	INP_WLOCK(inp);
	toep->tid = tid;
	/* Last arg is the number of tid slots consumed — presumably an IPv6
	 * connection occupies two; TODO confirm against insert_tid(). */
	insert_tid(sc, tid, toep, inp->inp_vflag & INP_IPV6 ? 2 : 1);
	if (inp->inp_flags & INP_DROPPED) {

		/* socket closed by the kernel before hw told us it connected */

		send_flowc_wr(toep, NULL);
		send_reset(sc, toep, be32toh(cpl->snd_isn));
		goto done;
	}

	make_established(toep, cpl->snd_isn, cpl->rcv_isn, cpl->tcp_opt);

	/* TLS offload needs extra per-connection setup once established. */
	if (toep->ulp_mode == ULP_MODE_TLS)
		tls_establish(toep);

done:
	INP_WUNLOCK(inp);
	CURVNET_RESTORE();
	return (0);
}
154
155/*
156 * Convert an ACT_OPEN_RPL status to an errno.
157 */
/*
 * Convert an ACT_OPEN_RPL status to an errno.
 */
static inline int
act_open_rpl_status_to_errno(int status)
{
	int rc;

	if (status == CPL_ERR_CONN_RESET)
		rc = ECONNREFUSED;
	else if (status == CPL_ERR_ARP_MISS)
		rc = EHOSTUNREACH;
	else if (status == CPL_ERR_CONN_TIMEDOUT)
		rc = ETIMEDOUT;
	else if (status == CPL_ERR_TCAM_FULL)
		rc = EAGAIN;	/* filter/server TCAM full; may succeed later */
	else if (status == CPL_ERR_CONN_EXIST) {
		log(LOG_ERR, "ACTIVE_OPEN_RPL: 4-tuple in use\n");
		rc = EAGAIN;
	} else
		rc = EIO;	/* anything unrecognized */

	return (rc);
}
178
/*
 * Tear down driver state for an active open that failed and notify the
 * stack.  status is an errno value; EAGAIN means the connection will be
 * retried (presumably via the regular host stack, so the inp is not
 * dropped and the tcbinfo lock is not needed — TODO confirm against
 * toe_connect_failed()).
 */
void
act_open_failure_cleanup(struct adapter *sc, u_int atid, u_int status)
{
	struct toepcb *toep = lookup_atid(sc, atid);
	struct inpcb *inp = toep->inp;
	struct toedev *tod = &toep->td->tod;

	free_atid(sc, atid);
	toep->tid = -1;	/* no longer a valid atid/tid */

	CURVNET_SET(toep->vnet);
	if (status != EAGAIN)
		INP_INFO_RLOCK(&V_tcbinfo);
	INP_WLOCK(inp);
	toe_connect_failed(tod, inp, status);
	final_cpl_received(toep);	/* unlocks inp */
	if (status != EAGAIN)
		INP_INFO_RUNLOCK(&V_tcbinfo);
	CURVNET_RESTORE();
}
199
200/*
201 * Active open failed.
202 */
static int
do_act_open_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
	/* atid_status packs both the atid and the failure status. */
	u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
	u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
	struct toepcb *toep = lookup_atid(sc, atid);
	int rc;

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == atid, ("%s: toep tid/atid mismatch", __func__));

	CTR3(KTR_CXGBE, "%s: atid %u, status %u ", __func__, atid, status);

	/* Ignore negative advice */
	if (negative_advice(status))
		return (0);

	/* Some failure statuses come with a hardware tid that must be
	 * returned to the firmware explicitly. */
	if (status && act_open_has_tid(status))
		release_tid(sc, GET_TID(cpl), toep->ctrlq);

	rc = act_open_rpl_status_to_errno(status);
	act_open_failure_cleanup(sc, atid, rc);

	return (0);
}
231
232/*
233 * Options2 for active open.
234 */
235static uint32_t
236calc_opt2a(struct socket *so, struct toepcb *toep,
237    const struct offload_settings *s)
238{
239	struct tcpcb *tp = so_sototcpcb(so);
240	struct port_info *pi = toep->vi->pi;
241	struct adapter *sc = pi->adapter;
242	uint32_t opt2 = 0;
243
244	/*
245	 * rx flow control, rx coalesce, congestion control, and tx pace are all
246	 * explicitly set by the driver.  On T5+ the ISS is also set by the
247	 * driver to the value picked by the kernel.
248	 */
249	if (is_t4(sc)) {
250		opt2 |= F_RX_FC_VALID | F_RX_COALESCE_VALID;
251		opt2 |= F_CONG_CNTRL_VALID | F_PACE_VALID;
252	} else {
253		opt2 |= F_T5_OPT_2_VALID;	/* all 4 valid */
254		opt2 |= F_T5_ISS;		/* ISS provided in CPL */
255	}
256
257	if (s->sack > 0 || (s->sack < 0 && (tp->t_flags & TF_SACK_PERMIT)))
258		opt2 |= F_SACK_EN;
259
260	if (s->tstamp > 0 || (s->tstamp < 0 && (tp->t_flags & TF_REQ_TSTMP)))
261		opt2 |= F_TSTAMPS_EN;
262
263	if (tp->t_flags & TF_REQ_SCALE)
264		opt2 |= F_WND_SCALE_EN;
265
266	if (s->ecn > 0 || (s->ecn < 0 && V_tcp_do_ecn == 1))
267		opt2 |= F_CCTRL_ECN;
268
269	/* XXX: F_RX_CHANNEL for multiple rx c-chan support goes here. */
270
271	opt2 |= V_TX_QUEUE(sc->params.tp.tx_modq[pi->tx_chan]);
272
273	/* These defaults are subject to ULP specific fixups later. */
274	opt2 |= V_RX_FC_DDP(0) | V_RX_FC_DISABLE(0);
275
276	opt2 |= V_PACE(0);
277
278	if (s->cong_algo >= 0)
279		opt2 |= V_CONG_CNTRL(s->cong_algo);
280	else if (sc->tt.cong_algorithm >= 0)
281		opt2 |= V_CONG_CNTRL(sc->tt.cong_algorithm & M_CONG_CNTRL);
282	else {
283		struct cc_algo *cc = CC_ALGO(tp);
284
285		if (strcasecmp(cc->name, "reno") == 0)
286			opt2 |= V_CONG_CNTRL(CONG_ALG_RENO);
287		else if (strcasecmp(cc->name, "tahoe") == 0)
288			opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
289		if (strcasecmp(cc->name, "newreno") == 0)
290			opt2 |= V_CONG_CNTRL(CONG_ALG_NEWRENO);
291		if (strcasecmp(cc->name, "highspeed") == 0)
292			opt2 |= V_CONG_CNTRL(CONG_ALG_HIGHSPEED);
293		else {
294			/*
295			 * Use newreno in case the algorithm selected by the
296			 * host stack is not supported by the hardware.
297			 */
298			opt2 |= V_CONG_CNTRL(CONG_ALG_NEWRENO);
299		}
300	}
301
302	if (s->rx_coalesce > 0 || (s->rx_coalesce < 0 && sc->tt.rx_coalesce))
303		opt2 |= V_RX_COALESCE(M_RX_COALESCE);
304
305	/* Note that ofld_rxq is already set according to s->rxq. */
306	opt2 |= F_RSS_QUEUE_VALID;
307	opt2 |= V_RSS_QUEUE(toep->ofld_rxq->iq.abs_id);
308
309#ifdef USE_DDP_RX_FLOW_CONTROL
310	if (toep->ulp_mode == ULP_MODE_TCPDDP)
311		opt2 |= F_RX_FC_DDP;
312#endif
313
314	if (toep->ulp_mode == ULP_MODE_TLS) {
315		opt2 &= ~V_RX_COALESCE(M_RX_COALESCE);
316		opt2 |= F_RX_FC_DISABLE;
317	}
318
319	return (htobe32(opt2));
320}
321
322void
323t4_init_connect_cpl_handlers(void)
324{
325
326	t4_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
327	t4_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
328}
329
330void
331t4_uninit_connect_cpl_handlers(void)
332{
333
334	t4_register_cpl_handler(CPL_ACT_ESTABLISH, NULL);
335	t4_register_cpl_handler(CPL_ACT_OPEN_RPL, NULL);
336}
337
/*
 * Abort the offload attempt in t4_connect(): record the source line that
 * declined (reported via CTR3 in the failure path), set the errno to
 * return, and jump to the common cleanup label.
 */
#define DONT_OFFLOAD_ACTIVE_OPEN(x)	do { \
	reason = __LINE__; \
	rc = (x); \
	goto failed; \
} while (0)
343
344static inline int
345act_open_cpl_size(struct adapter *sc, int isipv6)
346{
347	int idx;
348	static const int sz_table[3][2] = {
349		{
350			sizeof (struct cpl_act_open_req),
351			sizeof (struct cpl_act_open_req6)
352		},
353		{
354			sizeof (struct cpl_t5_act_open_req),
355			sizeof (struct cpl_t5_act_open_req6)
356		},
357		{
358			sizeof (struct cpl_t6_act_open_req),
359			sizeof (struct cpl_t6_act_open_req6)
360		},
361	};
362
363	MPASS(chip_id(sc) >= CHELSIO_T4);
364	idx = min(chip_id(sc) - CHELSIO_T4, 2);
365
366	return (sz_table[idx][!!isipv6]);
367}
368
369/*
370 * active open (soconnect).
371 *
372 * State of affairs on entry:
373 * soisconnecting (so_state |= SS_ISCONNECTING)
374 * tcbinfo not locked (This has changed - used to be WLOCKed)
375 * inp WLOCKed
376 * tp->t_state = TCPS_SYN_SENT
377 * rtalloc1, RT_UNLOCK on rt.
378 */
int
t4_connect(struct toedev *tod, struct socket *so, struct rtentry *rt,
    struct sockaddr *nam)
{
	struct adapter *sc = tod->tod_softc;
	struct tom_data *td = tod_td(tod);
	struct toepcb *toep = NULL;
	struct wrqe *wr = NULL;
	struct ifnet *rt_ifp = rt->rt_ifp;
	struct vi_info *vi;
	int mtu_idx, rscale, qid_atid, rc, isipv6, txqid, rxqid;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	int reason;	/* __LINE__ of the DONT_OFFLOAD_ACTIVE_OPEN that fired */
	struct offload_settings settings;
	uint16_t vid = 0xffff;	/* wildcard vlan id for the policy lookup */

	INP_WLOCK_ASSERT(inp);
	KASSERT(nam->sa_family == AF_INET || nam->sa_family == AF_INET6,
	    ("%s: dest addr %p has family %u", __func__, nam, nam->sa_family));

	/*
	 * Find the vi_info behind the route's ifnet.  Only plain Ethernet
	 * and vlan-over-Ethernet interfaces are offload capable here.
	 */
	if (rt_ifp->if_type == IFT_ETHER)
		vi = rt_ifp->if_softc;
	else if (rt_ifp->if_type == IFT_L2VLAN) {
		struct ifnet *ifp = VLAN_COOKIE(rt_ifp);

		vi = ifp->if_softc;
		VLAN_TAG(rt_ifp, &vid);
	} else if (rt_ifp->if_type == IFT_IEEE8023ADLAG)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOSYS); /* XXX: implement lagg+TOE */
	else
		DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);

	/* The offload policy may veto the offload or pin queue selection. */
	rw_rlock(&sc->policy_lock);
	settings = *lookup_offload_policy(sc, OPEN_TYPE_ACTIVE, NULL, vid, inp);
	rw_runlock(&sc->policy_lock);
	if (!settings.offload)
		DONT_OFFLOAD_ACTIVE_OPEN(EPERM);

	/* Offload tx/rx queues: policy-specified when valid, else random. */
	if (settings.txq >= 0 && settings.txq < vi->nofldtxq)
		txqid = settings.txq;
	else
		txqid = arc4random() % vi->nofldtxq;
	txqid += vi->first_ofld_txq;
	if (settings.rxq >= 0 && settings.rxq < vi->nofldrxq)
		rxqid = settings.rxq;
	else
		rxqid = arc4random() % vi->nofldrxq;
	rxqid += vi->first_ofld_rxq;

	toep = alloc_toepcb(vi, txqid, rxqid, M_NOWAIT | M_ZERO);
	if (toep == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	/* toep->tid holds the atid until the connection is established. */
	toep->tid = alloc_atid(sc, toep);
	if (toep->tid < 0)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	/* L2 entry for the next hop (the gateway if the route has one). */
	toep->l2te = t4_l2t_get(vi->pi, rt_ifp,
	    rt->rt_flags & RTF_GATEWAY ? rt->rt_gateway : nam);
	if (toep->l2te == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	isipv6 = nam->sa_family == AF_INET6;
	wr = alloc_wrqe(act_open_cpl_size(sc, isipv6), toep->ctrlq);
	if (wr == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	toep->vnet = so->so_vnet;
	set_ulp_mode(toep, select_ulp_mode(so, sc, &settings));
	SOCKBUF_LOCK(&so->so_rcv);
	/* opt0 rcv_bufsiz initially, assumes its normal meaning later */
	toep->rx_credits = min(select_rcv_wnd(so) >> 10, M_RCV_BUFSIZ);
	SOCKBUF_UNLOCK(&so->so_rcv);

	/*
	 * The kernel sets request_r_scale based on sb_max whereas we need to
	 * take hardware's MAX_RCV_WND into account too.  This is normally a
	 * no-op as MAX_RCV_WND is much larger than the default sb_max.
	 */
	if (tp->t_flags & TF_REQ_SCALE)
		rscale = tp->request_r_scale = select_rcv_wscale();
	else
		rscale = 0;
	mtu_idx = find_best_mtu_idx(sc, &inp->inp_inc, &settings);
	/* Reply iq id in the upper bits, atid in the low 14. */
	qid_atid = (toep->ofld_rxq->iq.abs_id << 14) | toep->tid;

	if (isipv6) {
		/* All three CPL layouts share a common prefix; pick the
		 * fields to fill by chip generation. */
		struct cpl_act_open_req6 *cpl = wrtod(wr);
		struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
		struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;

		if ((inp->inp_vflag & INP_IPV6) == 0)
			DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);

		/* Reference on the local IPv6 address (released on failure). */
		toep->ce = hold_lip(td, &inp->in6p_laddr, NULL);
		if (toep->ce == NULL)
			DONT_OFFLOAD_ACTIVE_OPEN(ENOENT);

		switch (chip_id(sc)) {
		case CHELSIO_T4:
			INIT_TP_WR(cpl, 0);
			cpl->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T5:
			INIT_TP_WR(cpl5, 0);
			cpl5->iss = htobe32(tp->iss);
			cpl5->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T6:
		default:
			INIT_TP_WR(cpl6, 0);
			cpl6->iss = htobe32(tp->iss);
			cpl6->params = select_ntuple(vi, toep->l2te);
			break;
		}
		OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
		    qid_atid));
		cpl->local_port = inp->inp_lport;
		cpl->local_ip_hi = *(uint64_t *)&inp->in6p_laddr.s6_addr[0];
		cpl->local_ip_lo = *(uint64_t *)&inp->in6p_laddr.s6_addr[8];
		cpl->peer_port = inp->inp_fport;
		cpl->peer_ip_hi = *(uint64_t *)&inp->in6p_faddr.s6_addr[0];
		cpl->peer_ip_lo = *(uint64_t *)&inp->in6p_faddr.s6_addr[8];
		cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale,
		    toep->rx_credits, toep->ulp_mode, &settings);
		cpl->opt2 = calc_opt2a(so, toep, &settings);
	} else {
		struct cpl_act_open_req *cpl = wrtod(wr);
		struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
		struct cpl_t6_act_open_req *cpl6 = (void *)cpl;

		switch (chip_id(sc)) {
		case CHELSIO_T4:
			INIT_TP_WR(cpl, 0);
			cpl->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T5:
			INIT_TP_WR(cpl5, 0);
			cpl5->iss = htobe32(tp->iss);
			cpl5->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T6:
		default:
			INIT_TP_WR(cpl6, 0);
			cpl6->iss = htobe32(tp->iss);
			cpl6->params = select_ntuple(vi, toep->l2te);
			break;
		}
		OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
		    qid_atid));
		inp_4tuple_get(inp, &cpl->local_ip, &cpl->local_port,
		    &cpl->peer_ip, &cpl->peer_port);
		cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale,
		    toep->rx_credits, toep->ulp_mode, &settings);
		cpl->opt2 = calc_opt2a(so, toep, &settings);
	}

	CTR5(KTR_CXGBE, "%s: atid %u (%s), toep %p, inp %p", __func__,
	    toep->tid, tcpstates[tp->t_state], toep, inp);

	offload_socket(so, toep);
	/* Hand the work request to the L2 layer; it is sent once the L2
	 * entry is resolved. */
	rc = t4_l2t_send(sc, wr, toep->l2te);
	if (rc == 0) {
		/* Success: ownership of wr and toep passes to the hardware
		 * completion path (do_act_establish / do_act_open_rpl). */
		toep->flags |= TPF_CPL_PENDING;
		return (0);
	}

	undo_offload_socket(so);
	reason = __LINE__;
failed:
	/* Common cleanup: release whatever was acquired before the failure. */
	CTR3(KTR_CXGBE, "%s: not offloading (%d), rc %d", __func__, reason, rc);

	if (wr)
		free_wrqe(wr);

	if (toep) {
		if (toep->tid >= 0)
			free_atid(sc, toep->tid);
		if (toep->l2te)
			t4_l2t_release(toep->l2te);
		if (toep->ce)
			release_lip(td, toep->ce);
		free_toepcb(toep);
	}

	return (rc);
}
567#endif
568