1330884Sjhb/*-
2330884Sjhb * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3330884Sjhb *
4330884Sjhb * Copyright (c) 2017-2018 Chelsio Communications, Inc.
5330884Sjhb * All rights reserved.
6330884Sjhb * Written by: John Baldwin <jhb@FreeBSD.org>
7330884Sjhb *
8330884Sjhb * Redistribution and use in source and binary forms, with or without
9330884Sjhb * modification, are permitted provided that the following conditions
10330884Sjhb * are met:
11330884Sjhb * 1. Redistributions of source code must retain the above copyright
12330884Sjhb *    notice, this list of conditions and the following disclaimer.
13330884Sjhb * 2. Redistributions in binary form must reproduce the above copyright
14330884Sjhb *    notice, this list of conditions and the following disclaimer in the
15330884Sjhb *    documentation and/or other materials provided with the distribution.
16330884Sjhb *
17330884Sjhb * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18330884Sjhb * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19330884Sjhb * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20330884Sjhb * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21330884Sjhb * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22330884Sjhb * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23330884Sjhb * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24330884Sjhb * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25330884Sjhb * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26330884Sjhb * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27330884Sjhb * SUCH DAMAGE.
28330884Sjhb */
29330884Sjhb
30330884Sjhb#include "opt_inet.h"
31330884Sjhb
32330884Sjhb#include <sys/cdefs.h>
33330884Sjhb__FBSDID("$FreeBSD: stable/11/sys/dev/cxgbe/tom/t4_tls.c 351228 2019-08-19 18:50:56Z jhb $");
34330884Sjhb
35330884Sjhb#include <sys/param.h>
36330884Sjhb#include <sys/sglist.h>
37330884Sjhb#include <sys/socket.h>
38330884Sjhb#include <sys/socketvar.h>
39330884Sjhb#include <sys/systm.h>
40330884Sjhb#include <netinet/in.h>
41330884Sjhb#include <netinet/in_pcb.h>
42330884Sjhb#include <netinet/tcp_var.h>
43330884Sjhb#include <netinet/toecore.h>
44330884Sjhb
45330884Sjhb#ifdef TCP_OFFLOAD
46330884Sjhb#include "common/common.h"
47330884Sjhb#include "common/t4_tcb.h"
48330884Sjhb#include "tom/t4_tom_l2t.h"
49330884Sjhb#include "tom/t4_tom.h"
50330884Sjhb
/*
 * Mirror the host TCP stack's per-VNET send/receive buffer autotuning
 * knobs so offloaded TLS connections can size their socket buffers the
 * same way the software stack does.
 */
VNET_DECLARE(int, tcp_do_autosndbuf);
#define V_tcp_do_autosndbuf VNET(tcp_do_autosndbuf)
VNET_DECLARE(int, tcp_autosndbuf_inc);
#define V_tcp_autosndbuf_inc VNET(tcp_autosndbuf_inc)
VNET_DECLARE(int, tcp_autosndbuf_max);
#define V_tcp_autosndbuf_max VNET(tcp_autosndbuf_max)
VNET_DECLARE(int, tcp_do_autorcvbuf);
#define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf)
VNET_DECLARE(int, tcp_autorcvbuf_inc);
#define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc)
VNET_DECLARE(int, tcp_autorcvbuf_max);
#define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max)
63345664Sjhb
/*
 * The TCP sequence number of a CPL_TLS_DATA mbuf is saved here while
 * the mbuf is in the ulp_pdu_reclaimq.
 */
#define	tls_tcp_seq	PH_loc.thirtytwo[0]

/*
 * Handshake lock used for the handshake timer.  Having a global lock
 * is perhaps not ideal, but it avoids having to use callout_drain()
 * in tls_uninit_toep() which can't block.  Also, the timer shouldn't
 * actually fire for most connections.
 *
 * NOTE(review): presumably initialized/destroyed elsewhere in this
 * file (not visible in this chunk) -- confirm before relying on it.
 */
static struct mtx tls_handshake_lock;
77330884Sjhb
/*
 * Convenience wrapper to update one TCB field on this connection via
 * its offload Tx queue.  The trailing (0, 0) arguments request no
 * completion cookie/reply for the update.
 */
static void
t4_set_tls_tcb_field(struct toepcb *toep, uint16_t word, uint64_t mask,
    uint64_t val)
{
	struct adapter *sc = td_adapter(toep->td);

	t4_set_tcb_field(sc, toep->ofld_txq, toep, word, mask, val, 0, 0);
}
86330884Sjhb
87330884Sjhb/* TLS and DTLS common routines */
88345664Sjhbbool
89345664Sjhbcan_tls_offload(struct adapter *sc)
90345664Sjhb{
91345664Sjhb
92345664Sjhb	return (sc->tt.tls && sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS);
93345664Sjhb}
94345664Sjhb
95330884Sjhbint
96330884Sjhbtls_tx_key(struct toepcb *toep)
97330884Sjhb{
98330884Sjhb	struct tls_ofld_info *tls_ofld = &toep->tls;
99330884Sjhb
100330884Sjhb	return (tls_ofld->tx_key_addr >= 0);
101330884Sjhb}
102330884Sjhb
103330884Sjhbint
104330884Sjhbtls_rx_key(struct toepcb *toep)
105330884Sjhb{
106330884Sjhb	struct tls_ofld_info *tls_ofld = &toep->tls;
107330884Sjhb
108330884Sjhb	return (tls_ofld->rx_key_addr >= 0);
109330884Sjhb}
110330884Sjhb
111330884Sjhbstatic int
112330884Sjhbkey_size(struct toepcb *toep)
113330884Sjhb{
114330884Sjhb	struct tls_ofld_info *tls_ofld = &toep->tls;
115330884Sjhb
116330884Sjhb	return ((tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_IMMEDIATE) ?
117330884Sjhb		tls_ofld->k_ctx.tx_key_info_size : KEY_IN_DDR_SIZE);
118330884Sjhb}
119330884Sjhb
120330884Sjhb/* Set TLS Key-Id in TCB */
static void
t4_set_tls_keyid(struct toepcb *toep, unsigned int key_id)
{

	/*
	 * NOTE(review): the mask is built with M_TCB_RX_TLS_BUF_TAG
	 * while the value uses the KEY_TAG field.  This only works if
	 * both fields have the same width in t4_tcb.h -- verify.
	 */
	t4_set_tls_tcb_field(toep, W_TCB_RX_TLS_KEY_TAG,
			 V_TCB_RX_TLS_KEY_TAG(M_TCB_RX_TLS_BUF_TAG),
			 V_TCB_RX_TLS_KEY_TAG(key_id));
}
129330884Sjhb
130330884Sjhb/* Clear TF_RX_QUIESCE to re-enable receive. */
static void
t4_clear_rx_quiesce(struct toepcb *toep)
{

	/* Mask selects TF_RX_QUIESCE; value 0 clears it. */
	t4_set_tls_tcb_field(toep, W_TCB_T_FLAGS, V_TF_RX_QUIESCE(1), 0);
}
137330884Sjhb
/*
 * Drop out of full TLS offload mode: stop the handshake timer, keep
 * only PDU extraction enabled in the TCB, and re-enable receive.
 */
static void
tls_clr_ofld_mode(struct toepcb *toep)
{

	tls_stop_handshake_timer(toep);

	/* Operate in PDU extraction mode only. */
	t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW,
	    V_TCB_ULP_RAW(M_TCB_ULP_RAW),
	    V_TCB_ULP_RAW(V_TF_TLS_ENABLE(1)));
	t4_clear_rx_quiesce(toep);
}
150330884Sjhb
/*
 * Resume receive without changing the TLS mode: stop the handshake
 * timer and clear the Rx quiesce flag.
 */
static void
tls_clr_quiesce(struct toepcb *toep)
{

	tls_stop_handshake_timer(toep);
	t4_clear_rx_quiesce(toep);
}
158330884Sjhb
159330884Sjhb/*
160330884Sjhb * Calculate the TLS data expansion size
161330884Sjhb */
static int
tls_expansion_size(struct toepcb *toep, int data_len, int full_pdus_only,
    unsigned short *pdus_per_ulp)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;
	struct tls_scmd *scmd = &tls_ofld->scmd0;
	int expn_size = 0, frag_count = 0, pad_per_pdu = 0,
	    pad_last_pdu = 0, last_frag_size = 0, max_frag_size = 0;
	int exp_per_pdu = 0;
	int hdr_len = TLS_HEADER_LENGTH;

	/* do { } while (0) exists only so "break" can short-circuit. */
	do {
		max_frag_size = tls_ofld->k_ctx.frag_size;
		if (G_SCMD_CIPH_MODE(scmd->seqno_numivs) ==
		   SCMD_CIPH_MODE_AES_GCM) {
			/*
			 * AES-GCM: fixed per-record overhead of tag,
			 * explicit nonce, and record header.
			 */
			frag_count = (data_len / max_frag_size);
			exp_per_pdu = GCM_TAG_SIZE + AEAD_EXPLICIT_DATA_SIZE +
				hdr_len;
			expn_size =  frag_count * exp_per_pdu;
			if (full_pdus_only) {
				/* Clamp PDUs per ULP to [1, 32]. */
				*pdus_per_ulp = data_len / (exp_per_pdu +
					max_frag_size);
				if (*pdus_per_ulp > 32)
					*pdus_per_ulp = 32;
				else if(!*pdus_per_ulp)
					*pdus_per_ulp = 1;
				expn_size = (*pdus_per_ulp) * exp_per_pdu;
				break;
			}
			/* A partial trailing record costs one more PDU. */
			if ((last_frag_size = data_len % max_frag_size) > 0) {
				frag_count += 1;
				expn_size += exp_per_pdu;
			}
			break;
		} else if (G_SCMD_CIPH_MODE(scmd->seqno_numivs) !=
			   SCMD_CIPH_MODE_NOP) {
			/* Calculate the number of fragments we can make */
			frag_count  = (data_len / max_frag_size);
			if (frag_count > 0) {
				/*
				 * CBC-style padding: round (payload +
				 * MAC) up to the cipher block size; a
				 * zero remainder still needs one full
				 * block of padding.
				 */
				pad_per_pdu = (((howmany((max_frag_size +
						       tls_ofld->mac_length),
						      CIPHER_BLOCK_SIZE)) *
						CIPHER_BLOCK_SIZE) -
					       (max_frag_size +
						tls_ofld->mac_length));
				if (!pad_per_pdu)
					pad_per_pdu = CIPHER_BLOCK_SIZE;
				/* Padding + MAC + header + IV block. */
				exp_per_pdu = pad_per_pdu +
				       	tls_ofld->mac_length +
					hdr_len + CIPHER_BLOCK_SIZE;
				expn_size = frag_count * exp_per_pdu;
			}
			if (full_pdus_only) {
				/* Clamp PDUs per ULP to [1, 32]. */
				*pdus_per_ulp = data_len / (exp_per_pdu +
					max_frag_size);
				if (*pdus_per_ulp > 32)
					*pdus_per_ulp = 32;
				else if (!*pdus_per_ulp)
					*pdus_per_ulp = 1;
				expn_size = (*pdus_per_ulp) * exp_per_pdu;
				break;
			}
			/* Consider the last fragment */
			if ((last_frag_size = data_len % max_frag_size) > 0) {
				pad_last_pdu = (((howmany((last_frag_size +
							tls_ofld->mac_length),
						       CIPHER_BLOCK_SIZE)) *
						 CIPHER_BLOCK_SIZE) -
						(last_frag_size +
						 tls_ofld->mac_length));
				if (!pad_last_pdu)
					pad_last_pdu = CIPHER_BLOCK_SIZE;
				expn_size += (pad_last_pdu +
					      tls_ofld->mac_length + hdr_len +
					      CIPHER_BLOCK_SIZE);
			}
		}
	} while (0);

	return (expn_size);
}
243330884Sjhb
244330884Sjhb/* Copy Key to WR */
static void
tls_copy_tx_key(struct toepcb *toep, void *dst)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;
	struct ulptx_sc_memrd *sc_memrd;
	struct ulptx_idata *sc;

	/* Nothing to emit if no key material has been set up. */
	if (tls_ofld->k_ctx.tx_key_info_size <= 0)
		return;

	if (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_DDR) {
		/*
		 * Key lives in adapter memory: emit a NOOP sub-command
		 * followed by a MEMRD referencing the stored key
		 * context (length in 16-byte units, address in 32-byte
		 * units).
		 */
		sc = dst;
		sc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		sc->len = htobe32(0);
		sc_memrd = (struct ulptx_sc_memrd *)(sc + 1);
		sc_memrd->cmd_to_len = htobe32(V_ULPTX_CMD(ULP_TX_SC_MEMRD) |
		    V_ULP_TX_SC_MORE(1) |
		    V_ULPTX_LEN16(tls_ofld->k_ctx.tx_key_info_size >> 4));
		sc_memrd->addr = htobe32(tls_ofld->tx_key_addr >> 5);
	} else if (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_IMMEDIATE) {
		/* Inline key: copy the raw key context into the WR. */
		memcpy(dst, &tls_ofld->k_ctx.tx,
		    tls_ofld->k_ctx.tx_key_info_size);
	}
}
269330884Sjhb
270330884Sjhb/* TLS/DTLS content type  for CPL SFO */
271330884Sjhbstatic inline unsigned char
272330884Sjhbtls_content_type(unsigned char content_type)
273330884Sjhb{
274330884Sjhb	/*
275330884Sjhb	 * XXX: Shouldn't this map CONTENT_TYPE_APP_DATA to DATA and
276330884Sjhb	 * default to "CUSTOM" for all other types including
277330884Sjhb	 * heartbeat?
278330884Sjhb	 */
279330884Sjhb	switch (content_type) {
280330884Sjhb	case CONTENT_TYPE_CCS:
281330884Sjhb		return CPL_TX_TLS_SFO_TYPE_CCS;
282330884Sjhb	case CONTENT_TYPE_ALERT:
283330884Sjhb		return CPL_TX_TLS_SFO_TYPE_ALERT;
284330884Sjhb	case CONTENT_TYPE_HANDSHAKE:
285330884Sjhb		return CPL_TX_TLS_SFO_TYPE_HANDSHAKE;
286330884Sjhb	case CONTENT_TYPE_HEARTBEAT:
287330884Sjhb		return CPL_TX_TLS_SFO_TYPE_HEARTBEAT;
288330884Sjhb	}
289330884Sjhb	return CPL_TX_TLS_SFO_TYPE_DATA;
290330884Sjhb}
291330884Sjhb
292330884Sjhbstatic unsigned char
293330884Sjhbget_cipher_key_size(unsigned int ck_size)
294330884Sjhb{
295330884Sjhb	switch (ck_size) {
296330884Sjhb	case AES_NOP: /* NOP */
297330884Sjhb		return 15;
298330884Sjhb	case AES_128: /* AES128 */
299330884Sjhb		return CH_CK_SIZE_128;
300330884Sjhb	case AES_192: /* AES192 */
301330884Sjhb		return CH_CK_SIZE_192;
302330884Sjhb	case AES_256: /* AES256 */
303330884Sjhb		return CH_CK_SIZE_256;
304330884Sjhb	default:
305330884Sjhb		return CH_CK_SIZE_256;
306330884Sjhb	}
307330884Sjhb}
308330884Sjhb
309330884Sjhbstatic unsigned char
310330884Sjhbget_mac_key_size(unsigned int mk_size)
311330884Sjhb{
312330884Sjhb	switch (mk_size) {
313330884Sjhb	case SHA_NOP: /* NOP */
314330884Sjhb		return CH_MK_SIZE_128;
315330884Sjhb	case SHA_GHASH: /* GHASH */
316330884Sjhb	case SHA_512: /* SHA512 */
317330884Sjhb		return CH_MK_SIZE_512;
318330884Sjhb	case SHA_224: /* SHA2-224 */
319330884Sjhb		return CH_MK_SIZE_192;
320330884Sjhb	case SHA_256: /* SHA2-256*/
321330884Sjhb		return CH_MK_SIZE_256;
322330884Sjhb	case SHA_384: /* SHA384 */
323330884Sjhb		return CH_MK_SIZE_512;
324330884Sjhb	case SHA1: /* SHA1 */
325330884Sjhb	default:
326330884Sjhb		return CH_MK_SIZE_160;
327330884Sjhb	}
328330884Sjhb}
329330884Sjhb
330330884Sjhbstatic unsigned int
331330884Sjhbget_proto_ver(int proto_ver)
332330884Sjhb{
333330884Sjhb	switch (proto_ver) {
334330884Sjhb	case TLS1_2_VERSION:
335330884Sjhb		return TLS_1_2_VERSION;
336330884Sjhb	case TLS1_1_VERSION:
337330884Sjhb		return TLS_1_1_VERSION;
338330884Sjhb	case DTLS1_2_VERSION:
339330884Sjhb		return DTLS_1_2_VERSION;
340330884Sjhb	default:
341330884Sjhb		return TLS_VERSION_MAX;
342330884Sjhb	}
343330884Sjhb}
344330884Sjhb
/*
 * Fill the second flit of an Rx key context: byte offsets that tell
 * the crypto engine where the IV, AAD, authenticated region, and
 * ciphertext sit within a record, which differ between GCM and
 * CBC-style ciphers.
 */
static void
tls_rxkey_flit1(struct tls_keyctx *kwr, struct tls_key_context *kctx)
{

	if (kctx->state.enc_mode == CH_EVP_CIPH_GCM_MODE) {
		kwr->u.rxhdr.ivinsert_to_authinsrt =
		    htobe64(V_TLS_KEYCTX_TX_WR_IVINSERT(6ULL) |
			V_TLS_KEYCTX_TX_WR_AADSTRTOFST(1ULL) |
			V_TLS_KEYCTX_TX_WR_AADSTOPOFST(5ULL) |
			V_TLS_KEYCTX_TX_WR_AUTHSRTOFST(14ULL) |
			V_TLS_KEYCTX_TX_WR_AUTHSTOPOFST(16ULL) |
			V_TLS_KEYCTX_TX_WR_CIPHERSRTOFST(14ULL) |
			V_TLS_KEYCTX_TX_WR_CIPHERSTOPOFST(0ULL) |
			V_TLS_KEYCTX_TX_WR_AUTHINSRT(16ULL));
		/* GCM needs no outer-pad and no cipher/auth sequencing. */
		kwr->u.rxhdr.ivpresent_to_rxmk_size &=
			~(V_TLS_KEYCTX_TX_WR_RXOPAD_PRESENT(1));
		kwr->u.rxhdr.authmode_to_rxvalid &=
			~(V_TLS_KEYCTX_TX_WR_CIPHAUTHSEQCTRL(1));
	} else {
		kwr->u.rxhdr.ivinsert_to_authinsrt =
		    htobe64(V_TLS_KEYCTX_TX_WR_IVINSERT(6ULL) |
			V_TLS_KEYCTX_TX_WR_AADSTRTOFST(1ULL) |
			V_TLS_KEYCTX_TX_WR_AADSTOPOFST(5ULL) |
			V_TLS_KEYCTX_TX_WR_AUTHSRTOFST(22ULL) |
			V_TLS_KEYCTX_TX_WR_AUTHSTOPOFST(0ULL) |
			V_TLS_KEYCTX_TX_WR_CIPHERSRTOFST(22ULL) |
			V_TLS_KEYCTX_TX_WR_CIPHERSTOPOFST(0ULL) |
			V_TLS_KEYCTX_TX_WR_AUTHINSRT(0ULL));
	}
}
375330884Sjhb
376330884Sjhb/* Rx key */
static void
prepare_rxkey_wr(struct tls_keyctx *kwr, struct tls_key_context *kctx)
{
	unsigned int ck_size = kctx->cipher_secret_size;
	unsigned int mk_size = kctx->mac_secret_size;
	int proto_ver = kctx->proto_ver;

	/* Key context length in 16-byte flits, plus HMAC control bits. */
	kwr->u.rxhdr.flitcnt_hmacctrl =
		((kctx->tx_key_info_size >> 4) << 3) | kctx->hmac_ctrl;

	kwr->u.rxhdr.protover_ciphmode =
		V_TLS_KEYCTX_TX_WR_PROTOVER(get_proto_ver(proto_ver)) |
		V_TLS_KEYCTX_TX_WR_CIPHMODE(kctx->state.enc_mode);

	kwr->u.rxhdr.authmode_to_rxvalid =
		V_TLS_KEYCTX_TX_WR_AUTHMODE(kctx->state.auth_mode) |
		V_TLS_KEYCTX_TX_WR_CIPHAUTHSEQCTRL(1) |
		V_TLS_KEYCTX_TX_WR_SEQNUMCTRL(3) |
		V_TLS_KEYCTX_TX_WR_RXVALID(1);

	kwr->u.rxhdr.ivpresent_to_rxmk_size =
		V_TLS_KEYCTX_TX_WR_IVPRESENT(0) |
		V_TLS_KEYCTX_TX_WR_RXOPAD_PRESENT(1) |
		V_TLS_KEYCTX_TX_WR_RXCK_SIZE(get_cipher_key_size(ck_size)) |
		V_TLS_KEYCTX_TX_WR_RXMK_SIZE(get_mac_key_size(mk_size));

	/* Per-cipher offsets; also clears GCM-inapplicable bits above. */
	tls_rxkey_flit1(kwr, kctx);

	/* No key reversal for GCM */
	if (kctx->state.enc_mode != CH_EVP_CIPH_GCM_MODE) {
		/* Derive the AES decryption key, then append the pads. */
		t4_aes_getdeckey(kwr->keys.edkey, kctx->rx.key,
				 (kctx->cipher_secret_size << 3));
		memcpy(kwr->keys.edkey + kctx->cipher_secret_size,
		       kctx->rx.key + kctx->cipher_secret_size,
		       (IPAD_SIZE + OPAD_SIZE));
	} else {
		/* GCM: copy key as-is; salt is carried separately. */
		memcpy(kwr->keys.edkey, kctx->rx.key,
		       (kctx->tx_key_info_size - SALT_SIZE));
		memcpy(kwr->u.rxhdr.rxsalt, kctx->rx.salt, SALT_SIZE);
	}
}
418330884Sjhb
419330884Sjhb/* Tx key */
static void
prepare_txkey_wr(struct tls_keyctx *kwr, struct tls_key_context *kctx)
{
	unsigned int ck_size = kctx->cipher_secret_size;
	unsigned int mk_size = kctx->mac_secret_size;

	/* Key context length in 16-byte flits. */
	kwr->u.txhdr.ctxlen =
		(kctx->tx_key_info_size >> 4);
	kwr->u.txhdr.dualck_to_txvalid =
		V_TLS_KEYCTX_TX_WR_TXOPAD_PRESENT(1) |
		V_TLS_KEYCTX_TX_WR_SALT_PRESENT(1) |
		V_TLS_KEYCTX_TX_WR_TXCK_SIZE(get_cipher_key_size(ck_size)) |
		V_TLS_KEYCTX_TX_WR_TXMK_SIZE(get_mac_key_size(mk_size)) |
		V_TLS_KEYCTX_TX_WR_TXVALID(1);

	memcpy(kwr->keys.edkey, kctx->tx.key, HDR_KCTX_SIZE);
	if (kctx->state.enc_mode == CH_EVP_CIPH_GCM_MODE) {
		/* GCM carries a salt and has no outer pad. */
		memcpy(kwr->u.txhdr.txsalt, kctx->tx.salt, SALT_SIZE);
		kwr->u.txhdr.dualck_to_txvalid &=
			~(V_TLS_KEYCTX_TX_WR_TXOPAD_PRESENT(1));
	}
	/* Convert to big-endian last, after all flag updates. */
	kwr->u.txhdr.dualck_to_txvalid = htons(kwr->u.txhdr.dualck_to_txvalid);
}
443330884Sjhb
444330884Sjhb/* TLS Key memory management */
445330884Sjhbstatic int
446330884Sjhbget_new_keyid(struct toepcb *toep, struct tls_key_context *k_ctx)
447330884Sjhb{
448345664Sjhb	struct adapter *sc = td_adapter(toep->td);
449330884Sjhb	vmem_addr_t addr;
450330884Sjhb
451345664Sjhb	if (vmem_alloc(sc->key_map, TLS_KEY_CONTEXT_SZ, M_NOWAIT | M_FIRSTFIT,
452330884Sjhb	    &addr) != 0)
453330884Sjhb		return (-1);
454330884Sjhb
455330884Sjhb	return (addr);
456330884Sjhb}
457330884Sjhb
458330884Sjhbstatic void
459330884Sjhbfree_keyid(struct toepcb *toep, int keyid)
460330884Sjhb{
461345664Sjhb	struct adapter *sc = td_adapter(toep->td);
462330884Sjhb
463345664Sjhb	vmem_free(sc->key_map, keyid, TLS_KEY_CONTEXT_SZ);
464330884Sjhb}
465330884Sjhb
466330884Sjhbstatic void
467330884Sjhbclear_tls_keyid(struct toepcb *toep)
468330884Sjhb{
469330884Sjhb	struct tls_ofld_info *tls_ofld = &toep->tls;
470330884Sjhb
471330884Sjhb	if (tls_ofld->rx_key_addr >= 0) {
472330884Sjhb		free_keyid(toep, tls_ofld->rx_key_addr);
473330884Sjhb		tls_ofld->rx_key_addr = -1;
474330884Sjhb	}
475330884Sjhb	if (tls_ofld->tx_key_addr >= 0) {
476330884Sjhb		free_keyid(toep, tls_ofld->tx_key_addr);
477330884Sjhb		tls_ofld->tx_key_addr = -1;
478330884Sjhb	}
479330884Sjhb}
480330884Sjhb
481330884Sjhbstatic int
482330884Sjhbget_keyid(struct tls_ofld_info *tls_ofld, unsigned int ops)
483330884Sjhb{
484330884Sjhb	return (ops & KEY_WRITE_RX ? tls_ofld->rx_key_addr :
485345664Sjhb		((ops & KEY_WRITE_TX) ? tls_ofld->tx_key_addr : -1));
486330884Sjhb}
487330884Sjhb
488330884Sjhbstatic int
489330884Sjhbget_tp_plen_max(struct tls_ofld_info *tls_ofld)
490330884Sjhb{
491330884Sjhb	int plen = ((min(3*4096, TP_TX_PG_SZ))/1448) * 1448;
492330884Sjhb
493330884Sjhb	return (tls_ofld->k_ctx.frag_size <= 8192 ? plen : FC_TP_PLEN_MAX);
494330884Sjhb}
495330884Sjhb
496330884Sjhb/* Send request to get the key-id */
static int
tls_program_key_id(struct toepcb *toep, struct tls_key_context *k_ctx)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;
	struct adapter *sc = td_adapter(toep->td);
	struct ofld_tx_sdesc *txsd;
	int kwrlen, kctxlen, keyid, len;
	struct wrqe *wr;
	struct tls_key_req *kwr;
	struct tls_keyctx *kctx;

	/* WR header plus the key context, padded to hardware units. */
	kwrlen = sizeof(*kwr);
	kctxlen = roundup2(sizeof(*kctx), 32);
	len = roundup2(kwrlen + kctxlen, 16);

	/* Need a free tx descriptor slot to track the WR's credits. */
	if (toep->txsd_avail == 0)
		return (EAGAIN);

	/* Dont initialize key for re-neg */
	if (!G_KEY_CLR_LOC(k_ctx->l_p_key)) {
		if ((keyid = get_new_keyid(toep, k_ctx)) < 0) {
			return (ENOSPC);
		}
	} else {
		keyid = get_keyid(tls_ofld, k_ctx->l_p_key);
	}

	wr = alloc_wrqe(len, toep->ofld_txq);
	if (wr == NULL) {
		free_keyid(toep, keyid);
		return (ENOMEM);
	}
	kwr = wrtod(wr);
	memset(kwr, 0, kwrlen);

	kwr->wr_hi = htobe32(V_FW_WR_OP(FW_ULPTX_WR) | F_FW_WR_COMPL |
	    F_FW_WR_ATOMIC);
	kwr->wr_mid = htobe32(V_FW_WR_LEN16(DIV_ROUND_UP(len, 16)) |
	    V_FW_WR_FLOWID(toep->tid));
	kwr->protocol = get_proto_ver(k_ctx->proto_ver);
	kwr->mfs = htons(k_ctx->frag_size);
	kwr->reneg_to_write_rx = k_ctx->l_p_key;

	/* master command */
	kwr->cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
	    V_T5_ULP_MEMIO_ORDER(1) | V_T5_ULP_MEMIO_IMM(1));
	/* Data length in 32-byte units, key address in 32-byte units. */
	kwr->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(kctxlen >> 5));
	kwr->len16 = htobe32((toep->tid << 8) |
	    DIV_ROUND_UP(len - sizeof(struct work_request_hdr), 16));
	kwr->kaddr = htobe32(V_ULP_MEMIO_ADDR(keyid >> 5));

	/* sub command */
	kwr->sc_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	kwr->sc_len = htobe32(kctxlen);

	/* Key context payload follows the request header. */
	kctx = (struct tls_keyctx *)(kwr + 1);
	memset(kctx, 0, kctxlen);

	if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_TX) {
		tls_ofld->tx_key_addr = keyid;
		prepare_txkey_wr(kctx, k_ctx);
	} else if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
		tls_ofld->rx_key_addr = keyid;
		prepare_rxkey_wr(kctx, k_ctx);
	}

	/* Account for the WR's tx credits before sending it. */
	txsd = &toep->txsd[toep->txsd_pidx];
	txsd->tx_credits = DIV_ROUND_UP(len, 16);
	txsd->plen = 0;
	toep->tx_credits -= txsd->tx_credits;
	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
		toep->txsd_pidx = 0;
	toep->txsd_avail--;

	t4_wrq_tx(sc, wr);

	return (0);
}
575330884Sjhb
576330884Sjhb/* Store a key received from SSL in DDR. */
static int
program_key_context(struct tcpcb *tp, struct toepcb *toep,
    struct tls_key_context *uk_ctx)
{
	struct adapter *sc = td_adapter(toep->td);
	struct tls_ofld_info *tls_ofld = &toep->tls;
	struct tls_key_context *k_ctx;
	int error, key_offset;

	if (tp->t_state != TCPS_ESTABLISHED) {
		/*
		 * XXX: Matches Linux driver, but not sure this is a
		 * very appropriate error.
		 */
		return (ENOENT);
	}

	/* Stop timer on handshake completion */
	tls_stop_handshake_timer(toep);

	toep->flags &= ~TPF_FORCE_CREDITS;

	CTR4(KTR_CXGBE, "%s: tid %d %s proto_ver %#x", __func__, toep->tid,
	    G_KEY_GET_LOC(uk_ctx->l_p_key) == KEY_WRITE_RX ? "KEY_WRITE_RX" :
	    "KEY_WRITE_TX", uk_ctx->proto_ver);

	/* Rx keys require the connection to be in TLS ULP mode. */
	if (G_KEY_GET_LOC(uk_ctx->l_p_key) == KEY_WRITE_RX &&
	    toep->ulp_mode != ULP_MODE_TLS)
		return (EOPNOTSUPP);

	/* Don't copy the 'tx' and 'rx' fields. */
	k_ctx = &tls_ofld->k_ctx;
	memcpy(&k_ctx->l_p_key, &uk_ctx->l_p_key,
	    sizeof(*k_ctx) - offsetof(struct tls_key_context, l_p_key));

	/* TLS version != 1.1 and !1.2 OR DTLS != 1.2 */
	if (get_proto_ver(k_ctx->proto_ver) > DTLS_1_2_VERSION) {
		/* Unsupported version: mark the key invalid and bail. */
		if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
			tls_ofld->rx_key_addr = -1;
			t4_clear_rx_quiesce(toep);
		} else {
			tls_ofld->tx_key_addr = -1;
		}
		return (0);
	}

	if (k_ctx->state.enc_mode == CH_EVP_CIPH_GCM_MODE) {
		k_ctx->iv_size = 4;
		k_ctx->mac_first = 0;
		k_ctx->hmac_ctrl = 0;
	} else {
		k_ctx->iv_size = 8; /* for CBC, iv is 16B, unit of 2B */
		k_ctx->mac_first = 1;
	}

	/* Pre-compute the crypto engine command words for Tx records. */
	tls_ofld->scmd0.seqno_numivs =
		(V_SCMD_SEQ_NO_CTRL(3) |
		 V_SCMD_PROTO_VERSION(get_proto_ver(k_ctx->proto_ver)) |
		 V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
		 V_SCMD_CIPH_AUTH_SEQ_CTRL((k_ctx->mac_first == 0)) |
		 V_SCMD_CIPH_MODE(k_ctx->state.enc_mode) |
		 V_SCMD_AUTH_MODE(k_ctx->state.auth_mode) |
		 V_SCMD_HMAC_CTRL(k_ctx->hmac_ctrl) |
		 V_SCMD_IV_SIZE(k_ctx->iv_size));

	tls_ofld->scmd0.ivgen_hdrlen =
		(V_SCMD_IV_GEN_CTRL(k_ctx->iv_ctrl) |
		 V_SCMD_KEY_CTX_INLINE(0) |
		 V_SCMD_TLS_FRAG_ENABLE(1));

	tls_ofld->mac_length = k_ctx->mac_secret_size;

	if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
		k_ctx->rx = uk_ctx->rx;
		/* Dont initialize key for re-neg */
		if (!G_KEY_CLR_LOC(k_ctx->l_p_key))
			tls_ofld->rx_key_addr = -1;
	} else {
		k_ctx->tx = uk_ctx->tx;
		/* Dont initialize key for re-neg */
		if (!G_KEY_CLR_LOC(k_ctx->l_p_key))
			tls_ofld->tx_key_addr = -1;
	}

	/* Flush pending data before new Tx key becomes active */
	if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_TX) {
		struct sockbuf *sb;

		/* XXX: This might not drain everything. */
		t4_push_frames(sc, toep, 0);
		sb = &toep->inp->inp_socket->so_snd;
		SOCKBUF_LOCK(sb);

		/* XXX: This asserts that everything has been pushed. */
		MPASS(sb->sb_sndptr == NULL || sb->sb_sndptr->m_next == NULL);
		sb->sb_sndptr = NULL;
		tls_ofld->sb_off = sbavail(sb);
		SOCKBUF_UNLOCK(sb);
		tls_ofld->tx_seq_no = 0;
	}

	if ((G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) ||
	    (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_DDR)) {
		/* Write the key context into adapter key memory. */
		error = tls_program_key_id(toep, k_ctx);
		if (error) {
			/* XXX: Only clear quiesce for KEY_WRITE_RX? */
			t4_clear_rx_quiesce(toep);
			return (error);
		}
	}

	if (G_KEY_GET_LOC(k_ctx->l_p_key) == KEY_WRITE_RX) {
		/*
		 * RX key tags are an index into the key portion of MA
		 * memory stored as an offset from the base address in
		 * units of 64 bytes.
		 */
		key_offset = tls_ofld->rx_key_addr - sc->vres.key.start;
		t4_set_tls_keyid(toep, key_offset / 64);
		/* Turn on full TLS receive processing in the TCB. */
		t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW,
				 V_TCB_ULP_RAW(M_TCB_ULP_RAW),
				 V_TCB_ULP_RAW((V_TF_TLS_KEY_SIZE(3) |
						V_TF_TLS_CONTROL(1) |
						V_TF_TLS_ACTIVE(1) |
						V_TF_TLS_ENABLE(1))));
		t4_set_tls_tcb_field(toep, W_TCB_TLS_SEQ,
				 V_TCB_TLS_SEQ(M_TCB_TLS_SEQ),
				 V_TCB_TLS_SEQ(0));
		t4_clear_rx_quiesce(toep);
	} else {
		unsigned short pdus_per_ulp;

		if (tls_ofld->key_location == TLS_SFO_WR_CONTEXTLOC_IMMEDIATE)
			tls_ofld->tx_key_addr = 1;

		/* Recompute the Tx sizing parameters for the new key. */
		tls_ofld->fcplenmax = get_tp_plen_max(tls_ofld);
		tls_ofld->expn_per_ulp = tls_expansion_size(toep,
				tls_ofld->fcplenmax, 1, &pdus_per_ulp);
		tls_ofld->pdus_per_ulp = pdus_per_ulp;
		tls_ofld->adjusted_plen = tls_ofld->pdus_per_ulp *
			((tls_ofld->expn_per_ulp/tls_ofld->pdus_per_ulp) +
			 tls_ofld->k_ctx.frag_size);
	}

	return (0);
}
723330884Sjhb
724330884Sjhb/*
725330884Sjhb * In some cases a client connection can hang without sending the
726330884Sjhb * ServerHelloDone message from the NIC to the host.  Send a dummy
727330884Sjhb * RX_DATA_ACK with RX_MODULATE to unstick the connection.
728330884Sjhb */
/* Callout handler: nudge a stuck handshake and re-arm the timer. */
static void
tls_send_handshake_ack(void *arg)
{
	struct toepcb *toep = arg;
	struct tls_ofld_info *tls_ofld = &toep->tls;
	struct adapter *sc = td_adapter(toep->td);

	/*
	 * XXX: Does not have the t4_get_tcb() checks to refine the
	 * workaround.
	 */
	callout_schedule(&tls_ofld->handshake_timer, TLS_SRV_HELLO_RD_TM * hz);

	CTR2(KTR_CXGBE, "%s: tid %d sending RX_DATA_ACK", __func__, toep->tid);
	send_rx_modulate(sc, toep);
}
745330884Sjhb
/* Arm the handshake timer under the global handshake lock. */
static void
tls_start_handshake_timer(struct toepcb *toep)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;

	mtx_lock(&tls_handshake_lock);
	callout_reset(&tls_ofld->handshake_timer, TLS_SRV_HELLO_BKOFF_TM * hz,
	    tls_send_handshake_ack, toep);
	mtx_unlock(&tls_handshake_lock);
}
756330884Sjhb
/*
 * Disarm the handshake timer.  Uses callout_stop() (not drain) so it
 * is safe in non-sleepable contexts; the global lock serializes it
 * against tls_send_handshake_ack() re-arming the callout.
 */
void
tls_stop_handshake_timer(struct toepcb *toep)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;

	mtx_lock(&tls_handshake_lock);
	callout_stop(&tls_ofld->handshake_timer);
	mtx_unlock(&tls_handshake_lock);
}
766330884Sjhb
/*
 * Socket-option handler for the TCP_TLSOM_* options used by the
 * userland TLS offload interface.  SET options program a key context
 * into the card or clear hardware TLS state; the lone GET option
 * reports the connection's offload capability.  Returns 0 or an
 * errno value.
 */
int
t4_ctloutput_tls(struct socket *so, struct sockopt *sopt)
{
	struct tls_key_context uk_ctx;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct toepcb *toep;
	int error, optval;

	error = 0;
	/*
	 * Copy in the key context before acquiring the inpcb lock:
	 * sooptcopyin() performs a copyin which may fault/sleep.
	 */
	if (sopt->sopt_dir == SOPT_SET &&
	    sopt->sopt_name == TCP_TLSOM_SET_TLS_CONTEXT) {
		error = sooptcopyin(sopt, &uk_ctx, sizeof(uk_ctx),
		    sizeof(uk_ctx));
		if (error)
			return (error);
	}

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("tcp_ctloutput: inp == NULL"));
	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_WUNLOCK(inp);
		return (ECONNRESET);
	}
	tp = intotcpcb(inp);
	toep = tp->t_toe;
	/*
	 * Each case below is responsible for dropping the inpcb lock
	 * on every path before returning.
	 */
	switch (sopt->sopt_dir) {
	case SOPT_SET:
		switch (sopt->sopt_name) {
		case TCP_TLSOM_SET_TLS_CONTEXT:
			error = program_key_context(tp, toep, &uk_ctx);
			INP_WUNLOCK(inp);
			break;
		case TCP_TLSOM_CLR_TLS_TOM:
			/* Only meaningful for connections in TLS ULP mode. */
			if (toep->ulp_mode == ULP_MODE_TLS) {
				CTR2(KTR_CXGBE, "%s: tid %d CLR_TLS_TOM",
				    __func__, toep->tid);
				tls_clr_ofld_mode(toep);
			} else
				error = EOPNOTSUPP;
			INP_WUNLOCK(inp);
			break;
		case TCP_TLSOM_CLR_QUIES:
			if (toep->ulp_mode == ULP_MODE_TLS) {
				CTR2(KTR_CXGBE, "%s: tid %d CLR_QUIES",
				    __func__, toep->tid);
				tls_clr_quiesce(toep);
			} else
				error = EOPNOTSUPP;
			INP_WUNLOCK(inp);
			break;
		default:
			INP_WUNLOCK(inp);
			error = EOPNOTSUPP;
			break;
		}
		break;
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case TCP_TLSOM_GET_TLS_TOM:
			/*
			 * TLS TX is permitted on any TOE socket, but
			 * TLS RX requires a TLS ULP mode.
			 */
			optval = TLS_TOM_NONE;
			if (can_tls_offload(td_adapter(toep->td))) {
				switch (toep->ulp_mode) {
				case ULP_MODE_NONE:
				case ULP_MODE_TCPDDP:
					optval = TLS_TOM_TXONLY;
					break;
				case ULP_MODE_TLS:
					optval = TLS_TOM_BOTH;
					break;
				}
			}
			CTR3(KTR_CXGBE, "%s: tid %d GET_TLS_TOM = %d",
			    __func__, toep->tid, optval);
			/* Drop the lock before copying out to userland. */
			INP_WUNLOCK(inp);
			error = sooptcopyout(sopt, &optval, sizeof(optval));
			break;
		default:
			INP_WUNLOCK(inp);
			error = EOPNOTSUPP;
			break;
		}
		break;
	}
	return (error);
}
858330884Sjhb
859330884Sjhbvoid
860330884Sjhbtls_init_toep(struct toepcb *toep)
861330884Sjhb{
862330884Sjhb	struct tls_ofld_info *tls_ofld = &toep->tls;
863330884Sjhb
864330884Sjhb	tls_ofld->key_location = TLS_SFO_WR_CONTEXTLOC_DDR;
865330884Sjhb	tls_ofld->rx_key_addr = -1;
866330884Sjhb	tls_ofld->tx_key_addr = -1;
867330884Sjhb	if (toep->ulp_mode == ULP_MODE_TLS)
868330884Sjhb		callout_init_mtx(&tls_ofld->handshake_timer,
869330884Sjhb		    &tls_handshake_lock, 0);
870330884Sjhb}
871330884Sjhb
/*
 * Called once a TLS-mode connection is established: turn on PDU
 * extraction in the hardware TCB and start the handshake-workaround
 * timer.
 */
void
tls_establish(struct toepcb *toep)
{

	/*
	 * Enable PDU extraction.
	 *
	 * XXX: Supposedly this should be done by the firmware when
	 * the ULP_MODE FLOWC parameter is set in send_flowc_wr(), but
	 * in practice this seems to be required.
	 */
	CTR2(KTR_CXGBE, "%s: tid %d setting TLS_ENABLE", __func__, toep->tid);
	t4_set_tls_tcb_field(toep, W_TCB_ULP_RAW, V_TCB_ULP_RAW(M_TCB_ULP_RAW),
	    V_TCB_ULP_RAW(V_TF_TLS_ENABLE(1)));

	/* NOTE(review): presumably forces credit returns while the TLS
	 * handshake is pending — confirm against TPF_FORCE_CREDITS users. */
	toep->flags |= TPF_FORCE_CREDITS;

	tls_start_handshake_timer(toep);
}
891330884Sjhb
/*
 * Tear down per-connection TLS state: stop the handshake timer (only
 * armed for TLS ULP mode, see tls_init_toep()) and release any key
 * memory held by the connection.
 */
void
tls_uninit_toep(struct toepcb *toep)
{

	if (toep->ulp_mode == ULP_MODE_TLS)
		tls_stop_handshake_timer(toep);
	clear_tls_keyid(toep);
}
900330884Sjhb
/*
 * Credits are 16-byte units of work-request space.  MAX_OFLD_TX_CREDITS
 * is the most that fits in a single work request;
 * MIN_OFLD_TLSTX_CREDITS is the minimum needed for the smallest TLS TX
 * WR: header + CPL + key context + one cipher block of payload (+1).
 */
#define MAX_OFLD_TX_CREDITS (SGE_MAX_WR_LEN / 16)
#define	MIN_OFLD_TLSTX_CREDITS(toep)					\
	(howmany(sizeof(struct fw_tlstx_data_wr) +			\
	    sizeof(struct cpl_tx_tls_sfo) + key_size((toep)) +		\
	    CIPHER_BLOCK_SIZE + 1, 16))
906330884Sjhb
907330884Sjhbstatic inline u_int
908330884Sjhbmax_imm_tls_space(int tx_credits)
909330884Sjhb{
910330884Sjhb	const int n = 2;	/* Use only up to 2 desc for imm. data WR */
911330884Sjhb	int space;
912330884Sjhb
913330884Sjhb	KASSERT(tx_credits >= 0 &&
914330884Sjhb		tx_credits <= MAX_OFLD_TX_CREDITS,
915330884Sjhb		("%s: %d credits", __func__, tx_credits));
916330884Sjhb
917330884Sjhb	if (tx_credits >= (n * EQ_ESIZE) / 16)
918330884Sjhb		space = (n * EQ_ESIZE);
919330884Sjhb	else
920330884Sjhb		space = tx_credits * 16;
921330884Sjhb	return (space);
922330884Sjhb}
923330884Sjhb
924330884Sjhbstatic int
925330884Sjhbcount_mbuf_segs(struct mbuf *m, int skip, int len, int *max_nsegs_1mbufp)
926330884Sjhb{
927330884Sjhb	int max_nsegs_1mbuf, n, nsegs;
928330884Sjhb
929330884Sjhb	while (skip >= m->m_len) {
930330884Sjhb		skip -= m->m_len;
931330884Sjhb		m = m->m_next;
932330884Sjhb	}
933330884Sjhb
934330884Sjhb	nsegs = 0;
935330884Sjhb	max_nsegs_1mbuf = 0;
936330884Sjhb	while (len > 0) {
937330884Sjhb		n = sglist_count(mtod(m, char *) + skip, m->m_len - skip);
938330884Sjhb		if (n > max_nsegs_1mbuf)
939330884Sjhb			max_nsegs_1mbuf = n;
940330884Sjhb		nsegs += n;
941330884Sjhb		len -= m->m_len - skip;
942330884Sjhb		skip = 0;
943330884Sjhb		m = m->m_next;
944330884Sjhb	}
945330884Sjhb	*max_nsegs_1mbufp = max_nsegs_1mbuf;
946330884Sjhb	return (nsegs);
947330884Sjhb}
948330884Sjhb
/*
 * Fill out a FW_TLSTX_DATA_WR work-request header.
 *
 * immdlen: bytes of immediate data after the CPL (0 when the payload
 *          is carried in an SGL)
 * plen:    TLS payload length; expn: expected ciphertext expansion
 * pdus:    number of TLS PDUs (one IV each)
 * credits: total WR length in 16-byte units
 * imm_ivs: non-zero when IVs are immediate rather than in a DSGL
 */
static void
write_tlstx_wr(struct fw_tlstx_data_wr *txwr, struct toepcb *toep,
    unsigned int immdlen, unsigned int plen, unsigned int expn,
    unsigned int pdus, uint8_t credits, int shove, int imm_ivs)
{
	struct tls_ofld_info *tls_ofld = &toep->tls;
	unsigned int len = plen + expn;	/* total wire bytes incl. expansion */

	txwr->op_to_immdlen = htobe32(V_WR_OP(FW_TLSTX_DATA_WR) |
	    V_FW_TLSTX_DATA_WR_COMPL(1) |
	    V_FW_TLSTX_DATA_WR_IMMDLEN(immdlen));
	txwr->flowid_len16 = htobe32(V_FW_TLSTX_DATA_WR_FLOWID(toep->tid) |
	    V_FW_TLSTX_DATA_WR_LEN16(credits));
	txwr->plen = htobe32(len);
	txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(ULP_MODE_TLS) |
	    V_TX_URG(0) | /* F_T6_TX_FORCE | */ V_TX_SHOVE(shove));
	/* IVDSGL is the inverse of imm_ivs: set when IVs arrive via DSGL. */
	txwr->ctxloc_to_exp = htobe32(V_FW_TLSTX_DATA_WR_NUMIVS(pdus) |
	    V_FW_TLSTX_DATA_WR_EXP(expn) |
	    V_FW_TLSTX_DATA_WR_CTXLOC(tls_ofld->key_location) |
	    V_FW_TLSTX_DATA_WR_IVDSGL(!imm_ivs) |
	    V_FW_TLSTX_DATA_WR_KEYSIZE(tls_ofld->k_ctx.tx_key_info_size >> 4));
	txwr->mfs = htobe16(tls_ofld->k_ctx.frag_size);
	txwr->adjustedplen_pkd = htobe16(
	    V_FW_TLSTX_DATA_WR_ADJUSTEDPLEN(tls_ofld->adjusted_plen));
	txwr->expinplenmax_pkd = htobe16(
	    V_FW_TLSTX_DATA_WR_EXPINPLENMAX(tls_ofld->expn_per_ulp));
	txwr->pdusinplenmax_pkd = htobe16(
	    V_FW_TLSTX_DATA_WR_PDUSINPLENMAX(tls_ofld->pdus_per_ulp));
}
978330884Sjhb
979330884Sjhbstatic void
980330884Sjhbwrite_tlstx_cpl(struct cpl_tx_tls_sfo *cpl, struct toepcb *toep,
981330884Sjhb    struct tls_hdr *tls_hdr, unsigned int plen, unsigned int pdus)
982330884Sjhb{
983330884Sjhb	struct tls_ofld_info *tls_ofld = &toep->tls;
984330884Sjhb	int data_type, seglen;
985330884Sjhb
986330884Sjhb	if (plen < tls_ofld->k_ctx.frag_size)
987330884Sjhb		seglen = plen;
988330884Sjhb	else
989330884Sjhb		seglen = tls_ofld->k_ctx.frag_size;
990330884Sjhb	data_type = tls_content_type(tls_hdr->type);
991330884Sjhb	cpl->op_to_seg_len = htobe32(V_CPL_TX_TLS_SFO_OPCODE(CPL_TX_TLS_SFO) |
992330884Sjhb	    V_CPL_TX_TLS_SFO_DATA_TYPE(data_type) |
993330884Sjhb	    V_CPL_TX_TLS_SFO_CPL_LEN(2) | V_CPL_TX_TLS_SFO_SEG_LEN(seglen));
994330884Sjhb	cpl->pld_len = htobe32(plen);
995330884Sjhb	if (data_type == CPL_TX_TLS_SFO_TYPE_HEARTBEAT)
996330884Sjhb		cpl->type_protover = htobe32(
997330884Sjhb		    V_CPL_TX_TLS_SFO_TYPE(tls_hdr->type));
998330884Sjhb	cpl->seqno_numivs = htobe32(tls_ofld->scmd0.seqno_numivs |
999330884Sjhb	    V_SCMD_NUM_IVS(pdus));
1000330884Sjhb	cpl->ivgen_hdrlen = htobe32(tls_ofld->scmd0.ivgen_hdrlen);
1001330884Sjhb	cpl->scmd1 = htobe64(tls_ofld->tx_seq_no);
1002330884Sjhb	tls_ofld->tx_seq_no += pdus;
1003330884Sjhb}
1004330884Sjhb
1005330884Sjhb/*
1006330884Sjhb * Similar to write_tx_sgl() except that it accepts an optional
1007330884Sjhb * trailer buffer for IVs.
1008330884Sjhb */
1009330884Sjhbstatic void
1010330884Sjhbwrite_tlstx_sgl(void *dst, struct mbuf *start, int skip, int plen,
1011330884Sjhb    void *iv_buffer, int iv_len, int nsegs, int n)
1012330884Sjhb{
1013330884Sjhb	struct mbuf *m;
1014330884Sjhb	struct ulptx_sgl *usgl = dst;
1015330884Sjhb	int i, j, rc;
1016330884Sjhb	struct sglist sg;
1017330884Sjhb	struct sglist_seg segs[n];
1018330884Sjhb
1019330884Sjhb	KASSERT(nsegs > 0, ("%s: nsegs 0", __func__));
1020330884Sjhb
1021330884Sjhb	sglist_init(&sg, n, segs);
1022330884Sjhb	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
1023330884Sjhb	    V_ULPTX_NSGE(nsegs));
1024330884Sjhb
1025330884Sjhb	for (m = start; skip >= m->m_len; m = m->m_next)
1026330884Sjhb		skip -= m->m_len;
1027330884Sjhb
1028330884Sjhb	i = -1;
1029330884Sjhb	for (m = start; plen > 0; m = m->m_next) {
1030330884Sjhb		rc = sglist_append(&sg, mtod(m, char *) + skip,
1031330884Sjhb		    m->m_len - skip);
1032330884Sjhb		if (__predict_false(rc != 0))
1033330884Sjhb			panic("%s: sglist_append %d", __func__, rc);
1034330884Sjhb		plen -= m->m_len - skip;
1035330884Sjhb		skip = 0;
1036330884Sjhb
1037330884Sjhb		for (j = 0; j < sg.sg_nseg; i++, j++) {
1038330884Sjhb			if (i < 0) {
1039330884Sjhb				usgl->len0 = htobe32(segs[j].ss_len);
1040330884Sjhb				usgl->addr0 = htobe64(segs[j].ss_paddr);
1041330884Sjhb			} else {
1042330884Sjhb				usgl->sge[i / 2].len[i & 1] =
1043330884Sjhb				    htobe32(segs[j].ss_len);
1044330884Sjhb				usgl->sge[i / 2].addr[i & 1] =
1045330884Sjhb				    htobe64(segs[j].ss_paddr);
1046330884Sjhb			}
1047330884Sjhb#ifdef INVARIANTS
1048330884Sjhb			nsegs--;
1049330884Sjhb#endif
1050330884Sjhb		}
1051330884Sjhb		sglist_reset(&sg);
1052330884Sjhb	}
1053330884Sjhb	if (iv_buffer != NULL) {
1054330884Sjhb		rc = sglist_append(&sg, iv_buffer, iv_len);
1055330884Sjhb		if (__predict_false(rc != 0))
1056330884Sjhb			panic("%s: sglist_append %d", __func__, rc);
1057330884Sjhb
1058330884Sjhb		for (j = 0; j < sg.sg_nseg; i++, j++) {
1059330884Sjhb			if (i < 0) {
1060330884Sjhb				usgl->len0 = htobe32(segs[j].ss_len);
1061330884Sjhb				usgl->addr0 = htobe64(segs[j].ss_paddr);
1062330884Sjhb			} else {
1063330884Sjhb				usgl->sge[i / 2].len[i & 1] =
1064330884Sjhb				    htobe32(segs[j].ss_len);
1065330884Sjhb				usgl->sge[i / 2].addr[i & 1] =
1066330884Sjhb				    htobe64(segs[j].ss_paddr);
1067330884Sjhb			}
1068330884Sjhb#ifdef INVARIANTS
1069330884Sjhb			nsegs--;
1070330884Sjhb#endif
1071330884Sjhb		}
1072330884Sjhb	}
1073330884Sjhb	if (i & 1)
1074330884Sjhb		usgl->sge[i / 2].len[1] = htobe32(0);
1075330884Sjhb	KASSERT(nsegs == 0, ("%s: nsegs %d, start %p, iv_buffer %p",
1076330884Sjhb	    __func__, nsegs, start, iv_buffer));
1077330884Sjhb}
1078330884Sjhb
1079330884Sjhb/*
1080330884Sjhb * Similar to t4_push_frames() but handles TLS sockets when TLS offload
1081330884Sjhb * is enabled.  Rather than transmitting bulk data, the socket buffer
1082330884Sjhb * contains TLS records.  The work request requires a full TLS record,
1083330884Sjhb * so batch mbufs up until a full TLS record is seen.  This requires
1084330884Sjhb * reading the TLS header out of the start of each record to determine
1085330884Sjhb * its length.
1086330884Sjhb */
1087330884Sjhbvoid
1088330884Sjhbt4_push_tls_records(struct adapter *sc, struct toepcb *toep, int drop)
1089330884Sjhb{
1090330884Sjhb	struct tls_hdr thdr;
1091330884Sjhb	struct mbuf *sndptr;
1092330884Sjhb	struct fw_tlstx_data_wr *txwr;
1093330884Sjhb	struct cpl_tx_tls_sfo *cpl;
1094330884Sjhb	struct wrqe *wr;
1095330884Sjhb	u_int plen, nsegs, credits, space, max_nsegs_1mbuf, wr_len;
1096330884Sjhb	u_int expn_size, iv_len, pdus, sndptroff;
1097330884Sjhb	struct tls_ofld_info *tls_ofld = &toep->tls;
1098330884Sjhb	struct inpcb *inp = toep->inp;
1099330884Sjhb	struct tcpcb *tp = intotcpcb(inp);
1100330884Sjhb	struct socket *so = inp->inp_socket;
1101330884Sjhb	struct sockbuf *sb = &so->so_snd;
1102330884Sjhb	int tls_size, tx_credits, shove, /* compl,*/ sowwakeup;
1103330884Sjhb	struct ofld_tx_sdesc *txsd;
1104330884Sjhb	bool imm_ivs, imm_payload;
1105330884Sjhb	void *iv_buffer, *iv_dst, *buf;
1106330884Sjhb
1107330884Sjhb	INP_WLOCK_ASSERT(inp);
1108330884Sjhb	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
1109330884Sjhb	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));
1110330884Sjhb
1111330884Sjhb	KASSERT(toep->ulp_mode == ULP_MODE_NONE ||
1112330884Sjhb	    toep->ulp_mode == ULP_MODE_TCPDDP || toep->ulp_mode == ULP_MODE_TLS,
1113330884Sjhb	    ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));
1114330884Sjhb	KASSERT(tls_tx_key(toep),
1115330884Sjhb	    ("%s: TX key not set for toep %p", __func__, toep));
1116330884Sjhb
1117330884Sjhb#ifdef VERBOSE_TRACES
1118330884Sjhb	CTR4(KTR_CXGBE, "%s: tid %d toep flags %#x tp flags %#x drop %d",
1119330884Sjhb	    __func__, toep->tid, toep->flags, tp->t_flags);
1120330884Sjhb#endif
1121330884Sjhb	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
1122330884Sjhb		return;
1123330884Sjhb
1124330884Sjhb#ifdef RATELIMIT
1125330884Sjhb	if (__predict_false(inp->inp_flags2 & INP_RATE_LIMIT_CHANGED) &&
1126330884Sjhb	    (update_tx_rate_limit(sc, toep, so->so_max_pacing_rate) == 0)) {
1127330884Sjhb		inp->inp_flags2 &= ~INP_RATE_LIMIT_CHANGED;
1128330884Sjhb	}
1129330884Sjhb#endif
1130330884Sjhb
1131330884Sjhb	/*
1132330884Sjhb	 * This function doesn't resume by itself.  Someone else must clear the
1133330884Sjhb	 * flag and call this function.
1134330884Sjhb	 */
1135330884Sjhb	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
1136330884Sjhb		KASSERT(drop == 0,
1137330884Sjhb		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
1138330884Sjhb		return;
1139330884Sjhb	}
1140330884Sjhb
1141330884Sjhb	txsd = &toep->txsd[toep->txsd_pidx];
1142330884Sjhb	for (;;) {
1143330884Sjhb		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
1144330884Sjhb		space = max_imm_tls_space(tx_credits);
1145330884Sjhb		wr_len = sizeof(struct fw_tlstx_data_wr) +
1146330884Sjhb		    sizeof(struct cpl_tx_tls_sfo) + key_size(toep);
1147330884Sjhb		if (wr_len + CIPHER_BLOCK_SIZE + 1 > space) {
1148330884Sjhb#ifdef VERBOSE_TRACES
1149330884Sjhb			CTR5(KTR_CXGBE,
1150330884Sjhb			    "%s: tid %d tx_credits %d min_wr %d space %d",
1151330884Sjhb			    __func__, toep->tid, tx_credits, wr_len +
1152330884Sjhb			    CIPHER_BLOCK_SIZE + 1, space);
1153330884Sjhb#endif
1154330884Sjhb			return;
1155330884Sjhb		}
1156330884Sjhb
1157330884Sjhb		SOCKBUF_LOCK(sb);
1158330884Sjhb		sowwakeup = drop;
1159330884Sjhb		if (drop) {
1160330884Sjhb			sbdrop_locked(sb, drop);
1161330884Sjhb			MPASS(tls_ofld->sb_off >= drop);
1162330884Sjhb			tls_ofld->sb_off -= drop;
1163330884Sjhb			drop = 0;
1164330884Sjhb		}
1165330884Sjhb
1166330884Sjhb		/*
1167330884Sjhb		 * Send a FIN if requested, but only if there's no
1168330884Sjhb		 * more data to send.
1169330884Sjhb		 */
1170345664Sjhb		if (sbavail(sb) == tls_ofld->sb_off &&
1171345664Sjhb		    toep->flags & TPF_SEND_FIN) {
1172330884Sjhb			if (sowwakeup)
1173330884Sjhb				sowwakeup_locked(so);
1174330884Sjhb			else
1175330884Sjhb				SOCKBUF_UNLOCK(sb);
1176330884Sjhb			SOCKBUF_UNLOCK_ASSERT(sb);
1177330884Sjhb			t4_close_conn(sc, toep);
1178330884Sjhb			return;
1179330884Sjhb		}
1180330884Sjhb
1181330884Sjhb		if (sbavail(sb) < tls_ofld->sb_off + TLS_HEADER_LENGTH) {
1182330884Sjhb			/*
1183330884Sjhb			 * A full TLS header is not yet queued, stop
1184330884Sjhb			 * for now until more data is added to the
1185345664Sjhb			 * socket buffer.  However, if the connection
1186345664Sjhb			 * has been closed, we will never get the rest
1187345664Sjhb			 * of the header so just discard the partial
1188345664Sjhb			 * header and close the connection.
1189330884Sjhb			 */
1190330884Sjhb#ifdef VERBOSE_TRACES
1191345664Sjhb			CTR5(KTR_CXGBE, "%s: tid %d sbavail %d sb_off %d%s",
1192345664Sjhb			    __func__, toep->tid, sbavail(sb), tls_ofld->sb_off,
1193345664Sjhb			    toep->flags & TPF_SEND_FIN ? "" : " SEND_FIN");
1194330884Sjhb#endif
1195330884Sjhb			if (sowwakeup)
1196330884Sjhb				sowwakeup_locked(so);
1197330884Sjhb			else
1198330884Sjhb				SOCKBUF_UNLOCK(sb);
1199330884Sjhb			SOCKBUF_UNLOCK_ASSERT(sb);
1200345664Sjhb			if (toep->flags & TPF_SEND_FIN)
1201345664Sjhb				t4_close_conn(sc, toep);
1202330884Sjhb			return;
1203330884Sjhb		}
1204330884Sjhb
1205330884Sjhb		/* Read the header of the next TLS record. */
1206330884Sjhb		sndptr = sbsndmbuf(sb, tls_ofld->sb_off, &sndptroff);
1207330884Sjhb		MPASS(!IS_AIOTX_MBUF(sndptr));
1208330884Sjhb		m_copydata(sndptr, sndptroff, sizeof(thdr), (caddr_t)&thdr);
1209330884Sjhb		tls_size = htons(thdr.length);
1210330884Sjhb		plen = TLS_HEADER_LENGTH + tls_size;
1211330884Sjhb		pdus = howmany(tls_size, tls_ofld->k_ctx.frag_size);
1212330884Sjhb		iv_len = pdus * CIPHER_BLOCK_SIZE;
1213330884Sjhb
1214330884Sjhb		if (sbavail(sb) < tls_ofld->sb_off + plen) {
1215330884Sjhb			/*
1216330884Sjhb			 * The full TLS record is not yet queued, stop
1217330884Sjhb			 * for now until more data is added to the
1218345664Sjhb			 * socket buffer.  However, if the connection
1219345664Sjhb			 * has been closed, we will never get the rest
1220345664Sjhb			 * of the record so just discard the partial
1221345664Sjhb			 * record and close the connection.
1222330884Sjhb			 */
1223330884Sjhb#ifdef VERBOSE_TRACES
1224345664Sjhb			CTR6(KTR_CXGBE,
1225345664Sjhb			    "%s: tid %d sbavail %d sb_off %d plen %d%s",
1226330884Sjhb			    __func__, toep->tid, sbavail(sb), tls_ofld->sb_off,
1227345664Sjhb			    plen, toep->flags & TPF_SEND_FIN ? "" :
1228345664Sjhb			    " SEND_FIN");
1229330884Sjhb#endif
1230330884Sjhb			if (sowwakeup)
1231330884Sjhb				sowwakeup_locked(so);
1232330884Sjhb			else
1233330884Sjhb				SOCKBUF_UNLOCK(sb);
1234330884Sjhb			SOCKBUF_UNLOCK_ASSERT(sb);
1235345664Sjhb			if (toep->flags & TPF_SEND_FIN)
1236345664Sjhb				t4_close_conn(sc, toep);
1237330884Sjhb			return;
1238330884Sjhb		}
1239330884Sjhb
1240330884Sjhb		/* Shove if there is no additional data pending. */
1241330884Sjhb		shove = (sbavail(sb) == tls_ofld->sb_off + plen) &&
1242330884Sjhb		    !(tp->t_flags & TF_MORETOCOME);
1243330884Sjhb
1244330884Sjhb		if (sb->sb_flags & SB_AUTOSIZE &&
1245330884Sjhb		    V_tcp_do_autosndbuf &&
1246330884Sjhb		    sb->sb_hiwat < V_tcp_autosndbuf_max &&
1247330884Sjhb		    sbused(sb) >= sb->sb_hiwat * 7 / 8) {
1248330884Sjhb			int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc,
1249330884Sjhb			    V_tcp_autosndbuf_max);
1250330884Sjhb
1251330884Sjhb			if (!sbreserve_locked(sb, newsize, so, NULL))
1252330884Sjhb				sb->sb_flags &= ~SB_AUTOSIZE;
1253330884Sjhb			else
1254330884Sjhb				sowwakeup = 1;	/* room available */
1255330884Sjhb		}
1256330884Sjhb		if (sowwakeup)
1257330884Sjhb			sowwakeup_locked(so);
1258330884Sjhb		else
1259330884Sjhb			SOCKBUF_UNLOCK(sb);
1260330884Sjhb		SOCKBUF_UNLOCK_ASSERT(sb);
1261330884Sjhb
1262330884Sjhb		if (__predict_false(toep->flags & TPF_FIN_SENT))
1263330884Sjhb			panic("%s: excess tx.", __func__);
1264330884Sjhb
1265330884Sjhb		/* Determine whether to use immediate vs SGL. */
1266330884Sjhb		imm_payload = false;
1267330884Sjhb		imm_ivs = false;
1268330884Sjhb		if (wr_len + iv_len <= space) {
1269330884Sjhb			imm_ivs = true;
1270330884Sjhb			wr_len += iv_len;
1271330884Sjhb			if (wr_len + tls_size <= space) {
1272330884Sjhb				wr_len += tls_size;
1273330884Sjhb				imm_payload = true;
1274330884Sjhb			}
1275330884Sjhb		}
1276330884Sjhb
1277330884Sjhb		/* Allocate space for IVs if needed. */
1278330884Sjhb		if (!imm_ivs) {
1279330884Sjhb			iv_buffer = malloc(iv_len, M_CXGBE, M_NOWAIT);
1280330884Sjhb			if (iv_buffer == NULL) {
1281330884Sjhb				/*
1282330884Sjhb				 * XXX: How to restart this?
1283330884Sjhb				 */
1284330884Sjhb				if (sowwakeup)
1285330884Sjhb					sowwakeup_locked(so);
1286330884Sjhb				else
1287330884Sjhb					SOCKBUF_UNLOCK(sb);
1288330884Sjhb				SOCKBUF_UNLOCK_ASSERT(sb);
1289330884Sjhb				CTR3(KTR_CXGBE,
1290330884Sjhb			    "%s: tid %d failed to alloc IV space len %d",
1291330884Sjhb				    __func__, toep->tid, iv_len);
1292330884Sjhb				return;
1293330884Sjhb			}
1294330884Sjhb		} else
1295330884Sjhb			iv_buffer = NULL;
1296330884Sjhb
1297330884Sjhb		/* Determine size of SGL. */
1298330884Sjhb		nsegs = 0;
1299330884Sjhb		max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
1300330884Sjhb		if (!imm_payload) {
1301330884Sjhb			nsegs = count_mbuf_segs(sndptr, sndptroff +
1302330884Sjhb			    TLS_HEADER_LENGTH, tls_size, &max_nsegs_1mbuf);
1303330884Sjhb			if (!imm_ivs) {
1304330884Sjhb				int n = sglist_count(iv_buffer, iv_len);
1305330884Sjhb				nsegs += n;
1306330884Sjhb				if (n > max_nsegs_1mbuf)
1307330884Sjhb					max_nsegs_1mbuf = n;
1308330884Sjhb			}
1309330884Sjhb
1310330884Sjhb			/* Account for SGL in work request length. */
1311330884Sjhb			wr_len += sizeof(struct ulptx_sgl) +
1312330884Sjhb			    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
1313330884Sjhb		}
1314330884Sjhb
1315330884Sjhb		wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
1316330884Sjhb		if (wr == NULL) {
1317330884Sjhb			/* XXX: how will we recover from this? */
1318330884Sjhb			toep->flags |= TPF_TX_SUSPENDED;
1319330884Sjhb			return;
1320330884Sjhb		}
1321330884Sjhb
1322330884Sjhb#ifdef VERBOSE_TRACES
1323330884Sjhb		CTR5(KTR_CXGBE, "%s: tid %d TLS record %d len %#x pdus %d",
1324330884Sjhb		    __func__, toep->tid, thdr.type, tls_size, pdus);
1325330884Sjhb#endif
1326330884Sjhb		txwr = wrtod(wr);
1327330884Sjhb		cpl = (struct cpl_tx_tls_sfo *)(txwr + 1);
1328330884Sjhb		memset(txwr, 0, roundup2(wr_len, 16));
1329330884Sjhb		credits = howmany(wr_len, 16);
1330330884Sjhb		expn_size = tls_expansion_size(toep, tls_size, 0, NULL);
1331330884Sjhb		write_tlstx_wr(txwr, toep, imm_payload ? tls_size : 0,
1332330884Sjhb		    tls_size, expn_size, pdus, credits, shove, imm_ivs ? 1 : 0);
1333330884Sjhb		write_tlstx_cpl(cpl, toep, &thdr, tls_size, pdus);
1334330884Sjhb		tls_copy_tx_key(toep, cpl + 1);
1335330884Sjhb
1336330884Sjhb		/* Generate random IVs */
1337330884Sjhb		buf = (char *)(cpl + 1) + key_size(toep);
1338330884Sjhb		if (imm_ivs) {
1339330884Sjhb			MPASS(iv_buffer == NULL);
1340330884Sjhb			iv_dst = buf;
1341330884Sjhb			buf = (char *)iv_dst + iv_len;
1342330884Sjhb		} else
1343330884Sjhb			iv_dst = iv_buffer;
1344330884Sjhb		arc4rand(iv_dst, iv_len, 0);
1345330884Sjhb
1346330884Sjhb		if (imm_payload) {
1347330884Sjhb			m_copydata(sndptr, sndptroff + TLS_HEADER_LENGTH,
1348330884Sjhb			    tls_size, buf);
1349330884Sjhb		} else {
1350330884Sjhb			write_tlstx_sgl(buf, sndptr,
1351330884Sjhb			    sndptroff + TLS_HEADER_LENGTH, tls_size, iv_buffer,
1352330884Sjhb			    iv_len, nsegs, max_nsegs_1mbuf);
1353330884Sjhb		}
1354330884Sjhb
1355330884Sjhb		KASSERT(toep->tx_credits >= credits,
1356330884Sjhb			("%s: not enough credits", __func__));
1357330884Sjhb
1358330884Sjhb		toep->tx_credits -= credits;
1359330884Sjhb
1360330884Sjhb		tp->snd_nxt += plen;
1361330884Sjhb		tp->snd_max += plen;
1362330884Sjhb
1363330884Sjhb		SOCKBUF_LOCK(sb);
1364330884Sjhb		sbsndptr(sb, tls_ofld->sb_off, plen, &sndptroff);
1365330884Sjhb		tls_ofld->sb_off += plen;
1366330884Sjhb		SOCKBUF_UNLOCK(sb);
1367330884Sjhb
1368330884Sjhb		toep->flags |= TPF_TX_DATA_SENT;
1369330884Sjhb		if (toep->tx_credits < MIN_OFLD_TLSTX_CREDITS(toep))
1370330884Sjhb			toep->flags |= TPF_TX_SUSPENDED;
1371330884Sjhb
1372330884Sjhb		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
1373330884Sjhb		txsd->plen = plen;
1374330884Sjhb		txsd->tx_credits = credits;
1375330884Sjhb		txsd->iv_buffer = iv_buffer;
1376330884Sjhb		txsd++;
1377330884Sjhb		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
1378330884Sjhb			toep->txsd_pidx = 0;
1379330884Sjhb			txsd = &toep->txsd[0];
1380330884Sjhb		}
1381330884Sjhb		toep->txsd_avail--;
1382330884Sjhb
1383330884Sjhb		atomic_add_long(&toep->vi->pi->tx_tls_records, 1);
1384330884Sjhb		atomic_add_long(&toep->vi->pi->tx_tls_octets, plen);
1385330884Sjhb
1386330884Sjhb		t4_l2t_send(sc, wr, toep->l2te);
1387330884Sjhb	}
1388330884Sjhb}
1389330884Sjhb
1390330884Sjhb/*
1391330884Sjhb * For TLS data we place received mbufs received via CPL_TLS_DATA into
1392330884Sjhb * an mbufq in the TLS offload state.  When CPL_RX_TLS_CMP is
1393330884Sjhb * received, the completed PDUs are placed into the socket receive
1394330884Sjhb * buffer.
1395330884Sjhb *
1396330884Sjhb * The TLS code reuses the ulp_pdu_reclaimq to hold the pending mbufs.
1397330884Sjhb */
/*
 * CPL_TLS_DATA handler: the mbuf carries the payload of a TLS record.
 * Tag it with its TCP sequence number and hold it on the connection's
 * ulp_pdu_reclaimq until the matching CPL_RX_TLS_CMP arrives.
 * Always returns 0.
 */
static int
do_tls_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_tls_data *cpl = mtod(m, const void *);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp;
	int len;

	/* XXX: Should this match do_rx_data instead? */
	KASSERT(!(toep->flags & TPF_SYNQE),
	    ("%s: toep %p claims to be a synq entry", __func__, toep));

	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));

	/* strip off CPL header */
	m_adj(m, sizeof(*cpl));
	len = m->m_pkthdr.len;

	atomic_add_long(&toep->vi->pi->rx_tls_octets, len);

	KASSERT(len == G_CPL_TLS_DATA_LENGTH(be32toh(cpl->length_pkd)),
	    ("%s: payload length mismatch", __func__));

	INP_WLOCK(inp);
	/* Drop data for connections that are going (or have gone) away. */
	if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
		    __func__, tid, len, inp->inp_flags);
		INP_WUNLOCK(inp);
		m_freem(m);
		return (0);
	}

	/* Save TCP sequence number. */
	m->m_pkthdr.tls_tcp_seq = be32toh(cpl->seq);

	/* Park the payload until CPL_RX_TLS_CMP completes the PDU. */
	if (mbufq_enqueue(&toep->ulp_pdu_reclaimq, m)) {
#ifdef INVARIANTS
		panic("Failed to queue TLS data packet");
#else
		printf("%s: Failed to queue TLS data packet\n", __func__);
		INP_WUNLOCK(inp);
		m_freem(m);
		return (0);
#endif
	}

	tp = intotcpcb(inp);
	tp->t_rcvtime = ticks;

#ifdef VERBOSE_TRACES
	CTR4(KTR_CXGBE, "%s: tid %u len %d seq %u", __func__, tid, len,
	    be32toh(cpl->seq));
#endif

	INP_WUNLOCK(inp);
	return (0);
}
1458330884Sjhb
1459330884Sjhbstatic int
1460330884Sjhbdo_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
1461330884Sjhb{
1462330884Sjhb	struct adapter *sc = iq->adapter;
1463330884Sjhb	const struct cpl_rx_tls_cmp *cpl = mtod(m, const void *);
1464330884Sjhb	struct tlsrx_hdr_pkt *tls_hdr_pkt;
1465330884Sjhb	unsigned int tid = GET_TID(cpl);
1466330884Sjhb	struct toepcb *toep = lookup_tid(sc, tid);
1467330884Sjhb	struct inpcb *inp = toep->inp;
1468330884Sjhb	struct tcpcb *tp;
1469330884Sjhb	struct socket *so;
1470330884Sjhb	struct sockbuf *sb;
1471330884Sjhb	struct mbuf *tls_data;
1472348704Snp	int len, pdu_length, rx_credits;
1473330884Sjhb
1474330884Sjhb	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
1475330884Sjhb	KASSERT(!(toep->flags & TPF_SYNQE),
1476330884Sjhb	    ("%s: toep %p claims to be a synq entry", __func__, toep));
1477330884Sjhb
1478330884Sjhb	/* strip off CPL header */
1479330884Sjhb	m_adj(m, sizeof(*cpl));
1480330884Sjhb	len = m->m_pkthdr.len;
1481330884Sjhb
1482330884Sjhb	atomic_add_long(&toep->vi->pi->rx_tls_records, 1);
1483330884Sjhb
1484330884Sjhb	KASSERT(len == G_CPL_RX_TLS_CMP_LENGTH(be32toh(cpl->pdulength_length)),
1485330884Sjhb	    ("%s: payload length mismatch", __func__));
1486330884Sjhb
1487330884Sjhb	INP_WLOCK(inp);
1488330884Sjhb	if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
1489330884Sjhb		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
1490330884Sjhb		    __func__, tid, len, inp->inp_flags);
1491330884Sjhb		INP_WUNLOCK(inp);
1492330884Sjhb		m_freem(m);
1493330884Sjhb		return (0);
1494330884Sjhb	}
1495330884Sjhb
1496330884Sjhb	pdu_length = G_CPL_RX_TLS_CMP_PDULENGTH(be32toh(cpl->pdulength_length));
1497330884Sjhb
1498330884Sjhb	tp = intotcpcb(inp);
1499330884Sjhb
1500330884Sjhb#ifdef VERBOSE_TRACES
1501330884Sjhb	CTR6(KTR_CXGBE, "%s: tid %u PDU len %d len %d seq %u, rcv_nxt %u",
1502330884Sjhb	    __func__, tid, pdu_length, len, be32toh(cpl->seq), tp->rcv_nxt);
1503330884Sjhb#endif
1504330884Sjhb
1505330884Sjhb	tp->rcv_nxt += pdu_length;
1506330884Sjhb	if (tp->rcv_wnd < pdu_length) {
1507330884Sjhb		toep->tls.rcv_over += pdu_length - tp->rcv_wnd;
1508330884Sjhb		tp->rcv_wnd = 0;
1509330884Sjhb	} else
1510330884Sjhb		tp->rcv_wnd -= pdu_length;
1511330884Sjhb
1512330884Sjhb	/* XXX: Not sure what to do about urgent data. */
1513330884Sjhb
1514330884Sjhb	/*
1515330884Sjhb	 * The payload of this CPL is the TLS header followed by
1516330884Sjhb	 * additional fields.
1517330884Sjhb	 */
1518330884Sjhb	KASSERT(m->m_len >= sizeof(*tls_hdr_pkt),
1519330884Sjhb	    ("%s: payload too small", __func__));
1520330884Sjhb	tls_hdr_pkt = mtod(m, void *);
1521330884Sjhb
1522330884Sjhb	/*
1523330884Sjhb	 * Only the TLS header is sent to OpenSSL, so report errors by
1524330884Sjhb	 * altering the record type.
1525330884Sjhb	 */
1526330884Sjhb	if ((tls_hdr_pkt->res_to_mac_error & M_TLSRX_HDR_PKT_ERROR) != 0)
1527330884Sjhb		tls_hdr_pkt->type = CONTENT_TYPE_ERROR;
1528330884Sjhb
1529330884Sjhb	/* Trim this CPL's mbuf to only include the TLS header. */
1530330884Sjhb	KASSERT(m->m_len == len && m->m_next == NULL,
1531330884Sjhb	    ("%s: CPL spans multiple mbufs", __func__));
1532330884Sjhb	m->m_len = TLS_HEADER_LENGTH;
1533330884Sjhb	m->m_pkthdr.len = TLS_HEADER_LENGTH;
1534330884Sjhb
1535330884Sjhb	tls_data = mbufq_dequeue(&toep->ulp_pdu_reclaimq);
1536330884Sjhb	if (tls_data != NULL) {
1537330884Sjhb		KASSERT(be32toh(cpl->seq) == tls_data->m_pkthdr.tls_tcp_seq,
1538330884Sjhb		    ("%s: sequence mismatch", __func__));
1539330884Sjhb
1540330884Sjhb		/*
1541330884Sjhb		 * Update the TLS header length to be the length of
1542330884Sjhb		 * the payload data.
1543330884Sjhb		 */
1544330884Sjhb		tls_hdr_pkt->length = htobe16(tls_data->m_pkthdr.len);
1545330884Sjhb
1546330884Sjhb		m->m_next = tls_data;
1547330884Sjhb		m->m_pkthdr.len += tls_data->m_len;
1548330884Sjhb	}
1549330884Sjhb
1550330884Sjhb	so = inp_inpcbtosocket(inp);
1551330884Sjhb	sb = &so->so_rcv;
1552330884Sjhb	SOCKBUF_LOCK(sb);
1553330884Sjhb
1554330884Sjhb	if (__predict_false(sb->sb_state & SBS_CANTRCVMORE)) {
1555330884Sjhb		CTR3(KTR_CXGBE, "%s: tid %u, excess rx (%d bytes)",
1556330884Sjhb		    __func__, tid, pdu_length);
1557330884Sjhb		m_freem(m);
1558330884Sjhb		SOCKBUF_UNLOCK(sb);
1559330884Sjhb		INP_WUNLOCK(inp);
1560330884Sjhb
1561330884Sjhb		CURVNET_SET(toep->vnet);
1562330884Sjhb		INP_INFO_RLOCK(&V_tcbinfo);
1563330884Sjhb		INP_WLOCK(inp);
1564330884Sjhb		tp = tcp_drop(tp, ECONNRESET);
1565330884Sjhb		if (tp)
1566330884Sjhb			INP_WUNLOCK(inp);
1567330884Sjhb		INP_INFO_RUNLOCK(&V_tcbinfo);
1568330884Sjhb		CURVNET_RESTORE();
1569330884Sjhb
1570330884Sjhb		return (0);
1571330884Sjhb	}
1572330884Sjhb
1573330884Sjhb	/*
1574348704Snp	 * Not all of the bytes on the wire are included in the socket buffer
1575348704Snp	 * (e.g. the MAC of the TLS record).  However, those bytes are included
1576348704Snp	 * in the TCP sequence space.
1577330884Sjhb	 */
1578330884Sjhb
1579330884Sjhb	/* receive buffer autosize */
1580330884Sjhb	MPASS(toep->vnet == so->so_vnet);
1581330884Sjhb	CURVNET_SET(toep->vnet);
1582330884Sjhb	if (sb->sb_flags & SB_AUTOSIZE &&
1583330884Sjhb	    V_tcp_do_autorcvbuf &&
1584330884Sjhb	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
1585348704Snp	    m->m_pkthdr.len > (sbspace(sb) / 8 * 7)) {
1586330884Sjhb		unsigned int hiwat = sb->sb_hiwat;
1587330884Sjhb		unsigned int newsize = min(hiwat + V_tcp_autorcvbuf_inc,
1588330884Sjhb		    V_tcp_autorcvbuf_max);
1589330884Sjhb
1590330884Sjhb		if (!sbreserve_locked(sb, newsize, so, NULL))
1591330884Sjhb			sb->sb_flags &= ~SB_AUTOSIZE;
1592330884Sjhb	}
1593330884Sjhb
1594330884Sjhb	sbappendstream_locked(sb, m, 0);
1595348704Snp	rx_credits = sbspace(sb) > tp->rcv_wnd ? sbspace(sb) - tp->rcv_wnd : 0;
1596330884Sjhb#ifdef VERBOSE_TRACES
1597351228Sjhb	CTR4(KTR_CXGBE, "%s: tid %u rx_credits %u rcv_wnd %u",
1598351228Sjhb	    __func__, tid, rx_credits, tp->rcv_wnd);
1599330884Sjhb#endif
1600348704Snp	if (rx_credits > 0 && sbused(sb) + tp->rcv_wnd < sb->sb_lowat) {
1601348704Snp		rx_credits = send_rx_credits(sc, toep, rx_credits);
1602348704Snp		tp->rcv_wnd += rx_credits;
1603348704Snp		tp->rcv_adv += rx_credits;
1604330884Sjhb	}
1605330884Sjhb
1606330884Sjhb	sorwakeup_locked(so);
1607330884Sjhb	SOCKBUF_UNLOCK_ASSERT(sb);
1608330884Sjhb
1609330884Sjhb	INP_WUNLOCK(inp);
1610330884Sjhb	CURVNET_RESTORE();
1611330884Sjhb	return (0);
1612330884Sjhb}
1613330884Sjhb
/*
 * Module load hook for the T4 TOM TLS support: set up the global
 * handshake mutex and install the CPL handlers for the TLS receive
 * path (plaintext handshake data and completed TLS records).
 *
 * The mutex is initialized before the handlers are registered, so a
 * CPL message arriving immediately after registration never runs
 * against an uninitialized lock.
 */
void
t4_tls_mod_load(void)
{

	mtx_init(&tls_handshake_lock, "t4tls handshake", NULL, MTX_DEF);
	t4_register_cpl_handler(CPL_TLS_DATA, do_tls_data);
	t4_register_cpl_handler(CPL_RX_TLS_CMP, do_rx_tls_cmp);
}
1622330884Sjhb
/*
 * Module unload hook: undo t4_tls_mod_load() in reverse order —
 * deregister the TLS CPL handlers (installing NULL) before destroying
 * the handshake mutex, so no handler can fire after the lock is gone.
 */
void
t4_tls_mod_unload(void)
{

	t4_register_cpl_handler(CPL_TLS_DATA, NULL);
	t4_register_cpl_handler(CPL_RX_TLS_CMP, NULL);
	mtx_destroy(&tls_handshake_lock);
}
1631330884Sjhb#endif	/* TCP_OFFLOAD */
1632