/*-
 * Copyright (c) 2019-2021 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2022 NVIDIA corporation & affiliates.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_kern_tls.h"
#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <dev/mlx5/mlx5_en/en.h>

#include <dev/mlx5/tls.h>

#include <linux/delay.h>
#include <sys/ktls.h>
#include <opencrypto/cryptodev.h>

#ifdef KERN_TLS

#ifdef RATELIMIT
static if_snd_tag_modify_t mlx5e_tls_rl_snd_tag_modify;
#endif
static if_snd_tag_query_t mlx5e_tls_snd_tag_query;
static if_snd_tag_free_t mlx5e_tls_snd_tag_free;

static const struct if_snd_tag_sw mlx5e_tls_snd_tag_sw = {
	.snd_tag_query = mlx5e_tls_snd_tag_query,
	.snd_tag_free = mlx5e_tls_snd_tag_free,
	.type = IF_SND_TAG_TYPE_TLS
};

#ifdef RATELIMIT
static const struct if_snd_tag_sw mlx5e_tls_rl_snd_tag_sw = {
	.snd_tag_modify = mlx5e_tls_rl_snd_tag_modify,
	.snd_tag_query = mlx5e_tls_snd_tag_query,
	.snd_tag_free = mlx5e_tls_snd_tag_free,
	.type = IF_SND_TAG_TYPE_TLS_RATE_LIMIT
};
#endif

MALLOC_DEFINE(M_MLX5E_TLS, "MLX5E_TLS", "MLX5 ethernet HW TLS");

/* software TLS context */
struct mlx5_ifc_sw_tls_cntx_bits {
	struct mlx5_ifc_tls_static_params_bits param;
	struct mlx5_ifc_tls_progress_params_bits progress;
	struct {
		uint8_t key_data[8][0x20];
		uint8_t key_len[0x20];
	} key;
};

CTASSERT(MLX5_ST_SZ_BYTES(sw_tls_cntx) <= sizeof(((struct mlx5e_tls_tag *)0)->crypto_params));
CTASSERT(MLX5_ST_SZ_BYTES(mkc) == sizeof(((struct mlx5e_tx_umr_wqe *)0)->mkc));

static const char *mlx5e_tls_stats_desc[] = {
	MLX5E_TLS_STATS(MLX5E_STATS_DESC)
};

static void mlx5e_tls_work(struct work_struct *);

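/*
 * UMA cache zone import callback; allocates and pre-initializes a
 * batch of TLS send tags in the device's NUMA domain.
 */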
static int
mlx5e_tls_tag_import(void *arg, void **store, int cnt, int domain, int flags)
{
	struct mlx5e_tls_tag *ptag;
	int i;

	for (i = 0; i != cnt; i++) {
		ptag = malloc_domainset(sizeof(*ptag), M_MLX5E_TLS,
		    mlx5_dev_domainset(arg), flags | M_ZERO);
		mtx_init(&ptag->mtx, "mlx5-tls-tag-mtx", NULL, MTX_DEF);
		INIT_WORK(&ptag->work, mlx5e_tls_work);
		store[i] = ptag;
	}
	return (i);
}

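/*
 * UMA cache zone release callback; waits for any pending work to
 * complete, closes the TIS context, if any, and frees the tags.
 */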
static void
mlx5e_tls_tag_release(void *arg, void **store, int cnt)
{
	struct mlx5e_tls_tag *ptag;
	struct mlx5e_priv *priv;
	struct mlx5e_tls *ptls;
	int i;

	for (i = 0; i != cnt; i++) {
		ptag = store[i];
		ptls = ptag->tls;
		priv = container_of(ptls, struct mlx5e_priv, tls);

		flush_work(&ptag->work);

		if (ptag->tisn != 0) {
			mlx5_tls_close_tis(priv->mdev, ptag->tisn);
			atomic_add_32(&ptls->num_resources, -1U);
		}

		mtx_destroy(&ptag->mtx);

		free(ptag, M_MLX5E_TLS);
	}
}

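/*
 * Zero sensitive state and return a TLS send tag to the UMA cache
 * zone, so that it can be reused by a future connection.
 */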
static void
mlx5e_tls_tag_zfree(struct mlx5e_tls_tag *ptag)
{
	/* make sure any unhandled taskqueue events are ignored */
	ptag->state = MLX5E_TLS_ST_FREED;

	/* reset the DEK state */
	ptag->dek_index = 0;
	ptag->dek_index_ok = 0;

	/* avoid leaking keys */
	memset(ptag->crypto_params, 0, sizeof(ptag->crypto_params));

	/* update number of TIS contexts */
	if (ptag->tisn == 0)
		atomic_add_32(&ptag->tls->num_resources, -1U);

	/* return tag to UMA */
	uma_zfree(ptag->tls->zone, ptag);
}

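/*
 * Allocate the resources needed for hardware TLS transmit offload:
 * a workqueue, a UMA cache zone for send tags, statistics counters
 * and the "tls" sysctl node.
 */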
int
mlx5e_tls_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tls *ptls = &priv->tls;
	struct sysctl_oid *node;
	uint32_t x;

	if (MLX5_CAP_GEN(priv->mdev, tls_tx) == 0 ||
	    MLX5_CAP_GEN(priv->mdev, log_max_dek) == 0)
		return (0);

	ptls->wq = create_singlethread_workqueue("mlx5-tls-wq");
	if (ptls->wq == NULL)
		return (ENOMEM);

	sysctl_ctx_init(&ptls->ctx);

	snprintf(ptls->zname, sizeof(ptls->zname),
	    "mlx5_%u_tls", device_get_unit(priv->mdev->pdev->dev.bsddev));

	ptls->zone = uma_zcache_create(ptls->zname,
	     sizeof(struct mlx5e_tls_tag), NULL, NULL, NULL, NULL,
	     mlx5e_tls_tag_import, mlx5e_tls_tag_release, priv->mdev,
	     UMA_ZONE_UNMANAGED);

	/* the DEK pool is shared between RX and TX TLS, hence use only half */
	ptls->max_resources = 1U << (MLX5_CAP_GEN(priv->mdev, log_max_dek) - 1);

	for (x = 0; x != MLX5E_TLS_STATS_NUM; x++)
		ptls->stats.arg[x] = counter_u64_alloc(M_WAITOK);

	ptls->init = 1;

	node = SYSCTL_ADD_NODE(&priv->sysctl_ctx,
	    SYSCTL_CHILDREN(priv->sysctl_ifnet), OID_AUTO,
	    "tls", CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, "Hardware TLS offload");
	if (node == NULL)
		return (0);

	mlx5e_create_counter_stats(&ptls->ctx,
	    SYSCTL_CHILDREN(node), "stats",
	    mlx5e_tls_stats_desc, MLX5E_TLS_STATS_NUM,
	    ptls->stats.arg);

	return (0);
}

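/*
 * Undo mlx5e_tls_init(); the workqueue is flushed before the UMA
 * cache zone and the statistics counters are destroyed.
 */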
void
mlx5e_tls_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tls *ptls = &priv->tls;
	uint32_t x;

	if (ptls->init == 0)
		return;

	ptls->init = 0;
	flush_workqueue(ptls->wq);
	sysctl_ctx_free(&ptls->ctx);
	uma_zdestroy(ptls->zone);
	destroy_workqueue(ptls->wq);

	/* check if all resources are freed */
	MPASS(priv->tls.num_resources == 0);

	for (x = 0; x != MLX5E_TLS_STATS_NUM; x++)
		counter_u64_free(ptls->stats.arg[x]);
}

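/*
 * Deferred work handler; executes the firmware commands which may
 * sleep, like opening a TIS context and creating or destroying a DEK
 * context, outside of the fast transmit path.
 */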
static void
mlx5e_tls_work(struct work_struct *work)
{
	struct mlx5e_tls_tag *ptag;
	struct mlx5e_priv *priv;
	int err;

	ptag = container_of(work, struct mlx5e_tls_tag, work);
	priv = container_of(ptag->tls, struct mlx5e_priv, tls);

	switch (ptag->state) {
	case MLX5E_TLS_ST_INIT:
		/* open a TIS context, if not already present */
		if (ptag->tisn == 0) {
			err = mlx5_tls_open_tis(priv->mdev, 0, priv->tdn,
			    priv->pdn, &ptag->tisn);
			if (err) {
				MLX5E_TLS_STAT_INC(ptag, tx_error, 1);
				break;
			}
		}
		MLX5_SET(sw_tls_cntx, ptag->crypto_params, progress.pd, ptag->tisn);

		/* try to allocate a DEK context ID */
		err = mlx5_encryption_key_create(priv->mdev, priv->pdn,
		    MLX5_ADDR_OF(sw_tls_cntx, ptag->crypto_params, key.key_data),
		    MLX5_GET(sw_tls_cntx, ptag->crypto_params, key.key_len),
		    &ptag->dek_index);
		if (err) {
			MLX5E_TLS_STAT_INC(ptag, tx_error, 1);
			break;
		}

		MLX5_SET(sw_tls_cntx, ptag->crypto_params, param.dek_index, ptag->dek_index);

		ptag->dek_index_ok = 1;

		MLX5E_TLS_TAG_LOCK(ptag);
		if (ptag->state == MLX5E_TLS_ST_INIT)
			ptag->state = MLX5E_TLS_ST_SETUP;
		MLX5E_TLS_TAG_UNLOCK(ptag);
		break;

	case MLX5E_TLS_ST_RELEASE:
		/* try to destroy DEK context by ID */
		if (ptag->dek_index_ok)
			err = mlx5_encryption_key_destroy(priv->mdev, ptag->dek_index);

		/* free tag */
		mlx5e_tls_tag_zfree(ptag);
		break;

	default:
		break;
	}
}

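/*
 * Translate the kernel TLS session parameters into the hardware
 * layout given by struct mlx5_ifc_sw_tls_cntx_bits above. Returns
 * EINVAL when the IV length or key length is not supported.
 */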
static int
mlx5e_tls_set_params(void *ctx, const struct tls_session_params *en)
{

	MLX5_SET(sw_tls_cntx, ctx, param.const_2, 2);
	if (en->tls_vminor == TLS_MINOR_VER_TWO)
		MLX5_SET(sw_tls_cntx, ctx, param.tls_version, 2); /* v1.2 */
	else
		MLX5_SET(sw_tls_cntx, ctx, param.tls_version, 3); /* v1.3 */
	MLX5_SET(sw_tls_cntx, ctx, param.const_1, 1);
	MLX5_SET(sw_tls_cntx, ctx, param.encryption_standard, 1); /* TLS */

	/* copy the initialization vector in place */
	switch (en->iv_len) {
	case MLX5_FLD_SZ_BYTES(sw_tls_cntx, param.gcm_iv):
	case MLX5_FLD_SZ_BYTES(sw_tls_cntx, param.gcm_iv) +
	     MLX5_FLD_SZ_BYTES(sw_tls_cntx, param.implicit_iv):
		memcpy(MLX5_ADDR_OF(sw_tls_cntx, ctx, param.gcm_iv),
		    en->iv, en->iv_len);
		break;
	default:
		return (EINVAL);
	}

	if (en->cipher_key_len <= MLX5_FLD_SZ_BYTES(sw_tls_cntx, key.key_data)) {
		memcpy(MLX5_ADDR_OF(sw_tls_cntx, ctx, key.key_data),
		    en->cipher_key, en->cipher_key_len);
		MLX5_SET(sw_tls_cntx, ctx, key.key_len, en->cipher_key_len);
	} else {
		return (EINVAL);
	}
	return (0);
}

/* verify that the zero-initialized tag state is MLX5E_TLS_ST_INIT */
CTASSERT(MLX5E_TLS_ST_INIT == 0);

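/*
 * Allocate and set up a hardware TLS send tag. The firmware
 * programming (TIS and DEK setup) is done by mlx5e_tls_work(), which
 * is queued and waited for before returning. On success the new tag
 * is returned through "ppmt".
 */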
int
mlx5e_tls_snd_tag_alloc(if_t ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{
	union if_snd_tag_alloc_params rl_params;
	const struct if_snd_tag_sw *snd_tag_sw;
	struct mlx5e_priv *priv;
	struct mlx5e_tls_tag *ptag;
	const struct tls_session_params *en;
	int error;

	priv = if_getsoftc(ifp);

	if (priv->gone != 0 || priv->tls.init == 0)
		return (EOPNOTSUPP);

	/* allocate a new tag from the cache zone, if possible */
	ptag = uma_zalloc(priv->tls.zone, M_NOWAIT);
	if (ptag == NULL)
		return (ENOMEM);

	/* sanity check default values */
	MPASS(ptag->dek_index == 0);
	MPASS(ptag->dek_index_ok == 0);

	/* setup TLS tag */
	ptag->tls = &priv->tls;

	/* if no TIS context is allocated yet, account for a new one */
	if (ptag->tisn == 0) {
		uint32_t value;

		value = atomic_fetchadd_32(&priv->tls.num_resources, 1U);

		/* check resource limits */
		if (value >= priv->tls.max_resources) {
			error = ENOMEM;
			goto failure;
		}
	}

	en = &params->tls.tls->params;

	/* only TLS v1.2 and v1.3 are currently supported */
	if (en->tls_vmajor != TLS_MAJOR_VER_ONE ||
	    (en->tls_vminor != TLS_MINOR_VER_TWO
#ifdef TLS_MINOR_VER_THREE
	     && en->tls_vminor != TLS_MINOR_VER_THREE
#endif
	     )) {
		error = EPROTONOSUPPORT;
		goto failure;
	}

	switch (en->cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		switch (en->cipher_key_len) {
		case 128 / 8:
			if (en->tls_vminor == TLS_MINOR_VER_TWO) {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_2_aes_gcm_128) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			} else {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_3_aes_gcm_128) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			}
			error = mlx5e_tls_set_params(ptag->crypto_params, en);
			if (error)
				goto failure;
			break;

		case 256 / 8:
			if (en->tls_vminor == TLS_MINOR_VER_TWO) {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_2_aes_gcm_256) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			} else {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_3_aes_gcm_256) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			}
			error = mlx5e_tls_set_params(ptag->crypto_params, en);
			if (error)
				goto failure;
			break;

		default:
			error = EINVAL;
			goto failure;
		}
		break;
	default:
		error = EPROTONOSUPPORT;
		goto failure;
	}

	memset(&rl_params, 0, sizeof(rl_params));
	rl_params.hdr = params->hdr;
	switch (params->hdr.type) {
#ifdef RATELIMIT
	case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
		rl_params.hdr.type = IF_SND_TAG_TYPE_RATE_LIMIT;
		rl_params.rate_limit.max_rate = params->tls_rate_limit.max_rate;
		snd_tag_sw = &mlx5e_tls_rl_snd_tag_sw;
		break;
#endif
	case IF_SND_TAG_TYPE_TLS:
		rl_params.hdr.type = IF_SND_TAG_TYPE_UNLIMITED;
		snd_tag_sw = &mlx5e_tls_snd_tag_sw;
		break;
	default:
		error = EOPNOTSUPP;
		goto failure;
	}

	error = m_snd_tag_alloc(ifp, &rl_params, &ptag->rl_tag);
	if (error)
		goto failure;

	/* store pointer to mbuf tag */
	MPASS(ptag->tag.refcount == 0);
	m_snd_tag_init(&ptag->tag, ifp, snd_tag_sw);
	*ppmt = &ptag->tag;

	/* reset state */
	ptag->state = MLX5E_TLS_ST_INIT;

	queue_work(priv->tls.wq, &ptag->work);
	flush_work(&ptag->work);

	return (0);

failure:
	mlx5e_tls_tag_zfree(ptag);
	return (error);
}

#ifdef RATELIMIT
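/*
 * Forward a transmit rate change on a TLS rate-limit send tag to the
 * underlying rate-limit send tag.
 */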
static int
mlx5e_tls_rl_snd_tag_modify(struct m_snd_tag *pmt, union if_snd_tag_modify_params *params)
{
	union if_snd_tag_modify_params rl_params;
	struct mlx5e_tls_tag *ptag =
	    container_of(pmt, struct mlx5e_tls_tag, tag);
	int error;

	memset(&rl_params, 0, sizeof(rl_params));
	rl_params.rate_limit.max_rate = params->tls_rate_limit.max_rate;
	error = ptag->rl_tag->sw->snd_tag_modify(ptag->rl_tag, &rl_params);
	return (error);
}
#endif

static int
mlx5e_tls_snd_tag_query(struct m_snd_tag *pmt, union if_snd_tag_query_params *params)
{
	struct mlx5e_tls_tag *ptag =
	    container_of(pmt, struct mlx5e_tls_tag, tag);

	return (ptag->rl_tag->sw->snd_tag_query(ptag->rl_tag, params));
}

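/*
 * Release a TLS send tag; the DEK context is destroyed and the tag
 * itself is freed asynchronously by mlx5e_tls_work().
 */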
static void
mlx5e_tls_snd_tag_free(struct m_snd_tag *pmt)
{
	struct mlx5e_tls_tag *ptag =
	    container_of(pmt, struct mlx5e_tls_tag, tag);
	struct mlx5e_priv *priv;

	m_snd_tag_rele(ptag->rl_tag);

	MLX5E_TLS_TAG_LOCK(ptag);
	ptag->state = MLX5E_TLS_ST_RELEASE;
	MLX5E_TLS_TAG_UNLOCK(ptag);

	priv = if_getsoftc(ptag->tag.ifp);
	queue_work(priv->tls.wq, &ptag->work);
}

CTASSERT((MLX5_FLD_SZ_BYTES(sw_tls_cntx, param) % 16) == 0);

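/*
 * Queue a UMR work request which programs the static TLS parameters
 * (IV, initial record number and DEK index) into the send tag's TIS
 * context.
 */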
static void
mlx5e_tls_send_static_parameters(struct mlx5e_sq *sq, struct mlx5e_tls_tag *ptag)
{
	const u32 ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5e_tx_umr_wqe) +
	    MLX5_FLD_SZ_BYTES(sw_tls_cntx, param), MLX5_SEND_WQE_DS);
	struct mlx5e_tx_umr_wqe *wqe;
	u16 pi;

	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(wqe, 0, sizeof(*wqe));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) |
	    MLX5_OPCODE_UMR | (MLX5_OPCODE_MOD_UMR_TLS_TIS_STATIC_PARAMS << 24));
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	wqe->ctrl.imm = cpu_to_be32(ptag->tisn << 8);

	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE | MLX5_FENCE_MODE_INITIATOR_SMALL;
	else
		wqe->ctrl.fm_ce_se = MLX5_FENCE_MODE_INITIATOR_SMALL;

	/* fill out UMR control segment */
	wqe->umr.flags = 0x80;	/* inline data */
	wqe->umr.bsf_octowords = cpu_to_be16(MLX5_FLD_SZ_BYTES(sw_tls_cntx, param) / 16);

	/* copy in the static crypto parameters */
	memcpy(wqe + 1, MLX5_ADDR_OF(sw_tls_cntx, ptag->crypto_params, param),
	    MLX5_FLD_SZ_BYTES(sw_tls_cntx, param));

	/* copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->mbuf[pi].mst = m_snd_tag_ref(&ptag->tag);

	sq->pc += sq->mbuf[pi].num_wqebbs;
}

CTASSERT(MLX5_FLD_SZ_BYTES(sw_tls_cntx, progress) ==
    sizeof(((struct mlx5e_tx_psv_wqe *)0)->psv));

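/*
 * Queue a SET_PSV work request which programs the TLS progress
 * parameters for the send tag's TIS context.
 */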
static void
mlx5e_tls_send_progress_parameters(struct mlx5e_sq *sq, struct mlx5e_tls_tag *ptag)
{
	const u32 ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5e_tx_psv_wqe),
	    MLX5_SEND_WQE_DS);
	struct mlx5e_tx_psv_wqe *wqe;
	u16 pi;

	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(wqe, 0, sizeof(*wqe));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) |
	    MLX5_OPCODE_SET_PSV | (MLX5_OPCODE_MOD_PSV_TLS_TIS_PROGRESS_PARAMS << 24));
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);

	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;

	/* copy in the PSV control segment */
	memcpy(&wqe->psv, MLX5_ADDR_OF(sw_tls_cntx, ptag->crypto_params, progress),
	    sizeof(wqe->psv));

	/* copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->mbuf[pi].mst = m_snd_tag_ref(&ptag->tag);

	sq->pc += sq->mbuf[pi].num_wqebbs;
}

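/*
 * Queue a NOP work request; used after reprogramming the TLS
 * parameters when there is no record data to retransmit.
 */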
static void
mlx5e_tls_send_nop(struct mlx5e_sq *sq, struct mlx5e_tls_tag *ptag)
{
	const u32 ds_cnt = MLX5_SEND_WQEBB_NUM_DS;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE | MLX5_FENCE_MODE_INITIATOR_SMALL;
	else
		wqe->ctrl.fm_ce_se = MLX5_FENCE_MODE_INITIATOR_SMALL;

	/* copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->mbuf[pi].mst = m_snd_tag_ref(&ptag->tag);

	sq->pc += sq->mbuf[pi].num_wqebbs;
}

#define	SBTLS_MBUF_NO_DATA ((struct mbuf *)1)

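/*
 * Given an outgoing mbuf chain which starts in the middle of a TLS
 * record, build a new chain covering the record data already handed
 * to the stack, so that the hardware can be resynchronized by
 * DUMP-ing that data. Returns SBTLS_MBUF_NO_DATA when the chain
 * starts exactly at a record boundary, or NULL on failure.
 * "*ptcp_seq" is rewound to the start of the data to be retransmitted
 * and "*pis_start" is set when transmission resumes at the beginning
 * of a TLS record.
 */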
static struct mbuf *
sbtls_recover_record(struct mbuf *mb, int wait, uint32_t tcp_old, uint32_t *ptcp_seq, bool *pis_start)
{
	struct mbuf *mr, *top;
	uint32_t offset;
	uint32_t delta;

	/* check format of incoming mbuf */
	if (mb->m_next == NULL ||
	    (mb->m_next->m_flags & (M_EXTPG | M_EXT)) != (M_EXTPG | M_EXT)) {
		top = NULL;
		goto done;
	}

	/* get unmapped data offset */
	offset = mtod(mb->m_next, uintptr_t);

	/* check if there is nothing to re-transmit */
	if (offset == 0) {
		top = SBTLS_MBUF_NO_DATA;
		*pis_start = true;
		goto done;
	}

	/* try to get a new packet header */
	top = m_gethdr(wait, MT_DATA);
	if (top == NULL)
		goto done;

	mr = m_get(wait, MT_DATA);
	if (mr == NULL) {
		m_free(top);
		top = NULL;
		goto done;
	}

	top->m_next = mr;

	mb_dupcl(mr, mb->m_next);

	/* the beginning of the TLS record */
	mr->m_data = NULL;

	/* setup packet header length */
	top->m_pkthdr.len = mr->m_len = offset;
	top->m_len = 0;

	/* check for partial re-transmit */
	delta = *ptcp_seq - tcp_old;

	if (delta < offset) {
		m_adj(top, offset - delta);
		offset = delta;

		/* continue where we left off */
		*pis_start = false;
	} else {
		*pis_start = true;
	}

	/*
	 * Rewind the TCP sequence number by the amount of data
	 * retransmitted:
	 */
	*ptcp_seq -= offset;
done:
	return (top);
}

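/*
 * Scan an mbuf chain for unmapped TLS data and return the record
 * sequence number of the first TLS record found, if any.
 */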
static int
mlx5e_sq_tls_populate(struct mbuf *mb, uint64_t *pseq)
{

	for (; mb != NULL; mb = mb->m_next) {
		if (!(mb->m_flags & M_EXTPG))
			continue;
		*pseq = mb->m_epg_seqno;
		return (1);
	}
	return (0);
}

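/*
 * Transmit path hook for hardware TLS offload. Decides whether the
 * given mbuf can be sent as-is, needs the TLS send state to be
 * reprogrammed first, or must be dropped. Returns one of the
 * MLX5E_TLS_CONTINUE, MLX5E_TLS_LOOP or MLX5E_TLS_FAILURE codes
 * interpreted by the caller.
 */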
int
mlx5e_sq_tls_xmit(struct mlx5e_sq *sq, struct mlx5e_xmit_args *parg, struct mbuf **ppmb)
{
	struct mlx5e_tls_tag *ptls_tag;
	struct m_snd_tag *ptag;
	const struct tcphdr *th;
	struct mbuf *mb = *ppmb;
	u64 rcd_sn;
	u32 header_size;
	u32 mb_seq;

	if ((mb->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0)
		return (MLX5E_TLS_CONTINUE);

	ptag = mb->m_pkthdr.snd_tag;

	if (
#ifdef RATELIMIT
	    ptag->sw->type != IF_SND_TAG_TYPE_TLS_RATE_LIMIT &&
#endif
	    ptag->sw->type != IF_SND_TAG_TYPE_TLS)
		return (MLX5E_TLS_CONTINUE);

	ptls_tag = container_of(ptag, struct mlx5e_tls_tag, tag);

	header_size = mlx5e_get_full_header_size(mb, &th);
	if (unlikely(header_size == 0 || th == NULL))
		return (MLX5E_TLS_FAILURE);

	/*
	 * Send non-TLS TCP packets AS-IS:
	 */
	if (header_size == mb->m_pkthdr.len ||
	    mlx5e_sq_tls_populate(mb, &rcd_sn) == 0) {
		parg->tisn = 0;
		parg->ihs = header_size;
		return (MLX5E_TLS_CONTINUE);
	}

	mb_seq = ntohl(th->th_seq);

	MLX5E_TLS_TAG_LOCK(ptls_tag);
	switch (ptls_tag->state) {
	case MLX5E_TLS_ST_INIT:
		MLX5E_TLS_TAG_UNLOCK(ptls_tag);
		return (MLX5E_TLS_FAILURE);
	case MLX5E_TLS_ST_SETUP:
		ptls_tag->state = MLX5E_TLS_ST_TXRDY;
		ptls_tag->expected_seq = ~mb_seq;	/* force setup */
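		/* FALLTHROUGH */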
	default:
		MLX5E_TLS_TAG_UNLOCK(ptls_tag);
		break;
	}

	if (unlikely(ptls_tag->expected_seq != mb_seq)) {
		bool is_start;
		struct mbuf *r_mb;
		uint32_t tcp_seq = mb_seq;

		r_mb = sbtls_recover_record(mb, M_NOWAIT, ptls_tag->expected_seq, &tcp_seq, &is_start);
		if (r_mb == NULL) {
			MLX5E_TLS_STAT_INC(ptls_tag, tx_error, 1);
			return (MLX5E_TLS_FAILURE);
		}

		MLX5E_TLS_STAT_INC(ptls_tag, tx_packets_ooo, 1);

		/* check if this is the first fragment of a TLS record */
		if (is_start) {
			/* setup TLS static parameters */
			MLX5_SET64(sw_tls_cntx, ptls_tag->crypto_params,
			    param.initial_record_number, rcd_sn);

			/*
			 * NOTE: The sendqueue should have enough room to
			 * carry both the static and the progress parameters
			 * when we get here!
			 */
			mlx5e_tls_send_static_parameters(sq, ptls_tag);
			mlx5e_tls_send_progress_parameters(sq, ptls_tag);

			if (r_mb == SBTLS_MBUF_NO_DATA) {
				mlx5e_tls_send_nop(sq, ptls_tag);
				ptls_tag->expected_seq = mb_seq;
				return (MLX5E_TLS_LOOP);
			}
		}

		MLX5E_TLS_STAT_INC(ptls_tag, tx_bytes_ooo, r_mb->m_pkthdr.len);

		/* setup transmit arguments */
		parg->tisn = ptls_tag->tisn;
		parg->mst = &ptls_tag->tag;

		/* try to send DUMP data */
		if (mlx5e_sq_dump_xmit(sq, parg, &r_mb) != 0) {
			m_freem(r_mb);
			ptls_tag->expected_seq = tcp_seq;
			return (MLX5E_TLS_FAILURE);
		} else {
			ptls_tag->expected_seq = mb_seq;
			return (MLX5E_TLS_LOOP);
		}
	} else {
		MLX5E_TLS_STAT_INC(ptls_tag, tx_packets, 1);
		MLX5E_TLS_STAT_INC(ptls_tag, tx_bytes, mb->m_pkthdr.len);
	}
	ptls_tag->expected_seq += mb->m_pkthdr.len - header_size;

	parg->tisn = ptls_tag->tisn;
	parg->ihs = header_size;
	parg->mst = &ptls_tag->tag;
	return (MLX5E_TLS_CONTINUE);
}

#else

int
mlx5e_tls_init(struct mlx5e_priv *priv)
{

	return (0);
}

void
mlx5e_tls_cleanup(struct mlx5e_priv *priv)
{
	/* NOP */
}

#endif		/* KERN_TLS */
