// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.

#include <linux/debugfs.h>
#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"

struct mlx5e_dump_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_data_seg data;
};

#define MLX5E_KTLS_DUMP_WQEBBS \
	(DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))

static u8
mlx5e_ktls_dumps_num_wqes(struct mlx5e_params *params, unsigned int nfrags,
			  unsigned int sync_len)
{
	/* Given the MTU and sync_len, calculates an upper bound for the
	 * number of DUMP WQEs needed for the TX resync of a record.
	 */
	return nfrags + DIV_ROUND_UP(sync_len, MLX5E_SW2HW_MTU(params, params->sw_mtu));
}

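/* Worst-case number of WQEBBs that must stay free in the SQ so a single SKB's
 * kTLS flow always fits: the static and progress params WQEs, the DUMP WQEs of
 * a full record resync (one per frag plus MTU-sized chunks of the payload),
 * and a fence NOP.
 */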
u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	u16 num_dumps, stop_room = 0;

	if (!mlx5e_is_ktls_tx(mdev))
		return 0;

	num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);

	stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
	stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
	stop_room += num_dumps * mlx5e_stop_room_for_wqe(mdev, MLX5E_KTLS_DUMP_WQEBBS);
	stop_room += 1; /* fence nop */

	return stop_room;
}

static void mlx5e_ktls_set_tisc(struct mlx5_core_dev *mdev, void *tisc)
{
	MLX5_SET(tisc, tisc, tls_en, 1);
	MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn);
	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
}

static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};

	mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));

	return mlx5_core_create_tis(mdev, in, tisn);
}

static int mlx5e_ktls_create_tis_cb(struct mlx5_core_dev *mdev,
				    struct mlx5_async_ctx *async_ctx,
				    u32 *out, int outlen,
				    mlx5_async_cbk_t callback,
				    struct mlx5_async_work *context)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};

	mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));
	MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);

	return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in),
				out, outlen, callback, context);
}

static int mlx5e_ktls_destroy_tis_cb(struct mlx5_core_dev *mdev, u32 tisn,
				     struct mlx5_async_ctx *async_ctx,
				     u32 *out, int outlen,
				     mlx5_async_cbk_t callback,
				     struct mlx5_async_work *context)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};

	MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
	MLX5_SET(destroy_tis_in, in, tisn, tisn);

	return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in),
				out, outlen, callback, context);
}

struct mlx5e_ktls_offload_context_tx {
	/* fast path */
	u32 expected_seq;
	u32 tisn;
	bool ctx_post_pending;
	/* control / resync */
	struct list_head list_node; /* member of the pool */
	union mlx5e_crypto_info crypto_info;
	struct tls_offload_context_tx *tx_ctx;
	struct mlx5_core_dev *mdev;
	struct mlx5e_tls_sw_stats *sw_stats;
	struct mlx5_crypto_dek *dek;
	u8 create_err : 1;
};

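/* Only a pointer to the offload context is stored in the tls_context driver
 * state; the BUILD_BUG_ON below checks that a pointer fits in
 * TLS_DRIVER_STATE_SIZE_TX.
 */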
static void
mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx,
			   struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	struct mlx5e_ktls_offload_context_tx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);

	BUILD_BUG_ON(sizeof(priv_tx) > TLS_DRIVER_STATE_SIZE_TX);

	*ctx = priv_tx;
}

static struct mlx5e_ktls_offload_context_tx *
mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_tx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);

	return *ctx;
}

/* struct for callback API management */
struct mlx5e_async_ctx {
	struct mlx5_async_work context;
	struct mlx5_async_ctx *async_ctx;
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	int err;
	union {
		u32 out_create[MLX5_ST_SZ_DW(create_tis_out)];
		u32 out_destroy[MLX5_ST_SZ_DW(destroy_tis_out)];
	};
};

struct mlx5e_bulk_async_ctx {
	struct mlx5_async_ctx async_ctx;
	DECLARE_FLEX_ARRAY(struct mlx5e_async_ctx, arr);
};

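/* Allocate one shared mlx5_async_ctx plus n per-command work entries, so a
 * bulk of TIS create/destroy commands can be issued asynchronously and then
 * waited for together in mlx5e_bulk_async_cleanup().
 */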
static struct mlx5e_bulk_async_ctx *mlx5e_bulk_async_init(struct mlx5_core_dev *mdev, int n)
{
	struct mlx5e_bulk_async_ctx *bulk_async;
	int sz;
	int i;

	sz = struct_size(bulk_async, arr, n);
	bulk_async = kvzalloc(sz, GFP_KERNEL);
	if (!bulk_async)
		return NULL;

	mlx5_cmd_init_async_ctx(mdev, &bulk_async->async_ctx);

	for (i = 0; i < n; i++)
		bulk_async->arr[i].async_ctx = &bulk_async->async_ctx;

	return bulk_async;
}

static void mlx5e_bulk_async_cleanup(struct mlx5e_bulk_async_ctx *bulk_async)
{
	mlx5_cmd_cleanup_async_ctx(&bulk_async->async_ctx);
	kvfree(bulk_async);
}

static void create_tis_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5e_async_ctx *async =
		container_of(context, struct mlx5e_async_ctx, context);
	struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;

	if (status) {
		async->err = status;
		priv_tx->create_err = 1;
		return;
	}

	priv_tx->tisn = MLX5_GET(create_tis_out, async->out_create, tisn);
}

static void destroy_tis_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5e_async_ctx *async =
		container_of(context, struct mlx5e_async_ctx, context);
	struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;

	kfree(priv_tx);
}

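/* Create one TX offload context and its TIS. With async == NULL the TIS is
 * created synchronously (pool-miss path in pool_pop()); otherwise the create
 * command is issued through the bulk async API and completes in
 * create_tis_callback().
 */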
static struct mlx5e_ktls_offload_context_tx *
mlx5e_tls_priv_tx_init(struct mlx5_core_dev *mdev, struct mlx5e_tls_sw_stats *sw_stats,
		       struct mlx5e_async_ctx *async)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	int err;

	priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL);
	if (!priv_tx)
		return ERR_PTR(-ENOMEM);

	priv_tx->mdev = mdev;
	priv_tx->sw_stats = sw_stats;

	if (!async) {
		err = mlx5e_ktls_create_tis(mdev, &priv_tx->tisn);
		if (err)
			goto err_out;
	} else {
		async->priv_tx = priv_tx;
		err = mlx5e_ktls_create_tis_cb(mdev, async->async_ctx,
					       async->out_create, sizeof(async->out_create),
					       create_tis_callback, &async->context);
		if (err)
			goto err_out;
	}

	return priv_tx;

err_out:
	kfree(priv_tx);
	return ERR_PTR(err);
}

static void mlx5e_tls_priv_tx_cleanup(struct mlx5e_ktls_offload_context_tx *priv_tx,
				      struct mlx5e_async_ctx *async)
{
	if (priv_tx->create_err) {
		kfree(priv_tx);
		return;
	}
	async->priv_tx = priv_tx;
	mlx5e_ktls_destroy_tis_cb(priv_tx->mdev, priv_tx->tisn,
				  async->async_ctx,
				  async->out_destroy, sizeof(async->out_destroy),
				  destroy_tis_callback, &async->context);
}

static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
					   struct list_head *list, int size)
{
	struct mlx5e_ktls_offload_context_tx *obj, *n;
	struct mlx5e_bulk_async_ctx *bulk_async;
	int i;

	bulk_async = mlx5e_bulk_async_init(mdev, size);
	if (!bulk_async)
		return;

	i = 0;
	list_for_each_entry_safe(obj, n, list, list_node) {
		mlx5e_tls_priv_tx_cleanup(obj, &bulk_async->arr[i]);
		i++;
	}

	mlx5e_bulk_async_cleanup(bulk_async);
}

/* Recycling pool API */

#define MLX5E_TLS_TX_POOL_BULK (16)
#define MLX5E_TLS_TX_POOL_HIGH (4 * 1024)
#define MLX5E_TLS_TX_POOL_LOW (MLX5E_TLS_TX_POOL_HIGH / 4)

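/* TIS recycling pool. Contexts are created and destroyed in bulks of
 * MLX5E_TLS_TX_POOL_BULK by background works: the pool is refilled when it
 * drains to MLX5E_TLS_TX_POOL_LOW (or runs empty) and trimmed once it grows to
 * MLX5E_TLS_TX_POOL_HIGH.
 */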
struct mlx5e_tls_tx_pool {
	struct mlx5_core_dev *mdev;
	struct mlx5e_tls_sw_stats *sw_stats;
	struct mutex lock; /* Protects access to the pool */
	struct list_head list;
	size_t size;

	struct workqueue_struct *wq;
	struct work_struct create_work;
	struct work_struct destroy_work;
};

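/* Refill work: asynchronously create a bulk of TIS contexts and splice them
 * into the pool. It re-queues itself while the pool is still at or below the
 * LOW watermark, and drops the bulk if the pool has meanwhile grown close to
 * HIGH.
 */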
static void create_work(struct work_struct *work)
{
	struct mlx5e_tls_tx_pool *pool =
		container_of(work, struct mlx5e_tls_tx_pool, create_work);
	struct mlx5e_ktls_offload_context_tx *obj;
	struct mlx5e_bulk_async_ctx *bulk_async;
	LIST_HEAD(local_list);
	int i, j, err = 0;

	bulk_async = mlx5e_bulk_async_init(pool->mdev, MLX5E_TLS_TX_POOL_BULK);
	if (!bulk_async)
		return;

	for (i = 0; i < MLX5E_TLS_TX_POOL_BULK; i++) {
		obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, &bulk_async->arr[i]);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}
		list_add(&obj->list_node, &local_list);
	}

	for (j = 0; j < i; j++) {
		struct mlx5e_async_ctx *async = &bulk_async->arr[j];

		if (!err && async->err)
			err = async->err;
	}
	atomic64_add(i, &pool->sw_stats->tx_tls_pool_alloc);
	mlx5e_bulk_async_cleanup(bulk_async);
	if (err)
		goto err_out;

	mutex_lock(&pool->lock);
	if (pool->size + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH) {
		mutex_unlock(&pool->lock);
		goto err_out;
	}
	list_splice(&local_list, &pool->list);
	pool->size += MLX5E_TLS_TX_POOL_BULK;
	if (pool->size <= MLX5E_TLS_TX_POOL_LOW)
		queue_work(pool->wq, work);
	mutex_unlock(&pool->lock);
	return;

err_out:
	mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, i);
	atomic64_add(i, &pool->sw_stats->tx_tls_pool_free);
}

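/* Trim work: destroy one bulk of contexts once the pool has reached the HIGH
 * watermark, re-queueing itself while the pool is still at or above HIGH.
 */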
static void destroy_work(struct work_struct *work)
{
	struct mlx5e_tls_tx_pool *pool =
		container_of(work, struct mlx5e_tls_tx_pool, destroy_work);
	struct mlx5e_ktls_offload_context_tx *obj;
	LIST_HEAD(local_list);
	int i = 0;

	mutex_lock(&pool->lock);
	if (pool->size < MLX5E_TLS_TX_POOL_HIGH) {
		mutex_unlock(&pool->lock);
		return;
	}

	list_for_each_entry(obj, &pool->list, list_node)
		if (++i == MLX5E_TLS_TX_POOL_BULK)
			break;

	list_cut_position(&local_list, &pool->list, &obj->list_node);
	pool->size -= MLX5E_TLS_TX_POOL_BULK;
	if (pool->size >= MLX5E_TLS_TX_POOL_HIGH)
		queue_work(pool->wq, work);
	mutex_unlock(&pool->lock);

	mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, MLX5E_TLS_TX_POOL_BULK);
	atomic64_add(MLX5E_TLS_TX_POOL_BULK, &pool->sw_stats->tx_tls_pool_free);
}

static struct mlx5e_tls_tx_pool *mlx5e_tls_tx_pool_init(struct mlx5_core_dev *mdev,
							struct mlx5e_tls_sw_stats *sw_stats)
{
	struct mlx5e_tls_tx_pool *pool;

	BUILD_BUG_ON(MLX5E_TLS_TX_POOL_LOW + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH);

	pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->wq = create_singlethread_workqueue("mlx5e_tls_tx_pool");
	if (!pool->wq)
		goto err_free;

	INIT_LIST_HEAD(&pool->list);
	mutex_init(&pool->lock);

	INIT_WORK(&pool->create_work, create_work);
	INIT_WORK(&pool->destroy_work, destroy_work);

	pool->mdev = mdev;
	pool->sw_stats = sw_stats;

	return pool;

err_free:
	kvfree(pool);
	return NULL;
}

static void mlx5e_tls_tx_pool_list_cleanup(struct mlx5e_tls_tx_pool *pool)
{
	while (pool->size > MLX5E_TLS_TX_POOL_BULK) {
		struct mlx5e_ktls_offload_context_tx *obj;
		LIST_HEAD(local_list);
		int i = 0;

		list_for_each_entry(obj, &pool->list, list_node)
			if (++i == MLX5E_TLS_TX_POOL_BULK)
				break;

		list_cut_position(&local_list, &pool->list, &obj->list_node);
		mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, MLX5E_TLS_TX_POOL_BULK);
		atomic64_add(MLX5E_TLS_TX_POOL_BULK, &pool->sw_stats->tx_tls_pool_free);
		pool->size -= MLX5E_TLS_TX_POOL_BULK;
	}
	if (pool->size) {
		mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &pool->list, pool->size);
		atomic64_add(pool->size, &pool->sw_stats->tx_tls_pool_free);
	}
}

static void mlx5e_tls_tx_pool_cleanup(struct mlx5e_tls_tx_pool *pool)
{
	mlx5e_tls_tx_pool_list_cleanup(pool);
	destroy_workqueue(pool->wq);
	kvfree(pool);
}

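/* pool_push() returns a context to the pool and schedules trimming when the
 * HIGH watermark is hit. pool_pop() hands out a recycled context, scheduling a
 * refill at the LOW watermark; if the pool is empty it falls back to a
 * blocking TIS creation for the current connection.
 */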
static void pool_push(struct mlx5e_tls_tx_pool *pool, struct mlx5e_ktls_offload_context_tx *obj)
{
	mutex_lock(&pool->lock);
	list_add(&obj->list_node, &pool->list);
	if (++pool->size == MLX5E_TLS_TX_POOL_HIGH)
		queue_work(pool->wq, &pool->destroy_work);
	mutex_unlock(&pool->lock);
}

static struct mlx5e_ktls_offload_context_tx *pool_pop(struct mlx5e_tls_tx_pool *pool)
{
	struct mlx5e_ktls_offload_context_tx *obj;

	mutex_lock(&pool->lock);
	if (unlikely(pool->size == 0)) {
		/* pool is empty:
		 * - trigger the populating work, and
		 * - serve the current context via the regular blocking api.
		 */
		queue_work(pool->wq, &pool->create_work);
		mutex_unlock(&pool->lock);
		obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, NULL);
		if (!IS_ERR(obj))
			atomic64_inc(&pool->sw_stats->tx_tls_pool_alloc);
		return obj;
	}

	obj = list_first_entry(&pool->list, struct mlx5e_ktls_offload_context_tx,
			       list_node);
	list_del(&obj->list_node);
	if (--pool->size == MLX5E_TLS_TX_POOL_LOW)
		queue_work(pool->wq, &pool->create_work);
	mutex_unlock(&pool->lock);
	return obj;
}

/* End of pool API */

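/* Set up TX kTLS offload for a connection: take a recycled context from the
 * pool, program the key into a DEK, and store the context in the tls_context
 * driver state. The static/progress params WQEs are posted lazily from the
 * datapath once ctx_post_pending is observed.
 */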
int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
		      struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_tls_tx_pool *pool;
	struct tls_context *tls_ctx;
	struct mlx5_crypto_dek *dek;
	struct mlx5e_priv *priv;
	int err;

	tls_ctx = tls_get_ctx(sk);
	priv = netdev_priv(netdev);
	pool = priv->tls->tx_pool;

	priv_tx = pool_pop(pool);
	if (IS_ERR(priv_tx))
		return PTR_ERR(priv_tx);

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		priv_tx->crypto_info.crypto_info_128 =
			*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	case TLS_CIPHER_AES_GCM_256:
		priv_tx->crypto_info.crypto_info_256 =
			*(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
		break;
	default:
		WARN_ONCE(1, "Unsupported cipher type %u\n",
			  crypto_info->cipher_type);
		err = -EOPNOTSUPP;
		goto err_pool_push;
	}

	dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
	if (IS_ERR(dek)) {
		err = PTR_ERR(dek);
		goto err_pool_push;
	}

	priv_tx->dek = dek;
	priv_tx->expected_seq = start_offload_tcp_sn;
	priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx);

	mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);

	priv_tx->ctx_post_pending = true;
	atomic64_inc(&priv_tx->sw_stats->tx_tls_ctx);

	return 0;

err_pool_push:
	pool_push(pool, priv_tx);
	return err;
}

void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_tls_tx_pool *pool;
	struct mlx5e_priv *priv;

	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
	priv = netdev_priv(netdev);
	pool = priv->tls->tx_pool;

	atomic64_inc(&priv_tx->sw_stats->tx_tls_del);
	mlx5_ktls_destroy_key(priv->tls->dek_pool, priv_tx->dek);
	pool_push(pool, priv_tx);
}

static void tx_fill_wi(struct mlx5e_txqsq *sq,
		       u16 pi, u8 num_wqebbs, u32 num_bytes,
		       struct page *page)
{
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	*wi = (struct mlx5e_tx_wqe_info) {
		.num_wqebbs = num_wqebbs,
		.num_bytes  = num_bytes,
		.resync_dump_frag_page = page,
	};
}

static bool
mlx5e_ktls_tx_offload_test_and_clear_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	bool ret = priv_tx->ctx_post_pending;

	priv_tx->ctx_post_pending = false;

	return ret;
}

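/* Static params bind the crypto state (crypto_info and DEK id) to the TIS,
 * while progress params initialize the device's record tracking state. They
 * are posted before the first data of a connection and again on resync.
 */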
static void
post_static_params(struct mlx5e_txqsq *sq,
		   struct mlx5e_ktls_offload_context_tx *priv_tx,
		   bool fence)
{
	struct mlx5e_set_tls_static_params_wqe *wqe;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
	pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_tx->crypto_info,
				       priv_tx->tisn,
				       mlx5_crypto_dek_get_id(priv_tx->dek),
				       0, fence, TLS_OFFLOAD_CTX_DIR_TX);
	tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
	sq->pc += num_wqebbs;
}

static void
post_progress_params(struct mlx5e_txqsq *sq,
		     struct mlx5e_ktls_offload_context_tx *priv_tx,
		     bool fence)
{
	struct mlx5e_set_tls_progress_params_wqe *wqe;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
	pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_tx->tisn, fence, 0,
					 TLS_OFFLOAD_CTX_DIR_TX);
	tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
	sq->pc += num_wqebbs;
}

static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

	tx_fill_wi(sq, pi, 1, 0, NULL);

	mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
}

static void
mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
			      struct mlx5e_ktls_offload_context_tx *priv_tx,
			      bool skip_static_post, bool fence_first_post)
{
	bool progress_fence = skip_static_post || !fence_first_post;

	if (!skip_static_post)
		post_static_params(sq, priv_tx, fence_first_post);

	post_progress_params(sq, priv_tx, progress_fence);
	tx_post_fence_nop(sq);
}

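/* TX resync support: when an out-of-order SKB arrives, tx_sync_info_get()
 * looks up the TLS record that the sequence number belongs to and collects the
 * already-transmitted part of that record (sync_len bytes spread over frags),
 * taking a page reference on each frag while holding the record lock.
 */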
struct tx_sync_info {
	u64 rcd_sn;
	u32 sync_len;
	int nr_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

enum mlx5e_ktls_sync_retval {
	MLX5E_KTLS_SYNC_DONE,
	MLX5E_KTLS_SYNC_FAIL,
	MLX5E_KTLS_SYNC_SKIP_NO_DATA,
};

static enum mlx5e_ktls_sync_retval
tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
		 u32 tcp_seq, int datalen, struct tx_sync_info *info)
{
	struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
	enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
	struct tls_record_info *record;
	int remaining, i = 0;
	unsigned long flags;
	bool ends_before;

	spin_lock_irqsave(&tx_ctx->lock, flags);
	record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);

	if (unlikely(!record)) {
		ret = MLX5E_KTLS_SYNC_FAIL;
		goto out;
	}

	/* There are the following cases:
	 * 1. packet ends before start marker: bypass offload.
	 * 2. packet starts before start marker and ends after it: drop,
	 *    not supported, breaks contract with kernel.
	 * 3. packet ends before tls record info starts: drop,
	 *    this packet was already acknowledged and its record info
	 *    was released.
	 */
	ends_before = before(tcp_seq + datalen - 1, tls_record_start_seq(record));

	if (unlikely(tls_record_is_start_marker(record))) {
		ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
		goto out;
	} else if (ends_before) {
		ret = MLX5E_KTLS_SYNC_FAIL;
		goto out;
	}

	info->sync_len = tcp_seq - tls_record_start_seq(record);
	remaining = info->sync_len;
	while (remaining > 0) {
		skb_frag_t *frag = &record->frags[i];

		get_page(skb_frag_page(frag));
		remaining -= skb_frag_size(frag);
		info->frags[i++] = *frag;
	}
	/* reduce the part which will be sent with the original SKB */
	if (remaining < 0)
		skb_frag_size_add(&info->frags[i - 1], remaining);
	info->nr_frags = i;
out:
	spin_unlock_irqrestore(&tx_ctx->lock, flags);
	return ret;
}

static void
tx_post_resync_params(struct mlx5e_txqsq *sq,
		      struct mlx5e_ktls_offload_context_tx *priv_tx,
		      u64 rcd_sn)
{
	__be64 rn_be = cpu_to_be64(rcd_sn);
	bool skip_static_post;
	u16 rec_seq_sz;
	char *rec_seq;

	switch (priv_tx->crypto_info.crypto_info.cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info.crypto_info_128;

		rec_seq = info->rec_seq;
		rec_seq_sz = sizeof(info->rec_seq);
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *info = &priv_tx->crypto_info.crypto_info_256;

		rec_seq = info->rec_seq;
		rec_seq_sz = sizeof(info->rec_seq);
		break;
	}
	default:
		WARN_ONCE(1, "Unsupported cipher type %u\n",
			  priv_tx->crypto_info.crypto_info.cipher_type);
		return;
	}

	skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
	if (!skip_static_post)
		memcpy(rec_seq, &rn_be, rec_seq_sz);

	mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
}

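/* Post a single DUMP WQE carrying one frag of previously-sent record data so
 * the device can rebuild the crypto state of the record. The frag is
 * DMA-mapped here and unmapped (and its page reference dropped) on completion
 * in mlx5e_ktls_tx_handle_resync_dump_comp().
 */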
static int
tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_dump_wqe *wqe;
	dma_addr_t dma_addr = 0;
	u16 ds_cnt;
	int fsz;
	u16 pi;

	BUILD_BUG_ON(MLX5E_KTLS_DUMP_WQEBBS != 1);
	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	wqe = MLX5E_TLS_FETCH_DUMP_WQE(sq, pi);

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;

	cseg = &wqe->ctrl;
	dseg = &wqe->data;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8)  | MLX5_OPCODE_DUMP);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	cseg->tis_tir_num      = cpu_to_be32(tisn << 8);

	fsz = skb_frag_size(frag);
	dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
				    DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
		return -ENOMEM;

	dseg->addr       = cpu_to_be64(dma_addr);
	dseg->lkey       = sq->mkey_be;
	dseg->byte_count = cpu_to_be32(fsz);
	mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);

	tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
	sq->pc += MLX5E_KTLS_DUMP_WQEBBS;

	return 0;
}

void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
					   struct mlx5e_tx_wqe_info *wi,
					   u32 *dma_fifo_cc)
{
	struct mlx5e_sq_stats *stats;
	struct mlx5e_sq_dma *dma;

	dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
	stats = sq->stats;

	mlx5e_tx_dma_unmap(sq->pdev, dma);
	put_page(wi->resync_dump_frag_page);
	stats->tls_dump_packets++;
	stats->tls_dump_bytes += wi->num_bytes;
}

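/* Handle an SKB whose TCP sequence does not match the expected one: re-post
 * the params WQEs with the record sequence number, then DUMP the part of the
 * record that was already sent, splitting each frag into hw_mtu-sized pieces.
 * Extra page references are taken so that every DUMP WQE holds its own
 * reference until completion.
 */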
static enum mlx5e_ktls_sync_retval
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
			 struct mlx5e_txqsq *sq,
			 int datalen,
			 u32 seq)
{
	enum mlx5e_ktls_sync_retval ret;
	struct tx_sync_info info = {};
	int i;

	ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
	if (unlikely(ret != MLX5E_KTLS_SYNC_DONE))
		/* We might get here with ret == FAIL if a retransmission
		 * reaches the driver after the relevant record is acked.
		 * It should be safe to drop the packet in this case
		 */
		return ret;

	tx_post_resync_params(sq, priv_tx, info.rcd_sn);

	for (i = 0; i < info.nr_frags; i++) {
		unsigned int orig_fsz, frag_offset = 0, n = 0;
		skb_frag_t *f = &info.frags[i];

		orig_fsz = skb_frag_size(f);

		do {
			unsigned int fsz;

			n++;
			fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
			skb_frag_size_set(f, fsz);
			if (tx_post_resync_dump(sq, f, priv_tx->tisn)) {
				page_ref_add(skb_frag_page(f), n - 1);
				goto err_out;
			}

			skb_frag_off_add(f, fsz);
			frag_offset += fsz;
		} while (frag_offset < orig_fsz);

		page_ref_add(skb_frag_page(f), n - 1);
	}

	return MLX5E_KTLS_SYNC_DONE;

err_out:
	for (; i < info.nr_frags; i++)
		/* The put_page() here undoes the page ref obtained in tx_sync_info_get().
		 * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be
		 * released only upon their completions (or in mlx5e_free_txqsq_descs,
		 * if channel closes).
		 */
		put_page(skb_frag_page(&info.frags[i]));

	return MLX5E_KTLS_SYNC_FAIL;
}

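/* Datapath entry point for a TX SKB on an offloaded TLS socket. Posts the
 * pending params WQEs on first use of the connection, runs the resync flow for
 * out-of-order sequences, and passes the TIS number back to the caller via
 * state->tls_tisn. Returns false if the SKB must be dropped.
 */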
bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
			      struct sk_buff *skb,
			      struct mlx5e_accel_tx_tls_state *state)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_sq_stats *stats = sq->stats;
	struct net_device *tls_netdev;
	struct tls_context *tls_ctx;
	int datalen;
	u32 seq;

	datalen = skb->len - skb_tcp_all_headers(skb);
	if (!datalen)
		return true;

	mlx5e_tx_mpwqe_ensure_complete(sq);

	tls_ctx = tls_get_ctx(skb->sk);
	tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
	/* Don't WARN on NULL: if tls_device_down is running in parallel,
	 * netdev might become NULL, even if tls_is_skb_tx_device_offloaded was
	 * true. Rather continue processing this packet.
	 */
	if (WARN_ON_ONCE(tls_netdev && tls_netdev != netdev))
		goto err_out;

	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);

	if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx)))
		mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);

	seq = ntohl(tcp_hdr(skb)->seq);
	if (unlikely(priv_tx->expected_seq != seq)) {
		enum mlx5e_ktls_sync_retval ret =
			mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);

		stats->tls_ooo++;

		switch (ret) {
		case MLX5E_KTLS_SYNC_DONE:
			break;
		case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
			stats->tls_skip_no_sync_data++;
			if (likely(!skb->decrypted))
				goto out;
			WARN_ON_ONCE(1);
			goto err_out;
		case MLX5E_KTLS_SYNC_FAIL:
			stats->tls_drop_no_sync_data++;
			goto err_out;
		}
	}

	priv_tx->expected_seq = seq + datalen;

	state->tls_tisn = priv_tx->tisn;

	stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	stats->tls_encrypted_bytes   += datalen;

out:
	return true;

err_out:
	dev_kfree_skb_any(skb);
	return false;
}

static void mlx5e_tls_tx_debugfs_init(struct mlx5e_tls *tls,
				      struct dentry *dfs_root)
{
	if (IS_ERR_OR_NULL(dfs_root))
		return;

	tls->debugfs.dfs_tx = debugfs_create_dir("tx", dfs_root);

	debugfs_create_size_t("pool_size", 0400, tls->debugfs.dfs_tx,
			      &tls->tx_pool->size);
}

int mlx5e_ktls_init_tx(struct mlx5e_priv *priv)
{
	struct mlx5_crypto_dek_pool *dek_pool;
	struct mlx5e_tls *tls = priv->tls;
	int err;

	if (!mlx5e_is_ktls_device(priv->mdev))
		return 0;

	/* The DEK pool may be used by TX, RX, or both, but it must be created
	 * here to avoid a firmware syndrome when doing devlink reload.
	 */
	dek_pool = mlx5_crypto_dek_pool_create(priv->mdev, MLX5_ACCEL_OBJ_TLS_KEY);
	if (IS_ERR(dek_pool))
		return PTR_ERR(dek_pool);
	tls->dek_pool = dek_pool;

	if (!mlx5e_is_ktls_tx(priv->mdev))
		return 0;

	priv->tls->tx_pool = mlx5e_tls_tx_pool_init(priv->mdev, &priv->tls->sw_stats);
	if (!priv->tls->tx_pool) {
		err = -ENOMEM;
		goto err_tx_pool_init;
	}

	mlx5e_tls_tx_debugfs_init(tls, tls->debugfs.dfs);

	return 0;

err_tx_pool_init:
	mlx5_crypto_dek_pool_destroy(dek_pool);
	return err;
}

void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv)
{
	if (!mlx5e_is_ktls_tx(priv->mdev))
		goto dek_pool_destroy;

	debugfs_remove_recursive(priv->tls->debugfs.dfs_tx);
	priv->tls->debugfs.dfs_tx = NULL;

	mlx5e_tls_tx_pool_cleanup(priv->tls->tx_pool);
	priv->tls->tx_pool = NULL;

dek_pool_destroy:
	if (mlx5e_is_ktls_device(priv->mdev))
		mlx5_crypto_dek_pool_destroy(priv->tls->dek_pool);
}
959