/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <crypto/aead.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include "ipsec.h"
#include "ipsec_rxtx.h"
#include "en.h"
#include "esw/ipsec_fs.h"

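/*
 * TX syndrome values for offloaded ESP packets; presumably consumed by
 * firmware/hardware to distinguish a plain crypto offload from one
 * combined with TCP LSO.
 */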
enum {
	MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8,
	MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP = 0x9,
};

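/*
 * The stack has already appended the ESP trailer (padding, pad-length
 * and next-header bytes, plus room for the ICV), but the device inserts
 * its own trailer on transmit (MLX5_ETH_WQE_INSERT_TRAILER). Strip the
 * software trailer and fix up the IP length fields and, for IPv4, the
 * header checksum.
 */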
static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
{
	unsigned int alen = crypto_aead_authsize(x->data);
	struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);
	struct iphdr *ipv4hdr = ip_hdr(skb);
	unsigned int trailer_len;
	u8 plen;
	int ret;

	/* The pad-length byte sits just in front of the ICV. */
	ret = skb_copy_bits(skb, skb->len - alen - 2, &plen, 1);
	if (unlikely(ret))
		return ret;

	/* Trailer = padding + pad-length/next-header bytes + ICV. */
	trailer_len = alen + plen + 2;

	ret = pskb_trim(skb, skb->len - trailer_len);
	if (unlikely(ret))
		return ret;
	if (skb->protocol == htons(ETH_P_IP)) {
		ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len);
		ip_send_check(ipv4hdr);
	} else {
		ipv6hdr->payload_len = htons(ntohs(ipv6hdr->payload_len) -
					     trailer_len);
	}
	return 0;
}

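/*
 * Program the software parser (SWP) offsets in the Ethernet segment so
 * the device can locate the L3/L4 headers hidden behind the ESP header.
 * All SWP offsets are expressed in 16-bit words, hence the "/ 2".
 */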
static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
				struct mlx5_wqe_eth_seg *eseg, u8 mode,
				struct xfrm_offload *xo)
{
	/* Tunnel Mode:
	 * SWP:      OutL3       InL3  InL4
	 * Pkt: MAC  IP     ESP  IP    L4
	 *
	 * Transport Mode:
	 * SWP:      OutL3       OutL4
	 * Pkt: MAC  IP     ESP  L4
	 *
	 * Tunnel(VXLAN TCP/UDP) over Transport Mode
	 * SWP:      OutL3                   InL3  InL4
	 * Pkt: MAC  IP     ESP  UDP  VXLAN  IP    L4
	 */

	/* Shared settings */
	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	if (skb->protocol == htons(ETH_P_IPV6))
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;

	/* Tunnel mode */
	if (mode == XFRM_MODE_TUNNEL) {
		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
		if (xo->proto == IPPROTO_IPV6)
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;

		switch (xo->inner_ipproto) {
		case IPPROTO_UDP:
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
			fallthrough;
		case IPPROTO_TCP:
			/* IP | ESP | IP | [TCP | UDP] */
			eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
			break;
		default:
			break;
		}
		return;
	}

	/* Transport mode */
	if (mode != XFRM_MODE_TRANSPORT)
		return;

	if (!xo->inner_ipproto) {
		switch (xo->proto) {
		case IPPROTO_UDP:
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
			fallthrough;
		case IPPROTO_TCP:
			/* IP | ESP | [TCP | UDP] */
			eseg->swp_outer_l4_offset = skb_inner_transport_offset(skb) / 2;
			break;
		default:
			break;
		}
	} else {
		/* Tunnel(VXLAN TCP/UDP) over Transport Mode */
		switch (xo->inner_ipproto) {
		case IPPROTO_UDP:
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
			fallthrough;
		case IPPROTO_TCP:
			eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
			/* csum_start points at the inner L4 header */
			eseg->swp_inner_l4_offset =
				(skb->csum_start + skb->head - skb->data) / 2;
			if (inner_ip_hdr(skb)->version == 6)
				eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
			break;
		default:
			break;
		}
	}
}

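/*
 * ESN variant of IV setup: the 64-bit extended sequence number doubles
 * as the IV. For GSO, oseq has already advanced past the whole batch;
 * if the batch wrapped the low 32 bits of the sequence space, the
 * leading segments still belong to the previous high-word epoch, so
 * seq.hi - 1 is used for them.
 */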
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
			    struct xfrm_offload *xo)
{
	struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
	__u32 oseq = replay_esn->oseq;
	int iv_offset;
	__be64 seqno;
	u32 seq_hi;

	if (unlikely(skb_is_gso(skb) && oseq < MLX5E_IPSEC_ESN_SCOPE_MID &&
		     MLX5E_IPSEC_ESN_SCOPE_MID < (oseq - skb_shinfo(skb)->gso_segs))) {
		seq_hi = xo->seq.hi - 1;
	} else {
		seq_hi = xo->seq.hi;
	}

	/* Place the SN in the IV field */
	seqno = cpu_to_be64(xo->seq.low + ((u64)seq_hi << 32));
	iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
	skb_store_bits(skb, iv_offset, &seqno, 8);
}

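/* Non-ESN variant: the plain 64-bit sequence number becomes the IV. */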
void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
			struct xfrm_offload *xo)
{
	int iv_offset;
	__be64 seqno;

	/* Place the SN in the IV field */
	seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
	iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
	skb_store_bits(skb, iv_offset, &seqno, 8);
}

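/*
 * Emit the ESP trailer computed by mlx5e_ipsec_set_state() as a WQE
 * inline segment: byte_count carries the trailer length tagged with
 * MLX5_INLINE_SEG, and esp_output_fill_trailer() writes the padding,
 * pad-length and next-header bytes.
 */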
void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
			       struct mlx5e_accel_tx_ipsec_state *ipsec_st,
			       struct mlx5_wqe_inline_seg *inlseg)
{
	inlseg->byte_count = cpu_to_be32(ipsec_st->tailen | MLX5_INLINE_SEG);
	esp_output_fill_trailer((u8 *)inlseg->data, 0, ipsec_st->plen, ipsec_st->xo->proto);
}

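/*
 * Precompute the ESP trailer geometry: the payload plus the two
 * pad-length/next-header bytes is rounded up to the cipher block size
 * (itself rounded up to 4 bytes), giving plen; the ICV length is added
 * on top to get tailen. Both are stashed in ipsec_st for the WQE build.
 */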
static int mlx5e_ipsec_set_state(struct mlx5e_priv *priv,
				 struct sk_buff *skb,
				 struct xfrm_state *x,
				 struct xfrm_offload *xo,
				 struct mlx5e_accel_tx_ipsec_state *ipsec_st)
{
	unsigned int blksize, clen, alen, plen;
	struct crypto_aead *aead;
	unsigned int tailen;

	ipsec_st->x = x;
	ipsec_st->xo = xo;
	aead = x->data;
	alen = crypto_aead_authsize(aead);
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	clen = ALIGN(skb->len + 2, blksize);
	plen = max_t(u32, clen - skb->len, 4);
	tailen = plen + alen;
	ipsec_st->plen = plen;
	ipsec_st->tailen = tailen;

	return 0;
}

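/*
 * Build the Ethernet segment for an offloaded packet: program the SWP
 * offsets, mark the packet as IPsec in the flow-table metadata, and
 * request hardware trailer insertion, with the trailer association
 * chosen by L4 protocol and ESP-in-UDP encapsulation.
 */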
void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
			       struct mlx5_wqe_eth_seg *eseg)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct xfrm_encap_tmpl *encap;
	struct xfrm_state *x;
	struct sec_path *sp;
	u8 l3_proto;

	sp = skb_sec_path(skb);
	if (unlikely(sp->len != 1))
		return;

	x = xfrm_input_state(skb);
	if (unlikely(!x))
		return;

	if (unlikely(!x->xso.offload_handle ||
		     (skb->protocol != htons(ETH_P_IP) &&
		      skb->protocol != htons(ETH_P_IPV6))))
		return;

	mlx5e_ipsec_set_swp(skb, eseg, x->props.mode, xo);

	l3_proto = (x->props.family == AF_INET) ?
		   ((struct iphdr *)skb_network_header(skb))->protocol :
		   ((struct ipv6hdr *)skb_network_header(skb))->nexthdr;

	eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
	eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER);
	encap = x->encap;
	if (!encap) {
		eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
			cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC) :
			cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC);
	} else if (encap->encap_type == UDP_ENCAP_ESPINUDP) {
		eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
			cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC) :
			cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC);
	}
}

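/*
 * TX datapath entry point: validate that the skb carries exactly one
 * offloaded xfrm state, strip the software trailer (non-GSO only),
 * write the IV and record the trailer geometry for the WQE build.
 * Returns false and frees the skb if the packet cannot be offloaded.
 */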
bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
			       struct sk_buff *skb,
			       struct mlx5e_accel_tx_ipsec_state *ipsec_st)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct xfrm_state *x;
	struct sec_path *sp;

	sp = skb_sec_path(skb);
	if (unlikely(sp->len != 1)) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_bundle);
		goto drop;
	}

	x = xfrm_input_state(skb);
	if (unlikely(!x)) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_no_state);
		goto drop;
	}

	if (unlikely(!x->xso.offload_handle ||
		     (skb->protocol != htons(ETH_P_IP) &&
		      skb->protocol != htons(ETH_P_IPV6)))) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_not_ip);
		goto drop;
	}

	if (!skb_is_gso(skb) &&
	    unlikely(mlx5e_ipsec_remove_trailer(skb, x))) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_trailer);
		goto drop;
	}

	sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
	sa_entry->set_iv_op(skb, x, xo);
	mlx5e_ipsec_set_state(priv, skb, x, xo, ipsec_st);

	return true;

drop:
	kfree_skb(skb);
	return false;
}

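/*
 * RX datapath: recover the SA handle from the CQE metadata, look the SA
 * up in the SADB and attach it to the skb's sec_path, then flag the
 * crypto as already done so the xfrm layer skips software decryption.
 */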
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
				       struct sk_buff *skb,
				       u32 ipsec_meta_data)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_ipsec *ipsec = priv->ipsec;
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct xfrm_offload *xo;
	struct sec_path *sp;
	u32 sa_handle;

	sa_handle = MLX5_IPSEC_METADATA_HANDLE(ipsec_meta_data);
	sp = secpath_set(skb);
	if (unlikely(!sp)) {
		atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
		return;
	}

	rcu_read_lock();
	sa_entry = xa_load(&ipsec->sadb, sa_handle);
	if (unlikely(!sa_entry)) {
		rcu_read_unlock();
		atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
		return;
	}
	xfrm_state_hold(sa_entry->x);
	rcu_read_unlock();

	sp->xvec[sp->len++] = sa_entry->x;
	sp->olen++;

	xo = xfrm_offload(skb);
	xo->flags = CRYPTO_DONE;
	xo->status = CRYPTO_SUCCESS;
}

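/*
 * Uplink representor RX: translate the mapped id carried in the packet
 * metadata into the IPsec object id expected by the rest of the RX
 * handling.
 */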
int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metadata)
{
	struct mlx5e_ipsec *ipsec = priv->ipsec;
	u32 ipsec_obj_id;
	int err;

	if (!ipsec || !ipsec->is_uplink_rep)
		return -EINVAL;

	err = mlx5_esw_ipsec_rx_ipsec_obj_id_search(priv, id, &ipsec_obj_id);
	if (err) {
		atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
		return err;
	}

	*metadata = ipsec_obj_id;
	return 0;
}