1/*
2 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses.  You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 *     Redistribution and use in source and binary forms, with or
11 *     without modification, are permitted provided that the following
12 *     conditions are met:
13 *
14 *      - Redistributions of source code must retain the above
15 *        copyright notice, this list of conditions and the following
16 *        disclaimer.
17 *
18 *      - Redistributions in binary form must reproduce the above
19 *        copyright notice, this list of conditions and the following
20 *        disclaimer in the documentation and/or other materials
21 *        provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#ifndef __MLX5E_EN_ACCEL_H__
35#define __MLX5E_EN_ACCEL_H__
36
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/macsec.h"
#include "en.h"
#include "en/txrx.h"
45
46#if IS_ENABLED(CONFIG_GENEVE)
47#include <net/geneve.h>
48
static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
{
	/* GENEVE TX offload is built on the SW parser (SWP), which lets the
	 * driver describe the tunnel header layout to HW; allow it only when
	 * the device supports SWP in the TX path.
	 */
	return mlx5_tx_swp_supported(mdev);
}
53
/* Fill the SW parser (SWP) offsets in @eseg for a GENEVE-encapsulated skb.
 *
 * Parses the outer L3/L4 headers; if the packet is not UDP destined to the
 * GENEVE port, the eth segment is left untouched.  @ihs is the inline header
 * size: when non-zero and a VLAN tag is present, the SWP offsets are shifted
 * to account for the VLAN header the driver inlines.
 */
static inline void
mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
	struct mlx5e_swp_spec swp_spec = {};
	unsigned int offset = 0;
	__be16 l3_proto;
	u8 l4_proto;

	/* Outer L3: only IPv4/IPv6 (possibly VLAN-tagged) can carry GENEVE. */
	l3_proto = vlan_get_protocol(skb);
	switch (l3_proto) {
	case htons(ETH_P_IP):
		l4_proto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		/* Walk past IPv6 extension headers to the transport protocol. */
		l4_proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
		break;
	default:
		return;
	}

	/* GENEVE is UDP-encapsulated on a well-known destination port. */
	if (l4_proto != IPPROTO_UDP ||
	    udp_hdr(skb)->dest != cpu_to_be16(GENEVE_UDP_PORT))
		return;
	swp_spec.l3_proto = l3_proto;
	swp_spec.l4_proto = l4_proto;
	swp_spec.is_tun = true;
	/* Inner headers: classify the encapsulated packet by its IP version. */
	if (inner_ip_hdr(skb)->version == 6) {
		swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
		swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
	} else {
		swp_spec.tun_l3_proto = htons(ETH_P_IP);
		swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
	}

	mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
	/* Inlined VLAN tag shifts every header; adjust SWP offsets to match. */
	if (skb_vlan_tag_present(skb) && ihs)
		mlx5e_eseg_swp_offsets_add_vlan(eseg);
}
92
93#else
static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
{
	/* GENEVE support is not compiled in; never offload GENEVE TX. */
	return false;
}
98
99#endif /* CONFIG_GENEVE */
100
101static inline void
102mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
103{
104	int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
105
106	udp_hdr(skb)->len = htons(payload_len);
107}
108
/* Per-packet TX offload state, filled by mlx5e_accel_tx_begin() and consumed
 * later in the WQE build (see mlx5e_accel_tx_finish()).
 */
struct mlx5e_accel_tx_state {
#ifdef CONFIG_MLX5_EN_TLS
	struct mlx5e_accel_tx_tls_state tls;
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
	struct mlx5e_accel_tx_ipsec_state ipsec;
#endif
};
117
/* Run all TX offload "begin" hooks for @skb before its WQE is built,
 * recording per-offload state in @state for the finish stage.
 *
 * Return: true if transmission may proceed; false if an offload consumed
 * or rejected the skb (the caller must not send it).
 */
static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
					struct mlx5e_txqsq *sq,
					struct sk_buff *skb,
					struct mlx5e_accel_tx_state *state)
{
	/* UDP GSO: fix up the UDP header length to the per-segment size. */
	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
		mlx5e_udp_gso_handle_tx_skb(skb);

#ifdef CONFIG_MLX5_EN_TLS
	/* May send WQEs. */
	if (tls_is_skb_tx_device_offloaded(skb))
		if (unlikely(!mlx5e_ktls_handle_tx_skb(dev, sq, skb,
						       &state->tls)))
			return false;
#endif

#ifdef CONFIG_MLX5_EN_IPSEC
	/* Only SQs with IPsec enabled see xfrm-offloaded skbs. */
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) && xfrm_offload(skb)) {
		if (unlikely(!mlx5e_ipsec_handle_tx_skb(dev, skb, &state->ipsec)))
			return false;
	}
#endif

#ifdef CONFIG_MLX5_MACSEC
	if (unlikely(mlx5e_macsec_skb_is_offload(skb))) {
		struct mlx5e_priv *priv = netdev_priv(dev);

		if (unlikely(!mlx5e_macsec_handle_tx_skb(priv->macsec, skb)))
			return false;
	}
#endif

	return true;
}
152
/* Return the number of extra octets TX offloads need reserved in the WQE
 * after the eth segment (currently only IPsec metadata); 0 when none.
 */
static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
						  struct mlx5e_accel_tx_state *state)
{
#ifdef CONFIG_MLX5_EN_IPSEC
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state))
		return mlx5e_ipsec_tx_ids_len(&state->ipsec);
#endif

	return 0;
}
163
164/* Part of the eseg touched by TX offloads */
165#define MLX5E_ACCEL_ESEG_LEN offsetof(struct mlx5_wqe_eth_seg, mss)
166
/* Let each active TX offload populate its part of the eth segment @eseg
 * (the region covered by MLX5E_ACCEL_ESEG_LEN) for @skb.  @ihs is the
 * inline header size, forwarded to the GENEVE SWP helper.
 */
static inline void mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
				       struct sk_buff *skb,
				       struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
#ifdef CONFIG_MLX5_EN_IPSEC
	if (xfrm_offload(skb))
		mlx5e_ipsec_tx_build_eseg(priv, skb, eseg);
#endif

#ifdef CONFIG_MLX5_MACSEC
	if (unlikely(mlx5e_macsec_skb_is_offload(skb)))
		mlx5e_macsec_tx_build_eseg(priv->macsec, skb, eseg);
#endif

#if IS_ENABLED(CONFIG_GENEVE)
	/* Encapsulated skb with HW checksum: program the SW parser offsets. */
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		mlx5e_tx_tunnel_accel(skb, eseg, ihs);
#endif
}
186
/* Final per-packet offload pass once the WQE @wqe is built: apply the state
 * collected by mlx5e_accel_tx_begin() (TLS control segment tweaks, IPsec
 * trailer inlined at @inlseg).
 */
static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
					 struct mlx5e_tx_wqe *wqe,
					 struct mlx5e_accel_tx_state *state,
					 struct mlx5_wqe_inline_seg *inlseg)
{
#ifdef CONFIG_MLX5_EN_TLS
	mlx5e_ktls_handle_tx_wqe(&wqe->ctrl, &state->tls);
#endif

#ifdef CONFIG_MLX5_EN_IPSEC
	/* Only when IPsec offload is active and a trailer must be inlined. */
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) &&
	    state->ipsec.xo && state->ipsec.tailen)
		mlx5e_ipsec_handle_tx_wqe(wqe, &state->ipsec, inlseg);
#endif
}
202
/* Initialize RX-side accel resources (currently kTLS RX). Returns 0 or errno. */
static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv)
{
	return mlx5e_ktls_init_rx(priv);
}
207
/* Tear down what mlx5e_accel_init_rx() set up. */
static inline void mlx5e_accel_cleanup_rx(struct mlx5e_priv *priv)
{
	mlx5e_ktls_cleanup_rx(priv);
}
212
/* Initialize TX-side accel resources (currently kTLS TX). Returns 0 or errno. */
static inline int mlx5e_accel_init_tx(struct mlx5e_priv *priv)
{
	return mlx5e_ktls_init_tx(priv);
}
217
/* Tear down what mlx5e_accel_init_tx() set up. */
static inline void mlx5e_accel_cleanup_tx(struct mlx5e_priv *priv)
{
	mlx5e_ktls_cleanup_tx(priv);
}
222#endif /* __MLX5E_EN_ACCEL_H__ */
223