// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */

#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/random.h>
#include <net/vxlan.h>

#include "reg.h"
#include "spectrum.h"
#include "spectrum_nve.h"

#define MLXSW_SP_NVE_VXLAN_IPV4_SUPPORTED_FLAGS (VXLAN_F_UDP_ZERO_CSUM_TX | \
						 VXLAN_F_LEARN | \
						 VXLAN_F_LOCALBYPASS)
#define MLXSW_SP_NVE_VXLAN_IPV6_SUPPORTED_FLAGS (VXLAN_F_IPV6 | \
						 VXLAN_F_UDP_ZERO_CSUM6_TX | \
						 VXLAN_F_UDP_ZERO_CSUM6_RX | \
						 VXLAN_F_LOCALBYPASS)

static bool mlxsw_sp_nve_vxlan_ipv4_flags_check(const struct vxlan_config *cfg,
						struct netlink_ext_ack *extack)
{
	if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for TX");
		return false;
	}

	if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_IPV4_SUPPORTED_FLAGS) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag");
		return false;
	}

	return true;
}

static bool mlxsw_sp_nve_vxlan_ipv6_flags_check(const struct vxlan_config *cfg,
						struct netlink_ext_ack *extack)
{
	if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for TX");
		return false;
	}

	if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for RX");
		return false;
	}

	if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_IPV6_SUPPORTED_FLAGS) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag");
		return false;
	}

	return true;
}

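/* Check whether the VxLAN device configuration can be offloaded. Any
 * unsupported attribute is rejected with an extack message.
 */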
static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
					   const struct mlxsw_sp_nve_params *params,
					   struct netlink_ext_ack *extack)
{
	struct vxlan_dev *vxlan = netdev_priv(params->dev);
	struct vxlan_config *cfg = &vxlan->cfg;

	if (vxlan_addr_multicast(&cfg->remote_ip)) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Multicast destination IP is not supported");
		return false;
	}

	if (vxlan_addr_any(&cfg->saddr)) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Source address must be specified");
		return false;
	}

	if (cfg->remote_ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Local interface is not supported");
		return false;
	}

	if (cfg->port_min || cfg->port_max) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Only default UDP source port range is supported");
		return false;
	}

	if (cfg->tos != 1) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: TOS must be configured to inherit");
		return false;
	}

	if (cfg->flags & VXLAN_F_TTL_INHERIT) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: TTL must not be configured to inherit");
		return false;
	}

	switch (cfg->saddr.sa.sa_family) {
	case AF_INET:
		if (!mlxsw_sp_nve_vxlan_ipv4_flags_check(cfg, extack))
			return false;
		break;
	case AF_INET6:
		if (!mlxsw_sp_nve_vxlan_ipv6_flags_check(cfg, extack))
			return false;
		break;
	}

	if (cfg->ttl == 0) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: TTL must not be configured to 0");
		return false;
	}

	if (cfg->label != 0) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: Flow label must be configured to 0");
		return false;
	}

	return true;
}

static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
					    const struct mlxsw_sp_nve_params *params,
					    struct netlink_ext_ack *extack)
{
	if (params->ethertype == ETH_P_8021AD) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: 802.1ad bridge is not supported with VxLAN");
		return false;
	}

	return mlxsw_sp_nve_vxlan_can_offload(nve, params, extack);
}

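/* Record the underlay protocol and local (source) IP of the VxLAN device
 * in the common NVE configuration.
 */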
static void
mlxsw_sp_nve_vxlan_ul_proto_sip_config(const struct vxlan_config *cfg,
				       struct mlxsw_sp_nve_config *config)
{
	switch (cfg->saddr.sa.sa_family) {
	case AF_INET:
		config->ul_proto = MLXSW_SP_L3_PROTO_IPV4;
		config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr;
		break;
	case AF_INET6:
		config->ul_proto = MLXSW_SP_L3_PROTO_IPV6;
		config->ul_sip.addr6 = cfg->saddr.sin6.sin6_addr;
		break;
	}
}

static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve,
				      const struct mlxsw_sp_nve_params *params,
				      struct mlxsw_sp_nve_config *config)
{
	struct vxlan_dev *vxlan = netdev_priv(params->dev);
	struct vxlan_config *cfg = &vxlan->cfg;

	config->type = MLXSW_SP_NVE_TYPE_VXLAN;
	config->ttl = cfg->ttl;
	config->flowlabel = cfg->label;
	config->learning_en = cfg->flags & VXLAN_F_LEARN ? 1 : 0;
	config->ul_tb_id = RT_TABLE_MAIN;
	mlxsw_sp_nve_vxlan_ul_proto_sip_config(cfg, config);
	config->udp_dport = cfg->dst_port;
}

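/* Fill the TNGCR fields that are common to Spectrum-1 and Spectrum-2:
 * tunnel type, TTL, UDP source port prefix and underlay source IP.
 */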
static void
mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl,
				  const struct mlxsw_sp_nve_config *config)
{
	struct in6_addr addr6;
	u8 udp_sport;

	mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, true,
			     config->ttl);
	/* VxLAN driver's default UDP source port range is 32768 (0x8000)
	 * to 60999 (0xee47). Set the upper 8 bits of the UDP source port
	 * to a random number between 0x80 and 0xee
	 */
	get_random_bytes(&udp_sport, sizeof(udp_sport));
	udp_sport = (udp_sport % (0xee - 0x80 + 1)) + 0x80;
	mlxsw_reg_tngcr_nve_udp_sport_prefix_set(tngcr_pl, udp_sport);

	switch (config->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_reg_tngcr_usipv4_set(tngcr_pl,
					   be32_to_cpu(config->ul_sip.addr4));
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		addr6 = config->ul_sip.addr6;
		mlxsw_reg_tngcr_usipv6_memcpy_to(tngcr_pl,
						 (const char *)&addr6);
		break;
	}
}

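/* On Spectrum-1 the NVE tunnel is bound to an underlay virtual router,
 * so resolve the underlay table ID to a VR ID before writing TNGCR.
 */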
static int
mlxsw_sp1_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
			       const struct mlxsw_sp_nve_config *config)
{
	char tngcr_pl[MLXSW_REG_TNGCR_LEN];
	u16 ul_vr_id;
	int err;

	err = mlxsw_sp_router_tb_id_vr_id(mlxsw_sp, config->ul_tb_id,
					  &ul_vr_id);
	if (err)
		return err;

	mlxsw_sp_nve_vxlan_config_prepare(tngcr_pl, config);
	mlxsw_reg_tngcr_learn_enable_set(tngcr_pl, config->learning_en);
	mlxsw_reg_tngcr_underlay_virtual_router_set(tngcr_pl, ul_vr_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
}

static void mlxsw_sp1_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp)
{
	char tngcr_pl[MLXSW_REG_TNGCR_LEN];

	mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0);

	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
}

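/* Set the tunnel decapsulation properties for the NVE tunnel index
 * allocated for this tunnel.
 */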
static int mlxsw_sp1_nve_vxlan_rtdp_set(struct mlxsw_sp *mlxsw_sp,
					unsigned int tunnel_index)
{
	char rtdp_pl[MLXSW_REG_RTDP_LEN];

	mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_NVE, tunnel_index);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
}

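/* Tunnel setup on Spectrum-1: parsing configuration, TNGCR, RTDP and
 * finally promotion of the decap route in the router. Errors unwind in
 * reverse order.
 */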
static int mlxsw_sp1_nve_vxlan_init(struct mlxsw_sp_nve *nve,
				    const struct mlxsw_sp_nve_config *config)
{
	struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
	int err;

	err = mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, config->udp_dport);
	if (err)
		return err;

	err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
	if (err)
		goto err_parsing_depth_inc;

	err = mlxsw_sp1_nve_vxlan_config_set(mlxsw_sp, config);
	if (err)
		goto err_config_set;

	err = mlxsw_sp1_nve_vxlan_rtdp_set(mlxsw_sp, nve->tunnel_index);
	if (err)
		goto err_rtdp_set;

	err = mlxsw_sp_router_nve_promote_decap(mlxsw_sp, config->ul_tb_id,
						config->ul_proto,
						&config->ul_sip,
						nve->tunnel_index);
	if (err)
		goto err_promote_decap;

	return 0;

err_promote_decap:
err_rtdp_set:
	mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp);
err_config_set:
	mlxsw_sp_parsing_depth_dec(mlxsw_sp);
err_parsing_depth_inc:
	mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0);
	return err;
}

static void mlxsw_sp1_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
{
	struct mlxsw_sp_nve_config *config = &nve->config;
	struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;

	mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id,
					 config->ul_proto, &config->ul_sip);
	mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp);
	mlxsw_sp_parsing_depth_dec(mlxsw_sp);
	mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0);
}

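/* Replay the FDB entries of the VxLAN device with the given VNI towards
 * the switchdev notifier, so that existing entries are offloaded.
 */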
static int
mlxsw_sp_nve_vxlan_fdb_replay(const struct net_device *nve_dev, __be32 vni,
			      struct netlink_ext_ack *extack)
{
	if (WARN_ON(!netif_is_vxlan(nve_dev)))
		return -EINVAL;
	return vxlan_fdb_replay(nve_dev, vni, &mlxsw_sp_switchdev_notifier,
				extack);
}

static void
mlxsw_sp_nve_vxlan_clear_offload(const struct net_device *nve_dev, __be32 vni)
{
	if (WARN_ON(!netif_is_vxlan(nve_dev)))
		return;
	vxlan_fdb_clear_offload(nve_dev, vni);
}

const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = {
	.type		= MLXSW_SP_NVE_TYPE_VXLAN,
	.can_offload	= mlxsw_sp1_nve_vxlan_can_offload,
	.nve_config	= mlxsw_sp_nve_vxlan_config,
	.init		= mlxsw_sp1_nve_vxlan_init,
	.fini		= mlxsw_sp1_nve_vxlan_fini,
	.fdb_replay	= mlxsw_sp_nve_vxlan_fdb_replay,
	.fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload,
};

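/* On Spectrum-2, learning is enabled on the NVE tunnel port via TNPC
 * rather than through TNGCR as on Spectrum-1.
 */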
static int mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
					    bool learning_en)
{
	char tnpc_pl[MLXSW_REG_TNPC_LEN];

	mlxsw_reg_tnpc_pack(tnpc_pl, MLXSW_REG_TUNNEL_PORT_NVE,
			    learning_en);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnpc), tnpc_pl);
}

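/* Configure EtherType handling for decapsulated packets via SPVID on
 * the NVE tunnel port.
 */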
static int
mlxsw_sp2_nve_decap_ethertype_set(struct mlxsw_sp *mlxsw_sp)
{
	char spvid_pl[MLXSW_REG_SPVID_LEN] = {};

	mlxsw_reg_spvid_tport_set(spvid_pl, true);
	mlxsw_reg_spvid_local_port_set(spvid_pl,
				       MLXSW_REG_TUNNEL_PORT_NVE);
	mlxsw_reg_spvid_egr_et_set_set(spvid_pl, true);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

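/* On Spectrum-2 the NVE tunnel is bound to an underlay RIF instead of a
 * virtual router, and the NVE tunnel port also requires VLAN push and
 * EtherType configuration.
 */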
static int
mlxsw_sp2_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
			       const struct mlxsw_sp_nve_config *config)
{
	char tngcr_pl[MLXSW_REG_TNGCR_LEN];
	char spvtr_pl[MLXSW_REG_SPVTR_LEN];
	u16 ul_rif_index;
	int err;

	err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, config->ul_tb_id,
					 &ul_rif_index);
	if (err)
		return err;
	mlxsw_sp->nve->ul_rif_index = ul_rif_index;

	err = mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, config->learning_en);
	if (err)
		goto err_vxlan_learning_set;

	mlxsw_sp_nve_vxlan_config_prepare(tngcr_pl, config);
	mlxsw_reg_tngcr_underlay_rif_set(tngcr_pl, ul_rif_index);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
	if (err)
		goto err_tngcr_write;

	mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE,
			     MLXSW_REG_SPVTR_IPVID_MODE_ALWAYS_PUSH_VLAN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl);
	if (err)
		goto err_spvtr_write;

	err = mlxsw_sp2_nve_decap_ethertype_set(mlxsw_sp);
	if (err)
		goto err_decap_ethertype_set;

	return 0;

err_decap_ethertype_set:
	mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE,
			     MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl);
err_spvtr_write:
	mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
err_tngcr_write:
	mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false);
err_vxlan_learning_set:
	mlxsw_sp_router_ul_rif_put(mlxsw_sp, ul_rif_index);
	return err;
}

static void mlxsw_sp2_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp)
{
	char spvtr_pl[MLXSW_REG_SPVTR_LEN];
	char tngcr_pl[MLXSW_REG_TNGCR_LEN];

	mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE,
			     MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl);
	mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
	mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false);
	mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->nve->ul_rif_index);
}

static int mlxsw_sp2_nve_vxlan_rtdp_set(struct mlxsw_sp *mlxsw_sp,
					unsigned int tunnel_index,
					u16 ul_rif_index)
{
	char rtdp_pl[MLXSW_REG_RTDP_LEN];

	mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_NVE, tunnel_index);
	mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_index);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
}

static int mlxsw_sp2_nve_vxlan_init(struct mlxsw_sp_nve *nve,
				    const struct mlxsw_sp_nve_config *config)
{
	struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
	int err;

	err = mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, config->udp_dport);
	if (err)
		return err;

	err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
	if (err)
		goto err_parsing_depth_inc;

	err = mlxsw_sp2_nve_vxlan_config_set(mlxsw_sp, config);
	if (err)
		goto err_config_set;

	err = mlxsw_sp2_nve_vxlan_rtdp_set(mlxsw_sp, nve->tunnel_index,
					   nve->ul_rif_index);
	if (err)
		goto err_rtdp_set;

	err = mlxsw_sp_router_nve_promote_decap(mlxsw_sp, config->ul_tb_id,
						config->ul_proto,
						&config->ul_sip,
						nve->tunnel_index);
	if (err)
		goto err_promote_decap;

	return 0;

err_promote_decap:
err_rtdp_set:
	mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp);
err_config_set:
	mlxsw_sp_parsing_depth_dec(mlxsw_sp);
err_parsing_depth_inc:
	mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0);
	return err;
}

static void mlxsw_sp2_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
{
	struct mlxsw_sp_nve_config *config = &nve->config;
	struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;

	mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id,
					 config->ul_proto, &config->ul_sip);
	mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp);
	mlxsw_sp_parsing_depth_dec(mlxsw_sp);
	mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0);
}

const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops = {
	.type		= MLXSW_SP_NVE_TYPE_VXLAN,
	.can_offload	= mlxsw_sp_nve_vxlan_can_offload,
	.nve_config	= mlxsw_sp_nve_vxlan_config,
	.init		= mlxsw_sp2_nve_vxlan_init,
	.fini		= mlxsw_sp2_nve_vxlan_fini,
	.fdb_replay	= mlxsw_sp_nve_vxlan_fdb_replay,
	.fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload,
};