1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2/* Copyright (c) 2021 Mellanox Technologies. */
3
4#include <linux/build_bug.h>
5#include <linux/list.h>
6#include <linux/notifier.h>
7#include <net/netevent.h>
8#include <net/switchdev.h>
9#include "lib/devcom.h"
10#include "bridge.h"
11#include "eswitch.h"
12#include "bridge_priv.h"
13#define CREATE_TRACE_POINTS
14#include "diag/bridge_tracepoint.h"
15
/* Parameters for the per-bridge FDB rhashtable: entries are
 * struct mlx5_esw_bridge_fdb_entry, hashed by their embedded
 * struct mlx5_esw_bridge_fdb_key.
 */
static const struct rhashtable_params fdb_ht_params = {
	.key_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, key),
	.key_len = sizeof(struct mlx5_esw_bridge_fdb_key),
	.head_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, ht_node),
	.automatic_shrinking = true,
};
22
23static void
24mlx5_esw_bridge_fdb_offload_notify(struct net_device *dev, const unsigned char *addr, u16 vid,
25				   unsigned long val)
26{
27	struct switchdev_notifier_fdb_info send_info = {};
28
29	send_info.addr = addr;
30	send_info.vid = vid;
31	send_info.offloaded = true;
32	call_switchdev_notifiers(val, dev, &send_info.info, NULL);
33}
34
/* Notify the bridge that an offloaded FDB entry has been deleted.
 * Entries flagged ADDED_BY_USER or PEER are skipped — their lifetime is
 * not driven by this eswitch's learning, so no DEL_TO_BRIDGE event is sent
 * for them.
 */
static void
mlx5_esw_bridge_fdb_del_notify(struct mlx5_esw_bridge_fdb_entry *entry)
{
	if (!(entry->flags & (MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER | MLX5_ESW_BRIDGE_FLAG_PEER)))
		mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
						   entry->key.vid,
						   SWITCHDEV_FDB_DEL_TO_BRIDGE);
}
43
44static bool mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(struct mlx5_eswitch *esw)
45{
46	return BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_remove)) &&
47		MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_size) >= sizeof(struct vlan_hdr) &&
48		MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_offset) >=
49		offsetof(struct vlan_ethhdr, h_vlan_proto);
50}
51
52static struct mlx5_pkt_reformat *
53mlx5_esw_bridge_pkt_reformat_vlan_pop_create(struct mlx5_eswitch *esw)
54{
55	struct mlx5_pkt_reformat_params reformat_params = {};
56
57	reformat_params.type = MLX5_REFORMAT_TYPE_REMOVE_HDR;
58	reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
59	reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
60	reformat_params.size = sizeof(struct vlan_hdr);
61	return mlx5_packet_reformat_alloc(esw->dev, &reformat_params, MLX5_FLOW_NAMESPACE_FDB);
62}
63
/* Create a bridge offload flow table with @max_fte entries at @level in
 * the FDB_BR_OFFLOAD priority of the FDB namespace.
 *
 * Returns the table on success or an ERR_PTR; failures are also logged.
 */
struct mlx5_flow_table *
mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *fdb;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB namespace\n");
		return ERR_PTR(-ENOENT);
	}

	/* TUNNEL_EN_REFORMAT allows rules in this table to carry packet
	 * reformat actions (VLAN push/pop).
	 */
	ft_attr.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
	ft_attr.max_fte = max_fte;
	ft_attr.level = level;
	ft_attr.prio = FDB_BR_OFFLOAD;
	fdb = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(fdb))
		esw_warn(dev, "Failed to create bridge FDB Table (err=%ld)\n", PTR_ERR(fdb));

	return fdb;
}
88
/* Create an ingress flow group over indices [@from, @to] of @ingress_ft
 * that matches on source MAC, the presence of a @vlan_proto tag
 * (802.1Q -> cvlan_tag, 802.1ad -> svlan_tag), the first VID, and the
 * source vport metadata in reg_c_0.
 *
 * Returns the group or an ERR_PTR; failures are also logged.
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
					     struct mlx5_eswitch *esw,
					     struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	/* Full 48-bit source MAC match (split across two fields). */
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
	if (vlan_proto == ETH_P_8021Q)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
	else if (vlan_proto == ETH_P_8021AD)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);

	/* reg_c_0 carries the source vport metadata on ingress. */
	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());

	MLX5_SET(create_flow_group_in, in, start_flow_index, from);
	MLX5_SET(create_flow_group_in, in, end_flow_index, to);

	fg = mlx5_create_flow_group(ingress_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create VLAN(proto=%x) flow group for bridge ingress table (err=%ld)\n",
			 vlan_proto, PTR_ERR(fg));

	return fg;
}
129
130static struct mlx5_flow_group *
131mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch *esw,
132				       struct mlx5_flow_table *ingress_ft)
133{
134	unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM;
135	unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO;
136
137	return mlx5_esw_bridge_ingress_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, ingress_ft);
138}
139
140static struct mlx5_flow_group *
141mlx5_esw_bridge_ingress_qinq_fg_create(struct mlx5_eswitch *esw,
142				       struct mlx5_flow_table *ingress_ft)
143{
144	unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_FROM;
145	unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_TO;
146
147	return mlx5_esw_bridge_ingress_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw,
148							    ingress_ft);
149}
150
/* Create an ingress "filter" flow group over indices [@from, @to] of
 * @ingress_ft. Unlike the main VLAN group it matches on source MAC, the
 * presence of a @vlan_proto tag and the source vport metadata, but NOT on
 * a specific VID — it catches tagged traffic regardless of VLAN id.
 *
 * Returns the group or an ERR_PTR; failures are also logged.
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(unsigned int from, unsigned int to,
						    u16 vlan_proto, struct mlx5_eswitch *esw,
						    struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	/* Full 48-bit source MAC match (split across two fields). */
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
	if (vlan_proto == ETH_P_8021Q)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
	else if (vlan_proto == ETH_P_8021AD)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
	/* reg_c_0 carries the source vport metadata on ingress. */
	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());

	MLX5_SET(create_flow_group_in, in, start_flow_index, from);
	MLX5_SET(create_flow_group_in, in, end_flow_index, to);

	fg = mlx5_create_flow_group(ingress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create bridge ingress table VLAN filter flow group (err=%ld)\n",
			 PTR_ERR(fg));
	kvfree(in);
	return fg;
}
188
189static struct mlx5_flow_group *
190mlx5_esw_bridge_ingress_vlan_filter_fg_create(struct mlx5_eswitch *esw,
191					      struct mlx5_flow_table *ingress_ft)
192{
193	unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_FROM;
194	unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_TO;
195
196	return mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(from, to, ETH_P_8021Q, esw,
197								   ingress_ft);
198}
199
200static struct mlx5_flow_group *
201mlx5_esw_bridge_ingress_qinq_filter_fg_create(struct mlx5_eswitch *esw,
202					      struct mlx5_flow_table *ingress_ft)
203{
204	unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_FROM;
205	unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_TO;
206
207	return mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(from, to, ETH_P_8021AD, esw,
208								   ingress_ft);
209}
210
/* Create the ingress MAC flow group: matches on source MAC and the source
 * vport metadata only (no VLAN criteria), at the MAC group index range of
 * the ingress table.
 *
 * Returns the group or an ERR_PTR; failures are also logged.
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	/* Full 48-bit source MAC match (split across two fields). */
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);

	/* reg_c_0 carries the source vport metadata on ingress. */
	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO);

	fg = mlx5_create_flow_group(ingress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create MAC flow group for bridge ingress table (err=%ld)\n",
			 PTR_ERR(fg));

	kvfree(in);
	return fg;
}
246
/* Create an egress flow group over indices [@from, @to] of @egress_ft that
 * matches on destination MAC, the presence of a @vlan_proto tag and the
 * first VID. Egress rules match on outer headers only — no vport metadata.
 *
 * Returns the group or an ERR_PTR; failures are also logged.
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_egress_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
					    struct mlx5_eswitch *esw,
					    struct mlx5_flow_table *egress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	/* Full 48-bit destination MAC match (split across two fields). */
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);
	if (vlan_proto == ETH_P_8021Q)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
	else if (vlan_proto == ETH_P_8021AD)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);

	MLX5_SET(create_flow_group_in, in, start_flow_index, from);
	MLX5_SET(create_flow_group_in, in, end_flow_index, to);

	fg = mlx5_create_flow_group(egress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create VLAN flow group for bridge egress table (err=%ld)\n",
			 PTR_ERR(fg));
	kvfree(in);
	return fg;
}
282
283static struct mlx5_flow_group *
284mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
285{
286	unsigned int from = MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM;
287	unsigned int to = MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO;
288
289	return mlx5_esw_bridge_egress_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, egress_ft);
290}
291
292static struct mlx5_flow_group *
293mlx5_esw_bridge_egress_qinq_fg_create(struct mlx5_eswitch *esw,
294				      struct mlx5_flow_table *egress_ft)
295{
296	unsigned int from = MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_FROM;
297	unsigned int to = MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_TO;
298
299	return mlx5_esw_bridge_egress_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw, egress_ft);
300}
301
/* Create the egress MAC flow group: matches on destination MAC only, at
 * the MAC group index range of the egress table.
 *
 * Returns the group or an ERR_PTR; failures are also logged.
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	/* Full 48-bit destination MAC match (split across two fields). */
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO);

	fg = mlx5_create_flow_group(egress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create bridge egress table MAC flow group (err=%ld)\n",
			 PTR_ERR(fg));
	kvfree(in);
	return fg;
}
332
/* Create the egress miss flow group: matches only on the tunnel bits of
 * metadata reg_c_1 (ESW_TUN_MASK), at the miss group index range of the
 * egress table. Used by the VLAN-pop miss flow.
 *
 * Returns the group or an ERR_PTR; failures are also logged.
 */
static struct mlx5_flow_group *
mlx5_esw_bridge_egress_miss_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO);

	fg = mlx5_create_flow_group(egress_ft, in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create bridge egress table miss flow group (err=%ld)\n",
			 PTR_ERR(fg));
	kvfree(in);
	return fg;
}
362
/* Create the shared bridge ingress objects: the ingress and skip flow
 * tables and the five ingress flow groups (VLAN, VLAN filter, QinQ, QinQ
 * filter, MAC), storing them all in @br_offloads.
 *
 * Requires vport match metadata to be enabled on the eswitch.
 * Returns 0 on success or a negative errno; on failure all partially
 * created objects are destroyed via the goto-unwind chain below.
 */
static int
mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_flow_group *mac_fg, *qinq_filter_fg, *qinq_fg, *vlan_filter_fg, *vlan_fg;
	struct mlx5_flow_table *ingress_ft, *skip_ft;
	struct mlx5_eswitch *esw = br_offloads->esw;
	int err;

	/* Ingress rules match on source vport metadata in reg_c_0. */
	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return -EOPNOTSUPP;

	ingress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE,
						  MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
						  esw);
	if (IS_ERR(ingress_ft))
		return PTR_ERR(ingress_ft);

	skip_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE,
					       MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE,
					       esw);
	if (IS_ERR(skip_ft)) {
		err = PTR_ERR(skip_ft);
		goto err_skip_tbl;
	}

	vlan_fg = mlx5_esw_bridge_ingress_vlan_fg_create(esw, ingress_ft);
	if (IS_ERR(vlan_fg)) {
		err = PTR_ERR(vlan_fg);
		goto err_vlan_fg;
	}

	vlan_filter_fg = mlx5_esw_bridge_ingress_vlan_filter_fg_create(esw, ingress_ft);
	if (IS_ERR(vlan_filter_fg)) {
		err = PTR_ERR(vlan_filter_fg);
		goto err_vlan_filter_fg;
	}

	qinq_fg = mlx5_esw_bridge_ingress_qinq_fg_create(esw, ingress_ft);
	if (IS_ERR(qinq_fg)) {
		err = PTR_ERR(qinq_fg);
		goto err_qinq_fg;
	}

	qinq_filter_fg = mlx5_esw_bridge_ingress_qinq_filter_fg_create(esw, ingress_ft);
	if (IS_ERR(qinq_filter_fg)) {
		err = PTR_ERR(qinq_filter_fg);
		goto err_qinq_filter_fg;
	}

	mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(esw, ingress_ft);
	if (IS_ERR(mac_fg)) {
		err = PTR_ERR(mac_fg);
		goto err_mac_fg;
	}

	br_offloads->ingress_ft = ingress_ft;
	br_offloads->skip_ft = skip_ft;
	br_offloads->ingress_vlan_fg = vlan_fg;
	br_offloads->ingress_vlan_filter_fg = vlan_filter_fg;
	br_offloads->ingress_qinq_fg = qinq_fg;
	br_offloads->ingress_qinq_filter_fg = qinq_filter_fg;
	br_offloads->ingress_mac_fg = mac_fg;
	return 0;

	/* Unwind in reverse creation order. */
err_mac_fg:
	mlx5_destroy_flow_group(qinq_filter_fg);
err_qinq_filter_fg:
	mlx5_destroy_flow_group(qinq_fg);
err_qinq_fg:
	mlx5_destroy_flow_group(vlan_filter_fg);
err_vlan_filter_fg:
	mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
	mlx5_destroy_flow_table(skip_ft);
err_skip_tbl:
	mlx5_destroy_flow_table(ingress_ft);
	return err;
}
441
/* Destroy the shared ingress objects in reverse creation order (groups
 * before their table) and NULL the pointers so the ingress table can be
 * lazily re-created by mlx5_esw_bridge_ingress_table_init().
 */
static void
mlx5_esw_bridge_ingress_table_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
{
	mlx5_destroy_flow_group(br_offloads->ingress_mac_fg);
	br_offloads->ingress_mac_fg = NULL;
	mlx5_destroy_flow_group(br_offloads->ingress_qinq_filter_fg);
	br_offloads->ingress_qinq_filter_fg = NULL;
	mlx5_destroy_flow_group(br_offloads->ingress_qinq_fg);
	br_offloads->ingress_qinq_fg = NULL;
	mlx5_destroy_flow_group(br_offloads->ingress_vlan_filter_fg);
	br_offloads->ingress_vlan_filter_fg = NULL;
	mlx5_destroy_flow_group(br_offloads->ingress_vlan_fg);
	br_offloads->ingress_vlan_fg = NULL;
	mlx5_destroy_flow_table(br_offloads->skip_ft);
	br_offloads->skip_ft = NULL;
	mlx5_destroy_flow_table(br_offloads->ingress_ft);
	br_offloads->ingress_ft = NULL;
}
460
461static struct mlx5_flow_handle *
462mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft,
463					struct mlx5_flow_table *skip_ft,
464					struct mlx5_pkt_reformat *pkt_reformat);
465
/* Create the per-bridge egress table and its flow groups (VLAN, QinQ, MAC)
 * plus, when the hardware supports VLAN pop via REMOVE_HEADER, the
 * optional miss group / packet reformat / miss flow. Results are stored in
 * @bridge.
 *
 * Returns 0 on success or a negative errno. Failures of the mandatory
 * objects unwind everything; failures of the optional miss objects are
 * logged and the bridge is set up without them (see skip_miss_flow).
 */
static int
mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
				  struct mlx5_esw_bridge *bridge)
{
	struct mlx5_flow_group *miss_fg = NULL, *mac_fg, *vlan_fg, *qinq_fg;
	struct mlx5_pkt_reformat *miss_pkt_reformat = NULL;
	struct mlx5_flow_handle *miss_handle = NULL;
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_flow_table *egress_ft;
	int err;

	egress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE,
						 MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
						 esw);
	if (IS_ERR(egress_ft))
		return PTR_ERR(egress_ft);

	vlan_fg = mlx5_esw_bridge_egress_vlan_fg_create(esw, egress_ft);
	if (IS_ERR(vlan_fg)) {
		err = PTR_ERR(vlan_fg);
		goto err_vlan_fg;
	}

	qinq_fg = mlx5_esw_bridge_egress_qinq_fg_create(esw, egress_ft);
	if (IS_ERR(qinq_fg)) {
		err = PTR_ERR(qinq_fg);
		goto err_qinq_fg;
	}

	mac_fg = mlx5_esw_bridge_egress_mac_fg_create(esw, egress_ft);
	if (IS_ERR(mac_fg)) {
		err = PTR_ERR(mac_fg);
		goto err_mac_fg;
	}

	/* The miss objects are best-effort: on any failure below, destroy
	 * what was created so far, reset the local pointers to NULL (so
	 * the bridge fields record their absence) and continue without
	 * the miss flow instead of failing the whole init.
	 */
	if (mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) {
		miss_fg = mlx5_esw_bridge_egress_miss_fg_create(esw, egress_ft);
		if (IS_ERR(miss_fg)) {
			esw_warn(esw->dev, "Failed to create miss flow group (err=%ld)\n",
				 PTR_ERR(miss_fg));
			miss_fg = NULL;
			goto skip_miss_flow;
		}

		miss_pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
		if (IS_ERR(miss_pkt_reformat)) {
			esw_warn(esw->dev,
				 "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
				 PTR_ERR(miss_pkt_reformat));
			miss_pkt_reformat = NULL;
			mlx5_destroy_flow_group(miss_fg);
			miss_fg = NULL;
			goto skip_miss_flow;
		}

		miss_handle = mlx5_esw_bridge_egress_miss_flow_create(egress_ft,
								      br_offloads->skip_ft,
								      miss_pkt_reformat);
		if (IS_ERR(miss_handle)) {
			esw_warn(esw->dev, "Failed to create miss flow (err=%ld)\n",
				 PTR_ERR(miss_handle));
			miss_handle = NULL;
			mlx5_packet_reformat_dealloc(esw->dev, miss_pkt_reformat);
			miss_pkt_reformat = NULL;
			mlx5_destroy_flow_group(miss_fg);
			miss_fg = NULL;
			goto skip_miss_flow;
		}
	}
skip_miss_flow:

	bridge->egress_ft = egress_ft;
	bridge->egress_vlan_fg = vlan_fg;
	bridge->egress_qinq_fg = qinq_fg;
	bridge->egress_mac_fg = mac_fg;
	bridge->egress_miss_fg = miss_fg;
	bridge->egress_miss_pkt_reformat = miss_pkt_reformat;
	bridge->egress_miss_handle = miss_handle;
	return 0;

	/* Unwind mandatory objects in reverse creation order. */
err_mac_fg:
	mlx5_destroy_flow_group(qinq_fg);
err_qinq_fg:
	mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
	mlx5_destroy_flow_table(egress_ft);
	return err;
}
554
/* Destroy the per-bridge egress objects in reverse creation order. The
 * miss objects are optional (may be NULL when the device lacks VLAN-pop
 * support or their creation failed), so they are checked individually.
 */
static void
mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
{
	if (bridge->egress_miss_handle)
		mlx5_del_flow_rules(bridge->egress_miss_handle);
	if (bridge->egress_miss_pkt_reformat)
		mlx5_packet_reformat_dealloc(bridge->br_offloads->esw->dev,
					     bridge->egress_miss_pkt_reformat);
	if (bridge->egress_miss_fg)
		mlx5_destroy_flow_group(bridge->egress_miss_fg);
	mlx5_destroy_flow_group(bridge->egress_mac_fg);
	mlx5_destroy_flow_group(bridge->egress_qinq_fg);
	mlx5_destroy_flow_group(bridge->egress_vlan_fg);
	mlx5_destroy_flow_table(bridge->egress_ft);
}
570
/* Add an ingress rule matching packets with source MAC @addr coming from
 * @vport_num of @esw (via the reg_c_0 metadata match), forwarding them to
 * the bridge egress table and counting them with @counter_id.
 *
 * VLAN handling:
 *  - @vlan with a push reformat: add VLAN-push + mod-header (push mark)
 *    actions instead of matching on a tag;
 *  - @vlan without one: additionally match the bridge's VLAN protocol tag
 *    and @vlan->vid;
 *  - NULL @vlan: no VLAN criteria.
 *
 * Returns the flow handle or an ERR_PTR.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char *addr,
					     struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
					     struct mlx5_esw_bridge *bridge,
					     struct mlx5_eswitch *esw)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_destination dests[2] = {};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *smac_v, *smac_c;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2;

	/* Exact source MAC match: value = @addr, mask = all ones. */
	smac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
			      outer_headers.smac_47_16);
	ether_addr_copy(smac_v, addr);
	smac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
			      outer_headers.smac_47_16);
	eth_broadcast_addr(smac_c);

	/* Match the source vport metadata of @esw in reg_c_0. */
	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));

	if (vlan && vlan->pkt_reformat_push) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
			MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		flow_act.pkt_reformat = vlan->pkt_reformat_push;
		flow_act.modify_hdr = vlan->pkt_mod_hdr_push_mark;
	} else if (vlan) {
		if (bridge->vlan_proto == ETH_P_8021Q) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.cvlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.cvlan_tag);
		} else if (bridge->vlan_proto == ETH_P_8021AD) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.svlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.svlan_tag);
		}
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
			 vlan->vid);
	}

	dests[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dests[0].ft = bridge->egress_ft;
	dests[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dests[1].counter_id = counter_id;

	handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, dests,
				     ARRAY_SIZE(dests));

	kvfree(rule_spec);
	return handle;
}
639
640static struct mlx5_flow_handle *
641mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
642				    struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
643				    struct mlx5_esw_bridge *bridge)
644{
645	return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
646							    bridge, bridge->br_offloads->esw);
647}
648
649static struct mlx5_flow_handle *
650mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
651					 const unsigned char *addr,
652					 struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
653					 struct mlx5_esw_bridge *bridge)
654{
655	struct mlx5_devcom_comp_dev *devcom = bridge->br_offloads->esw->devcom, *pos;
656	struct mlx5_eswitch *tmp, *peer_esw = NULL;
657	static struct mlx5_flow_handle *handle;
658
659	if (!mlx5_devcom_for_each_peer_begin(devcom))
660		return ERR_PTR(-ENODEV);
661
662	mlx5_devcom_for_each_peer_entry(devcom, tmp, pos) {
663		if (mlx5_esw_is_owner(tmp, vport_num, esw_owner_vhca_id)) {
664			peer_esw = tmp;
665			break;
666		}
667	}
668
669	if (!peer_esw) {
670		handle = ERR_PTR(-ENODEV);
671		goto out;
672	}
673
674	handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
675							      bridge, peer_esw);
676
677out:
678	mlx5_devcom_for_each_peer_end(devcom);
679	return handle;
680}
681
/* Add an ingress filter rule: packets from @vport_num with source MAC
 * @addr that carry a tag of the bridge's VLAN protocol (any VID) are
 * forwarded to the skip table instead of the bridge egress table.
 *
 * Returns the flow handle or an ERR_PTR.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_filter_flow_create(u16 vport_num, const unsigned char *addr,
					   struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = br_offloads->skip_ft,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *smac_v, *smac_c;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2;

	/* Exact source MAC match: value = @addr, mask = all ones. */
	smac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
			      outer_headers.smac_47_16);
	ether_addr_copy(smac_v, addr);
	smac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
			      outer_headers.smac_47_16);
	eth_broadcast_addr(smac_c);

	/* Match the source vport metadata in reg_c_0. */
	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(br_offloads->esw, vport_num));

	/* Match tag presence only — no first_vid criterion. */
	if (bridge->vlan_proto == ETH_P_8021Q) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.cvlan_tag);
	} else if (bridge->vlan_proto == ETH_P_8021AD) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.svlan_tag);
	}

	handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}
734
/* Add an egress rule forwarding packets with destination MAC @addr (and,
 * if @vlan is set, the bridge's VLAN protocol tag and @vlan->vid) to
 * @vport_num. If the VLAN has a pop reformat, the tag is stripped on the
 * way out. On merged-eswitch devices the destination also carries
 * @esw_owner_vhca_id so cross-eswitch vports can be addressed.
 *
 * Returns the flow handle or an ERR_PTR.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_egress_flow_create(u16 vport_num, u16 esw_owner_vhca_id, const unsigned char *addr,
				   struct mlx5_esw_bridge_vlan *vlan,
				   struct mlx5_esw_bridge *bridge)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
		.vport.num = vport_num,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *dmac_v, *dmac_c;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	/* Rules targeting the uplink are tagged with LOCAL_VPORT flow
	 * source when the device supports flow_source.
	 */
	if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
	    vport_num == MLX5_VPORT_UPLINK)
		rule_spec->flow_context.flow_source =
			MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	/* Exact destination MAC match: value = @addr, mask = all ones. */
	dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
			      outer_headers.dmac_47_16);
	ether_addr_copy(dmac_v, addr);
	dmac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
			      outer_headers.dmac_47_16);
	eth_broadcast_addr(dmac_c);

	if (vlan) {
		if (vlan->pkt_reformat_pop) {
			flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_act.pkt_reformat = vlan->pkt_reformat_pop;
		}

		if (bridge->vlan_proto == ETH_P_8021Q) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.cvlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.cvlan_tag);
		} else if (bridge->vlan_proto == ETH_P_8021AD) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.svlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.svlan_tag);
		}
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
			 vlan->vid);
	}

	if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
		dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
		dest.vport.vhca_id = esw_owner_vhca_id;
	}
	handle = mlx5_add_flow_rules(bridge->egress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}
801
/* Add the egress miss rule: packets whose reg_c_1 tunnel bits equal the
 * "bridge ingress pushed a VLAN" mark have the pushed VLAN popped via
 * @pkt_reformat and are forwarded to @skip_ft instead of being bridged.
 *
 * Returns the flow handle or an ERR_PTR.
 */
static struct mlx5_flow_handle *
mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft,
					struct mlx5_flow_table *skip_ft,
					struct mlx5_pkt_reformat *pkt_reformat)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = skip_ft,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
		MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT,
		.flags = FLOW_ACT_NO_APPEND,
		.pkt_reformat = pkt_reformat,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;

	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_1,
		 ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK);

	handle = mlx5_add_flow_rules(egress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}
836
/* Allocate and initialize a bridge offload object for @br_netdev: egress
 * table, FDB hashtable, MDB state, debugfs entry; link it into
 * @br_offloads->bridges with an initial refcount of 1. Default VLAN
 * protocol is 802.1Q.
 *
 * Returns the bridge or an ERR_PTR, unwinding partial state on failure.
 */
static struct mlx5_esw_bridge *mlx5_esw_bridge_create(struct net_device *br_netdev,
						      struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge *bridge;
	int err;

	bridge = kvzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return ERR_PTR(-ENOMEM);

	bridge->br_offloads = br_offloads;
	err = mlx5_esw_bridge_egress_table_init(br_offloads, bridge);
	if (err)
		goto err_egress_tbl;

	err = rhashtable_init(&bridge->fdb_ht, &fdb_ht_params);
	if (err)
		goto err_fdb_ht;

	err = mlx5_esw_bridge_mdb_init(bridge);
	if (err)
		goto err_mdb_ht;

	INIT_LIST_HEAD(&bridge->fdb_list);
	bridge->ifindex = br_netdev->ifindex;
	bridge->refcnt = 1;
	bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
	bridge->vlan_proto = ETH_P_8021Q;
	list_add(&bridge->list, &br_offloads->bridges);
	mlx5_esw_bridge_debugfs_init(br_netdev, bridge);

	return bridge;

err_mdb_ht:
	rhashtable_destroy(&bridge->fdb_ht);
err_fdb_ht:
	mlx5_esw_bridge_egress_table_cleanup(bridge);
err_egress_tbl:
	kvfree(bridge);
	return ERR_PTR(err);
}
878
879static void mlx5_esw_bridge_get(struct mlx5_esw_bridge *bridge)
880{
881	bridge->refcnt++;
882}
883
/* Drop a reference on @bridge; on the last put, tear down the bridge
 * (debugfs, egress table, multicast state, FDB/MDB tables) and free it.
 * When the last bridge on this eswitch goes away, the shared ingress
 * table is torn down too so it can be lazily re-created later.
 */
static void mlx5_esw_bridge_put(struct mlx5_esw_bridge_offloads *br_offloads,
				struct mlx5_esw_bridge *bridge)
{
	if (--bridge->refcnt)
		return;

	mlx5_esw_bridge_debugfs_cleanup(bridge);
	mlx5_esw_bridge_egress_table_cleanup(bridge);
	mlx5_esw_bridge_mcast_disable(bridge);
	list_del(&bridge->list);
	mlx5_esw_bridge_mdb_cleanup(bridge);
	rhashtable_destroy(&bridge->fdb_ht);
	kvfree(bridge);

	if (list_empty(&br_offloads->bridges))
		mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
}
901
/* Look up the offloaded bridge matching @br_netdev by ifindex, taking a
 * reference, or create a new one if none exists.  The shared ingress table
 * is lazily initialized before the first bridge is created and torn back
 * down if that first creation fails.  Requires RTNL.
 *
 * Returns the bridge (referenced) or ERR_PTR on failure.
 */
static struct mlx5_esw_bridge *
mlx5_esw_bridge_lookup(struct net_device *br_netdev, struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge *bridge;

	ASSERT_RTNL();

	list_for_each_entry(bridge, &br_offloads->bridges, list) {
		if (bridge->ifindex == br_netdev->ifindex) {
			mlx5_esw_bridge_get(bridge);
			return bridge;
		}
	}

	/* First bridge on this eswitch: bring up the shared ingress table. */
	if (!br_offloads->ingress_ft) {
		int err = mlx5_esw_bridge_ingress_table_init(br_offloads);

		if (err)
			return ERR_PTR(err);
	}

	bridge = mlx5_esw_bridge_create(br_netdev, br_offloads);
	/* Don't leave an unused ingress table behind if creation failed. */
	if (IS_ERR(bridge) && list_empty(&br_offloads->bridges))
		mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
	return bridge;
}
928
929static unsigned long mlx5_esw_bridge_port_key_from_data(u16 vport_num, u16 esw_owner_vhca_id)
930{
931	return vport_num | (unsigned long)esw_owner_vhca_id << sizeof(vport_num) * BITS_PER_BYTE;
932}
933
/* Compute the ports-xarray key for an existing bridge port. */
unsigned long mlx5_esw_bridge_port_key(struct mlx5_esw_bridge_port *port)
{
	return mlx5_esw_bridge_port_key_from_data(port->vport_num, port->esw_owner_vhca_id);
}
938
939static int mlx5_esw_bridge_port_insert(struct mlx5_esw_bridge_port *port,
940				       struct mlx5_esw_bridge_offloads *br_offloads)
941{
942	return xa_insert(&br_offloads->ports, mlx5_esw_bridge_port_key(port), port, GFP_KERNEL);
943}
944
945static struct mlx5_esw_bridge_port *
946mlx5_esw_bridge_port_lookup(u16 vport_num, u16 esw_owner_vhca_id,
947			    struct mlx5_esw_bridge_offloads *br_offloads)
948{
949	return xa_load(&br_offloads->ports, mlx5_esw_bridge_port_key_from_data(vport_num,
950									       esw_owner_vhca_id));
951}
952
953static void mlx5_esw_bridge_port_erase(struct mlx5_esw_bridge_port *port,
954				       struct mlx5_esw_bridge_offloads *br_offloads)
955{
956	xa_erase(&br_offloads->ports, mlx5_esw_bridge_port_key(port));
957}
958
959static struct mlx5_esw_bridge *
960mlx5_esw_bridge_from_port_lookup(u16 vport_num, u16 esw_owner_vhca_id,
961				 struct mlx5_esw_bridge_offloads *br_offloads)
962{
963	struct mlx5_esw_bridge_port *port;
964
965	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
966	if (!port)
967		return NULL;
968
969	return port->bridge;
970}
971
/* Re-announce an active entry to the kernel bridge (ADD_TO_BRIDGE) so the
 * software FDB does not age it out while hardware still sees traffic.
 */
static void mlx5_esw_bridge_fdb_entry_refresh(struct mlx5_esw_bridge_fdb_entry *entry)
{
	trace_mlx5_esw_bridge_fdb_entry_refresh(entry);

	mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
					   entry->key.vid,
					   SWITCHDEV_FDB_ADD_TO_BRIDGE);
}
980
/* Tear down a single FDB entry: unpublish it from the hashtable first,
 * then remove its hardware rules (egress, optional VLAN filter, ingress),
 * destroy the ingress counter, unlink it from the vlan and bridge lists
 * and free it.  No switchdev notification is sent here; use
 * mlx5_esw_bridge_fdb_entry_notify_and_cleanup() for that.
 */
static void
mlx5_esw_bridge_fdb_entry_cleanup(struct mlx5_esw_bridge_fdb_entry *entry,
				  struct mlx5_esw_bridge *bridge)
{
	trace_mlx5_esw_bridge_fdb_entry_cleanup(entry);

	rhashtable_remove_fast(&bridge->fdb_ht, &entry->ht_node, fdb_ht_params);
	mlx5_del_flow_rules(entry->egress_handle);
	/* filter_handle only exists when VLAN filtering was enabled. */
	if (entry->filter_handle)
		mlx5_del_flow_rules(entry->filter_handle);
	mlx5_del_flow_rules(entry->ingress_handle);
	mlx5_fc_destroy(bridge->br_offloads->esw->dev, entry->ingress_counter);
	list_del(&entry->vlan_list);
	list_del(&entry->list);
	kvfree(entry);
}
997
/* Notify the kernel bridge that the entry is going away (unless it is
 * user-added or a peer entry — see mlx5_esw_bridge_fdb_del_notify()),
 * then destroy it.
 */
static void
mlx5_esw_bridge_fdb_entry_notify_and_cleanup(struct mlx5_esw_bridge_fdb_entry *entry,
					     struct mlx5_esw_bridge *bridge)
{
	mlx5_esw_bridge_fdb_del_notify(entry);
	mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
}
1005
/* Remove (with notification) every FDB entry on @bridge. */
static void mlx5_esw_bridge_fdb_flush(struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
		mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
}
1013
/* Find the VLAN entry with id @vid configured on @port, or NULL. */
static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_vlan_lookup(u16 vid, struct mlx5_esw_bridge_port *port)
{
	return xa_load(&port->vlans, vid);
}
1019
1020static int
1021mlx5_esw_bridge_vlan_push_create(u16 vlan_proto, struct mlx5_esw_bridge_vlan *vlan,
1022				 struct mlx5_eswitch *esw)
1023{
1024	struct {
1025		__be16	h_vlan_proto;
1026		__be16	h_vlan_TCI;
1027	} vlan_hdr = { htons(vlan_proto), htons(vlan->vid) };
1028	struct mlx5_pkt_reformat_params reformat_params = {};
1029	struct mlx5_pkt_reformat *pkt_reformat;
1030
1031	if (!BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_insert)) ||
1032	    MLX5_CAP_GEN_2(esw->dev, max_reformat_insert_size) < sizeof(vlan_hdr) ||
1033	    MLX5_CAP_GEN_2(esw->dev, max_reformat_insert_offset) <
1034	    offsetof(struct vlan_ethhdr, h_vlan_proto)) {
1035		esw_warn(esw->dev, "Packet reformat INSERT_HEADER is not supported\n");
1036		return -EOPNOTSUPP;
1037	}
1038
1039	reformat_params.type = MLX5_REFORMAT_TYPE_INSERT_HDR;
1040	reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
1041	reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
1042	reformat_params.size = sizeof(vlan_hdr);
1043	reformat_params.data = &vlan_hdr;
1044	pkt_reformat = mlx5_packet_reformat_alloc(esw->dev,
1045						  &reformat_params,
1046						  MLX5_FLOW_NAMESPACE_FDB);
1047	if (IS_ERR(pkt_reformat)) {
1048		esw_warn(esw->dev, "Failed to alloc packet reformat INSERT_HEADER (err=%ld)\n",
1049			 PTR_ERR(pkt_reformat));
1050		return PTR_ERR(pkt_reformat);
1051	}
1052
1053	vlan->pkt_reformat_push = pkt_reformat;
1054	return 0;
1055}
1056
1057static void
1058mlx5_esw_bridge_vlan_push_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
1059{
1060	mlx5_packet_reformat_dealloc(esw->dev, vlan->pkt_reformat_push);
1061	vlan->pkt_reformat_push = NULL;
1062}
1063
/* Allocate a REMOVE_HEADER packet reformat that strips the VLAN header
 * and store it in @vlan->pkt_reformat_pop.
 *
 * Returns 0 on success, -EOPNOTSUPP when the device cannot remove the
 * header, or the error from the reformat allocation.
 */
static int
mlx5_esw_bridge_vlan_pop_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
	struct mlx5_pkt_reformat *pkt_reformat;

	if (!mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) {
		esw_warn(esw->dev, "Packet reformat REMOVE_HEADER is not supported\n");
		return -EOPNOTSUPP;
	}

	pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
	if (IS_ERR(pkt_reformat)) {
		esw_warn(esw->dev, "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
			 PTR_ERR(pkt_reformat));
		return PTR_ERR(pkt_reformat);
	}

	vlan->pkt_reformat_pop = pkt_reformat;
	return 0;
}
1084
1085static void
1086mlx5_esw_bridge_vlan_pop_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
1087{
1088	mlx5_packet_reformat_dealloc(esw->dev, vlan->pkt_reformat_pop);
1089	vlan->pkt_reformat_pop = NULL;
1090}
1091
/* Allocate a modify-header action that writes the "VLAN pushed" mark into
 * metadata register C1 (the tunnel-options/id bit range at offset 8), so
 * later stages can tell the packet already carries a pushed VLAN.  Stores
 * the action in @vlan->pkt_mod_hdr_push_mark.
 */
static int
mlx5_esw_bridge_vlan_push_mark_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_modify_hdr *pkt_mod_hdr;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
	MLX5_SET(set_action_in, action, offset, 8);
	MLX5_SET(set_action_in, action, length, ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS);
	MLX5_SET(set_action_in, action, data, ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN);

	pkt_mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB, 1, action);
	if (IS_ERR(pkt_mod_hdr))
		return PTR_ERR(pkt_mod_hdr);

	vlan->pkt_mod_hdr_push_mark = pkt_mod_hdr;
	return 0;
}
1111
1112static void
1113mlx5_esw_bridge_vlan_push_mark_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
1114{
1115	mlx5_modify_header_dealloc(esw->dev, vlan->pkt_mod_hdr_push_mark);
1116	vlan->pkt_mod_hdr_push_mark = NULL;
1117}
1118
/* Set up the per-VLAN multicast state needed for push/pop handling
 * (thin wrapper over mlx5_esw_bridge_vlan_mcast_init()).
 */
static int
mlx5_esw_bridge_vlan_push_pop_fhs_create(u16 vlan_proto, struct mlx5_esw_bridge_port *port,
					 struct mlx5_esw_bridge_vlan *vlan)
{
	return mlx5_esw_bridge_vlan_mcast_init(vlan_proto, port, vlan);
}
1125
/* Tear down the per-VLAN multicast state created by
 * mlx5_esw_bridge_vlan_push_pop_fhs_create().
 */
static void
mlx5_esw_bridge_vlan_push_pop_fhs_cleanup(struct mlx5_esw_bridge_vlan *vlan)
{
	mlx5_esw_bridge_vlan_mcast_cleanup(vlan);
}
1131
/* Create the hardware actions a VLAN entry needs based on its bridge
 * flags: PVID implies VLAN push + push-mark on ingress, UNTAGGED implies
 * VLAN pop (+ multicast fhs state) on egress.  On error, only the pieces
 * actually created are unwound — hence the pointer checks in the error
 * path, since either flag may be absent.
 */
static int
mlx5_esw_bridge_vlan_push_pop_create(u16 vlan_proto, u16 flags, struct mlx5_esw_bridge_port *port,
				     struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
	int err;

	if (flags & BRIDGE_VLAN_INFO_PVID) {
		err = mlx5_esw_bridge_vlan_push_create(vlan_proto, vlan, esw);
		if (err)
			return err;

		err = mlx5_esw_bridge_vlan_push_mark_create(vlan, esw);
		if (err)
			goto err_vlan_push_mark;
	}

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
		err = mlx5_esw_bridge_vlan_pop_create(vlan, esw);
		if (err)
			goto err_vlan_pop;

		err = mlx5_esw_bridge_vlan_push_pop_fhs_create(vlan_proto, port, vlan);
		if (err)
			goto err_vlan_pop_fhs;
	}

	return 0;

err_vlan_pop_fhs:
	mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
err_vlan_pop:
	if (vlan->pkt_mod_hdr_push_mark)
		mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
err_vlan_push_mark:
	if (vlan->pkt_reformat_push)
		mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
	return err;
}
1170
/* Allocate a VLAN entry for @port, create its push/pop hardware actions
 * according to @flags and publish it in the port's vlans xarray.
 *
 * Returns the new entry or ERR_PTR; on failure any created actions are
 * unwound (each checked individually, since @flags determines which ones
 * exist).
 */
static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_vlan_create(u16 vlan_proto, u16 vid, u16 flags, struct mlx5_esw_bridge_port *port,
			    struct mlx5_eswitch *esw)
{
	struct mlx5_esw_bridge_vlan *vlan;
	int err;

	vlan = kvzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return ERR_PTR(-ENOMEM);

	vlan->vid = vid;
	vlan->flags = flags;
	INIT_LIST_HEAD(&vlan->fdb_list);

	err = mlx5_esw_bridge_vlan_push_pop_create(vlan_proto, flags, port, vlan, esw);
	if (err)
		goto err_vlan_push_pop;

	err = xa_insert(&port->vlans, vid, vlan, GFP_KERNEL);
	if (err)
		goto err_xa_insert;

	trace_mlx5_esw_bridge_vlan_create(vlan);
	return vlan;

err_xa_insert:
	if (vlan->mcast_handle)
		mlx5_esw_bridge_vlan_push_pop_fhs_cleanup(vlan);
	if (vlan->pkt_reformat_pop)
		mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
	if (vlan->pkt_mod_hdr_push_mark)
		mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
	if (vlan->pkt_reformat_push)
		mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
err_vlan_push_pop:
	kvfree(vlan);
	return ERR_PTR(err);
}
1210
/* Unpublish @vlan from the port's vlans xarray (does not free it). */
static void mlx5_esw_bridge_vlan_erase(struct mlx5_esw_bridge_port *port,
				       struct mlx5_esw_bridge_vlan *vlan)
{
	xa_erase(&port->vlans, vlan->vid);
}
1216
/* Remove everything attached to @vlan: its FDB entries (with
 * notification), its MDB attachments, and whichever push/pop hardware
 * actions were created for it.  The vlan object itself stays allocated
 * and registered — see mlx5_esw_bridge_vlan_cleanup() for full removal.
 */
static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_port *port,
				       struct mlx5_esw_bridge_vlan *vlan,
				       struct mlx5_esw_bridge *bridge)
{
	struct mlx5_eswitch *esw = bridge->br_offloads->esw;
	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &vlan->fdb_list, vlan_list)
		mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
	mlx5_esw_bridge_port_mdb_vlan_flush(port, vlan);

	/* Only the actions this vlan's flags required were created. */
	if (vlan->mcast_handle)
		mlx5_esw_bridge_vlan_push_pop_fhs_cleanup(vlan);
	if (vlan->pkt_reformat_pop)
		mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
	if (vlan->pkt_mod_hdr_push_mark)
		mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
	if (vlan->pkt_reformat_push)
		mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
}
1237
/* Fully remove @vlan from @port: flush its state, unpublish it from the
 * port's xarray and free it.
 */
static void mlx5_esw_bridge_vlan_cleanup(struct mlx5_esw_bridge_port *port,
					 struct mlx5_esw_bridge_vlan *vlan,
					 struct mlx5_esw_bridge *bridge)
{
	trace_mlx5_esw_bridge_vlan_cleanup(vlan);
	mlx5_esw_bridge_vlan_flush(port, vlan, bridge);
	mlx5_esw_bridge_vlan_erase(port, vlan);
	kvfree(vlan);
}
1247
/* Remove every VLAN entry configured on @port. */
static void mlx5_esw_bridge_port_vlans_flush(struct mlx5_esw_bridge_port *port,
					     struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_vlan *vlan;
	unsigned long index;

	xa_for_each(&port->vlans, index, vlan)
		mlx5_esw_bridge_vlan_cleanup(port, vlan, bridge);
}
1257
/* Rebuild the push/pop actions of every VLAN on @port using the bridge's
 * current VLAN protocol.  Each vlan is flushed first (dropping its FDB
 * entries and old actions) and then recreated with the same flags.
 * Stops and returns the error on the first failure, leaving already
 * processed vlans recreated.
 */
static int mlx5_esw_bridge_port_vlans_recreate(struct mlx5_esw_bridge_port *port,
					       struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_esw_bridge_vlan *vlan;
	unsigned long i;
	int err;

	xa_for_each(&port->vlans, i, vlan) {
		mlx5_esw_bridge_vlan_flush(port, vlan, bridge);
		err = mlx5_esw_bridge_vlan_push_pop_create(bridge->vlan_proto, vlan->flags, port,
							   vlan, br_offloads->esw);
		if (err) {
			esw_warn(br_offloads->esw->dev,
				 "Failed to create VLAN=%u(proto=%x) push/pop actions (vport=%u,err=%d)\n",
				 vlan->vid, bridge->vlan_proto, port->vport_num,
				 err);
			return err;
		}
	}

	return 0;
}
1281
/* Recreate the VLAN push/pop actions of every port attached to @bridge
 * (used after the bridge VLAN protocol changed).  Returns the first
 * error encountered, if any.
 */
static int
mlx5_esw_bridge_vlans_recreate(struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_esw_bridge_port *port;
	unsigned long i;
	int err;

	xa_for_each(&br_offloads->ports, i, port) {
		/* The ports xarray holds ports of all bridges on this eswitch. */
		if (port->bridge != bridge)
			continue;

		err = mlx5_esw_bridge_port_vlans_recreate(port, bridge);
		if (err)
			return err;
	}

	return 0;
}
1301
/* Resolve the VLAN entry for (@vid, @vport_num) on @bridge.  Lookup
 * failures are expected races (FDB work runs asynchronously on a
 * workqueue while port/vlan may be deleted concurrently), so they are
 * logged at 'info' level and reported as -EINVAL rather than warned.
 */
static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, u16 esw_owner_vhca_id,
				 struct mlx5_esw_bridge *bridge, struct mlx5_eswitch *esw)
{
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge_vlan *vlan;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, bridge->br_offloads);
	if (!port) {
		/* FDB is added asynchronously on wq while port might have been deleted
		 * concurrently. Report on 'info' logging level and skip the FDB offload.
		 */
		esw_info(esw->dev, "Failed to lookup bridge port (vport=%u)\n", vport_num);
		return ERR_PTR(-EINVAL);
	}

	vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
	if (!vlan) {
		/* FDB is added asynchronously on wq while vlan might have been deleted
		 * concurrently. Report on 'info' logging level and skip the FDB offload.
		 */
		esw_info(esw->dev, "Failed to lookup bridge port vlan metadata (vport=%u)\n",
			 vport_num);
		return ERR_PTR(-EINVAL);
	}

	return vlan;
}
1330
/* Look up an FDB entry by (MAC, vid) in the bridge hashtable, or NULL. */
static struct mlx5_esw_bridge_fdb_entry *
mlx5_esw_bridge_fdb_lookup(struct mlx5_esw_bridge *bridge,
			   const unsigned char *addr, u16 vid)
{
	struct mlx5_esw_bridge_fdb_key key = {};

	ether_addr_copy(key.addr, addr);
	key.vid = vid;
	return rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params);
}
1341
/* Allocate and offload an FDB entry for (@addr, @vid) on @bridge.
 *
 * Creates the ingress flow (peer variant when the entry originates from
 * the other eswitch of a merged pair), an optional VLAN-filtering rule,
 * and the egress flow; then inserts the entry into the bridge hashtable
 * and the vlan/bridge lists.  A pre-existing entry with the same key is
 * notified and destroyed first so the new offload replaces it.
 *
 * Returns the new entry or ERR_PTR; on failure the created hardware
 * objects are unwound in reverse order.
 */
static struct mlx5_esw_bridge_fdb_entry *
mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
			       const unsigned char *addr, u16 vid, bool added_by_user, bool peer,
			       struct mlx5_eswitch *esw, struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_vlan *vlan = NULL;
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_flow_handle *handle;
	struct mlx5_fc *counter;
	int err;

	/* Resolve the vlan only when filtering is on and the entry is tagged. */
	if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG && vid) {
		vlan = mlx5_esw_bridge_port_vlan_lookup(vid, vport_num, esw_owner_vhca_id, bridge,
							esw);
		if (IS_ERR(vlan))
			return ERR_CAST(vlan);
	}

	/* Replace any stale entry with the same (MAC, vid) key. */
	entry = mlx5_esw_bridge_fdb_lookup(bridge, addr, vid);
	if (entry)
		mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);

	entry = kvzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	ether_addr_copy(entry->key.addr, addr);
	entry->key.vid = vid;
	entry->dev = dev;
	entry->vport_num = vport_num;
	entry->esw_owner_vhca_id = esw_owner_vhca_id;
	entry->lastuse = jiffies;
	if (added_by_user)
		entry->flags |= MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER;
	if (peer)
		entry->flags |= MLX5_ESW_BRIDGE_FLAG_PEER;

	/* Counter is used by mlx5_esw_bridge_update() to detect activity. */
	counter = mlx5_fc_create(esw->dev, true);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_ingress_fc_create;
	}
	entry->ingress_counter = counter;

	handle = peer ?
		mlx5_esw_bridge_ingress_flow_peer_create(vport_num, esw_owner_vhca_id,
							 addr, vlan, mlx5_fc_id(counter),
							 bridge) :
		mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan,
						    mlx5_fc_id(counter), bridge);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d,peer=%d)\n",
			 vport_num, err, peer);
		goto err_ingress_flow_create;
	}
	entry->ingress_handle = handle;

	if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG) {
		handle = mlx5_esw_bridge_ingress_filter_flow_create(vport_num, addr, bridge);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			esw_warn(esw->dev, "Failed to create ingress filter(vport=%u,err=%d)\n",
				 vport_num, err);
			goto err_ingress_filter_flow_create;
		}
		entry->filter_handle = handle;
	}

	handle = mlx5_esw_bridge_egress_flow_create(vport_num, esw_owner_vhca_id, addr, vlan,
						    bridge);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		esw_warn(esw->dev, "Failed to create egress flow(vport=%u,err=%d)\n",
			 vport_num, err);
		goto err_egress_flow_create;
	}
	entry->egress_handle = handle;

	err = rhashtable_insert_fast(&bridge->fdb_ht, &entry->ht_node, fdb_ht_params);
	if (err) {
		esw_warn(esw->dev, "Failed to insert FDB flow(vport=%u,err=%d)\n", vport_num, err);
		goto err_ht_init;
	}

	/* Untagged entries keep an empty vlan_list so cleanup can list_del(). */
	if (vlan)
		list_add(&entry->vlan_list, &vlan->fdb_list);
	else
		INIT_LIST_HEAD(&entry->vlan_list);
	list_add(&entry->list, &bridge->fdb_list);

	trace_mlx5_esw_bridge_fdb_entry_init(entry);
	return entry;

err_ht_init:
	mlx5_del_flow_rules(entry->egress_handle);
err_egress_flow_create:
	if (entry->filter_handle)
		mlx5_del_flow_rules(entry->filter_handle);
err_ingress_filter_flow_create:
	mlx5_del_flow_rules(entry->ingress_handle);
err_ingress_flow_create:
	mlx5_fc_destroy(esw->dev, entry->ingress_counter);
err_ingress_fc_create:
	kvfree(entry);
	return ERR_PTR(err);
}
1449
1450int mlx5_esw_bridge_ageing_time_set(u16 vport_num, u16 esw_owner_vhca_id, unsigned long ageing_time,
1451				    struct mlx5_esw_bridge_offloads *br_offloads)
1452{
1453	struct mlx5_esw_bridge *bridge;
1454
1455	bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1456	if (!bridge)
1457		return -EINVAL;
1458
1459	bridge->ageing_time = clock_t_to_jiffies(ageing_time);
1460	return 0;
1461}
1462
/* Toggle VLAN filtering on the bridge the given vport is attached to.
 * Changing the mode invalidates all offloaded FDB/MDB entries (their
 * rules depend on whether filtering is active), so both tables are
 * flushed before the flag is flipped.  No-op when the mode is unchanged.
 */
int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable,
				       struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge *bridge;
	bool filtering;

	bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!bridge)
		return -EINVAL;

	filtering = bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
	if (filtering == enable)
		return 0;

	mlx5_esw_bridge_fdb_flush(bridge);
	mlx5_esw_bridge_mdb_flush(bridge);
	if (enable)
		bridge->flags |= MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
	else
		bridge->flags &= ~MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;

	return 0;
}
1486
/* Change the bridge VLAN protocol (802.1Q or 802.1AD).  All FDB/MDB
 * entries are flushed and the per-port VLAN push/pop actions recreated
 * with the new protocol.  Returns -EOPNOTSUPP for any other protocol.
 */
int mlx5_esw_bridge_vlan_proto_set(u16 vport_num, u16 esw_owner_vhca_id, u16 proto,
				   struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge *bridge;

	bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id,
						  br_offloads);
	if (!bridge)
		return -EINVAL;

	if (bridge->vlan_proto == proto)
		return 0;
	if (proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
		esw_warn(br_offloads->esw->dev, "Can't set unsupported VLAN protocol %x", proto);
		return -EOPNOTSUPP;
	}

	mlx5_esw_bridge_fdb_flush(bridge);
	mlx5_esw_bridge_mdb_flush(bridge);
	bridge->vlan_proto = proto;
	/* NOTE(review): the return value of mlx5_esw_bridge_vlans_recreate()
	 * is discarded; a partial recreation failure is only logged inside
	 * the helper — confirm this best-effort behavior is intended.
	 */
	mlx5_esw_bridge_vlans_recreate(bridge);

	return 0;
}
1511
/* Enable/disable multicast offload on the bridge the vport is attached
 * to.  Requires device support for an FDB multi-path any table, uplink
 * hairpin and ignore_flow_level; otherwise -EOPNOTSUPP.  No-op when the
 * requested state matches the current one.
 */
int mlx5_esw_bridge_mcast_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable,
			      struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_esw_bridge *bridge;
	int err = 0;
	bool mcast;

	if (!(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_multi_path_any_table) ||
	      MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_multi_path_any_table_limit_regc)) ||
	    !MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_uplink_hairpin) ||
	    !MLX5_CAP_ESW_FLOWTABLE_FDB((esw)->dev, ignore_flow_level))
		return -EOPNOTSUPP;

	bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!bridge)
		return -EINVAL;

	mcast = bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG;
	if (mcast == enable)
		return 0;

	if (enable)
		err = mlx5_esw_bridge_mcast_enable(bridge);
	else
		mlx5_esw_bridge_mcast_disable(bridge);

	return err;
}
1541
/* Allocate a bridge port object for @vport_num, initialize its multicast
 * state and register it in the offloads ports xarray.  @flags carries
 * port attributes such as MLX5_ESW_BRIDGE_PORT_FLAG_PEER.
 *
 * Returns 0 on success; on failure the port is unwound and freed.
 */
static int mlx5_esw_bridge_vport_init(u16 vport_num, u16 esw_owner_vhca_id, u16 flags,
				      struct mlx5_esw_bridge_offloads *br_offloads,
				      struct mlx5_esw_bridge *bridge)
{
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_esw_bridge_port *port;
	int err;

	port = kvzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->vport_num = vport_num;
	port->esw_owner_vhca_id = esw_owner_vhca_id;
	port->bridge = bridge;
	port->flags |= flags;
	xa_init(&port->vlans);

	err = mlx5_esw_bridge_port_mcast_init(port);
	if (err) {
		esw_warn(esw->dev,
			 "Failed to initialize port multicast (vport=%u,esw_owner_vhca_id=%u,err=%d)\n",
			 port->vport_num, port->esw_owner_vhca_id, err);
		goto err_port_mcast;
	}

	err = mlx5_esw_bridge_port_insert(port, br_offloads);
	if (err) {
		esw_warn(esw->dev,
			 "Failed to insert port metadata (vport=%u,esw_owner_vhca_id=%u,err=%d)\n",
			 port->vport_num, port->esw_owner_vhca_id, err);
		goto err_port_insert;
	}
	trace_mlx5_esw_bridge_vport_init(port);

	return 0;

err_port_insert:
	mlx5_esw_bridge_port_mcast_cleanup(port);
err_port_mcast:
	kvfree(port);
	return err;
}
1585
/* Detach @port from its bridge and free it.  FDB entries belonging to
 * this vport are destroyed directly (no switchdev notification — note
 * this uses _cleanup, not _notify_and_cleanup), then vlans and multicast
 * state are flushed, the port is unregistered and the bridge reference
 * is dropped.  Always returns 0.
 */
static int mlx5_esw_bridge_vport_cleanup(struct mlx5_esw_bridge_offloads *br_offloads,
					 struct mlx5_esw_bridge_port *port)
{
	u16 vport_num = port->vport_num, esw_owner_vhca_id = port->esw_owner_vhca_id;
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
		if (entry->vport_num == vport_num && entry->esw_owner_vhca_id == esw_owner_vhca_id)
			mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);

	trace_mlx5_esw_bridge_vport_cleanup(port);
	mlx5_esw_bridge_port_vlans_flush(port, bridge);
	mlx5_esw_bridge_port_mcast_cleanup(port);
	mlx5_esw_bridge_port_erase(port, br_offloads);
	kvfree(port);
	mlx5_esw_bridge_put(br_offloads, bridge);
	return 0;
}
1605
/* Attach a vport to the bridge behind @br_netdev (looked up or created,
 * taking a reference) and initialize its port object with @flags.  The
 * bridge reference is dropped again if port init fails.
 */
static int mlx5_esw_bridge_vport_link_with_flags(struct net_device *br_netdev, u16 vport_num,
						 u16 esw_owner_vhca_id, u16 flags,
						 struct mlx5_esw_bridge_offloads *br_offloads,
						 struct netlink_ext_ack *extack)
{
	struct mlx5_esw_bridge *bridge;
	int err;

	bridge = mlx5_esw_bridge_lookup(br_netdev, br_offloads);
	if (IS_ERR(bridge)) {
		NL_SET_ERR_MSG_MOD(extack, "Error checking for existing bridge with same ifindex");
		return PTR_ERR(bridge);
	}

	err = mlx5_esw_bridge_vport_init(vport_num, esw_owner_vhca_id, flags, br_offloads, bridge);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Error initializing port");
		goto err_vport;
	}
	return 0;

err_vport:
	mlx5_esw_bridge_put(br_offloads, bridge);
	return err;
}
1631
/* Attach a local (non-peer) vport to the bridge behind @br_netdev. */
int mlx5_esw_bridge_vport_link(struct net_device *br_netdev, u16 vport_num, u16 esw_owner_vhca_id,
			       struct mlx5_esw_bridge_offloads *br_offloads,
			       struct netlink_ext_ack *extack)
{
	return mlx5_esw_bridge_vport_link_with_flags(br_netdev, vport_num, esw_owner_vhca_id, 0,
						     br_offloads, extack);
}
1639
/* Detach a vport from the bridge behind @br_netdev.  Fails with -EINVAL
 * when the vport is not attached at all or is attached to a different
 * bridge (checked by ifindex).
 */
int mlx5_esw_bridge_vport_unlink(struct net_device *br_netdev, u16 vport_num,
				 u16 esw_owner_vhca_id,
				 struct mlx5_esw_bridge_offloads *br_offloads,
				 struct netlink_ext_ack *extack)
{
	struct mlx5_esw_bridge_port *port;
	int err;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port) {
		NL_SET_ERR_MSG_MOD(extack, "Port is not attached to any bridge");
		return -EINVAL;
	}
	if (port->bridge->ifindex != br_netdev->ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "Port is attached to another bridge");
		return -EINVAL;
	}

	err = mlx5_esw_bridge_vport_cleanup(br_offloads, port);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "Port cleanup failed");
	return err;
}
1663
/* Attach a peer-eswitch vport to the bridge behind @br_netdev.  Peer
 * ports are only offloaded when the device supports merged eswitch;
 * otherwise this is silently a no-op (returns 0).
 */
int mlx5_esw_bridge_vport_peer_link(struct net_device *br_netdev, u16 vport_num,
				    u16 esw_owner_vhca_id,
				    struct mlx5_esw_bridge_offloads *br_offloads,
				    struct netlink_ext_ack *extack)
{
	if (!MLX5_CAP_ESW(br_offloads->esw->dev, merged_eswitch))
		return 0;

	return mlx5_esw_bridge_vport_link_with_flags(br_netdev, vport_num, esw_owner_vhca_id,
						     MLX5_ESW_BRIDGE_PORT_FLAG_PEER,
						     br_offloads, extack);
}
1676
/* Detach a peer-eswitch vport; identical to the regular unlink path. */
int mlx5_esw_bridge_vport_peer_unlink(struct net_device *br_netdev, u16 vport_num,
				      u16 esw_owner_vhca_id,
				      struct mlx5_esw_bridge_offloads *br_offloads,
				      struct netlink_ext_ack *extack)
{
	return mlx5_esw_bridge_vport_unlink(br_netdev, vport_num, esw_owner_vhca_id, br_offloads,
					    extack);
}
1685
/* Add (or re-add with different flags) VLAN @vid on a bridge port.  An
 * existing entry with identical flags is left untouched; one with
 * different flags is destroyed and recreated, since the flags determine
 * which push/pop hardware actions exist.
 */
int mlx5_esw_bridge_port_vlan_add(u16 vport_num, u16 esw_owner_vhca_id, u16 vid, u16 flags,
				  struct mlx5_esw_bridge_offloads *br_offloads,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge_vlan *vlan;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port)
		return -EINVAL;

	vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
	if (vlan) {
		if (vlan->flags == flags)
			return 0;
		mlx5_esw_bridge_vlan_cleanup(port, vlan, port->bridge);
	}

	vlan = mlx5_esw_bridge_vlan_create(port->bridge->vlan_proto, vid, flags, port,
					   br_offloads->esw);
	if (IS_ERR(vlan)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to create VLAN entry");
		return PTR_ERR(vlan);
	}
	return 0;
}
1712
/* Remove VLAN @vid from a bridge port; silently ignores unknown
 * port/vid combinations.
 */
void mlx5_esw_bridge_port_vlan_del(u16 vport_num, u16 esw_owner_vhca_id, u16 vid,
				   struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge_vlan *vlan;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port)
		return;

	vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
	if (!vlan)
		return;
	mlx5_esw_bridge_vlan_cleanup(port, vlan, port->bridge);
}
1728
/* Refresh the lastuse timestamp of an offloaded FDB entry in response to
 * a switchdev 'used' update, so the ageing logic in
 * mlx5_esw_bridge_update() does not expire it.  Missing bridge or entry
 * is tolerated (asynchronous workqueue processing may race deletion).
 */
void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
				     struct mlx5_esw_bridge_offloads *br_offloads,
				     struct switchdev_notifier_fdb_info *fdb_info)
{
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_esw_bridge *bridge;

	bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!bridge)
		return;

	entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
	if (!entry) {
		esw_debug(br_offloads->esw->dev,
			  "FDB update entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
			  fdb_info->addr, fdb_info->vid, vport_num);
		return;
	}

	entry->lastuse = jiffies;
}
1750
/* Flag an offloaded FDB entry as deleted by the software bridge; entries
 * with this flag are skipped by the refresh/ageing loop in
 * mlx5_esw_bridge_update().  Missing bridge or entry is tolerated.
 */
void mlx5_esw_bridge_fdb_mark_deleted(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
				      struct mlx5_esw_bridge_offloads *br_offloads,
				      struct switchdev_notifier_fdb_info *fdb_info)
{
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_esw_bridge *bridge;

	bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!bridge)
		return;

	entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
	if (!entry) {
		esw_debug(br_offloads->esw->dev,
			  "FDB mark deleted entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
			  fdb_info->addr, fdb_info->vid, vport_num);
		return;
	}

	entry->flags |= MLX5_ESW_BRIDGE_FLAG_DELETED;
}
1772
/* Offload an FDB entry reported by switchdev for the given vport, then
 * acknowledge it: user-added entries get SWITCHDEV_FDB_OFFLOADED; learned
 * local entries get SWITCHDEV_FDB_ADD_TO_BRIDGE so the kernel bridge does
 * not age them; peer entries are offloaded silently.
 */
void mlx5_esw_bridge_fdb_create(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
				struct mlx5_esw_bridge_offloads *br_offloads,
				struct switchdev_notifier_fdb_info *fdb_info)
{
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_esw_bridge_port *port;
	struct mlx5_esw_bridge *bridge;

	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!port)
		return;

	bridge = port->bridge;
	entry = mlx5_esw_bridge_fdb_entry_init(dev, vport_num, esw_owner_vhca_id, fdb_info->addr,
					       fdb_info->vid, fdb_info->added_by_user,
					       port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER,
					       br_offloads->esw, bridge);
	if (IS_ERR(entry))
		return;

	if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
		mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
						   SWITCHDEV_FDB_OFFLOADED);
	else if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER))
		/* Take over dynamic entries to prevent kernel bridge from aging them out. */
		mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
						   SWITCHDEV_FDB_ADD_TO_BRIDGE);
}
1801
/* Remove the offloaded FDB entry matching a switchdev delete event,
 * notifying the kernel bridge as appropriate.  Missing bridge or entry is
 * tolerated (asynchronous processing may race deletion).
 */
void mlx5_esw_bridge_fdb_remove(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
				struct mlx5_esw_bridge_offloads *br_offloads,
				struct switchdev_notifier_fdb_info *fdb_info)
{
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_esw_bridge_fdb_entry *entry;
	struct mlx5_esw_bridge *bridge;

	bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
	if (!bridge)
		return;

	entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
	if (!entry) {
		esw_debug(esw->dev,
			  "FDB remove entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
			  fdb_info->addr, fdb_info->vid, vport_num);
		return;
	}

	mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
}
1824
/* Periodic ageing pass over all bridges: compare each entry's hardware
 * counter lastuse against the recorded timestamp.  Entries that saw
 * traffic are refreshed toward the kernel bridge; idle local entries past
 * the bridge ageing time are removed.  User-added and already-deleted
 * entries are skipped; peer entries are never aged here (their home
 * eswitch owns their lifetime).
 */
void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
	struct mlx5_esw_bridge *bridge;

	list_for_each_entry(bridge, &br_offloads->bridges, list) {
		list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
			unsigned long lastuse =
				(unsigned long)mlx5_fc_query_lastuse(entry->ingress_counter);

			if (entry->flags & (MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER |
					    MLX5_ESW_BRIDGE_FLAG_DELETED))
				continue;

			if (time_after(lastuse, entry->lastuse))
				mlx5_esw_bridge_fdb_entry_refresh(entry);
			else if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER) &&
				 time_is_before_jiffies(entry->lastuse + bridge->ageing_time))
				mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
		}
	}
}
1847
1848int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
1849				 const unsigned char *addr, u16 vid,
1850				 struct mlx5_esw_bridge_offloads *br_offloads,
1851				 struct netlink_ext_ack *extack)
1852{
1853	struct mlx5_esw_bridge_vlan *vlan;
1854	struct mlx5_esw_bridge_port *port;
1855	struct mlx5_esw_bridge *bridge;
1856	int err;
1857
1858	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1859	if (!port) {
1860		esw_warn(br_offloads->esw->dev,
1861			 "Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)\n",
1862			 addr, vport_num);
1863		NL_SET_ERR_MSG_FMT_MOD(extack,
1864				       "Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)\n",
1865				       addr, vport_num);
1866		return -EINVAL;
1867	}
1868
1869	bridge = port->bridge;
1870	if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG && vid) {
1871		vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
1872		if (!vlan) {
1873			esw_warn(br_offloads->esw->dev,
1874				 "Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
1875				 addr, vid, vport_num);
1876			NL_SET_ERR_MSG_FMT_MOD(extack,
1877					       "Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
1878					       addr, vid, vport_num);
1879			return -EINVAL;
1880		}
1881	}
1882
1883	err = mlx5_esw_bridge_port_mdb_attach(dev, port, addr, vid);
1884	if (err) {
1885		NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to add MDB (MAC=%pM,vid=%u,vport=%u)\n",
1886				       addr, vid, vport_num);
1887		return err;
1888	}
1889
1890	return 0;
1891}
1892
1893void mlx5_esw_bridge_port_mdb_del(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
1894				  const unsigned char *addr, u16 vid,
1895				  struct mlx5_esw_bridge_offloads *br_offloads)
1896{
1897	struct mlx5_esw_bridge_port *port;
1898
1899	port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
1900	if (!port)
1901		return;
1902
1903	mlx5_esw_bridge_port_mdb_detach(dev, port, addr, vid);
1904}
1905
1906static void mlx5_esw_bridge_flush(struct mlx5_esw_bridge_offloads *br_offloads)
1907{
1908	struct mlx5_esw_bridge_port *port;
1909	unsigned long i;
1910
1911	xa_for_each(&br_offloads->ports, i, port)
1912		mlx5_esw_bridge_vport_cleanup(br_offloads, port);
1913
1914	WARN_ONCE(!list_empty(&br_offloads->bridges),
1915		  "Cleaning up bridge offloads while still having bridges attached\n");
1916}
1917
1918struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw)
1919{
1920	struct mlx5_esw_bridge_offloads *br_offloads;
1921
1922	ASSERT_RTNL();
1923
1924	br_offloads = kvzalloc(sizeof(*br_offloads), GFP_KERNEL);
1925	if (!br_offloads)
1926		return ERR_PTR(-ENOMEM);
1927
1928	INIT_LIST_HEAD(&br_offloads->bridges);
1929	xa_init(&br_offloads->ports);
1930	br_offloads->esw = esw;
1931	esw->br_offloads = br_offloads;
1932	mlx5_esw_bridge_debugfs_offloads_init(br_offloads);
1933
1934	return br_offloads;
1935}
1936
1937void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw)
1938{
1939	struct mlx5_esw_bridge_offloads *br_offloads = esw->br_offloads;
1940
1941	ASSERT_RTNL();
1942
1943	if (!br_offloads)
1944		return;
1945
1946	mlx5_esw_bridge_flush(br_offloads);
1947	WARN_ON(!xa_empty(&br_offloads->ports));
1948	mlx5_esw_bridge_debugfs_offloads_cleanup(br_offloads);
1949
1950	esw->br_offloads = NULL;
1951	kvfree(br_offloads);
1952}
1953