// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#include "lib/devcom.h"
#include "bridge.h"
#include "eswitch.h"
#include "bridge_priv.h"
#include "diag/bridge_tracepoint.h"

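/* MDB entries are keyed by multicast group MAC address and VLAN id. Each
 * entry is also linked on bridge->mdb_list so the whole set can be flushed
 * without walking the hashtable.
 */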
static const struct rhashtable_params mdb_ht_params = {
	.key_offset = offsetof(struct mlx5_esw_bridge_mdb_entry, key),
	.key_len = sizeof(struct mlx5_esw_bridge_mdb_key),
	.head_offset = offsetof(struct mlx5_esw_bridge_mdb_entry, ht_node),
	.automatic_shrinking = true,
};

int mlx5_esw_bridge_mdb_init(struct mlx5_esw_bridge *bridge)
{
	INIT_LIST_HEAD(&bridge->mdb_list);
	return rhashtable_init(&bridge->mdb_ht, &mdb_ht_params);
}

void mlx5_esw_bridge_mdb_cleanup(struct mlx5_esw_bridge *bridge)
{
	rhashtable_destroy(&bridge->mdb_ht);
}

static struct mlx5_esw_bridge_port *
mlx5_esw_bridge_mdb_port_lookup(struct mlx5_esw_bridge_port *port,
				struct mlx5_esw_bridge_mdb_entry *entry)
{
	return xa_load(&entry->ports, mlx5_esw_bridge_port_key(port));
}

static int mlx5_esw_bridge_mdb_port_insert(struct mlx5_esw_bridge_port *port,
					   struct mlx5_esw_bridge_mdb_entry *entry)
{
	int err = xa_insert(&entry->ports, mlx5_esw_bridge_port_key(port), port, GFP_KERNEL);

	if (!err)
		entry->num_ports++;
	return err;
}

static void mlx5_esw_bridge_mdb_port_remove(struct mlx5_esw_bridge_port *port,
					    struct mlx5_esw_bridge_mdb_entry *entry)
{
	xa_erase(&entry->ports, mlx5_esw_bridge_port_key(port));
	entry->num_ports--;
}

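/* Create a single egress rule that matches the group DMAC (and VLAN tag, if
 * any) and replicates the packet to the per-port multicast table of every
 * member port. FLOW_ACT_IGNORE_FLOW_LEVEL allows forwarding to those tables
 * regardless of their level in the flow hierarchy.
 */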
static struct mlx5_flow_handle *
mlx5_esw_bridge_mdb_flow_create(u16 esw_owner_vhca_id, struct mlx5_esw_bridge_mdb_entry *entry,
				struct mlx5_esw_bridge *bridge)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND | FLOW_ACT_IGNORE_FLOW_LEVEL,
	};
	int num_dests = entry->num_ports, i = 0;
	struct mlx5_flow_destination *dests;
	struct mlx5_esw_bridge_port *port;
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;
	u8 *dmac_v, *dmac_c;
	unsigned long idx;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	dests = kvcalloc(num_dests, sizeof(*dests), GFP_KERNEL);
	if (!dests) {
		kvfree(rule_spec);
		return ERR_PTR(-ENOMEM);
	}

	xa_for_each(&entry->ports, idx, port) {
		dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dests[i].ft = port->mcast.ft;
		if (port->vport_num == MLX5_VPORT_UPLINK)
			dests[i].ft->flags |= MLX5_FLOW_TABLE_UPLINK_VPORT;
		i++;
	}

	rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value, outer_headers.dmac_47_16);
	ether_addr_copy(dmac_v, entry->key.addr);
	dmac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria, outer_headers.dmac_47_16);
	eth_broadcast_addr(dmac_c);

	if (entry->key.vid) {
		if (bridge->vlan_proto == ETH_P_8021Q) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.cvlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.cvlan_tag);
		} else if (bridge->vlan_proto == ETH_P_8021AD) {
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
					 outer_headers.svlan_tag);
			MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
					 outer_headers.svlan_tag);
		}
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
			 entry->key.vid);
	}

	handle = mlx5_add_flow_rules(bridge->egress_ft, rule_spec, &flow_act, dests, num_dests);

	kvfree(dests);
	kvfree(rule_spec);
	return handle;
}

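/* (Re)offload an MDB entry after its port set has changed. The replacement
 * rule is created before the old one is deleted, so a failed create leaves
 * the entry un-offloaded instead of pointing at a stale rule.
 */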
static int
mlx5_esw_bridge_port_mdb_offload(struct mlx5_esw_bridge_port *port,
				 struct mlx5_esw_bridge_mdb_entry *entry)
{
	struct mlx5_flow_handle *handle;

	handle = mlx5_esw_bridge_mdb_flow_create(port->esw_owner_vhca_id, entry, port->bridge);
	if (entry->egress_handle) {
		mlx5_del_flow_rules(entry->egress_handle);
		entry->egress_handle = NULL;
	}
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	entry->egress_handle = handle;
	return 0;
}

static struct mlx5_esw_bridge_mdb_entry *
mlx5_esw_bridge_mdb_lookup(struct mlx5_esw_bridge *bridge,
			   const unsigned char *addr, u16 vid)
{
	struct mlx5_esw_bridge_mdb_key key = {};

	ether_addr_copy(key.addr, addr);
	key.vid = vid;
	return rhashtable_lookup_fast(&bridge->mdb_ht, &key, mdb_ht_params);
}

static struct mlx5_esw_bridge_mdb_entry *
mlx5_esw_bridge_port_mdb_entry_init(struct mlx5_esw_bridge_port *port,
				    const unsigned char *addr, u16 vid)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry;
	int err;

	entry = kvzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	ether_addr_copy(entry->key.addr, addr);
	entry->key.vid = vid;
	xa_init(&entry->ports);
	err = rhashtable_insert_fast(&bridge->mdb_ht, &entry->ht_node, mdb_ht_params);
	if (err)
		goto err_ht_insert;

	list_add(&entry->list, &bridge->mdb_list);

	return entry;

err_ht_insert:
	xa_destroy(&entry->ports);
	kvfree(entry);
	return ERR_PTR(err);
}

static void mlx5_esw_bridge_port_mdb_entry_cleanup(struct mlx5_esw_bridge *bridge,
						   struct mlx5_esw_bridge_mdb_entry *entry)
{
	if (entry->egress_handle)
		mlx5_del_flow_rules(entry->egress_handle);
	list_del(&entry->list);
	rhashtable_remove_fast(&bridge->mdb_ht, &entry->ht_node, mdb_ht_params);
	xa_destroy(&entry->ports);
	kvfree(entry);
}

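/* Attach a port to an MDB entry, creating the entry on first use. Offload
 * failure is logged but not propagated: the same entry may already serve
 * other ports and must stay in the software model.
 */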
int mlx5_esw_bridge_port_mdb_attach(struct net_device *dev, struct mlx5_esw_bridge_port *port,
				    const unsigned char *addr, u16 vid)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry;
	int err;

	if (!(bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG))
		return -EOPNOTSUPP;

	entry = mlx5_esw_bridge_mdb_lookup(bridge, addr, vid);
	if (entry) {
		if (mlx5_esw_bridge_mdb_port_lookup(port, entry)) {
			esw_warn(bridge->br_offloads->esw->dev, "MDB attach entry is already attached to port (MAC=%pM,vid=%u,vport=%u)\n",
				 addr, vid, port->vport_num);
			return 0;
		}
	} else {
		entry = mlx5_esw_bridge_port_mdb_entry_init(port, addr, vid);
		if (IS_ERR(entry)) {
			err = PTR_ERR(entry);
			esw_warn(bridge->br_offloads->esw->dev, "MDB attach failed to init entry (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
				 addr, vid, port->vport_num, err);
			return err;
		}
	}

	err = mlx5_esw_bridge_mdb_port_insert(port, entry);
	if (err) {
		if (!entry->num_ports)
			mlx5_esw_bridge_port_mdb_entry_cleanup(bridge, entry); /* new mdb entry */
		esw_warn(bridge->br_offloads->esw->dev,
			 "MDB attach failed to insert port (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
			 addr, vid, port->vport_num, err);
		return err;
	}

	err = mlx5_esw_bridge_port_mdb_offload(port, entry);
	if (err)
		/* Single mdb can be used by multiple ports, so just log the
		 * error and continue.
		 */
		esw_warn(bridge->br_offloads->esw->dev, "MDB attach failed to offload (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
			 addr, vid, port->vport_num, err);

	trace_mlx5_esw_bridge_port_mdb_attach(dev, entry);
	return 0;
}

static void mlx5_esw_bridge_port_mdb_entry_detach(struct mlx5_esw_bridge_port *port,
						  struct mlx5_esw_bridge_mdb_entry *entry)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	int err;

	mlx5_esw_bridge_mdb_port_remove(port, entry);
	if (!entry->num_ports) {
		mlx5_esw_bridge_port_mdb_entry_cleanup(bridge, entry);
		return;
	}

	err = mlx5_esw_bridge_port_mdb_offload(port, entry);
	if (err)
		/* Single mdb can be used by multiple ports, so just log the
		 * error and continue.
		 */
		esw_warn(bridge->br_offloads->esw->dev, "MDB detach failed to offload (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
			 entry->key.addr, entry->key.vid, port->vport_num, err);
}

void mlx5_esw_bridge_port_mdb_detach(struct net_device *dev, struct mlx5_esw_bridge_port *port,
				     const unsigned char *addr, u16 vid)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry;

	entry = mlx5_esw_bridge_mdb_lookup(bridge, addr, vid);
	if (!entry) {
		esw_debug(bridge->br_offloads->esw->dev,
			  "MDB detach entry not found (MAC=%pM,vid=%u,vport=%u)\n",
			  addr, vid, port->vport_num);
		return;
	}

	if (!mlx5_esw_bridge_mdb_port_lookup(port, entry)) {
		esw_debug(bridge->br_offloads->esw->dev,
			  "MDB detach entry not attached to the port (MAC=%pM,vid=%u,vport=%u)\n",
			  addr, vid, port->vport_num);
		return;
	}

	trace_mlx5_esw_bridge_port_mdb_detach(dev, entry);
	mlx5_esw_bridge_port_mdb_entry_detach(port, entry);
}

void mlx5_esw_bridge_port_mdb_vlan_flush(struct mlx5_esw_bridge_port *port,
					 struct mlx5_esw_bridge_vlan *vlan)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->mdb_list, list)
		if (entry->key.vid == vlan->vid && mlx5_esw_bridge_mdb_port_lookup(port, entry))
			mlx5_esw_bridge_port_mdb_entry_detach(port, entry);
}

static void mlx5_esw_bridge_port_mdb_flush(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_esw_bridge_mdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->mdb_list, list)
		if (mlx5_esw_bridge_mdb_port_lookup(port, entry))
			mlx5_esw_bridge_port_mdb_entry_detach(port, entry);
}

void mlx5_esw_bridge_mdb_flush(struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_mdb_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &bridge->mdb_list, list)
		mlx5_esw_bridge_port_mdb_entry_cleanup(bridge, entry);
}
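
/* Each bridge port owns a multicast flow table: multi-destination MDB rules
 * in the bridge egress table forward into these per-port tables, which drop
 * traffic originating from the port itself and deliver the rest to the vport.
 */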
static int mlx5_esw_bridge_port_mcast_fts_init(struct mlx5_esw_bridge_port *port,
					       struct mlx5_esw_bridge *bridge)
{
	struct mlx5_eswitch *esw = bridge->br_offloads->esw;
	struct mlx5_flow_table *mcast_ft;

	mcast_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_MCAST_TABLE_SIZE,
						MLX5_ESW_BRIDGE_LEVEL_MCAST_TABLE,
						esw);
	if (IS_ERR(mcast_ft))
		return PTR_ERR(mcast_ft);

	port->mcast.ft = mcast_ft;
	return 0;
}

static void mlx5_esw_bridge_port_mcast_fts_cleanup(struct mlx5_esw_bridge_port *port)
{
	if (port->mcast.ft)
		mlx5_destroy_flow_table(port->mcast.ft);
	port->mcast.ft = NULL;
}

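/* The per-port multicast table is carved into four flow groups: a filter
 * group matching on source port metadata in reg_c_0, VLAN and QinQ groups
 * matching on the outer VLAN tag, and a catch-all forward group.
 */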
static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_filter_fg_create(struct mlx5_eswitch *esw,
				       struct mlx5_flow_table *mcast_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_MCAST_TABLE_FILTER_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_MCAST_TABLE_FILTER_GRP_IDX_TO);

	fg = mlx5_create_flow_group(mcast_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create filter flow group for bridge mcast table (err=%pe)\n",
			 fg);

	return fg;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
					   struct mlx5_eswitch *esw,
					   struct mlx5_flow_table *mcast_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	if (vlan_proto == ETH_P_8021Q)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
	else if (vlan_proto == ETH_P_8021AD)
		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);

	MLX5_SET(create_flow_group_in, in, start_flow_index, from);
	MLX5_SET(create_flow_group_in, in, end_flow_index, to);

	fg = mlx5_create_flow_group(mcast_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create VLAN(proto=%x) flow group for bridge mcast table (err=%pe)\n",
			 vlan_proto, fg);

	return fg;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *mcast_ft)
{
	unsigned int from = MLX5_ESW_BRIDGE_MCAST_TABLE_VLAN_GRP_IDX_FROM;
	unsigned int to = MLX5_ESW_BRIDGE_MCAST_TABLE_VLAN_GRP_IDX_TO;

	return mlx5_esw_bridge_mcast_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, mcast_ft);
}

static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_qinq_fg_create(struct mlx5_eswitch *esw,
				     struct mlx5_flow_table *mcast_ft)
{
	unsigned int from = MLX5_ESW_BRIDGE_MCAST_TABLE_QINQ_GRP_IDX_FROM;
	unsigned int to = MLX5_ESW_BRIDGE_MCAST_TABLE_QINQ_GRP_IDX_TO;

	return mlx5_esw_bridge_mcast_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw, mcast_ft);
}

static struct mlx5_flow_group *
mlx5_esw_bridge_mcast_fwd_fg_create(struct mlx5_eswitch *esw,
				    struct mlx5_flow_table *mcast_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_MCAST_TABLE_FWD_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_MCAST_TABLE_FWD_GRP_IDX_TO);

	fg = mlx5_create_flow_group(mcast_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create forward flow group for bridge mcast table (err=%pe)\n",
			 fg);

	return fg;
}

static int mlx5_esw_bridge_port_mcast_fgs_init(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_flow_group *fwd_fg, *qinq_fg, *vlan_fg, *filter_fg;
	struct mlx5_eswitch *esw = port->bridge->br_offloads->esw;
	struct mlx5_flow_table *mcast_ft = port->mcast.ft;
	int err;

	filter_fg = mlx5_esw_bridge_mcast_filter_fg_create(esw, mcast_ft);
	if (IS_ERR(filter_fg))
		return PTR_ERR(filter_fg);

	vlan_fg = mlx5_esw_bridge_mcast_vlan_fg_create(esw, mcast_ft);
	if (IS_ERR(vlan_fg)) {
		err = PTR_ERR(vlan_fg);
		goto err_vlan_fg;
	}

	qinq_fg = mlx5_esw_bridge_mcast_qinq_fg_create(esw, mcast_ft);
	if (IS_ERR(qinq_fg)) {
		err = PTR_ERR(qinq_fg);
		goto err_qinq_fg;
	}

	fwd_fg = mlx5_esw_bridge_mcast_fwd_fg_create(esw, mcast_ft);
	if (IS_ERR(fwd_fg)) {
		err = PTR_ERR(fwd_fg);
		goto err_fwd_fg;
	}

	port->mcast.filter_fg = filter_fg;
	port->mcast.vlan_fg = vlan_fg;
	port->mcast.qinq_fg = qinq_fg;
	port->mcast.fwd_fg = fwd_fg;

	return 0;

err_fwd_fg:
	mlx5_destroy_flow_group(qinq_fg);
err_qinq_fg:
	mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
	mlx5_destroy_flow_group(filter_fg);
	return err;
}

static void mlx5_esw_bridge_port_mcast_fgs_cleanup(struct mlx5_esw_bridge_port *port)
{
	if (port->mcast.fwd_fg)
		mlx5_destroy_flow_group(port->mcast.fwd_fg);
	port->mcast.fwd_fg = NULL;
	if (port->mcast.qinq_fg)
		mlx5_destroy_flow_group(port->mcast.qinq_fg);
	port->mcast.qinq_fg = NULL;
	if (port->mcast.vlan_fg)
		mlx5_destroy_flow_group(port->mcast.vlan_fg);
	port->mcast.vlan_fg = NULL;
	if (port->mcast.filter_fg)
		mlx5_destroy_flow_group(port->mcast.filter_fg);
	port->mcast.filter_fg = NULL;
}

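/* Create the filter (drop) rule: match packets whose reg_c_0 source metadata
 * belongs to this port on the given eswitch and drop them, so multicast
 * traffic is never replicated back to the port it came from.
 */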
static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_flow_with_esw_create(struct mlx5_esw_bridge_port *port,
					   struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_DROP,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;

	MLX5_SET(fte_match_param, rule_spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(esw, port->vport_num));

	handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, NULL, 0);

	kvfree(rule_spec);
	return handle;
}

static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_filter_flow_create(struct mlx5_esw_bridge_port *port)
{
	return mlx5_esw_bridge_mcast_flow_with_esw_create(port, port->bridge->br_offloads->esw);
}

static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_filter_flow_peer_create(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_devcom_comp_dev *devcom = port->bridge->br_offloads->esw->devcom, *pos;
	struct mlx5_eswitch *tmp, *peer_esw = NULL;
	struct mlx5_flow_handle *handle;

	if (!mlx5_devcom_for_each_peer_begin(devcom))
		return ERR_PTR(-ENODEV);

	mlx5_devcom_for_each_peer_entry(devcom, tmp, pos) {
		if (mlx5_esw_is_owner(tmp, port->vport_num, port->esw_owner_vhca_id)) {
			peer_esw = tmp;
			break;
		}
	}

	if (!peer_esw) {
		handle = ERR_PTR(-ENODEV);
		goto out;
	}

	handle = mlx5_esw_bridge_mcast_flow_with_esw_create(port, peer_esw);

out:
	mlx5_devcom_for_each_peer_end(devcom);
	return handle;
}

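/* Per-VLAN delivery rule: match the outer tag of the bridge VLAN protocol,
 * pop it via the VLAN's pre-allocated reformat action and forward the packet
 * to the port vport.
 */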
static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_vlan_flow_create(u16 vlan_proto, struct mlx5_esw_bridge_port *port,
				       struct mlx5_esw_bridge_vlan *vlan)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
		.vport.num = port->vport_num,
	};
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
	flow_act.pkt_reformat = vlan->pkt_reformat_pop;

	if (vlan_proto == ETH_P_8021Q) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.cvlan_tag);
	} else if (vlan_proto == ETH_P_8021AD) {
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
				 outer_headers.svlan_tag);
	}
	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.first_vid);
	MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid, vlan->vid);

	if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
		dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
		dest.vport.vhca_id = port->esw_owner_vhca_id;
	}
	handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}

int mlx5_esw_bridge_vlan_mcast_init(u16 vlan_proto, struct mlx5_esw_bridge_port *port,
				    struct mlx5_esw_bridge_vlan *vlan)
{
	struct mlx5_flow_handle *handle;

	if (!(port->bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG))
		return 0;

	handle = mlx5_esw_bridge_mcast_vlan_flow_create(vlan_proto, port, vlan);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	vlan->mcast_handle = handle;
	return 0;
}

void mlx5_esw_bridge_vlan_mcast_cleanup(struct mlx5_esw_bridge_vlan *vlan)
{
	if (vlan->mcast_handle)
		mlx5_del_flow_rules(vlan->mcast_handle);
	vlan->mcast_handle = NULL;
}

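/* Catch-all rule in the forward group: deliver anything that passed the
 * filter and VLAN groups to the port vport unmodified.
 */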
static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_fwd_flow_create(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
		.vport.num = port->vport_num,
	};
	struct mlx5_esw_bridge *bridge = port->bridge;
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
		dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
		dest.vport.vhca_id = port->esw_owner_vhca_id;
	}
	rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
	handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}

static int mlx5_esw_bridge_port_mcast_fhs_init(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_flow_handle *filter_handle, *fwd_handle;
	struct mlx5_esw_bridge_vlan *vlan, *failed;
	unsigned long index;
	int err;

	filter_handle = (port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER) ?
		mlx5_esw_bridge_mcast_filter_flow_peer_create(port) :
		mlx5_esw_bridge_mcast_filter_flow_create(port);
	if (IS_ERR(filter_handle))
		return PTR_ERR(filter_handle);

	fwd_handle = mlx5_esw_bridge_mcast_fwd_flow_create(port);
	if (IS_ERR(fwd_handle)) {
		err = PTR_ERR(fwd_handle);
		goto err_fwd;
	}

	xa_for_each(&port->vlans, index, vlan) {
		err = mlx5_esw_bridge_vlan_mcast_init(port->bridge->vlan_proto, port, vlan);
		if (err) {
			failed = vlan;
			goto err_vlan;
		}
	}

	port->mcast.filter_handle = filter_handle;
	port->mcast.fwd_handle = fwd_handle;

	return 0;

err_vlan:
	xa_for_each(&port->vlans, index, vlan) {
		if (vlan == failed)
			break;

		mlx5_esw_bridge_vlan_mcast_cleanup(vlan);
	}
	mlx5_del_flow_rules(fwd_handle);
err_fwd:
	mlx5_del_flow_rules(filter_handle);
	return err;
}

static void mlx5_esw_bridge_port_mcast_fhs_cleanup(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_esw_bridge_vlan *vlan;
	unsigned long index;

	xa_for_each(&port->vlans, index, vlan)
		mlx5_esw_bridge_vlan_mcast_cleanup(vlan);

	if (port->mcast.fwd_handle)
		mlx5_del_flow_rules(port->mcast.fwd_handle);
	port->mcast.fwd_handle = NULL;
	if (port->mcast.filter_handle)
		mlx5_del_flow_rules(port->mcast.filter_handle);
	port->mcast.filter_handle = NULL;
}

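/* Bring up the full per-port multicast pipeline (table, groups, rules).
 * No-op unless multicast offload is enabled on the bridge.
 */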
int mlx5_esw_bridge_port_mcast_init(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_esw_bridge *bridge = port->bridge;
	int err;

	if (!(bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG))
		return 0;

	err = mlx5_esw_bridge_port_mcast_fts_init(port, bridge);
	if (err)
		return err;

	err = mlx5_esw_bridge_port_mcast_fgs_init(port);
	if (err)
		goto err_fgs;

	err = mlx5_esw_bridge_port_mcast_fhs_init(port);
	if (err)
		goto err_fhs;
	return err;

err_fhs:
	mlx5_esw_bridge_port_mcast_fgs_cleanup(port);
err_fgs:
	mlx5_esw_bridge_port_mcast_fts_cleanup(port);
	return err;
}

void mlx5_esw_bridge_port_mcast_cleanup(struct mlx5_esw_bridge_port *port)
{
	mlx5_esw_bridge_port_mdb_flush(port);
	mlx5_esw_bridge_port_mcast_fhs_cleanup(port);
	mlx5_esw_bridge_port_mcast_fgs_cleanup(port);
	mlx5_esw_bridge_port_mcast_fts_cleanup(port);
}

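/* Snooping support: IGMP and MLD control packets are matched in the shared
 * ingress table and redirected to the skip table, so they bypass bridge
 * offload and reach the software bridge for snooping.
 */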
static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_igmp_fg_create(struct mlx5_eswitch *esw,
				       struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ip_version);
	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ip_protocol);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_IGMP_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_IGMP_GRP_IDX_TO);

	fg = mlx5_create_flow_group(ingress_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create IGMP flow group for bridge ingress table (err=%pe)\n",
			 fg);

	return fg;
}

static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_mld_fg_create(struct mlx5_eswitch *esw,
				      struct mlx5_flow_table *ingress_ft)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in, *match;

	if (!(MLX5_CAP_GEN(esw->dev, flex_parser_protocols) & MLX5_FLEX_PROTO_ICMPV6)) {
		esw_warn(esw->dev,
			 "Can't create MLD flow group due to missing hardware ICMPv6 parsing support\n");
		return NULL;
	}

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_3);
	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ip_version);
	MLX5_SET_TO_ONES(fte_match_param, match, misc_parameters_3.icmpv6_type);

	MLX5_SET(create_flow_group_in, in, start_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_MLD_GRP_IDX_FROM);
	MLX5_SET(create_flow_group_in, in, end_flow_index,
		 MLX5_ESW_BRIDGE_INGRESS_TABLE_MLD_GRP_IDX_TO);

	fg = mlx5_create_flow_group(ingress_ft, in);
	kvfree(in);
	if (IS_ERR(fg))
		esw_warn(esw->dev,
			 "Failed to create MLD flow group for bridge ingress table (err=%pe)\n",
			 fg);

	return fg;
}

static int
mlx5_esw_bridge_ingress_mcast_fgs_init(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_flow_table *ingress_ft = br_offloads->ingress_ft;
	struct mlx5_eswitch *esw = br_offloads->esw;
	struct mlx5_flow_group *igmp_fg, *mld_fg;

	igmp_fg = mlx5_esw_bridge_ingress_igmp_fg_create(esw, ingress_ft);
	if (IS_ERR(igmp_fg))
		return PTR_ERR(igmp_fg);

	mld_fg = mlx5_esw_bridge_ingress_mld_fg_create(esw, ingress_ft);
	if (IS_ERR(mld_fg)) {
		mlx5_destroy_flow_group(igmp_fg);
		return PTR_ERR(mld_fg);
	}

	br_offloads->ingress_igmp_fg = igmp_fg;
	br_offloads->ingress_mld_fg = mld_fg;
	return 0;
}

static void
mlx5_esw_bridge_ingress_mcast_fgs_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
{
	if (br_offloads->ingress_mld_fg)
		mlx5_destroy_flow_group(br_offloads->ingress_mld_fg);
	br_offloads->ingress_mld_fg = NULL;
	if (br_offloads->ingress_igmp_fg)
		mlx5_destroy_flow_group(br_offloads->ingress_igmp_fg);
	br_offloads->ingress_igmp_fg = NULL;
}

static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_igmp_fh_create(struct mlx5_flow_table *ingress_ft,
				       struct mlx5_flow_table *skip_ft)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = skip_ft,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_version, 4);
	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_protocol, IPPROTO_IGMP);

	handle = mlx5_add_flow_rules(ingress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}

static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_mld_fh_create(u8 type, struct mlx5_flow_table *ingress_ft,
				      struct mlx5_flow_table *skip_ft)
{
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = skip_ft,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_spec *rule_spec;
	struct mlx5_flow_handle *handle;

	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
	if (!rule_spec)
		return ERR_PTR(-ENOMEM);

	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_3;

	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_version, 6);
	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, misc_parameters_3.icmpv6_type);
	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_3.icmpv6_type, type);

	handle = mlx5_add_flow_rules(ingress_ft, rule_spec, &flow_act, &dest, 1);

	kvfree(rule_spec);
	return handle;
}

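/* Install the static snooping redirect rules: one for IGMP over IPv4 and,
 * when the device can parse ICMPv6, one each for MLD query, report and done
 * (leave) messages.
 */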
static int
mlx5_esw_bridge_ingress_mcast_fhs_create(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_flow_handle *igmp_handle, *mld_query_handle, *mld_report_handle,
		*mld_done_handle;
	struct mlx5_flow_table *ingress_ft = br_offloads->ingress_ft,
		*skip_ft = br_offloads->skip_ft;
	int err;

	igmp_handle = mlx5_esw_bridge_ingress_igmp_fh_create(ingress_ft, skip_ft);
	if (IS_ERR(igmp_handle))
		return PTR_ERR(igmp_handle);

	if (br_offloads->ingress_mld_fg) {
		mld_query_handle = mlx5_esw_bridge_ingress_mld_fh_create(ICMPV6_MGM_QUERY,
									 ingress_ft,
									 skip_ft);
		if (IS_ERR(mld_query_handle)) {
			err = PTR_ERR(mld_query_handle);
			goto err_mld_query;
		}

		mld_report_handle = mlx5_esw_bridge_ingress_mld_fh_create(ICMPV6_MGM_REPORT,
									  ingress_ft,
									  skip_ft);
		if (IS_ERR(mld_report_handle)) {
			err = PTR_ERR(mld_report_handle);
			goto err_mld_report;
		}

		mld_done_handle = mlx5_esw_bridge_ingress_mld_fh_create(ICMPV6_MGM_REDUCTION,
									ingress_ft,
									skip_ft);
		if (IS_ERR(mld_done_handle)) {
			err = PTR_ERR(mld_done_handle);
			goto err_mld_done;
		}
	} else {
		mld_query_handle = NULL;
		mld_report_handle = NULL;
		mld_done_handle = NULL;
	}

	br_offloads->igmp_handle = igmp_handle;
	br_offloads->mld_query_handle = mld_query_handle;
	br_offloads->mld_report_handle = mld_report_handle;
	br_offloads->mld_done_handle = mld_done_handle;

	return 0;

err_mld_done:
	mlx5_del_flow_rules(mld_report_handle);
err_mld_report:
	mlx5_del_flow_rules(mld_query_handle);
err_mld_query:
	mlx5_del_flow_rules(igmp_handle);
	return err;
}

static void
mlx5_esw_bridge_ingress_mcast_fhs_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
{
	if (br_offloads->mld_done_handle)
		mlx5_del_flow_rules(br_offloads->mld_done_handle);
	br_offloads->mld_done_handle = NULL;
	if (br_offloads->mld_report_handle)
		mlx5_del_flow_rules(br_offloads->mld_report_handle);
	br_offloads->mld_report_handle = NULL;
	if (br_offloads->mld_query_handle)
		mlx5_del_flow_rules(br_offloads->mld_query_handle);
	br_offloads->mld_query_handle = NULL;
	if (br_offloads->igmp_handle)
		mlx5_del_flow_rules(br_offloads->igmp_handle);
	br_offloads->igmp_handle = NULL;
}

static int mlx5_esw_bridge_mcast_init(struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_esw_bridge_port *port, *failed;
	unsigned long i;
	int err;

	xa_for_each(&br_offloads->ports, i, port) {
		if (port->bridge != bridge)
			continue;

		err = mlx5_esw_bridge_port_mcast_init(port);
		if (err) {
			failed = port;
			goto err_port;
		}
	}
	return 0;

err_port:
	xa_for_each(&br_offloads->ports, i, port) {
		if (port == failed)
			break;
		if (port->bridge != bridge)
			continue;

		mlx5_esw_bridge_port_mcast_cleanup(port);
	}
	return err;
}

static void mlx5_esw_bridge_mcast_cleanup(struct mlx5_esw_bridge *bridge)
{
	struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
	struct mlx5_esw_bridge_port *port;
	unsigned long i;

	xa_for_each(&br_offloads->ports, i, port) {
		if (port->bridge != bridge)
			continue;

		mlx5_esw_bridge_port_mcast_cleanup(port);
	}
}

static int mlx5_esw_bridge_mcast_global_enable(struct mlx5_esw_bridge_offloads *br_offloads)
{
	int err;

	if (br_offloads->ingress_igmp_fg)
		return 0; /* already enabled by another bridge */

	err = mlx5_esw_bridge_ingress_mcast_fgs_init(br_offloads);
	if (err) {
		esw_warn(br_offloads->esw->dev,
			 "Failed to create global multicast flow groups (err=%d)\n",
			 err);
		return err;
	}

	err = mlx5_esw_bridge_ingress_mcast_fhs_create(br_offloads);
	if (err) {
		esw_warn(br_offloads->esw->dev,
			 "Failed to create global multicast flows (err=%d)\n",
			 err);
		goto err_fhs;
	}

	return 0;

err_fhs:
	mlx5_esw_bridge_ingress_mcast_fgs_cleanup(br_offloads);
	return err;
}

static void mlx5_esw_bridge_mcast_global_disable(struct mlx5_esw_bridge_offloads *br_offloads)
{
	struct mlx5_esw_bridge *br;

	list_for_each_entry(br, &br_offloads->bridges, list) {
		/* Ingress table is global, so only disable snooping when all
		 * bridges on esw have multicast disabled.
		 */
		if (br->flags & MLX5_ESW_BRIDGE_MCAST_FLAG)
			return;
	}

	mlx5_esw_bridge_ingress_mcast_fhs_cleanup(br_offloads);
	mlx5_esw_bridge_ingress_mcast_fgs_cleanup(br_offloads);
}

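/* Enable multicast offload on a bridge: bring up the shared ingress snooping
 * rules first, then the per-port pipelines. The MCAST flag is set before
 * per-port init because the init helpers are no-ops without it.
 */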
int mlx5_esw_bridge_mcast_enable(struct mlx5_esw_bridge *bridge)
{
	int err;

	err = mlx5_esw_bridge_mcast_global_enable(bridge->br_offloads);
	if (err)
		return err;

	bridge->flags |= MLX5_ESW_BRIDGE_MCAST_FLAG;

	err = mlx5_esw_bridge_mcast_init(bridge);
	if (err) {
		esw_warn(bridge->br_offloads->esw->dev, "Failed to enable multicast (err=%d)\n",
			 err);
		bridge->flags &= ~MLX5_ESW_BRIDGE_MCAST_FLAG;
		mlx5_esw_bridge_mcast_global_disable(bridge->br_offloads);
	}
	return err;
}

void mlx5_esw_bridge_mcast_disable(struct mlx5_esw_bridge *bridge)
{
	mlx5_esw_bridge_mcast_cleanup(bridge);
	bridge->flags &= ~MLX5_ESW_BRIDGE_MCAST_FLAG;
	mlx5_esw_bridge_mcast_global_disable(bridge->br_offloads);
}