/*-
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c 347877 2019-05-16 18:26:14Z hselasky $
 */

#include "en.h"

#include <linux/list.h>
#include <dev/mlx5/fs.h>

#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI = 1,
	MLX5E_PROMISC = 2,
};

enum {
	MLX5E_UC = 0,
	MLX5E_MC_IPV4 = 1,
	MLX5E_MC_IPV6 = 2,
	MLX5E_MC_OTHER = 3,
};

enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD = 1,
	MLX5E_ACTION_DEL = 2,
};

struct mlx5e_eth_addr_hash_node {
	LIST_ENTRY(mlx5e_eth_addr_hash_node) hlist;
	u8	action;
	struct mlx5e_eth_addr_info ai;
};

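/* Hash an Ethernet address by its least significant byte. */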
static inline int
mlx5e_hash_eth_addr(const u8 * addr)
{
	return (addr[5]);
}

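/*
 * Add an Ethernet address to the given hash list unless it is already
 * present; a pending delete for the same address is cancelled instead.
 */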
static void
mlx5e_add_eth_addr_to_hash(struct mlx5e_eth_addr_hash_head *hash,
    const u8 * addr)
{
	struct mlx5e_eth_addr_hash_node *hn;
	int ix = mlx5e_hash_eth_addr(addr);

	LIST_FOREACH(hn, &hash[ix], hlist) {
		if (bcmp(hn->ai.addr, addr, ETHER_ADDR_LEN) == 0) {
			if (hn->action == MLX5E_ACTION_DEL)
				hn->action = MLX5E_ACTION_NONE;
			return;
		}
	}

	hn = malloc(sizeof(*hn), M_MLX5EN, M_NOWAIT | M_ZERO);
	if (hn == NULL)
		return;

	ether_addr_copy(hn->ai.addr, addr);
	hn->action = MLX5E_ACTION_ADD;

	LIST_INSERT_HEAD(&hash[ix], hn, hlist);
}

static void
mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
{
	LIST_REMOVE(hn, hlist);
	free(hn, M_MLX5EN);
}

static void
mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai)
{
	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_IPSEC_ESP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_IPSEC_ESP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_IPSEC_AH))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_IPSEC_AH))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]);

	if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]);

	if (ai->tt_vec & (1 << MLX5E_TT_ANY))
		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]);
}

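/* Classify a MAC address as unicast, IPv4/IPv6 multicast, or other multicast. */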
static int
mlx5e_get_eth_addr_type(const u8 * addr)
{
	if (ETHER_IS_MULTICAST(addr) == 0)
		return (MLX5E_UC);

	if ((addr[0] == 0x01) &&
	    (addr[1] == 0x00) &&
	    (addr[2] == 0x5e) &&
	    !(addr[3] & 0x80))
		return (MLX5E_MC_IPV4);

	if ((addr[0] == 0x33) &&
	    (addr[1] == 0x33))
		return (MLX5E_MC_IPV6);

	return (MLX5E_MC_OTHER);
}

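/*
 * Return the set of traffic types (MLX5E_TT_* bits) that rules for the
 * given address and match type must cover.
 */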
static	u32
mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
{
	int eth_addr_type;
	u32 ret;

	switch (type) {
	case MLX5E_FULLMATCH:
		eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
		switch (eth_addr_type) {
		case MLX5E_UC:
			ret =
			    (1 << MLX5E_TT_IPV4_TCP) |
			    (1 << MLX5E_TT_IPV6_TCP) |
			    (1 << MLX5E_TT_IPV4_UDP) |
			    (1 << MLX5E_TT_IPV6_UDP) |
			    (1 << MLX5E_TT_IPV4) |
			    (1 << MLX5E_TT_IPV6) |
			    (1 << MLX5E_TT_ANY) |
			    0;
			break;

		case MLX5E_MC_IPV4:
			ret =
			    (1 << MLX5E_TT_IPV4_UDP) |
			    (1 << MLX5E_TT_IPV4) |
			    0;
			break;

		case MLX5E_MC_IPV6:
			ret =
			    (1 << MLX5E_TT_IPV6_UDP) |
			    (1 << MLX5E_TT_IPV6) |
			    0;
			break;

		default:
			ret =
			    (1 << MLX5E_TT_ANY) |
			    0;
			break;
		}
		break;

	case MLX5E_ALLMULTI:
		ret =
		    (1 << MLX5E_TT_IPV4_UDP) |
		    (1 << MLX5E_TT_IPV6_UDP) |
		    (1 << MLX5E_TT_IPV4) |
		    (1 << MLX5E_TT_IPV6) |
		    (1 << MLX5E_TT_ANY) |
		    0;
		break;

	default:			/* MLX5E_PROMISC */
		ret =
		    (1 << MLX5E_TT_IPV4_TCP) |
		    (1 << MLX5E_TT_IPV6_TCP) |
		    (1 << MLX5E_TT_IPV4_UDP) |
		    (1 << MLX5E_TT_IPV6_UDP) |
		    (1 << MLX5E_TT_IPV4) |
		    (1 << MLX5E_TT_IPV6) |
		    (1 << MLX5E_TT_ANY) |
		    0;
		break;
	}

	return (ret);
}

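/*
 * Install the flow rules for one destination MAC address (or for the
 * allmulti/promisc catch-all), one rule per traffic type returned by
 * mlx5e_get_tt_vec(), each forwarding to the corresponding TIR.
 */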
static int
mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai, int type,
    u32 *mc, u32 *mv)
{
	struct mlx5_flow_destination dest = {};
	u8 mc_enable = 0;
	struct mlx5_flow_rule **rule_p;
	struct mlx5_flow_table *ft = priv->fts.main.t;
	u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
				   outer_headers.dmac_47_16);
	u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
				   outer_headers.dmac_47_16);
	u32 *tirn = priv->tirn;
	u32 tt_vec;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;

	switch (type) {
	case MLX5E_FULLMATCH:
		mc_enable = MLX5_MATCH_OUTER_HEADERS;
		memset(mc_dmac, 0xff, ETH_ALEN);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		mc_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		break;
	default:
		break;
	}

	tt_vec = mlx5e_get_tt_vec(ai, type);

	if (tt_vec & BIT(MLX5E_TT_ANY)) {
		rule_p = &ai->ft_rule[MLX5E_TT_ANY];
		dest.tir_num = tirn[MLX5E_TT_ANY];
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_ANY);
	}

	mc_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);

	if (tt_vec & BIT(MLX5E_TT_IPV4)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
		dest.tir_num = tirn[MLX5E_TT_IPV4];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
		dest.tir_num = tirn[MLX5E_TT_IPV6];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6);
	}

	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;

		ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
	}

	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);

	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IP);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
	}

	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
			 ETHERTYPE_IPV6);
		*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					     MLX5_FS_ETH_FLOW_TAG, &dest);
		if (IS_ERR_OR_NULL(*rule_p))
			goto err_del_ai;
		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
	}

	return 0;

err_del_ai:
	err = PTR_ERR(*rule_p);
	*rule_p = NULL;
	mlx5e_del_eth_addr_from_flow_table(priv, ai);

	return err;
}

static int
mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai, int type)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		if_printf(priv->ifp, "%s: alloc failed\n", __func__);
		err = -ENOMEM;
		goto add_eth_addr_rule_out;
	}
	err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, match_criteria,
	    match_value);

add_eth_addr_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

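/*
 * Push the list of active VLAN IDs into the NIC vport context; VLANs
 * beyond the device limit are dropped with a warning.
 */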
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	struct ifnet *ifp = priv->ifp;
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i;

	list_size = 0;
	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		if_printf(ifp,
			    "ifnet vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return -ENOMEM;

	i = 0;
	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		if_printf(ifp, "Failed to modify vport vlans list err(%d)\n",
			   err);

	kfree(vlans);
	return err;
}

enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};

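/*
 * Install one VLAN steering rule (untagged, any C-tag, any S-tag, or a
 * specific VLAN ID), forwarding matching traffic to the main flow table.
 */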
static int
mlx5e_add_vlan_rule_sub(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid,
    u32 *mc, u32 *mv)
{
	struct mlx5_flow_table *ft = priv->fts.vlan.t;
	struct mlx5_flow_destination dest = {};
	u8 mc_enable = 0;
	struct mlx5_flow_rule **rule_p;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fts.main.t;

	mc_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		rule_p = &priv->vlan.untagged_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		rule_p = &priv->vlan.any_cvlan_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		rule_p = &priv->vlan.any_svlan_ft_rule;
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.svlan_tag, 1);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		rule_p = &priv->vlan.active_vlans_ft_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
		MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
		mlx5e_vport_context_update_vlans(priv);
		break;
	}

	*rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
				     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				     MLX5_FS_ETH_FLOW_TAG,
				     &dest);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		if_printf(priv->ifp, "%s: add rule failed\n", __func__);
	}

	return (err);
}

static int
mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	u32 *match_criteria;
	u32 *match_value;
	int err = 0;

	match_value	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	match_criteria	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
	if (!match_value || !match_criteria) {
		if_printf(priv->ifp, "%s: alloc failed\n", __func__);
		err = -ENOMEM;
		goto add_vlan_rule_out;
	}

	err = mlx5e_add_vlan_rule_sub(priv, rule_type, vid, match_criteria,
				    match_value);

add_vlan_rule_out:
	kvfree(match_criteria);
	kvfree(match_value);

	return (err);
}

static void
mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (priv->vlan.untagged_ft_rule) {
			mlx5_del_flow_rule(priv->vlan.untagged_ft_rule);
			priv->vlan.untagged_ft_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		if (priv->vlan.any_cvlan_ft_rule) {
			mlx5_del_flow_rule(priv->vlan.any_cvlan_ft_rule);
			priv->vlan.any_cvlan_ft_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		if (priv->vlan.any_svlan_ft_rule) {
			mlx5_del_flow_rule(priv->vlan.any_svlan_ft_rule);
			priv->vlan.any_svlan_ft_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
		if (priv->vlan.active_vlans_ft_rule[vid]) {
			mlx5_del_flow_rule(priv->vlan.active_vlans_ft_rule[vid]);
			priv->vlan.active_vlans_ft_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(priv);
		break;
	default:
		break;
	}
}

static void
mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

static int
mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	if (err)
		return (err);

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
	if (err)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);

	return (err);
}

void
mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
	if (priv->vlan.filter_disabled) {
		priv->vlan.filter_disabled = false;
		if (priv->ifp->if_flags & IFF_PROMISC)
			return;
		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
			mlx5e_del_any_vid_rules(priv);
	}
}

void
mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->vlan.filter_disabled) {
		priv->vlan.filter_disabled = true;
		if (priv->ifp->if_flags & IFF_PROMISC)
			return;
		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
			mlx5e_add_any_vid_rules(priv);
	}
}

void
mlx5e_vlan_rx_add_vid(void *arg, struct ifnet *ifp, u16 vid)
{
	struct mlx5e_priv *priv = arg;

	if (ifp != priv->ifp)
		return;

	PRIV_LOCK(priv);
	if (!test_and_set_bit(vid, priv->vlan.active_vlans) &&
	    test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
	PRIV_UNLOCK(priv);
}

void
mlx5e_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, u16 vid)
{
	struct mlx5e_priv *priv = arg;

	if (ifp != priv->ifp)
		return;

	PRIV_LOCK(priv);
	clear_bit(vid, priv->vlan.active_vlans);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
	PRIV_UNLOCK(priv);
}

int
mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
{
	int err;
	int i;

	set_bit(0, priv->vlan.active_vlans);
	for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID) {
		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
					  i);
		if (err)
			goto error;
	}

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
	if (err)
		goto error;

	if (priv->vlan.filter_disabled) {
		err = mlx5e_add_any_vid_rules(priv);
		if (err)
			goto error;
	}
	return (0);
error:
	mlx5e_del_all_vlan_rules(priv);
	return (err);
}

void
mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	if (priv->vlan.filter_disabled)
		mlx5e_del_any_vid_rules(priv);

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
	clear_bit(0, priv->vlan.active_vlans);
}

#define	mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
		LIST_FOREACH_SAFE(hn, &(hash)[i], hlist, tmp)

static void
mlx5e_execute_action(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_hash_node *hn)
{
	switch (hn->action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
		mlx5e_del_eth_addr_from_hash(hn);
		break;

	default:
		break;
	}
}

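/*
 * Snapshot the ifnet's current unicast and multicast link-level
 * addresses into the driver's address hash tables.
 */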
static void
mlx5e_sync_ifp_addr(struct mlx5e_priv *priv)
{
	struct ifnet *ifp = priv->ifp;
	struct ifaddr *ifa;
	struct ifmultiaddr *ifma;

	/* XXX adding this entry might not be needed */
	mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc,
	    LLADDR((struct sockaddr_dl *)(ifp->if_addr->ifa_addr)));

	if_addr_rlock(ifp);
	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family != AF_LINK)
			continue;
		mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc,
		    LLADDR((struct sockaddr_dl *)ifa->ifa_addr));
	}
	if_addr_runlock(ifp);

	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_mc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
	}
	if_maddr_runlock(ifp);
}

static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
	struct ifnet *ifp = priv->ifp;
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_head *addr_list;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], IF_LLADDR(ifp));
	else if (priv->eth_addr.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ifp->if_broadcastaddr);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(IF_LLADDR(ifp), hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}

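/*
 * Program the vport UC or MC address list from the corresponding hash
 * table, truncating it to the device limit if necessary.
 */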
static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
	struct mlx5e_eth_addr_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct mlx5e_eth_addr_hash_head *addr_list;
	struct mlx5e_eth_addr_hash_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		if_printf(priv->ifp,
			    "ifp %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		if_printf(priv->ifp,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}

static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}

static void
mlx5e_apply_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
	    mlx5e_execute_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
	    mlx5e_execute_action(priv, hn);
}

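/*
 * Mark-and-sweep resync of the address hashes: mark every entry for
 * deletion, re-add the addresses still present on the ifnet, then
 * execute the resulting add/delete actions.
 */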
static void
mlx5e_handle_ifp_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_hash_node *hn;
	struct mlx5e_eth_addr_hash_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
	    hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
	    hn->action = MLX5E_ACTION_DEL;

	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_sync_ifp_addr(priv);

	mlx5e_apply_ifp_addr(priv);
}

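/*
 * Recompute the promiscuous, allmulti and broadcast state from the
 * ifnet flags, add or remove the corresponding catch-all rules, resync
 * the per-address rules and update the vport context.
 */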
void
mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
{
	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
	struct ifnet *ndev = priv->ifp;

	bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
	bool promisc_enabled = rx_mode_enable && (ndev->if_flags & IFF_PROMISC);
	bool allmulti_enabled = rx_mode_enable && (ndev->if_flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;

	/* update broadcast address */
	ether_addr_copy(priv->eth_addr.broadcast.addr,
	    priv->ifp->if_broadcastaddr);

	if (enable_promisc) {
		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
		if (!priv->vlan.filter_disabled)
			mlx5e_add_any_vid_rules(priv);
	}
	if (enable_allmulti)
		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_ifp_addr(priv);

	if (disable_broadcast)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->vlan.filter_disabled)
			mlx5e_del_any_vid_rules(priv);
		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
	}

	ea->promisc_enabled = promisc_enabled;
	ea->allmulti_enabled = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}

void
mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv =
	    container_of(work, struct mlx5e_priv, set_rx_mode_work);

	PRIV_LOCK(priv);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_set_rx_mode_core(priv);
	PRIV_UNLOCK(priv);
}

static void
mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}

static void
mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}

#define MLX5E_NUM_MAIN_GROUPS	10
#define MLX5E_MAIN_GROUP0_SIZE	BIT(4)
#define MLX5E_MAIN_GROUP1_SIZE	BIT(3)
#define MLX5E_MAIN_GROUP2_SIZE	BIT(1)
#define MLX5E_MAIN_GROUP3_SIZE	BIT(0)
#define MLX5E_MAIN_GROUP4_SIZE	BIT(14)
#define MLX5E_MAIN_GROUP5_SIZE	BIT(13)
#define MLX5E_MAIN_GROUP6_SIZE	BIT(11)
#define MLX5E_MAIN_GROUP7_SIZE	BIT(2)
#define MLX5E_MAIN_GROUP8_SIZE	BIT(1)
#define MLX5E_MAIN_GROUP9_SIZE	BIT(0)
#define MLX5E_MAIN_TABLE_SIZE	(MLX5E_MAIN_GROUP0_SIZE +\
				 MLX5E_MAIN_GROUP1_SIZE +\
				 MLX5E_MAIN_GROUP2_SIZE +\
				 MLX5E_MAIN_GROUP3_SIZE +\
				 MLX5E_MAIN_GROUP4_SIZE +\
				 MLX5E_MAIN_GROUP5_SIZE +\
				 MLX5E_MAIN_GROUP6_SIZE +\
				 MLX5E_MAIN_GROUP7_SIZE +\
				 MLX5E_MAIN_GROUP8_SIZE +\
				 MLX5E_MAIN_GROUP9_SIZE +\
				 0)

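/*
 * Create the flow groups of the main table; the tunnel-matching groups
 * must come first, and the group sizes must add up to
 * MLX5E_MAIN_TABLE_SIZE.
 */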
static int
mlx5e_create_main_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
				      int inlen)
{
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
				match_criteria.outer_headers.dmac_47_16);
	int err;
	int ix = 0;

	/* Tunnel rules need to be first in this list of groups */

	/* Start tunnel rules */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;
	/* End Tunnel Rules */

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP4_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP5_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	memset(dmac, 0xff, ETH_ALEN);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP6_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP7_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP8_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_MAIN_GROUP9_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

static int
mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return (-ENOMEM);

	err = mlx5e_create_main_groups_sub(ft, in, inlen);

	kvfree(in);
	return (err);
}

static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.main;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "main",
				       MLX5E_MAIN_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return (err);
	}
	ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_main_flow_table;
	}

	err = mlx5e_create_main_groups(ft);
	if (err)
		goto err_free_g;
	return (0);

err_free_g:
	kfree(ft->g);

err_destroy_main_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return (err);
}

static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.main);
}

#define MLX5E_NUM_VLAN_GROUPS	3
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
#define MLX5E_VLAN_GROUP2_SIZE	BIT(0)
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE +\
				 MLX5E_VLAN_GROUP2_SIZE +\
				 0)

static int
mlx5e_create_vlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
				      int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

static int
mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return (-ENOMEM);

	err = mlx5e_create_vlan_groups_sub(ft, in, inlen);

	kvfree(in);
	return (err);
}

static int
mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.vlan;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vlan",
				       MLX5E_VLAN_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return (err);
	}
	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_flow_table;
	}

	err = mlx5e_create_vlan_groups(ft);
	if (err)
		goto err_free_g;

	return (0);

err_free_g:
	kfree(ft->g);

err_destroy_vlan_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return (err);
}

static void
mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.vlan);
}

#define MLX5E_NUM_INNER_RSS_GROUPS	3
#define MLX5E_INNER_RSS_GROUP0_SIZE	BIT(3)
#define MLX5E_INNER_RSS_GROUP1_SIZE	BIT(1)
#define MLX5E_INNER_RSS_GROUP2_SIZE	BIT(0)
#define MLX5E_INNER_RSS_TABLE_SIZE	(MLX5E_INNER_RSS_GROUP0_SIZE +\
					 MLX5E_INNER_RSS_GROUP1_SIZE +\
					 MLX5E_INNER_RSS_GROUP2_SIZE +\
					 0)

static int
mlx5e_create_inner_rss_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
					   int inlen)
{
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	int err;
	int ix = 0;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_RSS_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_RSS_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_RSS_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return (0);

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return (err);
}

static int
mlx5e_create_inner_rss_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return (-ENOMEM);

	err = mlx5e_create_inner_rss_groups_sub(ft, in, inlen);

	kvfree(in);
	return (err);
}

static int
mlx5e_create_inner_rss_flow_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fts.inner_rss;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "inner_rss",
				       MLX5E_INNER_RSS_TABLE_SIZE);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return (err);
	}
	ft->g = kcalloc(MLX5E_NUM_INNER_RSS_GROUPS, sizeof(*ft->g),
			GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_inner_rss_flow_table;
	}

	err = mlx5e_create_inner_rss_groups(ft);
	if (err)
		goto err_free_g;

	return (0);

err_free_g:
	kfree(ft->g);

err_destroy_inner_rss_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return (err);
}

static void mlx5e_destroy_inner_rss_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fts.inner_rss);
}

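/*
 * Create the RX flow tables in the kernel flow namespace: the VLAN
 * table, whose rules forward into the main table, and the inner RSS
 * table, which matches on inner (tunneled) headers.
 */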
int
mlx5e_open_flow_table(struct mlx5e_priv *priv)
{
	int err;

	priv->fts.ns = mlx5_get_flow_namespace(priv->mdev,
					       MLX5_FLOW_NAMESPACE_KERNEL);

	err = mlx5e_create_vlan_flow_table(priv);
	if (err)
		return (err);

	err = mlx5e_create_main_flow_table(priv);
	if (err)
		goto err_destroy_vlan_flow_table;

	err = mlx5e_create_inner_rss_flow_table(priv);
	if (err)
		goto err_destroy_main_flow_table;

	return (0);

err_destroy_main_flow_table:
	mlx5e_destroy_main_flow_table(priv);
err_destroy_vlan_flow_table:
	mlx5e_destroy_vlan_flow_table(priv);

	return (err);
}

void
mlx5e_close_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_inner_rss_flow_table(priv);
	mlx5e_destroy_main_flow_table(priv);
	mlx5e_destroy_vlan_flow_table(priv);
}