/* mlx5_fs_tree.c revision 329200 */
1/*-
2 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_core/mlx5_fs_tree.c 329200 2018-02-13 14:37:21Z hselasky $
26 */
27
28#include <linux/module.h>
29#include <dev/mlx5/driver.h>
30#include "mlx5_core.h"
31#include "fs_core.h"
32#include <linux/string.h>
33#include <linux/compiler.h>
34
/* Number of init_tree_node elements in a brace-enclosed initializer list. */
#define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
					 sizeof(struct init_tree_node))
37
/* Initializer for a priority node in the static steering tree; the
 * variadic part is the list of child init_tree_node entries. */
#define ADD_PRIO(name_val, flags_val, min_level_val, max_ft_val, caps_val, \
		 ...) {.type = FS_TYPE_PRIO,\
	.name = name_val,\
	.min_ft_level = min_level_val,\
	.flags = flags_val,\
	.max_ft = max_ft_val,\
	.caps = caps_val,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}
48
/* Convenience form of ADD_PRIO for flow-table priorities that need no
 * minimum level and no device capabilities. */
#define ADD_FT_PRIO(name_val, flags_val, max_ft_val,  ...)\
	ADD_PRIO(name_val, flags_val, 0, max_ft_val, {},\
		 __VA_ARGS__)\
52
/* Initializer for a namespace node; the variadic part is its children. */
#define ADD_NS(name_val, ...) {.type = FS_TYPE_NAMESPACE,\
	.name = name_val,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}
58
/* Number of long elements in a brace-enclosed capability list. */
#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
				   sizeof(long))

/* Bit offset of a capability field inside flow_table_nic_cap. */
#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))

/* Build a node_caps initializer from a list of FS_CAP() offsets. */
#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
			       .caps = (long[]) {__VA_ARGS__}}
66
/* Per-priority flow-table limits and minimum levels of the static tree. */
#define BYPASS_MAX_FT 5
#define BYPASS_PRIO_MAX_FT 1
#define KERNEL_MAX_FT 3
#define LEFTOVER_MAX_FT 1
#define KENREL_MIN_LEVEL 3	/* sic: historical misspelling of KERNEL kept */
/* Parenthesized so the macros expand safely inside larger expressions
 * (the original unparenthesized bodies only worked by luck with '+'). */
#define LEFTOVER_MIN_LEVEL (KENREL_MIN_LEVEL + 1)
#define BYPASS_MIN_LEVEL (MLX5_NUM_BYPASS_FTS + LEFTOVER_MIN_LEVEL)
/* Device capabilities a subtree of the static steering tree requires. */
struct node_caps {
	size_t	arr_sz;	/* number of entries in caps[] */
	long	*caps;	/* capability bit offsets, built with FS_CAP() */
};
78
/*
 * One node of the static description of the default NIC RX steering tree,
 * and the tree itself (root_fs): three priorities — bypass, kernel and
 * leftovers.  Bypass and leftovers additionally require the
 * flow_modify_en/modify_root device capabilities.
 */
struct init_tree_node {
	enum fs_type	type;		/* namespace or priority */
	const char	*name;
	struct init_tree_node *children;
	int ar_size;			/* number of entries in children[] */
	struct node_caps caps;		/* required device caps (may be empty) */
	u8  flags;
	int min_ft_level;
	int prio;
	int max_ft;
} root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.name = "root",
	.ar_size = 3,
	.children = (struct init_tree_node[]) {
		/* User/bypass rules: eight priorities plus a multicast one. */
		ADD_PRIO("by_pass_prio", 0, BYPASS_MIN_LEVEL, 0,
			 FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en),
					  FS_CAP(flow_table_properties_nic_receive.modify_root)),
			 ADD_NS("by_pass_ns",
				ADD_FT_PRIO("prio0", 0,
					    BYPASS_PRIO_MAX_FT),
				ADD_FT_PRIO("prio1", 0,
					    BYPASS_PRIO_MAX_FT),
				ADD_FT_PRIO("prio2", 0,
					    BYPASS_PRIO_MAX_FT),
				ADD_FT_PRIO("prio3", 0,
					    BYPASS_PRIO_MAX_FT),
				ADD_FT_PRIO("prio4", 0,
					    BYPASS_PRIO_MAX_FT),
				ADD_FT_PRIO("prio5", 0,
					    BYPASS_PRIO_MAX_FT),
				ADD_FT_PRIO("prio6", 0,
					    BYPASS_PRIO_MAX_FT),
				ADD_FT_PRIO("prio7", 0,
					    BYPASS_PRIO_MAX_FT),
				ADD_FT_PRIO("prio-mcast", 0,
					    BYPASS_PRIO_MAX_FT))),
		/* Tables used by the kernel datapath itself. */
		ADD_PRIO("kernel_prio", 0, KENREL_MIN_LEVEL, 0, {},
			 ADD_NS("kernel_ns",
				ADD_FT_PRIO("prio_kernel-0", 0,
					    KERNEL_MAX_FT))),
		/* Catch-all for traffic not matched above; shared prio. */
		ADD_PRIO("leftovers_prio", MLX5_CORE_FS_PRIO_SHARED,
			 LEFTOVER_MIN_LEVEL, 0,
			 FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en),
					  FS_CAP(flow_table_properties_nic_receive.modify_root)),
			 ADD_NS("leftover_ns",
				ADD_FT_PRIO("leftovers_prio-0",
					MLX5_CORE_FS_PRIO_SHARED,
					LEFTOVER_MAX_FT)))
	}
};
130
131/* Tree creation functions */
132
/*
 * Walk up the parent chain of @node to the topmost node and return the
 * enclosing mlx5_flow_root_namespace.  Returns NULL (with a warning) if
 * the topmost node is not a namespace, i.e. the node is detached from the
 * tree or corrupted.
 */
static struct mlx5_flow_root_namespace *find_root(struct fs_base *node)
{
	struct fs_base *parent;

	/* Make sure we only read it once while we go up the tree */
	while ((parent = node->parent))
		node = parent;

	if (node->type != FS_TYPE_NAMESPACE) {
		printf("mlx5_core: WARN: ""mlx5: flow steering node %s is not in tree or garbaged\n", node->name);
		return NULL;
	}

	/* The root namespace embeds a plain namespace which embeds fs_base. */
	return container_of(container_of(node,
					 struct mlx5_flow_namespace,
					 base),
			    struct mlx5_flow_root_namespace,
			    ns);
}
152
153static inline struct mlx5_core_dev *fs_get_dev(struct fs_base *node)
154{
155	struct mlx5_flow_root_namespace *root = find_root(node);
156
157	if (root)
158		return root->dev;
159	return NULL;
160}
161
/* Initialize the common bookkeeping of a tree node: the kref used for
 * deletion, the user refcount (started at @refcount), the removal
 * completion, list linkage and the per-node lock. */
static void fs_init_node(struct fs_base *node,
			 unsigned int refcount)
{
	kref_init(&node->refcount);
	atomic_set(&node->users_refcount, refcount);
	init_completion(&node->complete);
	INIT_LIST_HEAD(&node->list);
	mutex_init(&node->lock);
}
171
/* Attach @node under @parent (taking a user reference on the parent) and
 * give it a duplicated copy of @name.
 * NOTE(review): kstrdup_const() can return NULL on allocation failure and
 * the result is not checked — confirm callers tolerate a NULL name. */
static void _fs_add_node(struct fs_base *node,
			 const char *name,
			 struct fs_base *parent)
{
	if (parent)
		atomic_inc(&parent->users_refcount);
	node->name = kstrdup_const(name, GFP_KERNEL);
	node->parent = parent;
}
181
/* Initialize a node with @refcount users and attach it under @parent. */
static void fs_add_node(struct fs_base *node,
			struct fs_base *parent, const char *name,
			unsigned int refcount)
{
	fs_init_node(node, refcount);
	_fs_add_node(node, name, parent);
}
189
190static void _fs_put(struct fs_base *node, void (*kref_cb)(struct kref *kref),
191		    bool parent_locked);
192
193static void fs_del_dst(struct mlx5_flow_rule *dst);
194static void _fs_del_ft(struct mlx5_flow_table *ft);
195static void fs_del_fg(struct mlx5_flow_group *fg);
196static void fs_del_fte(struct fs_fte *fte);
197
/* Dispatch the type-specific firmware/list teardown for @base.  Types with
 * no hardware state (namespaces, prios) need no action. */
static void cmd_remove_node(struct fs_base *base)
{
	switch (base->type) {
	case FS_TYPE_FLOW_DEST:
		fs_del_dst(container_of(base, struct mlx5_flow_rule, base));
		break;
	case FS_TYPE_FLOW_TABLE:
		_fs_del_ft(container_of(base, struct mlx5_flow_table, base));
		break;
	case FS_TYPE_FLOW_GROUP:
		fs_del_fg(container_of(base, struct mlx5_flow_group, base));
		break;
	case FS_TYPE_FLOW_ENTRY:
		fs_del_fte(container_of(base, struct fs_fte, base));
		break;
	default:
		break;
	}
}
217
/* kref release callback: tear down @node's hardware state under its own
 * (and its parent's) lock, signal waiters in fs_remove_node(), then drop
 * the user reference this node held on its parent.  Does NOT free the
 * node's memory — see _fs_remove_node()/fs_remove_node(). */
static void __fs_remove_node(struct kref *kref)
{
	struct fs_base *node = container_of(kref, struct fs_base, refcount);

	/* Lock parent first to keep the tree stable during removal. */
	if (node->parent)
		mutex_lock(&node->parent->lock);
	mutex_lock(&node->lock);
	cmd_remove_node(node);
	mutex_unlock(&node->lock);
	complete(&node->complete);
	if (node->parent) {
		mutex_unlock(&node->parent->lock);
		/* Release the reference taken in _fs_add_node(). */
		_fs_put(node->parent, _fs_remove_node, false);
	}
}
233
/* kref release callback that also frees the node: used for nodes whose
 * lifetime ends entirely inside the refcount machinery (e.g. a parent
 * released from __fs_remove_node()). */
void _fs_remove_node(struct kref *kref)
{
	struct fs_base *node = container_of(kref, struct fs_base, refcount);

	__fs_remove_node(kref);
	kfree_const(node->name);
	kfree(node);
}
242
/* Take a user reference on @node. */
static void fs_get(struct fs_base *node)
{
	atomic_inc(&node->users_refcount);
}
247
/* Drop a user reference on @node; when it hits zero, unlink the node from
 * its parent's list and release the underlying kref via @kref_cb.
 * @parent_locked tells whether the caller already holds the parent's lock
 * (the function restores the lock state it was entered with). */
static void _fs_put(struct fs_base *node, void (*kref_cb)(struct kref *kref),
		    bool parent_locked)
{
	struct fs_base *parent_node = node->parent;

	if (parent_node && !parent_locked)
		mutex_lock(&parent_node->lock);
	if (atomic_dec_and_test(&node->users_refcount)) {
		if (parent_node) {
			/*remove from parent's list*/
			list_del_init(&node->list);
			mutex_unlock(&parent_node->lock);
		}
		kref_put(&node->refcount, kref_cb);
		/* Re-take the parent lock the caller expects to still hold. */
		if (parent_node && parent_locked)
			mutex_lock(&parent_node->lock);
	} else if (parent_node && !parent_locked) {
		mutex_unlock(&parent_node->lock);
	}
}
268
/* Drop a user reference; parent lock not held by the caller. */
static void fs_put(struct fs_base *node)
{
	_fs_put(node, __fs_remove_node, false);
}
273
/* Drop a user reference; the caller already holds the parent's lock. */
static void fs_put_parent_locked(struct fs_base *node)
{
	_fs_put(node, __fs_remove_node, true);
}
278
/* Synchronously remove @node: drop the caller's reference, wait until the
 * last user finished (__fs_remove_node signals node->complete), then free
 * the node's memory. */
static void fs_remove_node(struct fs_base *node)
{
	fs_put(node);
	wait_for_completion(&node->complete);
	kfree_const(node->name);
	kfree(node);
}
286
/* As fs_remove_node(), for callers that already hold the parent's lock. */
static void fs_remove_node_parent_locked(struct fs_base *node)
{
	fs_put_parent_locked(node);
	wait_for_completion(&node->complete);
	kfree_const(node->name);
	kfree(node);
}
294
295static struct fs_fte *fs_alloc_fte(u8 action,
296				   u32 flow_tag,
297				   u32 *match_value,
298				   unsigned int index)
299{
300	struct fs_fte *fte;
301
302
303	fte = kzalloc(sizeof(*fte), GFP_KERNEL);
304	if (!fte)
305		return ERR_PTR(-ENOMEM);
306
307	memcpy(fte->val, match_value, sizeof(fte->val));
308	fte->base.type =  FS_TYPE_FLOW_ENTRY;
309	fte->dests_size = 0;
310	fte->flow_tag = flow_tag;
311	fte->index = index;
312	INIT_LIST_HEAD(&fte->dests);
313	fte->action = action;
314
315	return fte;
316}
317
/*
 * Allocate the catch-all ("star") fte of a flow table inside group @fg at
 * @index, with a single forward destination whose target table is filled
 * in later by fs_set_star_rule().  Links the fte into @fg.
 * Returns the fte, ERR_PTR(-ENOSPC) if the group is full, or
 * ERR_PTR(-ENOMEM).
 */
static struct fs_fte *alloc_star_ft_entry(struct mlx5_flow_table *ft,
					  struct mlx5_flow_group *fg,
					  u32 *match_value,
					  unsigned int index)
{
	int err;
	struct fs_fte *fte;
	struct mlx5_flow_rule *dst;

	if (fg->num_ftes == fg->max_ftes)
		return ERR_PTR(-ENOSPC);

	fte = fs_alloc_fte(MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
			   MLX5_FS_DEFAULT_FLOW_TAG, match_value, index);
	if (IS_ERR(fte))
		return fte;

	/* Create the single forward-to-table destination of the star fte. */
	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst) {
		err = -ENOMEM;
		goto free_fte;
	}

	fte->base.parent = &fg->base;
	fte->dests_size = 1;
	dst->dest_attr.type = MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE;
	dst->base.parent = &fte->base;
	list_add(&dst->base.list, &fte->dests);
	/* assumed that the callee creates the star rules sorted by index */
	list_add_tail(&fte->base.list, &fg->ftes);
	fg->num_ftes++;

	return fte;

free_fte:
	kfree(fte);
	return ERR_PTR(err);
}
357
358/* assume that fte can't be changed */
/* Free a star fte and all of its destinations, unlinking it from its
 * parent group.  assume that fte can't be changed concurrently. */
static void free_star_fte_entry(struct fs_fte *fte)
{
	struct mlx5_flow_group	*fg;
	struct mlx5_flow_rule	*dst, *temp;

	fs_get_parent(fg, fte);

	list_for_each_entry_safe(dst, temp, &fte->dests, base.list) {
		fte->dests_size--;
		list_del(&dst->base.list);
		kfree(dst);
	}

	list_del(&fte->base.list);
	fg->num_ftes--;
	kfree(fte);
}
376
/*
 * Allocate a flow group object from a create_flow_group_in mailbox layout:
 * copies the match criteria and derives start index and capacity from the
 * start/end flow indices.  Returns the group or ERR_PTR(-ENOMEM).
 */
static struct mlx5_flow_group *fs_alloc_fg(u32 *create_fg_in)
{
	struct mlx5_flow_group *fg;
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    create_fg_in, match_criteria);
	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
					    create_fg_in,
					    match_criteria_enable);
	fg = kzalloc(sizeof(*fg), GFP_KERNEL);
	if (!fg)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fg->ftes);
	fg->mask.match_criteria_enable = match_criteria_enable;
	memcpy(&fg->mask.match_criteria, match_criteria,
	       sizeof(fg->mask.match_criteria));
	fg->base.type =  FS_TYPE_FLOW_GROUP;
	fg->start_index = MLX5_GET(create_flow_group_in, create_fg_in,
				   start_flow_index);
	/* end - start is inclusive, hence the +1. */
	fg->max_ftes = MLX5_GET(create_flow_group_in, create_fg_in,
				end_flow_index) - fg->start_index + 1;
	return fg;
}
400
401static struct mlx5_flow_table *find_next_ft(struct fs_prio *prio);
402static struct mlx5_flow_table *find_prev_ft(struct mlx5_flow_table *curr,
403					    struct fs_prio *prio);
404
405/* assumed src_ft and dst_ft can't be freed */
/*
 * Point the star rule of @src_ft at @dst_ft.  With a non-NULL @dst_ft the
 * fte is (re)written in firmware and a reference is taken on @dst_ft;
 * with a NULL @dst_ft the fte is deleted from firmware (disconnect).
 * assumed src_ft and dst_ft can't be freed.
 * Returns 0 or a firmware error; note the delete path's firmware status
 * is not propagated.
 */
static int fs_set_star_rule(struct mlx5_core_dev *dev,
			    struct mlx5_flow_table *src_ft,
			    struct mlx5_flow_table *dst_ft)
{
	struct mlx5_flow_rule *src_dst;
	struct fs_fte *src_fte;
	int err = 0;
	u32 *match_value;
	int match_len = MLX5_ST_SZ_BYTES(fte_match_param);

	/* The star fte has exactly one destination (see alloc_star_ft_entry). */
	src_dst = list_first_entry(&src_ft->star_rule.fte->dests,
				   struct mlx5_flow_rule, base.list);
	match_value = mlx5_vzalloc(match_len);
	if (!match_value) {
		mlx5_core_warn(dev, "failed to allocate inbox\n");
		return -ENOMEM;
	}
	/* All-zero match value: the star rule matches everything. */

	fs_get_parent(src_fte, src_dst);

	src_dst->dest_attr.ft = dst_ft;
	if (dst_ft) {
		err = mlx5_cmd_fs_set_fte(dev,
					  src_ft->vport,
					  &src_fte->status,
					  match_value, src_ft->type,
					  src_ft->id, src_fte->index,
					  src_ft->star_rule.fg->id,
					  src_fte->flow_tag,
					  src_fte->action,
					  src_fte->dests_size,
					  &src_fte->dests);
		if (err)
			goto free;

		/* Hold the target table while our star rule points at it. */
		fs_get(&dst_ft->base);
	} else {
		mlx5_cmd_fs_delete_fte(dev,
				       src_ft->vport,
				       &src_fte->status,
				       src_ft->type, src_ft->id,
				       src_fte->index);
	}

free:
	kvfree(match_value);
	return err;
}
455
456static int connect_prev_fts(struct fs_prio *locked_prio,
457			    struct fs_prio *prev_prio,
458			    struct mlx5_flow_table *next_ft)
459{
460	struct mlx5_flow_table *iter;
461	int err = 0;
462	struct mlx5_core_dev *dev = fs_get_dev(&prev_prio->base);
463
464	if (!dev)
465		return -ENODEV;
466
467	mutex_lock(&prev_prio->base.lock);
468	fs_for_each_ft(iter, prev_prio) {
469		struct mlx5_flow_rule *src_dst =
470			list_first_entry(&iter->star_rule.fte->dests,
471					 struct mlx5_flow_rule, base.list);
472		struct mlx5_flow_table *prev_ft = src_dst->dest_attr.ft;
473
474		if (prev_ft == next_ft)
475			continue;
476
477		err = fs_set_star_rule(dev, iter, next_ft);
478		if (err) {
479			mlx5_core_warn(dev,
480				       "mlx5: flow steering can't connect prev and next\n");
481			goto unlock;
482		} else {
483			/* Assume ft's prio is locked */
484			if (prev_ft) {
485				struct fs_prio *prio;
486
487				fs_get_parent(prio, prev_ft);
488				if (prio == locked_prio)
489					fs_put_parent_locked(&prev_ft->base);
490				else
491					fs_put(&prev_ft->base);
492			}
493		}
494	}
495
496unlock:
497	mutex_unlock(&prev_prio->base.lock);
498	return 0;
499}
500
501static int create_star_rule(struct mlx5_flow_table *ft, struct fs_prio *prio)
502{
503	struct mlx5_flow_group *fg;
504	int err;
505	u32 *fg_in;
506	u32 *match_value;
507	struct mlx5_flow_table *next_ft;
508	struct mlx5_flow_table *prev_ft;
509	struct mlx5_flow_root_namespace *root = find_root(&prio->base);
510	int fg_inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
511	int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
512
513	fg_in = mlx5_vzalloc(fg_inlen);
514	if (!fg_in) {
515		mlx5_core_warn(root->dev, "failed to allocate inbox\n");
516		return -ENOMEM;
517	}
518
519	match_value = mlx5_vzalloc(match_len);
520	if (!match_value) {
521		mlx5_core_warn(root->dev, "failed to allocate inbox\n");
522		kvfree(fg_in);
523		return -ENOMEM;
524	}
525
526	MLX5_SET(create_flow_group_in, fg_in, start_flow_index, ft->max_fte);
527	MLX5_SET(create_flow_group_in, fg_in, end_flow_index, ft->max_fte);
528	fg = fs_alloc_fg(fg_in);
529	if (IS_ERR(fg)) {
530		err = PTR_ERR(fg);
531		goto out;
532	}
533	ft->star_rule.fg = fg;
534	err =  mlx5_cmd_fs_create_fg(fs_get_dev(&prio->base),
535				     fg_in, ft->vport, ft->type,
536				     ft->id,
537				     &fg->id);
538	if (err)
539		goto free_fg;
540
541	ft->star_rule.fte = alloc_star_ft_entry(ft, fg,
542						      match_value,
543						      ft->max_fte);
544	if (IS_ERR(ft->star_rule.fte))
545		goto free_star_rule;
546
547	mutex_lock(&root->fs_chain_lock);
548	next_ft = find_next_ft(prio);
549	err = fs_set_star_rule(root->dev, ft, next_ft);
550	if (err) {
551		mutex_unlock(&root->fs_chain_lock);
552		goto free_star_rule;
553	}
554	if (next_ft) {
555		struct fs_prio *parent;
556
557		fs_get_parent(parent, next_ft);
558		fs_put(&next_ft->base);
559	}
560	prev_ft = find_prev_ft(ft, prio);
561	if (prev_ft) {
562		struct fs_prio *prev_parent;
563
564		fs_get_parent(prev_parent, prev_ft);
565
566		err = connect_prev_fts(NULL, prev_parent, ft);
567		if (err) {
568			mutex_unlock(&root->fs_chain_lock);
569			goto destroy_chained_star_rule;
570		}
571		fs_put(&prev_ft->base);
572	}
573	mutex_unlock(&root->fs_chain_lock);
574	kvfree(fg_in);
575	kvfree(match_value);
576
577	return 0;
578
579destroy_chained_star_rule:
580	fs_set_star_rule(fs_get_dev(&prio->base), ft, NULL);
581	if (next_ft)
582		fs_put(&next_ft->base);
583free_star_rule:
584	free_star_fte_entry(ft->star_rule.fte);
585	mlx5_cmd_fs_destroy_fg(fs_get_dev(&ft->base), ft->vport,
586			       ft->type, ft->id,
587			       fg->id);
588free_fg:
589	kfree(fg);
590out:
591	kvfree(fg_in);
592	kvfree(match_value);
593	return err;
594}
595
/*
 * Remove @ft from the table chain and destroy its star rule: first link
 * the previous tables to the next one, then delete @ft's star fte and
 * destroy its private group.
 */
static void destroy_star_rule(struct mlx5_flow_table *ft, struct fs_prio *prio)
{
	int err;
	struct mlx5_flow_root_namespace *root;
	struct mlx5_core_dev *dev = fs_get_dev(&prio->base);
	struct mlx5_flow_table *prev_ft, *next_ft;
	struct fs_prio *prev_prio;

	WARN_ON(!dev);

	root = find_root(&prio->base);
	if (!root)
		printf("mlx5_core: ERR: ""mlx5: flow steering failed to find root of priority %s", prio->base.name);
	/* NOTE(review): if root is NULL the mutex_lock below dereferences a
	 * NULL pointer — an early return may be intended; confirm. */

	/* In order to ensure atomic deletion, first update
	 * prev ft to point on the next ft.
	 */
	mutex_lock(&root->fs_chain_lock);
	prev_ft = find_prev_ft(ft, prio);
	next_ft = find_next_ft(prio);
	if (prev_ft) {
		fs_get_parent(prev_prio, prev_ft);
		/*Prev is connected to ft, only if ft is the first(last) in the prio*/
		err = connect_prev_fts(prio, prev_prio, next_ft);
		if (err)
			mlx5_core_warn(root->dev,
				       "flow steering can't connect prev and next of flow table\n");
		fs_put(&prev_ft->base);
	}

	/* Disconnect our own star rule from the next table. */
	err = fs_set_star_rule(root->dev, ft, NULL);
	/*One put is for fs_get in find next ft*/
	if (next_ft) {
		fs_put(&next_ft->base);
		if (!err)
			fs_put(&next_ft->base);
	}

	mutex_unlock(&root->fs_chain_lock);
	err = mlx5_cmd_fs_destroy_fg(dev, ft->vport, ft->type, ft->id,
				     ft->star_rule.fg->id);
	if (err)
		mlx5_core_warn(dev,
			       "flow steering can't destroy star entry group(index:%d) of ft:%s\n", ft->star_rule.fg->start_index,
			       ft->base.name);
	free_star_fte_entry(ft->star_rule.fte);

	kfree(ft->star_rule.fg);
	ft->star_rule.fg = NULL;
}
646
/* Find the fs_prio with index @prio inside namespace @ns, or NULL. */
static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
				 unsigned int prio)
{
	struct fs_prio *iter_prio;

	fs_for_each_prio(iter_prio, ns) {
		if (iter_prio->prio == prio)
			return iter_prio;
	}

	return NULL;
}
659
660static unsigned int _alloc_new_level(struct fs_prio *prio,
661				     struct mlx5_flow_namespace *match);
662
/* Sum the max_ft budgets of all prios in @ns that precede @prio, then
 * recurse upward from @ns via _alloc_new_level().  Part of the mutual
 * recursion that computes a new flow table's absolute level. */
static unsigned int __alloc_new_level(struct mlx5_flow_namespace *ns,
				      struct fs_prio *prio)
{
	unsigned int level = 0;
	struct fs_prio *p;

	if (!ns)
		return 0;

	mutex_lock(&ns->base.lock);
	fs_for_each_prio(p, ns) {
		if (p != prio)
			level += p->max_ft;
		else
			break;
	}
	mutex_unlock(&ns->base.lock);

	/* Continue from the prio enclosing @ns (reuses the parameter). */
	fs_get_parent(prio, ns);
	if (prio)
		WARN_ON(prio->base.type != FS_TYPE_PRIO);

	return level + _alloc_new_level(prio, ns);
}
687
/*
 * Compute the level contribution of @prio and everything above it in the
 * tree, skipping the child namespace @match we just came from.  If a flow
 * table is found in @prio, its level anchors the result directly.
 * Called under lock of priority, hence locking all upper objects.
 */
static unsigned int _alloc_new_level(struct fs_prio *prio,
				     struct mlx5_flow_namespace *match)
{
	struct mlx5_flow_namespace *ns;
	struct fs_base *it;
	unsigned int level = 0;

	if (!prio)
		return 0;

	mutex_lock(&prio->base.lock);
	fs_for_each_ns_or_ft_reverse(it, prio) {
		if (it->type == FS_TYPE_NAMESPACE) {
			struct fs_prio *p;

			fs_get_obj(ns, it);

			if (match != ns) {
				/* Count the whole budget of sibling namespaces. */
				mutex_lock(&ns->base.lock);
				fs_for_each_prio(p, ns)
					level += p->max_ft;
				mutex_unlock(&ns->base.lock);
			} else {
				/* Reached the namespace we came from; stop. */
				break;
			}
		} else {
			struct mlx5_flow_table *ft;

			/* A concrete table fixes the level below it. */
			fs_get_obj(ft, it);
			mutex_unlock(&prio->base.lock);
			return level + ft->level + 1;
		}
	}

	fs_get_parent(ns, prio);
	mutex_unlock(&prio->base.lock);
	return __alloc_new_level(ns, prio) + level;
}
727
/* Compute the absolute level for a new flow table created in @prio. */
static unsigned int alloc_new_level(struct fs_prio *prio)
{
	return _alloc_new_level(prio, NULL);
}
732
733static int update_root_ft_create(struct mlx5_flow_root_namespace *root,
734				    struct mlx5_flow_table *ft)
735{
736	int err = 0;
737	int min_level = INT_MAX;
738
739	if (root->root_ft)
740		min_level = root->root_ft->level;
741
742	if (ft->level < min_level)
743		err = mlx5_cmd_update_root_ft(root->dev, ft->type,
744					      ft->id);
745	else
746		return err;
747
748	if (err)
749		mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
750			       ft->id);
751	else
752		root->root_ft = ft;
753
754	return err;
755}
756
/*
 * Create a flow table of @max_fte usable entries in @fs_prio (for @vport),
 * wire its star rule into the chain and, for NIC RX roots that support
 * modify_root, possibly make it the hardware root table.  @name may be
 * NULL/empty, in which case a name is generated from the table id.
 * Returns the table or an ERR_PTR.
 */
static struct mlx5_flow_table *_create_ft_common(struct mlx5_flow_namespace *ns,
						 u16 vport,
						 struct fs_prio *fs_prio,
						 int max_fte,
						 const char *name)
{
	struct mlx5_flow_table *ft;
	int err;
	int log_table_sz;
	int ft_size;
	char gen_name[20];
	struct mlx5_flow_root_namespace *root =
		find_root(&ns->base);

	if (!root) {
		printf("mlx5_core: ERR: ""mlx5: flow steering failed to find root of namespace %s", ns->base.name);
		return ERR_PTR(-ENODEV);
	}

	if (fs_prio->num_ft == fs_prio->max_ft)
		return ERR_PTR(-ENOSPC);

	ft  = kzalloc(sizeof(*ft), GFP_KERNEL);
	if (!ft)
		return ERR_PTR(-ENOMEM);

	fs_init_node(&ft->base, 1);
	INIT_LIST_HEAD(&ft->fgs);

	/* Temporarily WA until we expose the level set in the API */
	if (root->table_type == FS_FT_ESW_EGRESS_ACL ||
		root->table_type == FS_FT_ESW_INGRESS_ACL)
		ft->level = 0;
	else
		ft->level = alloc_new_level(fs_prio);

	ft->base.type = FS_TYPE_FLOW_TABLE;
	ft->vport = vport;
	ft->type = root->table_type;
	/*Two entries are reserved for star rules*/
	ft_size = roundup_pow_of_two(max_fte + 2);
	/*User isn't aware to those rules*/
	ft->max_fte = ft_size - 2;
	log_table_sz = ilog2(ft_size);
	err = mlx5_cmd_fs_create_ft(root->dev, ft->vport, ft->type,
				    ft->level, log_table_sz, &ft->id);
	if (err)
		goto free_ft;

	err = create_star_rule(ft, fs_prio);
	if (err)
		goto del_ft;

	/* Only NIC RX tables can become the root, and only when the device
	 * supports changing the root table at runtime. */
	if ((root->table_type == FS_FT_NIC_RX) && MLX5_CAP_FLOWTABLE(root->dev,
			       flow_table_properties_nic_receive.modify_root)) {
		err = update_root_ft_create(root, ft);
		if (err)
			goto destroy_star_rule;
	}

	if (!name || !strlen(name)) {
		snprintf(gen_name, 20, "flow_table_%u", ft->id);
		_fs_add_node(&ft->base, gen_name, &fs_prio->base);
	} else {
		_fs_add_node(&ft->base, name, &fs_prio->base);
	}
	list_add_tail(&ft->base.list, &fs_prio->objs);
	fs_prio->num_ft++;

	return ft;

destroy_star_rule:
	destroy_star_rule(ft, fs_prio);
del_ft:
	mlx5_cmd_fs_destroy_ft(root->dev, ft->vport, ft->type, ft->id);
free_ft:
	kfree(ft);
	return ERR_PTR(err);
}
836
837static struct mlx5_flow_table *create_ft_common(struct mlx5_flow_namespace *ns,
838						u16 vport,
839						unsigned int prio,
840						int max_fte,
841						const char *name)
842{
843	struct fs_prio *fs_prio = NULL;
844	fs_prio = find_prio(ns, prio);
845	if (!fs_prio)
846		return ERR_PTR(-EINVAL);
847
848	return _create_ft_common(ns, vport, fs_prio, max_fte, name);
849}
850
851
852static struct mlx5_flow_table *find_first_ft_in_ns(struct mlx5_flow_namespace *ns,
853						   struct list_head *start);
854
855static struct mlx5_flow_table *find_first_ft_in_prio(struct fs_prio *prio,
856						     struct list_head *start);
857
858static struct mlx5_flow_table *mlx5_create_autogrouped_shared_flow_table(struct fs_prio *fs_prio)
859{
860	struct mlx5_flow_table *ft;
861
862	ft = find_first_ft_in_prio(fs_prio, &fs_prio->objs);
863	if (ft) {
864		ft->shared_refcount++;
865		return ft;
866	}
867
868	return NULL;
869}
870
/*
 * Create (or, for shared prios, reuse) a flow table whose groups are
 * managed automatically: up to @max_num_groups group "types" are created
 * on demand as rules are added.
 * Returns the table or an ERR_PTR.
 */
struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
							   int prio,
							   const char *name,
							   int num_flow_table_entries,
							   int max_num_groups)
{
	struct mlx5_flow_table *ft = NULL;
	struct fs_prio *fs_prio;
	bool is_shared_prio;

	fs_prio = find_prio(ns, prio);
	if (!fs_prio)
		return ERR_PTR(-EINVAL);

	is_shared_prio = fs_prio->flags & MLX5_CORE_FS_PRIO_SHARED;
	if (is_shared_prio) {
		/* Shared prios hand out one table to all callers. */
		mutex_lock(&fs_prio->shared_lock);
		ft = mlx5_create_autogrouped_shared_flow_table(fs_prio);
	}

	if (ft)
		goto return_ft;

	ft = create_ft_common(ns, 0, prio, num_flow_table_entries,
			      name);
	if (IS_ERR(ft))
		goto return_ft;

	ft->autogroup.active = true;
	ft->autogroup.max_types = max_num_groups;
	if (is_shared_prio)
		ft->shared_refcount = 1;

return_ft:
	if (is_shared_prio)
		mutex_unlock(&fs_prio->shared_lock);
	return ft;
}
EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
910
/* Create a flow table bound to a specific @vport (e-switch ACLs etc.). */
struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
						     u16 vport,
						     int prio,
						     const char *name,
						     int num_flow_table_entries)
{
	return create_ft_common(ns, vport, prio, num_flow_table_entries, name);
}
EXPORT_SYMBOL(mlx5_create_vport_flow_table);
920
/* Create a flow table on the local vport (vport 0). */
struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
					       int prio,
					       const char *name,
					       int num_flow_table_entries)
{
	return create_ft_common(ns, 0, prio, num_flow_table_entries, name);
}
EXPORT_SYMBOL(mlx5_create_flow_table);
929
/* Firmware-side teardown of a flow table, invoked from cmd_remove_node()
 * when the node's last reference drops; also updates the prio's count. */
static void _fs_del_ft(struct mlx5_flow_table *ft)
{
	int err;
	struct mlx5_core_dev *dev = fs_get_dev(&ft->base);
	struct fs_prio *prio;

	err = mlx5_cmd_fs_destroy_ft(dev, ft->vport, ft->type, ft->id);
	if (err)
		mlx5_core_warn(dev, "flow steering can't destroy ft %s\n",
			       ft->base.name);

	fs_get_parent(prio, ft);
	prio->num_ft--;
}
944
/*
 * If @ft is the current hardware root table, promote the next table in
 * the chain (within the same prio if possible, otherwise via
 * find_next_ft()) to root before @ft is destroyed.
 * Returns 0 or the firmware error.
 */
static int update_root_ft_destroy(struct mlx5_flow_root_namespace *root,
				    struct mlx5_flow_table *ft)
{
	int err = 0;
	struct fs_prio *prio;
	struct mlx5_flow_table *next_ft = NULL;
	struct mlx5_flow_table *put_ft = NULL;

	if (root->root_ft != ft)
		return 0;

	fs_get_parent(prio, ft);
	/*Assuming objs containis only flow tables and
	 * flow tables are sorted by level.
	 */
	if (!list_is_last(&ft->base.list, &prio->objs)) {
		next_ft = list_next_entry(ft, base.list);
	} else {
		next_ft = find_next_ft(prio);
		/* find_next_ft() took a reference we must drop below. */
		put_ft = next_ft;
	}

	if (next_ft) {
		err = mlx5_cmd_update_root_ft(root->dev, next_ft->type,
					      next_ft->id);
		if (err)
			mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
				       ft->id);
	}
	/* next_ft may be NULL here: the last table was destroyed. */
	if (!err)
		root->root_ft = next_ft;

	if (put_ft)
		fs_put(&put_ft->base);

	return err;
}
982
/*
 * Destroy a flow table: in a shared prio only the last sharer actually
 * tears the table down (earlier callers just drop their count and
 * reference).  Root promotion and star-rule removal happen under the
 * prio and table locks.
 * Objects in the same prio are destroyed in the reverse order they were
 * created.  Returns 0 or a negative errno.
 */
int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
{
	int err = 0;
	struct fs_prio *prio;
	struct mlx5_flow_root_namespace *root;
	bool is_shared_prio;

	fs_get_parent(prio, ft);
	root = find_root(&prio->base);

	if (!root) {
		printf("mlx5_core: ERR: ""mlx5: flow steering failed to find root of priority %s", prio->base.name);
		return -ENODEV;
	}

	is_shared_prio = prio->flags & MLX5_CORE_FS_PRIO_SHARED;
	if (is_shared_prio) {
		mutex_lock(&prio->shared_lock);
		if (ft->shared_refcount > 1) {
			/* Other sharers remain: just drop our share. */
			--ft->shared_refcount;
			fs_put(&ft->base);
			mutex_unlock(&prio->shared_lock);
			return 0;
		}
	}

	mutex_lock(&prio->base.lock);
	mutex_lock(&ft->base.lock);

	/* Promote another table to hardware root if @ft is the root. */
	err = update_root_ft_destroy(root, ft);
	if (err)
		goto unlock_ft;

	/* delete two last entries */
	destroy_star_rule(ft, prio);

	mutex_unlock(&ft->base.lock);
	fs_remove_node_parent_locked(&ft->base);
	mutex_unlock(&prio->base.lock);
	if (is_shared_prio)
		mutex_unlock(&prio->shared_lock);

	return err;

unlock_ft:
	mutex_unlock(&ft->base.lock);
	mutex_unlock(&prio->base.lock);
	if (is_shared_prio)
		mutex_unlock(&prio->shared_lock);

	return err;
}
EXPORT_SYMBOL(mlx5_destroy_flow_table);
1037
1038static struct mlx5_flow_group *fs_create_fg(struct mlx5_core_dev *dev,
1039					    struct mlx5_flow_table *ft,
1040					    struct list_head *prev,
1041					    u32 *fg_in,
1042					    int refcount)
1043{
1044	struct mlx5_flow_group *fg;
1045	int err;
1046	unsigned int end_index;
1047	char name[20];
1048
1049	fg = fs_alloc_fg(fg_in);
1050	if (IS_ERR(fg))
1051		return fg;
1052
1053	end_index = fg->start_index + fg->max_ftes - 1;
1054	err =  mlx5_cmd_fs_create_fg(dev, fg_in,
1055				     ft->vport, ft->type, ft->id,
1056				     &fg->id);
1057	if (err)
1058		goto free_fg;
1059
1060	mutex_lock(&ft->base.lock);
1061	if (ft->autogroup.active)
1062		ft->autogroup.num_types++;
1063
1064	snprintf(name, sizeof(name), "group_%u", fg->id);
1065	/*Add node to tree*/
1066	fs_add_node(&fg->base, &ft->base, name, refcount);
1067	/*Add node to group list*/
1068	list_add(&fg->base.list, prev);
1069	mutex_unlock(&ft->base.lock);
1070
1071	return fg;
1072
1073free_fg:
1074	kfree(fg);
1075	return ERR_PTR(err);
1076}
1077
1078struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
1079					       u32 *in)
1080{
1081	struct mlx5_flow_group *fg;
1082	struct mlx5_core_dev *dev = fs_get_dev(&ft->base);
1083
1084	if (!dev)
1085		return ERR_PTR(-ENODEV);
1086
1087	if (ft->autogroup.active)
1088		return ERR_PTR(-EPERM);
1089
1090	fg = fs_create_fg(dev, ft, ft->fgs.prev, in, 1);
1091
1092	return fg;
1093}
1094EXPORT_SYMBOL(mlx5_create_flow_group);
1095
/* Group is destroyed when all the rules in the group were removed.
 * Firmware-side teardown of a flow group, invoked from cmd_remove_node()
 * when the node's last reference drops. */
static void fs_del_fg(struct mlx5_flow_group *fg)
{
	struct mlx5_flow_table *parent_ft;
	struct mlx5_core_dev *dev;

	fs_get_parent(parent_ft, fg);
	dev = fs_get_dev(&parent_ft->base);
	WARN_ON(!dev);

	if (parent_ft->autogroup.active)
		parent_ft->autogroup.num_types--;

	if (mlx5_cmd_fs_destroy_fg(dev, parent_ft->vport,
				   parent_ft->type,
				   parent_ft->id, fg->id))
		mlx5_core_warn(dev, "flow steering can't destroy fg\n");
}
1114
/* Synchronously remove a flow group node (waits for the last user). */
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
{
	fs_remove_node(&fg->base);
}
EXPORT_SYMBOL(mlx5_destroy_flow_group);
1120
/* Compare @val1 and @val2 byte by byte under @mask: a bit covered by the
 * mask must be equal in both values; unmasked bits are ignored. */
static bool _fs_match_exact_val(void *mask, void *val1, void *val2, size_t size)
{
	const unsigned char *m = mask;
	const unsigned char *a = val1;
	const unsigned char *b = val2;
	size_t i;

	/* TODO: optimize by comparing 64bits when possible */
	for (i = 0; i < size; i++) {
		if ((a[i] & m[i]) != (b[i] & m[i]))
			return false;
	}

	return true;
}
1133
/*
 * Return true when @val1 and @val2 are equal under @mask, checking only
 * the match-criteria sections (outer headers, misc parameters, inner
 * headers) that @mask enables.
 */
bool fs_match_exact_val(struct mlx5_core_fs_mask *mask,
			       void *val1, void *val2)
{
	if (mask->match_criteria_enable &
	    1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS) {
		void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
						val1, outer_headers);
		void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
						val2, outer_headers);
		void *fte_mask = MLX5_ADDR_OF(fte_match_param,
					      mask->match_criteria, outer_headers);

		if (!_fs_match_exact_val(fte_mask, fte_match1, fte_match2,
					 MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
			return false;
	}

	if (mask->match_criteria_enable &
	    1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) {
		void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
						val1, misc_parameters);
		void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
						val2, misc_parameters);
		void *fte_mask = MLX5_ADDR_OF(fte_match_param,
					  mask->match_criteria, misc_parameters);

		if (!_fs_match_exact_val(fte_mask, fte_match1, fte_match2,
					 MLX5_ST_SZ_BYTES(fte_match_set_misc)))
			return false;
	}
	if (mask->match_criteria_enable &
	    1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS) {
		void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
						val1, inner_headers);
		void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
						val2, inner_headers);
		void *fte_mask = MLX5_ADDR_OF(fte_match_param,
					  mask->match_criteria, inner_headers);

		if (!_fs_match_exact_val(fte_mask, fte_match1, fte_match2,
					 MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
			return false;
	}
	return true;
}
1179
1180bool fs_match_exact_mask(u8 match_criteria_enable1,
1181				u8 match_criteria_enable2,
1182				void *mask1, void *mask2)
1183{
1184	return match_criteria_enable1 == match_criteria_enable2 &&
1185		!memcmp(mask1, mask2, MLX5_ST_SZ_BYTES(fte_match_param));
1186}
1187
1188static struct mlx5_flow_table *find_first_ft_in_ns_reverse(struct mlx5_flow_namespace *ns,
1189							   struct list_head *start);
1190
/* Scan @prio's children backwards, continuing from (not including)
 * @start, and return the first flow table found with a reference taken
 * (fs_get).  Child namespaces are descended recursively.  Presumably
 * called with prio->base.lock held (see find_first_ft_in_prio_reverse).
 */
static struct mlx5_flow_table *_find_first_ft_in_prio_reverse(struct fs_prio *prio,
							      struct list_head *start)
{
	struct fs_base *it = container_of(start, struct fs_base, list);

	if (!prio)
		return NULL;

	fs_for_each_ns_or_ft_continue_reverse(it, prio) {
		struct mlx5_flow_namespace	*ns;
		struct mlx5_flow_table		*ft;

		if (it->type == FS_TYPE_FLOW_TABLE) {
			fs_get_obj(ft, it);
			/* Hold a reference for the caller. */
			fs_get(&ft->base);
			return ft;
		}

		/* A prio holds either tables or namespaces; recurse. */
		fs_get_obj(ns, it);
		WARN_ON(ns->base.type != FS_TYPE_NAMESPACE);

		ft = find_first_ft_in_ns_reverse(ns, &ns->prios);
		if (ft)
			return ft;
	}

	return NULL;
}
1219
1220static struct mlx5_flow_table *find_first_ft_in_prio_reverse(struct fs_prio *prio,
1221							     struct list_head *start)
1222{
1223	struct mlx5_flow_table *ft;
1224
1225	if (!prio)
1226		return NULL;
1227
1228	mutex_lock(&prio->base.lock);
1229	ft = _find_first_ft_in_prio_reverse(prio, start);
1230	mutex_unlock(&prio->base.lock);
1231
1232	return ft;
1233}
1234
/* Scan @ns's priorities backwards, continuing from the prio embedding
 * @start, and return the first flow table found (reference held) or
 * NULL.  Takes ns->base.lock for the duration of the scan.
 */
static struct mlx5_flow_table *find_first_ft_in_ns_reverse(struct mlx5_flow_namespace *ns,
							   struct list_head *start)
{
	struct fs_prio *prio;

	if (!ns)
		return NULL;

	fs_get_obj(prio, container_of(start, struct fs_base, list));
	mutex_lock(&ns->base.lock);
	fs_for_each_prio_continue_reverse(prio, ns) {
		struct mlx5_flow_table *ft;

		ft = find_first_ft_in_prio_reverse(prio, &prio->objs);
		if (ft) {
			mutex_unlock(&ns->base.lock);
			return ft;
		}
	}
	mutex_unlock(&ns->base.lock);

	return NULL;
}
1258
/* Return the flow table that precedes @curr in steering order, with a
 * reference held; NULL when @curr is not the first table of @prio or no
 * earlier table exists.  Walks up through enclosing namespaces/prios.
 * Returned a held ft, assumed curr is protected, assumed curr's parent
 * is locked.
 */
static struct mlx5_flow_table *find_prev_ft(struct mlx5_flow_table *curr,
					    struct fs_prio *prio)
{
	struct mlx5_flow_table *ft = NULL;
	struct fs_base *curr_base;

	if (!curr)
		return NULL;

	/* prio has either namespace or flow-tables, but not both */
	if (!list_empty(&prio->objs) &&
	    list_first_entry(&prio->objs, struct mlx5_flow_table, base.list) !=
	    curr)
		return NULL;

	/* Climb: search earlier siblings in the parent ns, then earlier
	 * siblings of that ns in the grandparent prio, and so on.
	 */
	while (!ft && prio) {
		struct mlx5_flow_namespace *ns;

		fs_get_parent(ns, prio);
		ft = find_first_ft_in_ns_reverse(ns, &prio->base.list);
		curr_base = &ns->base;
		fs_get_parent(prio, ns);

		if (prio && !ft)
			ft = find_first_ft_in_prio_reverse(prio,
							   &curr_base->list);
	}
	return ft;
}
1291
/* Forward counterpart of _find_first_ft_in_prio_reverse(): scan @prio's
 * children continuing after @start and return the first flow table
 * found with a reference taken; child namespaces are descended.
 */
static struct mlx5_flow_table *_find_first_ft_in_prio(struct fs_prio *prio,
						      struct list_head *start)
{
	struct fs_base	*it = container_of(start, struct fs_base, list);

	if (!prio)
		return NULL;

	fs_for_each_ns_or_ft_continue(it, prio) {
		struct mlx5_flow_namespace	*ns;
		struct mlx5_flow_table		*ft;

		if (it->type == FS_TYPE_FLOW_TABLE) {
			fs_get_obj(ft, it);
			/* Hold a reference for the caller. */
			fs_get(&ft->base);
			return ft;
		}

		fs_get_obj(ns, it);
		WARN_ON(ns->base.type != FS_TYPE_NAMESPACE);

		ft = find_first_ft_in_ns(ns, &ns->prios);
		if (ft)
			return ft;
	}

	return NULL;
}
1320
1321static struct mlx5_flow_table *find_first_ft_in_prio(struct fs_prio *prio,
1322						     struct list_head *start)
1323{
1324	struct mlx5_flow_table *ft;
1325
1326	if (!prio)
1327		return NULL;
1328
1329	mutex_lock(&prio->base.lock);
1330	ft = _find_first_ft_in_prio(prio, start);
1331	mutex_unlock(&prio->base.lock);
1332
1333	return ft;
1334}
1335
/* Scan @ns's priorities forward, continuing from the prio embedding
 * @start, and return the first flow table found (reference held) or
 * NULL.  Takes ns->base.lock for the duration of the scan.
 */
static struct mlx5_flow_table *find_first_ft_in_ns(struct mlx5_flow_namespace *ns,
						   struct list_head *start)
{
	struct fs_prio *prio;

	if (!ns)
		return NULL;

	fs_get_obj(prio, container_of(start, struct fs_base, list));
	mutex_lock(&ns->base.lock);
	fs_for_each_prio_continue(prio, ns) {
		struct mlx5_flow_table *ft;

		ft = find_first_ft_in_prio(prio, &prio->objs);
		if (ft) {
			mutex_unlock(&ns->base.lock);
			return ft;
		}
	}
	mutex_unlock(&ns->base.lock);

	return NULL;
}
1359
/* Return the flow table that follows @prio in steering order, with a
 * reference held, walking up through enclosing namespaces as needed.
 * returned a held ft, assumed curr is protected, assumed curr's parent
 * is locked
 */
static struct mlx5_flow_table *find_next_ft(struct fs_prio *prio)
{
	struct mlx5_flow_table *ft = NULL;
	struct fs_base *curr_base;

	while (!ft && prio) {
		struct mlx5_flow_namespace *ns;

		/* Search later siblings in the parent ns; failing that,
		 * later siblings of that ns in the grandparent prio.
		 */
		fs_get_parent(ns, prio);
		ft = find_first_ft_in_ns(ns, &prio->base.list);
		curr_base = &ns->base;
		fs_get_parent(prio, ns);

		if (!ft && prio)
			ft = _find_first_ft_in_prio(prio, &curr_base->list);
	}
	return ft;
}
1381
1382
/* Create a flow group automatically for a table with autogrouping
 * enabled, sized to a slice of the table's fte space and placed at the
 * first free index range.  called under ft mutex lock.
 * Returns the new group (refcount 0) or an ERR_PTR.
 */
static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
						u8 match_criteria_enable,
						u32 *match_criteria)
{
	unsigned int group_size;
	unsigned int candidate_index = 0;
	unsigned int candidate_group_num = 0;
	struct mlx5_flow_group *g;
	struct mlx5_flow_group *ret;
	struct list_head *prev = &ft->fgs;
	struct mlx5_core_dev *dev;
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *match_criteria_addr;

	if (!ft->autogroup.active)
		return ERR_PTR(-ENOENT);

	dev = fs_get_dev(&ft->base);
	if (!dev)
		return ERR_PTR(-ENODEV);

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(dev, "failed to allocate inbox\n");
		return ERR_PTR(-ENOMEM);
	}


	/* While below the type budget, give each group an equal share of
	 * the fte space; past the budget, fall back to one-entry groups.
	 */
	if (ft->autogroup.num_types < ft->autogroup.max_types)
		group_size = ft->max_fte / (ft->autogroup.max_types + 1);
	else
		group_size = 1;

	/* Possible when max_fte < max_types + 1 (integer division). */
	if (group_size == 0) {
		mlx5_core_warn(dev,
			       "flow steering can't create group size of 0\n");
		ret = ERR_PTR(-EINVAL);
		goto out;
	}

	/* sorted by start_index */
	fs_for_each_fg(g, ft) {
		candidate_group_num++;
		/* Slide past each existing group until a gap fits. */
		if (candidate_index + group_size > g->start_index)
			candidate_index = g->start_index + g->max_ftes;
		else
			break;
		prev = &g->base.list;
	}

	if (candidate_index + group_size > ft->max_fte) {
		ret = ERR_PTR(-ENOSPC);
		goto out;
	}

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 match_criteria_enable);
	MLX5_SET(create_flow_group_in, in, start_flow_index, candidate_index);
	MLX5_SET(create_flow_group_in, in, end_flow_index,   candidate_index +
		 group_size - 1);
	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
					   in, match_criteria);
	memcpy(match_criteria_addr, match_criteria,
	       MLX5_ST_SZ_BYTES(fte_match_param));

	/* Refcount 0: the group dies when its last rule is removed. */
	ret = fs_create_fg(dev, ft, prev, in, 0);
out:
	kvfree(in);
	return ret;
}
1455
1456static struct mlx5_flow_namespace *get_ns_with_notifiers(struct fs_base *node)
1457{
1458	struct mlx5_flow_namespace *ns = NULL;
1459
1460	while (node  && (node->type != FS_TYPE_NAMESPACE ||
1461			      list_empty(&container_of(node, struct
1462						       mlx5_flow_namespace,
1463						       base)->list_notifiers)))
1464		node = node->parent;
1465
1466	if (node)
1467		fs_get_obj(ns, node);
1468
1469	return ns;
1470}
1471
1472
/* Invoke the add_dst_cb of every handler registered on the nearest
 * namespace with notifiers, stopping at the first callback error.
 * Assumption: fte is locked.
 */
static void call_to_add_rule_notifiers(struct mlx5_flow_rule *dst,
				      struct fs_fte *fte)
{
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_handler *iter_handler;
	struct fs_client_priv_data *iter_client;
	void *data;
	/* New rule iff dst is the first destination on the fte. */
	bool is_new_rule = list_first_entry(&fte->dests,
					    struct mlx5_flow_rule,
					    base.list) == dst;
	int err;

	ns = get_ns_with_notifiers(&fte->base);
	if (!ns)
		return;

	down_read(&ns->notifiers_rw_sem);
	list_for_each_entry(iter_handler, &ns->list_notifiers,
			    list) {
		if (iter_handler->add_dst_cb) {
			/* Look up this handler's private data for the rule. */
			data = NULL;
			mutex_lock(&dst->clients_lock);
			list_for_each_entry(
				iter_client, &dst->clients_data, list) {
				if (iter_client->fs_handler == iter_handler) {
					data = iter_client->client_dst_data;
					break;
				}
			}
			mutex_unlock(&dst->clients_lock);
			/* NOTE(review): 'data' is computed above but NULL is
			 * passed here (unlike call_to_del_rule_notifiers,
			 * which passes 'data') — confirm intended.
			 */
			err  = iter_handler->add_dst_cb(dst,
							is_new_rule,
							NULL,
							iter_handler->client_context);
			if (err)
				break;
		}
	}
	up_read(&ns->notifiers_rw_sem);
}
1514
/* Invoke the del_dst_cb of every handler registered on the nearest
 * namespace with notifiers.  ctx_changed tells clients the fte lost
 * its last destination.
 */
static void call_to_del_rule_notifiers(struct mlx5_flow_rule *dst,
				      struct fs_fte *fte)
{
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_handler *iter_handler;
	struct fs_client_priv_data *iter_client;
	void *data;
	bool ctx_changed = (fte->dests_size == 0);

	ns = get_ns_with_notifiers(&fte->base);
	if (!ns)
		return;
	down_read(&ns->notifiers_rw_sem);
	list_for_each_entry(iter_handler, &ns->list_notifiers,
			    list) {
		/* Look up this handler's private data for the rule. */
		data = NULL;
		mutex_lock(&dst->clients_lock);
		list_for_each_entry(iter_client, &dst->clients_data, list) {
			if (iter_client->fs_handler == iter_handler) {
				data = iter_client->client_dst_data;
				break;
			}
		}
		mutex_unlock(&dst->clients_lock);
		if (iter_handler->del_dst_cb) {
			iter_handler->del_dst_cb(dst, ctx_changed, data,
						 iter_handler->client_context);
		}
	}
	up_read(&ns->notifiers_rw_sem);
}
1546
/* Attach @dest to @fte and program the updated destination list to
 * firmware.  fte should not be deleted while calling this function.
 * On success the rule is returned UNLINKED from fte->dests: the caller
 * (add_rule_to_tree) re-links it after fs_add_node resets the node.
 */
static struct mlx5_flow_rule *_fs_add_dst_fte(struct fs_fte *fte,
					      struct mlx5_flow_group *fg,
					      struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_table *ft;
	struct mlx5_flow_rule *dst;
	int err;

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return ERR_PTR(-ENOMEM);

	memcpy(&dst->dest_attr, dest, sizeof(*dest));
	dst->base.type = FS_TYPE_FLOW_DEST;
	INIT_LIST_HEAD(&dst->clients_data);
	mutex_init(&dst->clients_lock);
	fs_get_parent(ft, fg);
	/*Add dest to dests list- added as first element after the head*/
	list_add_tail(&dst->base.list, &fte->dests);
	fte->dests_size++;
	/* Program the whole destination list, including the new entry. */
	err = mlx5_cmd_fs_set_fte(fs_get_dev(&ft->base),
				  ft->vport,
				  &fte->status,
				  fte->val, ft->type,
				  ft->id, fte->index, fg->id, fte->flow_tag,
				  fte->action, fte->dests_size, &fte->dests);
	if (err)
		goto free_dst;

	/* Unlink again; the caller re-adds the node under the fs tree. */
	list_del(&dst->base.list);

	return dst;

free_dst:
	list_del(&dst->base.list);
	kfree(dst);
	fte->dests_size--;
	return ERR_PTR(err);
}
1587
1588static char *get_dest_name(struct mlx5_flow_destination *dest)
1589{
1590	char *name = kzalloc(sizeof(char) * 20, GFP_KERNEL);
1591
1592	switch (dest->type) {
1593	case MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE:
1594		snprintf(name, 20, "dest_%s_%u", "flow_table",
1595			 dest->ft->id);
1596		return name;
1597	case MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT:
1598		snprintf(name, 20, "dest_%s_%u", "vport",
1599			 dest->vport_num);
1600		return name;
1601	case MLX5_FLOW_CONTEXT_DEST_TYPE_TIR:
1602		snprintf(name, 20, "dest_%s_%u", "tir", dest->tir_num);
1603		return name;
1604	}
1605
1606	return NULL;
1607}
1608
/* Return the first unused flow index in @fg; if @prev is non-NULL it is
 * set to the list node after which an fte with that index belongs.
 * assumed fg is locked; the fte list is kept sorted by index.
 */
static unsigned int fs_get_free_fg_index(struct mlx5_flow_group *fg,
					 struct list_head **prev)
{
	struct fs_fte *fte;
	unsigned int start = fg->start_index;

	if (prev)
		*prev = &fg->ftes;

	/* assumed list is sorted by index */
	fs_for_each_fte(fte, fg) {
		/* First gap in the consecutive run is the free index. */
		if (fte->index != start)
			return start;
		start++;
		if (prev)
			*prev = &fte->base.list;
	}

	return start;
}
1630
1631
1632static struct fs_fte *fs_create_fte(struct mlx5_flow_group *fg,
1633			     u32 *match_value,
1634			     u8 action,
1635			     u32 flow_tag,
1636			     struct list_head **prev)
1637{
1638	struct fs_fte *fte;
1639	int index = 0;
1640
1641	index = fs_get_free_fg_index(fg, prev);
1642	fte = fs_alloc_fte(action, flow_tag, match_value, index);
1643	if (IS_ERR(fte))
1644		return fte;
1645
1646	return fte;
1647}
1648
/* Link @rule under @fte in the fs tree, re-attach it to fte->dests, and
 * fire the add-rule notifiers.
 */
static void add_rule_to_tree(struct mlx5_flow_rule *rule,
			     struct fs_fte *fte)
{
	char *dest_name;

	/* NOTE(review): get_dest_name() may return NULL (unknown dest
	 * type or allocation failure); fs_add_node then gets a NULL name
	 * — confirm it tolerates that.
	 */
	dest_name = get_dest_name(&rule->dest_attr);
	fs_add_node(&rule->base, &fte->base, dest_name, 1);
	/* re-add to list, since fs_add_node reset our list */
	list_add_tail(&rule->base.list, &fte->dests);
	kfree(dest_name);
	call_to_add_rule_notifiers(rule, fte);
}
1661
/* Detach @dst from its fte and, if other destinations remain, reprogram
 * the fte in firmware without it; then fire the del-rule notifiers.
 * (When dests_size drops to 0 the fte itself is removed elsewhere.)
 */
static void fs_del_dst(struct mlx5_flow_rule *dst)
{
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	u32	*match_value;
	struct mlx5_core_dev *dev = fs_get_dev(&dst->base);
	int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
	int err;

	WARN_ON(!dev);

	match_value = mlx5_vzalloc(match_len);
	if (!match_value) {
		mlx5_core_warn(dev, "failed to allocate inbox\n");
		return;
	}

	fs_get_parent(fte, dst);
	fs_get_parent(fg, fte);
	mutex_lock(&fg->base.lock);
	memcpy(match_value, fte->val, sizeof(fte->val));
	/* ft can't be changed as fg is locked */
	fs_get_parent(ft, fg);
	list_del(&dst->base.list);
	fte->dests_size--;
	if (fte->dests_size) {
		/* Reprogram the fte with the remaining destinations. */
		err = mlx5_cmd_fs_set_fte(dev, ft->vport,
					  &fte->status, match_value, ft->type,
					  ft->id, fte->index, fg->id,
					  fte->flow_tag, fte->action,
					  fte->dests_size, &fte->dests);
		if (err) {
			mlx5_core_warn(dev, "%s can't delete dst %s\n",
				       __func__, dst->base.name);
			/* Skip notifiers on firmware failure. */
			goto err;
		}
	}
	call_to_del_rule_notifiers(dst, fte);
err:
	mutex_unlock(&fg->base.lock);
	kvfree(match_value);
}
1705
1706static void fs_del_fte(struct fs_fte *fte)
1707{
1708	struct mlx5_flow_table *ft;
1709	struct mlx5_flow_group *fg;
1710	int err;
1711	struct mlx5_core_dev *dev;
1712
1713	fs_get_parent(fg, fte);
1714	fs_get_parent(ft, fg);
1715
1716	dev = fs_get_dev(&ft->base);
1717	WARN_ON(!dev);
1718
1719	err = mlx5_cmd_fs_delete_fte(dev, ft->vport, &fte->status,
1720				     ft->type, ft->id, fte->index);
1721	if (err)
1722		mlx5_core_warn(dev, "flow steering can't delete fte %s\n",
1723			       fte->base.name);
1724
1725	fg->num_ftes--;
1726}
1727
/* Add @dest to an fte in @fg whose (value, action, flow_tag) match
 * exactly; if none exists and the group has room, create a new fte.
 * Takes fg->base.lock.  NOTE(review): the original header said
 * "assuming parent fg is locked" — presumably meaning the caller keeps
 * fg linked/alive, since the lock is taken here; confirm.
 */
static struct mlx5_flow_rule *fs_add_dst_fg(struct mlx5_flow_group *fg,
						   u32 *match_value,
						   u8 action,
						   u32 flow_tag,
						   struct mlx5_flow_destination *dest)
{
	struct fs_fte *fte;
	struct mlx5_flow_rule *dst;
	struct mlx5_flow_table *ft;
	struct list_head *prev;
	char fte_name[20];

	mutex_lock(&fg->base.lock);
	/* First try to piggy-back on an existing identical entry. */
	fs_for_each_fte(fte, fg) {
		/* TODO: Check of size against PRM max size */
		mutex_lock(&fte->base.lock);
		if (fs_match_exact_val(&fg->mask, match_value, &fte->val) &&
		    action == fte->action && flow_tag == fte->flow_tag) {
			dst = _fs_add_dst_fte(fte, fg, dest);
			mutex_unlock(&fte->base.lock);
			if (IS_ERR(dst))
				goto unlock_fg;
			goto add_rule;
		}
		mutex_unlock(&fte->base.lock);
	}

	fs_get_parent(ft, fg);
	if (fg->num_ftes == fg->max_ftes) {
		dst = ERR_PTR(-ENOSPC);
		goto unlock_fg;
	}

	fte = fs_create_fte(fg, match_value, action, flow_tag, &prev);
	if (IS_ERR(fte)) {
		dst = (void *)fte;
		goto unlock_fg;
	}
	dst = _fs_add_dst_fte(fte, fg, dest);
	if (IS_ERR(dst)) {
		kfree(fte);
		goto unlock_fg;
	}

	fg->num_ftes++;

	snprintf(fte_name, sizeof(fte_name), "fte%u", fte->index);
	/* Add node to tree */
	fs_add_node(&fte->base, &fg->base, fte_name, 0);
	list_add(&fte->base.list, prev);
add_rule:
	add_rule_to_tree(dst, fte);
unlock_fg:
	mutex_unlock(&fg->base.lock);
	return dst;
}
1786
1787static struct mlx5_flow_rule *fs_add_dst_ft(struct mlx5_flow_table *ft,
1788					    u8 match_criteria_enable,
1789					    u32 *match_criteria,
1790					    u32 *match_value,
1791					    u8 action, u32 flow_tag,
1792					    struct mlx5_flow_destination *dest)
1793{
1794	/*? where dst_entry is allocated*/
1795	struct mlx5_flow_group *g;
1796	struct mlx5_flow_rule *dst;
1797
1798	fs_get(&ft->base);
1799	mutex_lock(&ft->base.lock);
1800	fs_for_each_fg(g, ft)
1801		if (fs_match_exact_mask(g->mask.match_criteria_enable,
1802					match_criteria_enable,
1803					g->mask.match_criteria,
1804					match_criteria)) {
1805			mutex_unlock(&ft->base.lock);
1806
1807			dst = fs_add_dst_fg(g, match_value,
1808					    action, flow_tag, dest);
1809			if (PTR_ERR(dst) && PTR_ERR(dst) != -ENOSPC)
1810				goto unlock;
1811		}
1812	mutex_unlock(&ft->base.lock);
1813
1814	g = create_autogroup(ft, match_criteria_enable, match_criteria);
1815	if (IS_ERR(g)) {
1816		dst = (void *)g;
1817		goto unlock;
1818	}
1819
1820	dst = fs_add_dst_fg(g, match_value,
1821			    action, flow_tag, dest);
1822	if (IS_ERR(dst)) {
1823		/* Remove assumes refcount > 0 and autogroup creates a group
1824		 * with a refcount = 0.
1825		 */
1826		fs_get(&g->base);
1827		fs_remove_node(&g->base);
1828		goto unlock;
1829	}
1830
1831unlock:
1832	fs_put(&ft->base);
1833	return dst;
1834}
1835
1836struct mlx5_flow_rule *
1837mlx5_add_flow_rule(struct mlx5_flow_table *ft,
1838		   u8 match_criteria_enable,
1839		   u32 *match_criteria,
1840		   u32 *match_value,
1841		   u32 action,
1842		   u32 flow_tag,
1843		   struct mlx5_flow_destination *dest)
1844{
1845	struct mlx5_flow_rule *dst;
1846	struct mlx5_flow_namespace *ns;
1847
1848	ns = get_ns_with_notifiers(&ft->base);
1849	if (ns)
1850		down_read(&ns->dests_rw_sem);
1851	dst =  fs_add_dst_ft(ft, match_criteria_enable, match_criteria,
1852			     match_value, action, flow_tag, dest);
1853	if (ns)
1854		up_read(&ns->dests_rw_sem);
1855
1856	return dst;
1857
1858
1859}
1860EXPORT_SYMBOL(mlx5_add_flow_rule);
1861
1862void mlx5_del_flow_rule(struct mlx5_flow_rule *dst)
1863{
1864	struct mlx5_flow_namespace *ns;
1865
1866	ns = get_ns_with_notifiers(&dst->base);
1867	if (ns)
1868		down_read(&ns->dests_rw_sem);
1869	fs_remove_node(&dst->base);
1870	if (ns)
1871		up_read(&ns->dests_rw_sem);
1872}
1873EXPORT_SYMBOL(mlx5_del_flow_rule);
1874
1875#define MLX5_CORE_FS_ROOT_NS_NAME "root"
1876#define MLX5_CORE_FS_ESW_EGRESS_ACL "esw_egress_root"
1877#define MLX5_CORE_FS_ESW_INGRESS_ACL "esw_ingress_root"
1878#define MLX5_CORE_FS_FDB_ROOT_NS_NAME "fdb_root"
1879#define MLX5_CORE_FS_SNIFFER_RX_ROOT_NS_NAME "sniffer_rx_root"
1880#define MLX5_CORE_FS_SNIFFER_TX_ROOT_NS_NAME "sniffer_tx_root"
1881#define MLX5_CORE_FS_PRIO_MAX_FT 4
1882#define MLX5_CORE_FS_PRIO_MAX_NS 1
1883
1884static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
1885				      unsigned prio, int max_ft,
1886				      const char *name, u8 flags)
1887{
1888	struct fs_prio *fs_prio;
1889
1890	fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
1891	if (!fs_prio)
1892		return ERR_PTR(-ENOMEM);
1893
1894	fs_prio->base.type = FS_TYPE_PRIO;
1895	fs_add_node(&fs_prio->base, &ns->base, name, 1);
1896	fs_prio->max_ft = max_ft;
1897	fs_prio->max_ns = MLX5_CORE_FS_PRIO_MAX_NS;
1898	fs_prio->prio = prio;
1899	fs_prio->flags = flags;
1900	list_add_tail(&fs_prio->base.list, &ns->prios);
1901	INIT_LIST_HEAD(&fs_prio->objs);
1902	mutex_init(&fs_prio->shared_lock);
1903
1904	return fs_prio;
1905}
1906
/* Tear down the NIC RX root namespace bottom-up in three stages:
 * first the prios nested inside second-level namespaces, then those
 * namespaces themselves, then the root's own prios, and finally the
 * root node.  Clears dev->root_ns.
 */
static void cleanup_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_root_namespace *root_ns = dev->root_ns;
	struct fs_prio *iter_prio;

	if (!root_ns)
		return;

	/* stage 1 */
	fs_for_each_prio(iter_prio, &root_ns->ns) {
		struct mlx5_flow_namespace *iter_ns;

		fs_for_each_ns(iter_ns, iter_prio) {
			/* Drain the prios of each nested namespace. */
			while (!list_empty(&iter_ns->prios)) {
				struct fs_base *iter_prio2 =
					list_first_entry(&iter_ns->prios,
							 struct fs_base,
							 list);

				fs_remove_node(iter_prio2);
			}
		}
	}

	/* stage 2 */
	fs_for_each_prio(iter_prio, &root_ns->ns) {
		/* Drain the (now childless) nested namespaces. */
		while (!list_empty(&iter_prio->objs)) {
			struct fs_base *iter_ns =
				list_first_entry(&iter_prio->objs,
						 struct fs_base,
						 list);

				fs_remove_node(iter_ns);
		}
	}
	/* stage 3 */
	while (!list_empty(&root_ns->ns.prios)) {
		struct fs_base *iter_prio =
			list_first_entry(&root_ns->ns.prios,
					 struct fs_base,
					 list);

		fs_remove_node(iter_prio);
	}

	fs_remove_node(&root_ns->ns.base);
	dev->root_ns = NULL;
}
1955
1956static void cleanup_single_prio_root_ns(struct mlx5_core_dev *dev,
1957					struct mlx5_flow_root_namespace *root_ns)
1958{
1959	struct fs_base *prio;
1960
1961	if (!root_ns)
1962		return;
1963
1964	if (!list_empty(&root_ns->ns.prios)) {
1965		prio = list_first_entry(&root_ns->ns.prios,
1966					struct fs_base,
1967				 list);
1968		fs_remove_node(prio);
1969	}
1970	fs_remove_node(&root_ns->ns.base);
1971	root_ns = NULL;
1972}
1973
/* Tear down all flow-steering root namespaces of @dev.  Safe to call
 * with partially-initialized state: each helper tolerates a NULL root.
 */
void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
	cleanup_root_ns(dev);
	cleanup_single_prio_root_ns(dev, dev->sniffer_rx_root_ns);
	cleanup_single_prio_root_ns(dev, dev->sniffer_tx_root_ns);
	cleanup_single_prio_root_ns(dev, dev->fdb_root_ns);
	cleanup_single_prio_root_ns(dev, dev->esw_egress_root_ns);
	cleanup_single_prio_root_ns(dev, dev->esw_ingress_root_ns);
}
1983
1984static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
1985						 *ns)
1986{
1987	ns->base.type = FS_TYPE_NAMESPACE;
1988	init_rwsem(&ns->dests_rw_sem);
1989	init_rwsem(&ns->notifiers_rw_sem);
1990	INIT_LIST_HEAD(&ns->prios);
1991	INIT_LIST_HEAD(&ns->list_notifiers);
1992
1993	return ns;
1994}
1995
1996static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev,
1997							  enum fs_ft_type
1998							  table_type,
1999							  char *name)
2000{
2001	struct mlx5_flow_root_namespace *root_ns;
2002	struct mlx5_flow_namespace *ns;
2003
2004	/* create the root namespace */
2005	root_ns = mlx5_vzalloc(sizeof(*root_ns));
2006	if (!root_ns)
2007		goto err;
2008
2009	root_ns->dev = dev;
2010	root_ns->table_type = table_type;
2011	mutex_init(&root_ns->fs_chain_lock);
2012
2013	ns = &root_ns->ns;
2014	fs_init_namespace(ns);
2015	fs_add_node(&ns->base, NULL, name, 1);
2016
2017	return root_ns;
2018err:
2019	return NULL;
2020}
2021
2022static int init_fdb_root_ns(struct mlx5_core_dev *dev)
2023{
2024	struct fs_prio *prio;
2025
2026	dev->fdb_root_ns = create_root_ns(dev, FS_FT_FDB,
2027					  MLX5_CORE_FS_FDB_ROOT_NS_NAME);
2028	if (!dev->fdb_root_ns)
2029		return -ENOMEM;
2030
2031	/* create 1 prio*/
2032	prio = fs_create_prio(&dev->fdb_root_ns->ns, 0, 1, "fdb_prio", 0);
2033	if (IS_ERR(prio))
2034		return PTR_ERR(prio);
2035	else
2036		return 0;
2037}
2038
2039#define MAX_VPORTS 128
2040
2041static int init_egress_acl_root_ns(struct mlx5_core_dev *dev)
2042{
2043	struct fs_prio *prio;
2044
2045	dev->esw_egress_root_ns = create_root_ns(dev, FS_FT_ESW_EGRESS_ACL,
2046						 MLX5_CORE_FS_ESW_EGRESS_ACL);
2047	if (!dev->esw_egress_root_ns)
2048		return -ENOMEM;
2049
2050	/* create 1 prio*/
2051	prio = fs_create_prio(&dev->esw_egress_root_ns->ns, 0, MAX_VPORTS,
2052			      "esw_egress_prio", 0);
2053	if (IS_ERR(prio))
2054		return PTR_ERR(prio);
2055	else
2056		return 0;
2057}
2058
2059static int init_ingress_acl_root_ns(struct mlx5_core_dev *dev)
2060{
2061	struct fs_prio *prio;
2062
2063	dev->esw_ingress_root_ns = create_root_ns(dev, FS_FT_ESW_INGRESS_ACL,
2064						  MLX5_CORE_FS_ESW_INGRESS_ACL);
2065	if (!dev->esw_ingress_root_ns)
2066		return -ENOMEM;
2067
2068	/* create 1 prio*/
2069	prio = fs_create_prio(&dev->esw_ingress_root_ns->ns, 0, MAX_VPORTS,
2070			      "esw_ingress_prio", 0);
2071	if (IS_ERR(prio))
2072		return PTR_ERR(prio);
2073	else
2074		return 0;
2075}
2076
2077static int init_sniffer_rx_root_ns(struct mlx5_core_dev *dev)
2078{
2079	struct fs_prio *prio;
2080
2081	dev->sniffer_rx_root_ns = create_root_ns(dev, FS_FT_SNIFFER_RX,
2082				     MLX5_CORE_FS_SNIFFER_RX_ROOT_NS_NAME);
2083	if (!dev->sniffer_rx_root_ns)
2084		return  -ENOMEM;
2085
2086	/* create 1 prio*/
2087	prio = fs_create_prio(&dev->sniffer_rx_root_ns->ns, 0, 1,
2088			      "sniffer_prio", 0);
2089	if (IS_ERR(prio))
2090		return PTR_ERR(prio);
2091	else
2092		return 0;
2093}
2094
2095
2096static int init_sniffer_tx_root_ns(struct mlx5_core_dev *dev)
2097{
2098	struct fs_prio *prio;
2099
2100	dev->sniffer_tx_root_ns = create_root_ns(dev, FS_FT_SNIFFER_TX,
2101						 MLX5_CORE_FS_SNIFFER_TX_ROOT_NS_NAME);
2102	if (!dev->sniffer_tx_root_ns)
2103		return  -ENOMEM;
2104
2105	/* create 1 prio*/
2106	prio = fs_create_prio(&dev->sniffer_tx_root_ns->ns, 0, 1,
2107			      "sniffer_prio", 0);
2108	if (IS_ERR(prio))
2109		return PTR_ERR(prio);
2110	else
2111		return 0;
2112}
2113
2114static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
2115						       const char *name)
2116{
2117	struct mlx5_flow_namespace	*ns;
2118
2119	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
2120	if (!ns)
2121		return ERR_PTR(-ENOMEM);
2122
2123	fs_init_namespace(ns);
2124	fs_add_node(&ns->base, &prio->base, name, 1);
2125	list_add_tail(&ns->base.list, &prio->objs);
2126
2127	return ns;
2128}
2129
2130#define FLOW_TABLE_BIT_SZ 1
2131#define GET_FLOW_TABLE_CAP(dev, offset) \
2132	((be32_to_cpu(*((__be32 *)(dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE]) +	\
2133			offset / 32)) >>					\
2134	  (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
2135
2136static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
2137{
2138	int i;
2139
2140	for (i = 0; i < caps->arr_sz; i++) {
2141		if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
2142			return false;
2143	}
2144	return true;
2145}
2146
/* Recursively materialize the static tree description @node (prios and
 * namespaces) under @base_parent.  A prio is skipped — not an error —
 * when the device lacks the required ft level or capabilities.  The
 * prio's priority value is its index within the parent's children.
 */
static int _init_root_tree(struct mlx5_core_dev *dev, int max_ft_level,
		    struct init_tree_node *node, struct fs_base *base_parent,
		    struct init_tree_node *tree_parent)
{
	struct mlx5_flow_namespace *fs_ns;
	struct fs_prio *fs_prio;
	int priority;
	struct fs_base *base;
	int i;
	int err = 0;

	if (node->type == FS_TYPE_PRIO) {
		/* Silently skip prios the device can't support. */
		if ((node->min_ft_level > max_ft_level) ||
		    !has_required_caps(dev, &node->caps))
			goto out;

		fs_get_obj(fs_ns, base_parent);
		priority = node - tree_parent->children;
		fs_prio = fs_create_prio(fs_ns, priority,
					 node->max_ft,
					 node->name, node->flags);
		if (IS_ERR(fs_prio)) {
			err = PTR_ERR(fs_prio);
			goto out;
		}
		base = &fs_prio->base;
	} else if (node->type == FS_TYPE_NAMESPACE) {
		fs_get_obj(fs_prio, base_parent);
		fs_ns = fs_create_namespace(fs_prio, node->name);
		if (IS_ERR(fs_ns)) {
			err = PTR_ERR(fs_ns);
			goto out;
		}
		base = &fs_ns->base;
	} else {
		return -EINVAL;
	}
	/* Recurse into the children, stopping at the first error. */
	for (i = 0; i < node->ar_size; i++) {
		err = _init_root_tree(dev, max_ft_level, &node->children[i], base,
				      node);
		if (err)
			break;
	}
out:
	return err;
}
2193
2194static int init_root_tree(struct mlx5_core_dev *dev, int max_ft_level,
2195		   struct init_tree_node *node, struct fs_base *parent)
2196{
2197	int i;
2198	struct mlx5_flow_namespace *fs_ns;
2199	int err = 0;
2200
2201	fs_get_obj(fs_ns, parent);
2202	for (i = 0; i < node->ar_size; i++) {
2203		err = _init_root_tree(dev, max_ft_level,
2204				      &node->children[i], &fs_ns->base, node);
2205		if (err)
2206			break;
2207	}
2208	return err;
2209}
2210
2211static int sum_max_ft_in_prio(struct fs_prio *prio);
2212static int sum_max_ft_in_ns(struct mlx5_flow_namespace *ns)
2213{
2214	struct fs_prio *prio;
2215	int sum = 0;
2216
2217	fs_for_each_prio(prio, ns) {
2218		sum += sum_max_ft_in_prio(prio);
2219	}
2220	return  sum;
2221}
2222
/* Flow-table capacity of @prio.  A prio with an explicit non-zero
 * max_ft returns it directly; otherwise the capacities of all nested
 * namespaces are summed once and cached back into prio->max_ft.
 */
static int sum_max_ft_in_prio(struct fs_prio *prio)
{
	int sum = 0;
	struct fs_base *it;
	struct mlx5_flow_namespace	*ns;

	if (prio->max_ft)
		return prio->max_ft;

	fs_for_each_ns_or_ft(it, prio) {
		/* Only namespaces contribute; tables are leaves here. */
		if (it->type == FS_TYPE_FLOW_TABLE)
			continue;

		fs_get_obj(ns, it);
		sum += sum_max_ft_in_ns(ns);
	}
	/* Memoize for subsequent calls. */
	prio->max_ft = sum;
	return  sum;
}
2242
2243static void set_max_ft(struct mlx5_flow_namespace *ns)
2244{
2245	struct fs_prio *prio;
2246
2247	if (!ns)
2248		return;
2249
2250	fs_for_each_prio(prio, ns)
2251		sum_max_ft_in_prio(prio);
2252}
2253
2254static int init_root_ns(struct mlx5_core_dev *dev)
2255{
2256	int max_ft_level = MLX5_CAP_FLOWTABLE(dev,
2257					      flow_table_properties_nic_receive.
2258					      max_ft_level);
2259
2260	dev->root_ns = create_root_ns(dev, FS_FT_NIC_RX,
2261				      MLX5_CORE_FS_ROOT_NS_NAME);
2262	if (IS_ERR_OR_NULL(dev->root_ns))
2263		goto err;
2264
2265
2266	if (init_root_tree(dev, max_ft_level, &root_fs, &dev->root_ns->ns.base))
2267		goto err;
2268
2269	set_max_ft(&dev->root_ns->ns);
2270
2271	return 0;
2272err:
2273	return -ENOMEM;
2274}
2275
/* Return the match_criteria_enable bits of the group owning @rule
 * (parent chain: rule -> fte -> flow group).
 */
u8 mlx5_get_match_criteria_enable(struct mlx5_flow_rule *rule)
{
	struct fs_base *pbase;
	struct mlx5_flow_group *fg;

	pbase = rule->base.parent;
	WARN_ON(!pbase);
	pbase = pbase->parent;
	WARN_ON(!pbase);

	fs_get_obj(fg, pbase);
	return fg->mask.match_criteria_enable;
}
2289
/* Copy the match value of @rule's parent fte into @match_value, which
 * must be at least sizeof(fte->val) bytes.
 */
void mlx5_get_match_value(u32 *match_value,
			  struct mlx5_flow_rule *rule)
{
	struct fs_base *pbase;
	struct fs_fte *fte;

	pbase = rule->base.parent;
	WARN_ON(!pbase);
	fs_get_obj(fte, pbase);

	memcpy(match_value, fte->val, sizeof(fte->val));
}
2302
2303void mlx5_get_match_criteria(u32 *match_criteria,
2304			     struct mlx5_flow_rule *rule)
2305{
2306	struct fs_base *pbase;
2307	struct mlx5_flow_group *fg;
2308
2309	pbase = rule->base.parent;
2310	WARN_ON(!pbase);
2311	pbase = pbase->parent;
2312	WARN_ON(!pbase);
2313
2314	fs_get_obj(fg, pbase);
2315	memcpy(match_criteria, &fg->mask.match_criteria,
2316	       sizeof(fg->mask.match_criteria));
2317}
2318
2319int mlx5_init_fs(struct mlx5_core_dev *dev)
2320{
2321	int err;
2322
2323	if (MLX5_CAP_GEN(dev, nic_flow_table)) {
2324		err = init_root_ns(dev);
2325		if (err)
2326			goto err;
2327	}
2328
2329	err = init_fdb_root_ns(dev);
2330	if (err)
2331		goto err;
2332
2333	err = init_egress_acl_root_ns(dev);
2334	if (err)
2335		goto err;
2336
2337	err = init_ingress_acl_root_ns(dev);
2338	if (err)
2339		goto err;
2340
2341	err = init_sniffer_tx_root_ns(dev);
2342	if (err)
2343		goto err;
2344
2345	err = init_sniffer_rx_root_ns(dev);
2346	if (err)
2347		goto err;
2348
2349	return 0;
2350err:
2351	mlx5_cleanup_fs(dev);
2352	return err;
2353}
2354
2355struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
2356						  enum mlx5_flow_namespace_type type)
2357{
2358	struct mlx5_flow_root_namespace *root_ns = dev->root_ns;
2359	int prio;
2360	static struct fs_prio *fs_prio;
2361	struct mlx5_flow_namespace *ns;
2362
2363	switch (type) {
2364	case MLX5_FLOW_NAMESPACE_BYPASS:
2365		prio = 0;
2366		break;
2367	case MLX5_FLOW_NAMESPACE_KERNEL:
2368		prio = 1;
2369		break;
2370	case MLX5_FLOW_NAMESPACE_LEFTOVERS:
2371		prio = 2;
2372		break;
2373	case MLX5_FLOW_NAMESPACE_FDB:
2374		if (dev->fdb_root_ns)
2375			return &dev->fdb_root_ns->ns;
2376		else
2377			return NULL;
2378	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
2379		if (dev->esw_egress_root_ns)
2380			return &dev->esw_egress_root_ns->ns;
2381		else
2382			return NULL;
2383	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
2384		if (dev->esw_ingress_root_ns)
2385			return &dev->esw_ingress_root_ns->ns;
2386		else
2387			return NULL;
2388	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
2389		if (dev->sniffer_rx_root_ns)
2390			return &dev->sniffer_rx_root_ns->ns;
2391		else
2392			return NULL;
2393	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
2394		if (dev->sniffer_tx_root_ns)
2395			return &dev->sniffer_tx_root_ns->ns;
2396		else
2397			return NULL;
2398	default:
2399		return NULL;
2400	}
2401
2402	if (!root_ns)
2403		return NULL;
2404
2405	fs_prio = find_prio(&root_ns->ns, prio);
2406	if (!fs_prio)
2407		return NULL;
2408
2409	ns = list_first_entry(&fs_prio->objs,
2410			      typeof(*ns),
2411			      base.list);
2412
2413	return ns;
2414}
2415EXPORT_SYMBOL(mlx5_get_flow_namespace);
2416
2417
2418int mlx5_set_rule_private_data(struct mlx5_flow_rule *rule,
2419				  struct mlx5_flow_handler *fs_handler,
2420				  void  *client_data)
2421{
2422	struct fs_client_priv_data *priv_data;
2423
2424	mutex_lock(&rule->clients_lock);
2425	/*Check that hanlder isn't exists in the list already*/
2426	list_for_each_entry(priv_data, &rule->clients_data, list) {
2427		if (priv_data->fs_handler == fs_handler) {
2428			priv_data->client_dst_data = client_data;
2429			goto unlock;
2430		}
2431	}
2432	priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL);
2433	if (!priv_data) {
2434		mutex_unlock(&rule->clients_lock);
2435		return -ENOMEM;
2436	}
2437
2438	priv_data->client_dst_data = client_data;
2439	priv_data->fs_handler = fs_handler;
2440	list_add(&priv_data->list, &rule->clients_data);
2441
2442unlock:
2443	mutex_unlock(&rule->clients_lock);
2444
2445	return 0;
2446}
2447
2448static int remove_from_clients(struct mlx5_flow_rule *rule,
2449			bool ctx_changed,
2450			void *client_data,
2451			void *context)
2452{
2453	struct fs_client_priv_data *iter_client;
2454	struct fs_client_priv_data *temp_client;
2455	struct mlx5_flow_handler *handler = (struct
2456						mlx5_flow_handler*)context;
2457
2458	mutex_lock(&rule->clients_lock);
2459	list_for_each_entry_safe(iter_client, temp_client,
2460				 &rule->clients_data, list) {
2461		if (iter_client->fs_handler == handler) {
2462			list_del(&iter_client->list);
2463			kfree(iter_client);
2464			break;
2465		}
2466	}
2467	mutex_unlock(&rule->clients_lock);
2468
2469	return 0;
2470}
2471
2472struct mlx5_flow_handler *mlx5_register_rule_notifier(struct mlx5_core_dev *dev,
2473								enum mlx5_flow_namespace_type ns_type,
2474								rule_event_fn add_cb,
2475								rule_event_fn del_cb,
2476								void *context)
2477{
2478	struct mlx5_flow_namespace *ns;
2479	struct mlx5_flow_handler *handler;
2480
2481	ns = mlx5_get_flow_namespace(dev, ns_type);
2482	if (!ns)
2483		return ERR_PTR(-EINVAL);
2484
2485	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
2486	if (!handler)
2487		return ERR_PTR(-ENOMEM);
2488
2489	handler->add_dst_cb = add_cb;
2490	handler->del_dst_cb = del_cb;
2491	handler->client_context = context;
2492	handler->ns = ns;
2493	down_write(&ns->notifiers_rw_sem);
2494	list_add_tail(&handler->list, &ns->list_notifiers);
2495	up_write(&ns->notifiers_rw_sem);
2496
2497	return handler;
2498}
2499
2500static void iterate_rules_in_ns(struct mlx5_flow_namespace *ns,
2501				rule_event_fn add_rule_cb,
2502				void *context);
2503
/*
 * Detach a handler registered via mlx5_register_rule_notifier() and
 * free it.  Both namespace semaphores are held for writing, in the
 * fixed dests -> notifiers order, so that no rule iteration or
 * notification can observe the handler while its per-rule client
 * data is purged and it is unlinked from the notifier list.
 */
void mlx5_unregister_rule_notifier(struct mlx5_flow_handler *handler)
{
	struct mlx5_flow_namespace *ns = handler->ns;

	/* Remove this handler's private data from every rule in ns. */
	down_write(&ns->dests_rw_sem);
	down_write(&ns->notifiers_rw_sem);
	iterate_rules_in_ns(ns, remove_from_clients, handler);
	list_del(&handler->list);
	up_write(&ns->notifiers_rw_sem);
	up_write(&ns->dests_rw_sem);
	kfree(handler);
}
2517
/*
 * Invoke @add_rule_cb on every rule in flow table @ft.
 *
 * Walks flow group -> FTE -> rule, taking each object's base lock for
 * the duration of its sub-walk.  The callback's second argument tells
 * it whether this is the first rule seen for the current FTE.  A
 * non-zero return from the callback aborts the entire iteration.
 */
static void iterate_rules_in_ft(struct mlx5_flow_table *ft,
				rule_event_fn add_rule_cb,
				void *context)
{
	struct mlx5_flow_group *iter_fg;
	struct fs_fte *iter_fte;
	struct mlx5_flow_rule *iter_rule;
	int err = 0;
	bool is_new_rule;

	mutex_lock(&ft->base.lock);
	fs_for_each_fg(iter_fg, ft) {
		mutex_lock(&iter_fg->base.lock);
		fs_for_each_fte(iter_fte, iter_fg) {
			mutex_lock(&iter_fte->base.lock);
			is_new_rule = true;
			fs_for_each_dst(iter_rule, iter_fte) {
				/* Hold a reference across the callback. */
				fs_get(&iter_rule->base);
				err = add_rule_cb(iter_rule,
						 is_new_rule,
						 NULL,
						 context);
				/* Parent (FTE) lock is already held here. */
				fs_put_parent_locked(&iter_rule->base);
				if (err)
					break;
				is_new_rule = false;
			}
			mutex_unlock(&iter_fte->base.lock);
			if (err)
				break;
		}
		mutex_unlock(&iter_fg->base.lock);
		if (err)
			break;
	}
	mutex_unlock(&ft->base.lock);
}
2555
2556static void iterate_rules_in_prio(struct fs_prio *prio,
2557				  rule_event_fn add_rule_cb,
2558				  void *context)
2559{
2560	struct fs_base *it;
2561
2562	mutex_lock(&prio->base.lock);
2563	fs_for_each_ns_or_ft(it, prio) {
2564		if (it->type == FS_TYPE_FLOW_TABLE) {
2565			struct mlx5_flow_table	      *ft;
2566
2567			fs_get_obj(ft, it);
2568			iterate_rules_in_ft(ft, add_rule_cb, context);
2569		} else {
2570			struct mlx5_flow_namespace *ns;
2571
2572			fs_get_obj(ns, it);
2573			iterate_rules_in_ns(ns, add_rule_cb, context);
2574		}
2575	}
2576	mutex_unlock(&prio->base.lock);
2577}
2578
2579static void iterate_rules_in_ns(struct mlx5_flow_namespace *ns,
2580				rule_event_fn add_rule_cb,
2581				void *context)
2582{
2583	struct fs_prio *iter_prio;
2584
2585	mutex_lock(&ns->base.lock);
2586	fs_for_each_prio(iter_prio, ns) {
2587		iterate_rules_in_prio(iter_prio, add_rule_cb, context);
2588	}
2589	mutex_unlock(&ns->base.lock);
2590}
2591
/*
 * Replay all rules currently present in @ns to @add_rule_cb.
 *
 * Lock order matches mlx5_unregister_rule_notifier(): dests_rw_sem
 * first, then notifiers_rw_sem.  dests is taken for writing,
 * notifiers only for reading — presumably so destination changes are
 * excluded while concurrent notifier-list readers may proceed
 * (NOTE(review): confirm against the rest of the locking scheme).
 */
void mlx5_flow_iterate_existing_rules(struct mlx5_flow_namespace *ns,
					 rule_event_fn add_rule_cb,
					 void *context)
{
	down_write(&ns->dests_rw_sem);
	down_read(&ns->notifiers_rw_sem);
	iterate_rules_in_ns(ns, add_rule_cb, context);
	up_read(&ns->notifiers_rw_sem);
	up_write(&ns->dests_rw_sem);
}
2602
2603
2604void mlx5_del_flow_rules_list(struct mlx5_flow_rules_list *rules_list)
2605{
2606	struct mlx5_flow_rule_node *iter_node;
2607	struct mlx5_flow_rule_node *temp_node;
2608
2609	list_for_each_entry_safe(iter_node, temp_node, &rules_list->head, list) {
2610		list_del(&iter_node->list);
2611		kfree(iter_node);
2612	}
2613
2614	kfree(rules_list);
2615}
2616
2617#define ROCEV1_ETHERTYPE 0x8915
2618static int set_rocev1_rules(struct list_head *rules_list)
2619{
2620	struct mlx5_flow_rule_node *rocev1_rule;
2621
2622	rocev1_rule = kzalloc(sizeof(*rocev1_rule), GFP_KERNEL);
2623	if (!rocev1_rule)
2624		return -ENOMEM;
2625
2626	rocev1_rule->match_criteria_enable =
2627		1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS;
2628	MLX5_SET(fte_match_set_lyr_2_4, rocev1_rule->match_criteria, ethertype,
2629		 0xffff);
2630	MLX5_SET(fte_match_set_lyr_2_4, rocev1_rule->match_value, ethertype,
2631		 ROCEV1_ETHERTYPE);
2632
2633	list_add_tail(&rocev1_rule->list, rules_list);
2634
2635	return 0;
2636}
2637
2638#define ROCEV2_UDP_PORT 4791
2639static int set_rocev2_rules(struct list_head *rules_list)
2640{
2641	struct mlx5_flow_rule_node *ipv4_rule;
2642	struct mlx5_flow_rule_node *ipv6_rule;
2643
2644	ipv4_rule = kzalloc(sizeof(*ipv4_rule), GFP_KERNEL);
2645	if (!ipv4_rule)
2646		return -ENOMEM;
2647
2648	ipv6_rule = kzalloc(sizeof(*ipv6_rule), GFP_KERNEL);
2649	if (!ipv6_rule) {
2650		kfree(ipv4_rule);
2651		return -ENOMEM;
2652	}
2653
2654	ipv4_rule->match_criteria_enable =
2655		1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS;
2656	MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_criteria, ethertype,
2657		 0xffff);
2658	MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_value, ethertype,
2659		 0x0800);
2660	MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_criteria, ip_protocol,
2661		 0xff);
2662	MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_value, ip_protocol,
2663		 IPPROTO_UDP);
2664	MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_criteria, udp_dport,
2665		 0xffff);
2666	MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_value, udp_dport,
2667		 ROCEV2_UDP_PORT);
2668
2669	ipv6_rule->match_criteria_enable =
2670		1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS;
2671	MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_criteria, ethertype,
2672		 0xffff);
2673	MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_value, ethertype,
2674		 0x86dd);
2675	MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_criteria, ip_protocol,
2676		 0xff);
2677	MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_value, ip_protocol,
2678		 IPPROTO_UDP);
2679	MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_criteria, udp_dport,
2680		 0xffff);
2681	MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_value, udp_dport,
2682		 ROCEV2_UDP_PORT);
2683
2684	list_add_tail(&ipv4_rule->list, rules_list);
2685	list_add_tail(&ipv6_rule->list, rules_list);
2686
2687	return 0;
2688}
2689
2690
2691struct mlx5_flow_rules_list *get_roce_flow_rules(u8 roce_mode)
2692{
2693	int err = 0;
2694	struct mlx5_flow_rules_list *rules_list =
2695		kzalloc(sizeof(*rules_list), GFP_KERNEL);
2696
2697	if (!rules_list)
2698		return NULL;
2699
2700	INIT_LIST_HEAD(&rules_list->head);
2701
2702	if (roce_mode & MLX5_ROCE_VERSION_1_CAP) {
2703		err = set_rocev1_rules(&rules_list->head);
2704		if (err)
2705			goto free_list;
2706	}
2707	if (roce_mode & MLX5_ROCE_VERSION_2_CAP)
2708		err = set_rocev2_rules(&rules_list->head);
2709	if (err)
2710		goto free_list;
2711
2712	return rules_list;
2713
2714free_list:
2715	mlx5_del_flow_rules_list(rules_list);
2716	return NULL;
2717}
2718