// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "en/tc_priv.h"
#include "en_tc.h"
#include "post_act.h"
#include "mlx5_core.h"
#include "fs_core.h"

struct mlx5e_post_act {
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5_fs_chains *chains;
	struct mlx5_flow_table *ft;
	struct mlx5e_priv *priv;
	struct xarray ids;
};

struct mlx5e_post_act_handle {
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5_flow_attr *attr;
	struct mlx5_flow_handle *rule;
	u32 id;
};

#define MLX5_POST_ACTION_BITS MLX5_REG_MAPPING_MBITS(FTEID_TO_REG)
#define MLX5_POST_ACTION_MASK MLX5_REG_MAPPING_MASK(FTEID_TO_REG)
#define MLX5_POST_ACTION_MAX MLX5_POST_ACTION_MASK

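/* Create the table that will hold post action rules and the xarray used to
 * allocate their fte_id keys. Post actions require the ignore_flow_level
 * capability; without it -EOPNOTSUPP is returned.
 */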
struct mlx5e_post_act *
mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
		       enum mlx5_flow_namespace_type ns_type)
{
	enum fs_flow_table_type table_type = ns_type == MLX5_FLOW_NAMESPACE_FDB ?
					     FS_FT_FDB : FS_FT_NIC_RX;
	struct mlx5e_post_act *post_act;
	int err;

	if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ignore_flow_level, table_type)) {
		if (priv->mdev->coredev_type == MLX5_COREDEV_PF)
			mlx5_core_dbg(priv->mdev, "firmware flow level support is missing\n");
		err = -EOPNOTSUPP;
		goto err_check;
	}

	post_act = kzalloc(sizeof(*post_act), GFP_KERNEL);
	if (!post_act) {
		err = -ENOMEM;
		goto err_check;
	}
	post_act->ft = mlx5_chains_create_global_table(chains);
	if (IS_ERR(post_act->ft)) {
		err = PTR_ERR(post_act->ft);
		mlx5_core_warn(priv->mdev, "failed to create post action table, err: %d\n", err);
		goto err_ft;
	}
	post_act->chains = chains;
	post_act->ns_type = ns_type;
	post_act->priv = priv;
	xa_init_flags(&post_act->ids, XA_FLAGS_ALLOC1);
	return post_act;

err_ft:
	kfree(post_act);
err_check:
	return ERR_PTR(err);
}

void
mlx5e_tc_post_act_destroy(struct mlx5e_post_act *post_act)
{
	if (IS_ERR_OR_NULL(post_act))
		return;

	xa_destroy(&post_act->ids);
	mlx5_chains_destroy_global_table(post_act->chains, post_act->ft);
	kfree(post_act);
}

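/* Install the rule for @handle in the post action table, matching on the
 * fte_id previously written to FTEID_TO_REG and applying the attributes
 * stored in the handle.
 */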
int
mlx5e_tc_post_act_offload(struct mlx5e_post_act *post_act,
			  struct mlx5e_post_act_handle *handle)
{
	struct mlx5_flow_spec *spec;
	int err;

	if (IS_ERR(post_act))
		return PTR_ERR(post_act);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* The post action rule matches on fte_id and executes the original rule's TC action */
	mlx5e_tc_match_to_reg_match(spec, FTEID_TO_REG, handle->id, MLX5_POST_ACTION_MASK);

	handle->rule = mlx5e_tc_rule_offload(post_act->priv, spec, handle->attr);
	if (IS_ERR(handle->rule)) {
		err = PTR_ERR(handle->rule);
		netdev_warn(post_act->priv->netdev, "Failed to add post action rule");
		goto err_rule;
	}

	kvfree(spec);
	return 0;

err_rule:
	kvfree(spec);
	return err;
}

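/* Allocate a handle and an fte_id for @post_attr and redirect the attributes
 * into the post action table. The rule itself is not installed here; callers
 * offload it separately with mlx5e_tc_post_act_offload().
 */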
struct mlx5e_post_act_handle *
mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *post_attr)
{
	struct mlx5e_post_act_handle *handle;
	int err;

	if (IS_ERR(post_act))
		return ERR_CAST(post_act);

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	post_attr->chain = 0;
	post_attr->prio = 0;
	post_attr->ft = post_act->ft;
	post_attr->inner_match_level = MLX5_MATCH_NONE;
	post_attr->outer_match_level = MLX5_MATCH_NONE;
	post_attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_DECAP;
	post_attr->flags |= MLX5_ATTR_FLAG_NO_IN_PORT;

	handle->ns_type = post_act->ns_type;
	/* Splits were handled before the post action */
	if (handle->ns_type == MLX5_FLOW_NAMESPACE_FDB)
		post_attr->esw_attr->split_count = 0;

	err = xa_alloc(&post_act->ids, &handle->id, post_attr,
		       XA_LIMIT(1, MLX5_POST_ACTION_MAX), GFP_KERNEL);
	if (err)
		goto err_xarray;

	handle->attr = post_attr;

	return handle;

err_xarray:
	kfree(handle);
	return ERR_PTR(err);
}

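/* Remove the post action rule from hardware, keeping the handle and its
 * fte_id allocation.
 */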
void
mlx5e_tc_post_act_unoffload(struct mlx5e_post_act *post_act,
			    struct mlx5e_post_act_handle *handle)
{
	mlx5e_tc_rule_unoffload(post_act->priv, handle->rule, handle->attr);
	handle->rule = NULL;
}

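/* Unoffload the rule if it is still installed, then release the fte_id and
 * free the handle.
 */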
void
mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_handle *handle)
{
	if (!IS_ERR_OR_NULL(handle->rule))
		mlx5e_tc_post_act_unoffload(post_act, handle);
	xa_erase(&post_act->ids, handle->id);
	kfree(handle);
}

struct mlx5_flow_table *
mlx5e_tc_post_act_get_ft(struct mlx5e_post_act *post_act)
{
	return post_act->ft;
}

/* Allocate a header modify action to write the post action handle fte id to a register. */
int
mlx5e_tc_post_act_set_handle(struct mlx5_core_dev *dev,
			     struct mlx5e_post_act_handle *handle,
			     struct mlx5e_tc_mod_hdr_acts *acts)
{
	return mlx5e_tc_match_to_reg_set(dev, acts, handle->ns_type, FTEID_TO_REG, handle->id);
}