// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <net/pkt_cls.h>

#include "../nfpcore/nfp_cpp.h"
#include "../nfp_app.h"
#include "../nfp_net_repr.h"
#include "main.h"

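/* A single offloaded u32 filter, identified by its TC @handle.  ToS/DSCP
 * bits covered by @mask and equal to @val are mapped to @band.
 */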
struct nfp_abm_u32_match {
	u32 handle;
	u32 band;
	u8 mask;
	u8 val;
	struct list_head list;
};

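/* Validate that the u32 knode describes a plain ToS/DSCP match which the
 * firmware can offload; on failure report the reason through extack.
 */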
static bool
nfp_abm_u32_check_knode(struct nfp_abm *abm, struct tc_cls_u32_knode *knode,
			__be16 proto, struct netlink_ext_ack *extack)
{
	struct tc_u32_key *k;
	unsigned int tos_off;

	if (knode->exts && tcf_exts_has_actions(knode->exts)) {
		NL_SET_ERR_MSG_MOD(extack, "action offload not supported");
		return false;
	}
	if (knode->link_handle) {
		NL_SET_ERR_MSG_MOD(extack, "linking not supported");
		return false;
	}
	if (knode->sel->flags != TC_U32_TERMINAL) {
		NL_SET_ERR_MSG_MOD(extack,
				   "flags must be equal to TC_U32_TERMINAL");
		return false;
	}
	if (knode->sel->off || knode->sel->offshift || knode->sel->offmask ||
	    knode->sel->offoff || knode->fshift) {
		NL_SET_ERR_MSG_MOD(extack, "variable offsetting not supported");
		return false;
	}
	if (knode->sel->hoff || knode->sel->hmask) {
		NL_SET_ERR_MSG_MOD(extack, "hashing not supported");
		return false;
	}
	if (knode->val || knode->mask) {
		NL_SET_ERR_MSG_MOD(extack, "matching on mark not supported");
		return false;
	}
	if (knode->res && knode->res->class) {
		NL_SET_ERR_MSG_MOD(extack, "setting non-0 class not supported");
		return false;
	}
	if (knode->res && knode->res->classid >= abm->num_bands) {
		NL_SET_ERR_MSG_MOD(extack,
				   "classid higher than number of bands");
		return false;
	}
	if (knode->sel->nkeys != 1) {
		NL_SET_ERR_MSG_MOD(extack, "exactly one key required");
		return false;
	}

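	/* Find the shift which extracts the ToS / Traffic Class byte from
	 * the first 32-bit word of the header.
	 */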
	switch (proto) {
	case htons(ETH_P_IP):
		tos_off = 16;
		break;
	case htons(ETH_P_IPV6):
		tos_off = 20;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "only IP and IPv6 supported as filter protocol");
		return false;
	}

	k = &knode->sel->keys[0];
	if (k->offmask) {
		NL_SET_ERR_MSG_MOD(extack, "offset mask - variable offsetting not supported");
		return false;
	}
	if (k->off) {
		NL_SET_ERR_MSG_MOD(extack, "only DSCP fields can be matched");
		return false;
	}
	if (k->val & ~k->mask) {
		NL_SET_ERR_MSG_MOD(extack, "mask does not cover the key");
		return false;
	}
	if (be32_to_cpu(k->mask) >> tos_off & ~abm->dscp_mask) {
		NL_SET_ERR_MSG_MOD(extack, "only high DSCP class selector bits can be used");
		nfp_err(abm->app->cpp,
			"u32 offload: requested mask %x FW can support only %x\n",
			be32_to_cpu(k->mask) >> tos_off, abm->dscp_mask);
		return false;
	}

	return true;
}

/* This filter list -> map conversion is O(n * m), but we expect a single
 * digit or low double digit number of prios and likewise for the filters.
 * u32 also doesn't report stats, so this is really only a setup time cost.
 */
static unsigned int
nfp_abm_find_band_for_prio(struct nfp_abm_link *alink, unsigned int prio)
{
	struct nfp_abm_u32_match *iter;

	list_for_each_entry(iter, &alink->dscp_map, list)
		if ((prio & iter->mask) == iter->val)
			return iter->band;

	return alink->def_band;
}

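/* Regenerate the priority -> band map from the list of offloaded filters
 * and push the new map to the firmware.
 */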
static int nfp_abm_update_band_map(struct nfp_abm_link *alink)
{
	unsigned int i, bits_per_prio, prios_per_word, base_shift;
	struct nfp_abm *abm = alink->abm;
	u32 field_mask;

	alink->has_prio = !list_empty(&alink->dscp_map);

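	/* Each map entry holds a band ID, entries are packed into 32-bit
	 * words using the smallest power-of-two field width which can
	 * encode any band number.
	 */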
	bits_per_prio = roundup_pow_of_two(order_base_2(abm->num_bands));
	field_mask = (1 << bits_per_prio) - 1;
	prios_per_word = sizeof(u32) * BITS_PER_BYTE / bits_per_prio;

	/* FW mask applies from top bits */
	base_shift = 8 - order_base_2(abm->num_prios);

	for (i = 0; i < abm->num_prios; i++) {
		unsigned int offset;
		u32 *word;
		u8 band;

		word = &alink->prio_map[i / prios_per_word];
		offset = (i % prios_per_word) * bits_per_prio;

		band = nfp_abm_find_band_for_prio(alink, i << base_shift);

		*word &= ~(field_mask << offset);
		*word |= band << offset;
	}

	/* Qdisc offload status may change if has_prio changed */
	nfp_abm_qdisc_offload_update(alink);

	return nfp_abm_ctrl_prio_map_update(alink, alink->prio_map);
}

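/* Remove the filter with a matching handle, if it was offloaded, and
 * regenerate the band map without it.
 */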
static void
nfp_abm_u32_knode_delete(struct nfp_abm_link *alink,
			 struct tc_cls_u32_knode *knode)
{
	struct nfp_abm_u32_match *iter;

	list_for_each_entry(iter, &alink->dscp_map, list)
		if (iter->handle == knode->handle) {
			list_del(&iter->list);
			kfree(iter);
			nfp_abm_update_band_map(alink);
			return;
		}
}

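/* Add or update the band mapping for a u32 filter.  On any error the
 * filter is removed from the offload list so that no stale state is
 * left behind.
 */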
static int
nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
			  struct tc_cls_u32_knode *knode,
			  __be16 proto, struct netlink_ext_ack *extack)
{
	struct nfp_abm_u32_match *match = NULL, *iter;
	unsigned int tos_off;
	u8 mask, val;
	int err;

	if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack))
		goto err_delete;

	tos_off = proto == htons(ETH_P_IP) ? 16 : 20;

	/* Extract the DSCP Class Selector bits */
	val = be32_to_cpu(knode->sel->keys[0].val) >> tos_off & 0xff;
	mask = be32_to_cpu(knode->sel->keys[0].mask) >> tos_off & 0xff;

	/* Check if there is no conflicting mapping and find match by handle */
	list_for_each_entry(iter, &alink->dscp_map, list) {
		u32 cmask;

		if (iter->handle == knode->handle) {
			match = iter;
			continue;
		}

		cmask = iter->mask & mask;
		if ((iter->val & cmask) == (val & cmask) &&
		    iter->band != knode->res->classid) {
			NL_SET_ERR_MSG_MOD(extack, "conflict with already offloaded filter");
			goto err_delete;
		}
	}

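	/* No entry for this handle yet, allocate one and add it to the list */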
	if (!match) {
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		if (!match)
			return -ENOMEM;
		list_add(&match->list, &alink->dscp_map);
	}
	match->handle = knode->handle;
	match->band = knode->res->classid;
	match->mask = mask;
	match->val = val;

	err = nfp_abm_update_band_map(alink);
	if (err)
		goto err_delete;

	return 0;

err_delete:
	nfp_abm_u32_knode_delete(alink, knode);
	return -EOPNOTSUPP;
}

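/* Block callback invoked by TC for each command on the bound block, only
 * u32 classifier offload on IP and IPv6 is accepted.
 */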
static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type,
				     void *type_data, void *cb_priv)
{
	struct tc_cls_u32_offload *cls_u32 = type_data;
	struct nfp_repr *repr = cb_priv;
	struct nfp_abm_link *alink;

	alink = repr->app_priv;

	if (type != TC_SETUP_CLSU32) {
		NL_SET_ERR_MSG_MOD(cls_u32->common.extack,
				   "only offload of u32 classifier supported");
		return -EOPNOTSUPP;
	}
	if (!tc_cls_can_offload_and_chain0(repr->netdev, &cls_u32->common))
		return -EOPNOTSUPP;

	if (cls_u32->common.protocol != htons(ETH_P_IP) &&
	    cls_u32->common.protocol != htons(ETH_P_IPV6)) {
		NL_SET_ERR_MSG_MOD(cls_u32->common.extack,
				   "only IP and IPv6 supported as filter protocol");
		return -EOPNOTSUPP;
	}

	switch (cls_u32->command) {
	case TC_CLSU32_NEW_KNODE:
	case TC_CLSU32_REPLACE_KNODE:
		return nfp_abm_u32_knode_replace(alink, &cls_u32->knode,
						 cls_u32->common.protocol,
						 cls_u32->common.extack);
	case TC_CLSU32_DELETE_KNODE:
		nfp_abm_u32_knode_delete(alink, &cls_u32->knode);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(nfp_abm_block_cb_list);

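/* Bind or unbind the u32 offload callback on the representor's ingress
 * TC block.
 */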
int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr,
			    struct flow_block_offload *f)
{
	return flow_block_cb_setup_simple(f, &nfp_abm_block_cb_list,
					  nfp_abm_setup_tc_block_cb,
					  repr, repr, true);
}