1// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */
3
4#include "prestera.h"
5#include "prestera_acl.h"
6#include "prestera_flow.h"
7#include "prestera_flower.h"
8#include "prestera_matchall.h"
9
/* Per-chain tc flower template: keeps a reference to the ACL ruleset
 * created for a chain so that its keymask is preserved and reused by
 * rules added to that chain later.
 */
struct prestera_flower_template {
	struct prestera_acl_ruleset *ruleset;	/* reference held until template destroy/cleanup */
	struct list_head list;			/* node in prestera_flow_block template_list */
	u32 chain_index;			/* tc chain this template was created for */
};
15
16static void
17prestera_flower_template_free(struct prestera_flower_template *template)
18{
19	prestera_acl_ruleset_put(template->ruleset);
20	list_del(&template->list);
21	kfree(template);
22}
23
24void prestera_flower_template_cleanup(struct prestera_flow_block *block)
25{
26	struct prestera_flower_template *template, *tmp;
27
28	/* put the reference to all rulesets kept in tmpl create */
29	list_for_each_entry_safe(template, tmp, &block->template_list, list)
30		prestera_flower_template_free(template);
31}
32
33static int
34prestera_flower_parse_goto_action(struct prestera_flow_block *block,
35				  struct prestera_acl_rule *rule,
36				  u32 chain_index,
37				  const struct flow_action_entry *act)
38{
39	struct prestera_acl_ruleset *ruleset;
40
41	if (act->chain_index <= chain_index)
42		/* we can jump only forward */
43		return -EINVAL;
44
45	if (rule->re_arg.jump.valid)
46		return -EEXIST;
47
48	ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
49					   act->chain_index);
50	if (IS_ERR(ruleset))
51		return PTR_ERR(ruleset);
52
53	rule->re_arg.jump.valid = 1;
54	rule->re_arg.jump.i.index = prestera_acl_ruleset_index_get(ruleset);
55
56	rule->jump_ruleset = ruleset;
57
58	return 0;
59}
60
/* Translate the tc flow_action list into rule->re_arg.
 *
 * The HW-stats type of the first action decides whether a delayed
 * counter is set up before the per-action loop; mixed stats types are
 * rejected up front. Duplicate actions of the same kind yield -EEXIST.
 *
 * Returns 0 on success or a negative errno.
 */
static int prestera_flower_parse_actions(struct prestera_flow_block *block,
					 struct prestera_acl_rule *rule,
					 struct flow_action *flow_action,
					 u32 chain_index,
					 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int err, i;

	/* whole struct (rule->re_arg) must be initialized with 0 */
	if (!flow_action_has_entries(flow_action))
		return 0;

	/* all actions must request the same HW stats type */
	if (!flow_action_mixed_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

	act = flow_action_first_entry_get(flow_action);
	if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) {
		/* Nothing to do */
	} else if (act->hw_stats & FLOW_ACTION_HW_STATS_DELAYED) {
		/* setup counter first */
		rule->re_arg.count.valid = true;
		err = prestera_acl_chain_to_client(chain_index, block->ingress,
						   &rule->re_arg.count.client);
		if (err)
			return err;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			if (rule->re_arg.accept.valid)
				return -EEXIST;

			rule->re_arg.accept.valid = 1;
			break;
		case FLOW_ACTION_DROP:
			if (rule->re_arg.drop.valid)
				return -EEXIST;

			rule->re_arg.drop.valid = 1;
			break;
		case FLOW_ACTION_TRAP:
			if (rule->re_arg.trap.valid)
				return -EEXIST;

			rule->re_arg.trap.valid = 1;
			break;
		case FLOW_ACTION_POLICE:
			if (rule->re_arg.police.valid)
				return -EEXIST;

			rule->re_arg.police.valid = 1;
			rule->re_arg.police.rate =
				act->police.rate_bytes_ps;
			rule->re_arg.police.burst = act->police.burst;
			rule->re_arg.police.ingress = block->ingress;
			break;
		case FLOW_ACTION_GOTO:
			/* resolves and pins the destination ruleset */
			err = prestera_flower_parse_goto_action(block, rule,
								chain_index,
								act);
			if (err)
				return err;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			pr_err("Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}
138
/* Parse FLOW_DISSECTOR_KEY_META: match on the ingress port.
 *
 * The ingress ifindex is resolved to a prestera port and encoded into
 * the SYS_PORT/SYS_DEV match fields of the rule key. Only an exact
 * ifindex match is supported; l2_miss matching is rejected.
 *
 * Returns 0 on success or a negative errno.
 */
static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
				      struct flow_cls_offload *f,
				      struct prestera_flow_block *block)
{
	struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
	struct prestera_acl_match *r_match = &rule->re_key.match;
	struct prestera_port *port;
	struct net_device *ingress_dev;
	struct flow_match_meta match;
	__be16 key, mask;

	flow_rule_match_meta(f_rule, &match);

	if (match.mask->l2_miss) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on \"l2_miss\"");
		return -EOPNOTSUPP;
	}

	/* only a fully-specified ifindex maps to a single HW port */
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(f->common.extack,
				   "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(block->net,
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(f->common.extack,
				   "Can't find specified ingress port to match on");
		return -EINVAL;
	}

	/* the netdev must belong to this driver to have hw_id/dev_id */
	if (!prestera_netdev_check(ingress_dev)) {
		NL_SET_ERR_MSG_MOD(f->common.extack,
				   "Can't match on switchdev ingress port");
		return -EINVAL;
	}
	port = netdev_priv(ingress_dev);

	/* hw_id appears shifted left by 3 under a 13-bit mask - presumably
	 * matching the SYS_PORT field layout of the ACL key; confirm
	 * against the keymask definition.
	 */
	mask = htons(0x1FFF << 3);
	key = htons(port->hw_id << 3);
	rule_match_set(r_match->key, SYS_PORT, key);
	rule_match_set(r_match->mask, SYS_PORT, mask);

	/* device id, 10-bit field */
	mask = htons(0x3FF);
	key = htons(port->dev_id);
	rule_match_set(r_match->key, SYS_DEV, key);
	rule_match_set(r_match->mask, SYS_DEV, mask);

	return 0;
}
190
/* Translate a tc flower classifier into a prestera ACL rule.
 *
 * Rejects any dissector key outside the supported set, then fills
 * rule->re_key.match field by field (meta/ethertype/MACs/IPv4/L4
 * ports/port ranges/VLAN/ICMP) and finally parses the action list via
 * prestera_flower_parse_actions().
 *
 * Returns 0 on success or a negative errno.
 */
static int prestera_flower_parse(struct prestera_flow_block *block,
				 struct prestera_acl_rule *rule,
				 struct flow_cls_offload *f)
{
	struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = f_rule->match.dissector;
	struct prestera_acl_match *r_match = &rule->re_key.match;
	__be16 n_proto_mask = 0;
	__be16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	/* reject any key this driver cannot offload */
	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_META) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS_RANGE) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN))) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	prestera_acl_rule_priority_set(rule, f->common.prio);

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_META)) {
		err = prestera_flower_parse_meta(rule, f, block);
		if (err)
			return err;
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(f_rule, &match);
		/* addr_type selects the IPv4 block below */
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(f_rule, &match);
		n_proto_key = match.key->n_proto;
		n_proto_mask = match.mask->n_proto;

		/* ETH_P_ALL means "any protocol": match nothing here */
		if (ntohs(match.key->n_proto) == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}

		rule_match_set(r_match->key, ETH_TYPE, n_proto_key);
		rule_match_set(r_match->mask, ETH_TYPE, n_proto_mask);

		rule_match_set(r_match->key, IP_PROTO, match.key->ip_proto);
		rule_match_set(r_match->mask, IP_PROTO, match.mask->ip_proto);
		/* remembered for the L4 ports sanity check below */
		ip_proto = match.key->ip_proto;
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(f_rule, &match);

		/* MACs are split 4+2 bytes across two match fields */
		/* DA key, mask */
		rule_match_set_n(r_match->key,
				 ETH_DMAC_0, &match.key->dst[0], 4);
		rule_match_set_n(r_match->key,
				 ETH_DMAC_1, &match.key->dst[4], 2);

		rule_match_set_n(r_match->mask,
				 ETH_DMAC_0, &match.mask->dst[0], 4);
		rule_match_set_n(r_match->mask,
				 ETH_DMAC_1, &match.mask->dst[4], 2);

		/* SA key, mask */
		rule_match_set_n(r_match->key,
				 ETH_SMAC_0, &match.key->src[0], 4);
		rule_match_set_n(r_match->key,
				 ETH_SMAC_1, &match.key->src[4], 2);

		rule_match_set_n(r_match->mask,
				 ETH_SMAC_0, &match.mask->src[0], 4);
		rule_match_set_n(r_match->mask,
				 ETH_SMAC_1, &match.mask->src[4], 2);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(f_rule, &match);

		rule_match_set(r_match->key, IP_SRC, match.key->src);
		rule_match_set(r_match->mask, IP_SRC, match.mask->src);

		rule_match_set(r_match->key, IP_DST, match.key->dst);
		rule_match_set(r_match->mask, IP_DST, match.mask->dst);
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		/* L4 port matching is meaningful for TCP/UDP only */
		if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
			NL_SET_ERR_MSG_MOD
			    (f->common.extack,
			     "Only UDP and TCP keys are supported");
			return -EINVAL;
		}

		flow_rule_match_ports(f_rule, &match);

		rule_match_set(r_match->key, L4_PORT_SRC, match.key->src);
		rule_match_set(r_match->mask, L4_PORT_SRC, match.mask->src);

		rule_match_set(r_match->key, L4_PORT_DST, match.key->dst);
		rule_match_set(r_match->mask, L4_PORT_DST, match.mask->dst);
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS_RANGE)) {
		struct flow_match_ports_range match;
		__be32 tp_key, tp_mask;

		flow_rule_match_ports_range(f_rule, &match);

		/* min/max are packed into one 32-bit field: min in the
		 * low half, max in the high half
		 */
		/* src port range (min, max) */
		tp_key = htonl(ntohs(match.key->tp_min.src) |
			       (ntohs(match.key->tp_max.src) << 16));
		tp_mask = htonl(ntohs(match.mask->tp_min.src) |
				(ntohs(match.mask->tp_max.src) << 16));
		rule_match_set(r_match->key, L4_PORT_RANGE_SRC, tp_key);
		rule_match_set(r_match->mask, L4_PORT_RANGE_SRC, tp_mask);

		/* dst port range (min, max) */
		tp_key = htonl(ntohs(match.key->tp_min.dst) |
			       (ntohs(match.key->tp_max.dst) << 16));
		tp_mask = htonl(ntohs(match.mask->tp_min.dst) |
				(ntohs(match.mask->tp_max.dst) << 16));
		rule_match_set(r_match->key, L4_PORT_RANGE_DST, tp_key);
		rule_match_set(r_match->mask, L4_PORT_RANGE_DST, tp_mask);
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(f_rule, &match);

		if (match.mask->vlan_id != 0) {
			__be16 key = cpu_to_be16(match.key->vlan_id);
			__be16 mask = cpu_to_be16(match.mask->vlan_id);

			rule_match_set(r_match->key, VLAN_ID, key);
			rule_match_set(r_match->mask, VLAN_ID, mask);
		}

		rule_match_set(r_match->key, VLAN_TPID, match.key->vlan_tpid);
		rule_match_set(r_match->mask, VLAN_TPID, match.mask->vlan_tpid);
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_match_icmp match;

		flow_rule_match_icmp(f_rule, &match);

		rule_match_set(r_match->key, ICMP_TYPE, match.key->type);
		rule_match_set(r_match->mask, ICMP_TYPE, match.mask->type);

		rule_match_set(r_match->key, ICMP_CODE, match.key->code);
		rule_match_set(r_match->mask, ICMP_CODE, match.mask->code);
	}

	return prestera_flower_parse_actions(block, rule, &f->rule->action,
					     f->common.chain_index,
					     f->common.extack);
}
369
370static int prestera_flower_prio_check(struct prestera_flow_block *block,
371				      struct flow_cls_offload *f)
372{
373	u32 mall_prio_min;
374	u32 mall_prio_max;
375	int err;
376
377	err = prestera_mall_prio_get(block, &mall_prio_min, &mall_prio_max);
378	if (err == -ENOENT)
379		/* No matchall filters installed on this chain. */
380		return 0;
381
382	if (err) {
383		NL_SET_ERR_MSG(f->common.extack, "Failed to get matchall priorities");
384		return err;
385	}
386
387	if (f->common.prio <= mall_prio_max && block->ingress) {
388		NL_SET_ERR_MSG(f->common.extack,
389			       "Failed to add in front of existing matchall rules");
390		return -EOPNOTSUPP;
391	}
392	if (f->common.prio >= mall_prio_min && !block->ingress) {
393		NL_SET_ERR_MSG(f->common.extack, "Failed to add behind of existing matchall rules");
394		return -EOPNOTSUPP;
395	}
396
397	return 0;
398}
399
400int prestera_flower_prio_get(struct prestera_flow_block *block, u32 chain_index,
401			     u32 *prio_min, u32 *prio_max)
402{
403	struct prestera_acl_ruleset *ruleset;
404
405	ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block, chain_index);
406	if (IS_ERR(ruleset))
407		return PTR_ERR(ruleset);
408
409	prestera_acl_ruleset_prio_get(ruleset, prio_min, prio_max);
410	return 0;
411}
412
/* Install (or replace) a tc flower rule on the given block.
 *
 * Flow: check priority against matchall, get/pin the chain's ruleset,
 * create the rule (which takes its own ruleset reference), parse the
 * classifier, offload the ruleset if not yet done, add the rule to HW,
 * then drop this function's ruleset reference.
 *
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired here are released via the goto chain.
 */
int prestera_flower_replace(struct prestera_flow_block *block,
			    struct flow_cls_offload *f)
{
	struct prestera_acl_ruleset *ruleset;
	struct prestera_acl *acl = block->sw->acl;
	struct prestera_acl_rule *rule;
	int err;

	err = prestera_flower_prio_check(block, f);
	if (err)
		return err;

	ruleset = prestera_acl_ruleset_get(acl, block, f->common.chain_index);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	/* increments the ruleset reference */
	rule = prestera_acl_rule_create(ruleset, f->cookie,
					f->common.chain_index);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	err = prestera_flower_parse(block, rule, f);
	if (err)
		goto err_rule_add;

	if (!prestera_acl_ruleset_is_offload(ruleset)) {
		err = prestera_acl_ruleset_offload(ruleset);
		if (err)
			goto err_ruleset_offload;
	}

	err = prestera_acl_rule_add(block->sw, rule);
	if (err)
		goto err_rule_add;

	prestera_acl_ruleset_put(ruleset);
	return 0;

err_ruleset_offload:
err_rule_add:
	prestera_acl_rule_destroy(rule);
err_rule_create:
	prestera_acl_ruleset_put(ruleset);
	return err;
}
461
462void prestera_flower_destroy(struct prestera_flow_block *block,
463			     struct flow_cls_offload *f)
464{
465	struct prestera_acl_ruleset *ruleset;
466	struct prestera_acl_rule *rule;
467
468	ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
469					      f->common.chain_index);
470	if (IS_ERR(ruleset))
471		return;
472
473	rule = prestera_acl_rule_lookup(ruleset, f->cookie);
474	if (rule) {
475		prestera_acl_rule_del(block->sw, rule);
476		prestera_acl_rule_destroy(rule);
477	}
478	prestera_acl_ruleset_put(ruleset);
479}
480
/* Create a chain template: parse the template classifier on a stack
 * rule to derive the keymask, bind that keymask to the chain's ruleset
 * and keep a reference to the ruleset in block->template_list.
 *
 * Returns 0 on success or a negative errno.
 */
int prestera_flower_tmplt_create(struct prestera_flow_block *block,
				 struct flow_cls_offload *f)
{
	struct prestera_flower_template *template;
	struct prestera_acl_ruleset *ruleset;
	struct prestera_acl_rule rule;
	int err;

	/* temporary rule used only to compute the match keymask */
	memset(&rule, 0, sizeof(rule));
	err = prestera_flower_parse(block, &rule, f);
	if (err)
		return err;

	template = kmalloc(sizeof(*template), GFP_KERNEL);
	if (!template) {
		err = -ENOMEM;
		goto err_malloc;
	}

	prestera_acl_rule_keymask_pcl_id_set(&rule, 0);
	ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
					   f->common.chain_index);
	if (IS_ERR_OR_NULL(ruleset)) {
		err = -EINVAL;
		goto err_ruleset_get;
	}

	/* preserve keymask/template to this ruleset */
	err = prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask);
	if (err)
		goto err_ruleset_keymask_set;

	/* skip error, as it is not possible to reject template operation,
	 * so, keep the reference to the ruleset for rules to be added
	 * to that ruleset later. In case of offload fail, the ruleset
	 * will be offloaded again during adding a new rule. Also, it is
	 * unlikely that the ruleset is already offloaded at this stage.
	 */
	prestera_acl_ruleset_offload(ruleset);

	/* keep the reference to the ruleset */
	template->ruleset = ruleset;
	template->chain_index = f->common.chain_index;
	/* NOTE(review): added with list_add_rcu() but removed with plain
	 * list_del() in prestera_flower_template_free() - confirm there
	 * are no lockless readers of template_list.
	 */
	list_add_rcu(&template->list, &block->template_list);
	return 0;

err_ruleset_keymask_set:
	prestera_acl_ruleset_put(ruleset);
err_ruleset_get:
	kfree(template);
err_malloc:
	NL_SET_ERR_MSG_MOD(f->common.extack, "Create chain template failed");
	return err;
}
535
536void prestera_flower_tmplt_destroy(struct prestera_flow_block *block,
537				   struct flow_cls_offload *f)
538{
539	struct prestera_flower_template *template, *tmp;
540
541	list_for_each_entry_safe(template, tmp, &block->template_list, list)
542		if (template->chain_index == f->common.chain_index) {
543			/* put the reference to the ruleset kept in create */
544			prestera_flower_template_free(template);
545			return;
546		}
547}
548
549int prestera_flower_stats(struct prestera_flow_block *block,
550			  struct flow_cls_offload *f)
551{
552	struct prestera_acl_ruleset *ruleset;
553	struct prestera_acl_rule *rule;
554	u64 packets;
555	u64 lastuse;
556	u64 bytes;
557	int err;
558
559	ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
560					      f->common.chain_index);
561	if (IS_ERR(ruleset))
562		return PTR_ERR(ruleset);
563
564	rule = prestera_acl_rule_lookup(ruleset, f->cookie);
565	if (!rule) {
566		err = -EINVAL;
567		goto err_rule_get_stats;
568	}
569
570	err = prestera_acl_rule_get_stats(block->sw->acl, rule, &packets,
571					  &bytes, &lastuse);
572	if (err)
573		goto err_rule_get_stats;
574
575	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
576			  FLOW_ACTION_HW_STATS_DELAYED);
577
578err_rule_get_stats:
579	prestera_acl_ruleset_put(ruleset);
580	return err;
581}
582