// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2021 Marvell.
 *
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitfield.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/ipv6.h>

#include "cn10k.h"
#include "otx2_common.h"
#include "qos.h"

#define CN10K_MAX_BURST_MANTISSA	0x7FFFULL
#define CN10K_MAX_BURST_SIZE		8453888ULL

#define CN10K_TLX_BURST_MANTISSA	GENMASK_ULL(43, 29)
#define CN10K_TLX_BURST_EXPONENT	GENMASK_ULL(47, 44)

#define OTX2_UNSUPP_LSE_DEPTH		GENMASK(6, 4)

#define MCAST_INVALID_GRP		(-1U)

struct otx2_tc_flow_stats {
	u64 bytes;
	u64 pkts;
	u64 used;
};

struct otx2_tc_flow {
	struct list_head		list;
	unsigned long			cookie;
	struct rcu_head			rcu;
	struct otx2_tc_flow_stats	stats;
	spinlock_t			lock; /* lock for stats */
	u16				rq;
	u16				entry;
	u16				leaf_profile;
	bool				is_act_police;
	u32				prio;
	struct npc_install_flow_req	req;
	u32				mcast_grp_idx;
	u64				rate;
	u32				burst;
	bool				is_pps;
};

static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
				      u32 *burst_exp, u32 *burst_mantissa)
{
	int max_burst, max_mantissa;
	unsigned int tmp;

	if (is_dev_otx2(nic->pdev)) {
		max_burst = MAX_BURST_SIZE;
		max_mantissa = MAX_BURST_MANTISSA;
	} else {
		max_burst = CN10K_MAX_BURST_SIZE;
		max_mantissa = CN10K_MAX_BURST_MANTISSA;
	}

	/* Burst is calculated as
	 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
	 * Max supported burst size is 130,816 bytes on OcteonTx2
	 * and 8,453,888 bytes on CN10K.
	 */
	burst = min_t(u32, burst, max_burst);
	if (burst) {
		*burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
		tmp = burst - rounddown_pow_of_two(burst);
		if (burst < max_mantissa)
			*burst_mantissa = tmp * 2;
		else
			*burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
	} else {
		*burst_exp = MAX_BURST_EXPONENT;
		*burst_mantissa = max_mantissa;
	}
}
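
/* Worked example (illustrative): for a requested burst of 3000 bytes on
 * OcteonTx2, ilog2(3000) = 11 so burst_exp = 10, tmp = 3000 - 2048 = 952,
 * and since 3000 >= MAX_BURST_MANTISSA (2047) burst_mantissa = 952 >> 3 = 119.
 * Hardware then reconstructs ((256 + 119) << (1 + 10)) / 256 = 3000 bytes,
 * exactly the requested burst in this case.
 */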

static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp,
				     u32 *mantissa, u32 *div_exp)
{
	u64 tmp;

	/* Rate calculation by hardware
	 *
	 * PIR_ADD = ((256 + mantissa) << exp) / 256
	 * rate = (2 * PIR_ADD) / (1 << div_exp)
	 * The resultant rate is in Mbps.
	 */

	/* 2Mbps to 100Gbps can be expressed with div_exp = 0.
	 * Setting this to '0' will ease the calculation of
	 * exponent and mantissa.
	 */
	*div_exp = 0;

	if (maxrate) {
		*exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0;
		tmp = maxrate - rounddown_pow_of_two(maxrate);
		if (maxrate < MAX_RATE_MANTISSA)
			*mantissa = tmp * 2;
		else
			*mantissa = tmp / (1ULL << (*exp - 7));
	} else {
		/* Instead of disabling rate limiting, set all values to max */
		*exp = MAX_RATE_EXPONENT;
		*mantissa = MAX_RATE_MANTISSA;
	}
}
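
/* Worked example (illustrative): for maxrate = 5000 Mbps, ilog2(5000) = 12 so
 * exp = 11, tmp = 5000 - 4096 = 904, and since 5000 >= MAX_RATE_MANTISSA
 * (2047) mantissa = 904 >> 4 = 56. Hardware computes
 * PIR_ADD = ((256 + 56) << 11) / 256 = 2496, giving rate = 2 * 2496 = 4992
 * Mbps; integer truncation of the mantissa can undershoot the requested rate
 * slightly.
 */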

u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic,
				u64 maxrate, u32 burst)
{
	u32 burst_exp, burst_mantissa;
	u32 exp, mantissa, div_exp;
	u64 regval = 0;

	/* Get exponent and mantissa values from the desired rate */
	otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa);
	otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);

	if (is_dev_otx2(nic->pdev)) {
		regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) |
				FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) |
				FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
				FIELD_PREP(TLX_RATE_EXPONENT, exp) |
				FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
	} else {
		regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) |
				FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) |
				FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
				FIELD_PREP(TLX_RATE_EXPONENT, exp) |
				FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
	}

	return regval;
}

static int otx2_set_matchall_egress_rate(struct otx2_nic *nic,
					 u32 burst, u64 maxrate)
{
	struct otx2_hw *hw = &nic->hw;
	struct nix_txschq_config *req;
	int txschq, err;

	/* All SQs share the same TL4, so pick the first scheduler */
	txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->lvl = NIX_TXSCH_LVL_TL4;
	req->num_regs = 1;
	req->reg[0] = NIX_AF_TL4X_PIR(txschq);
	req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst);

	err = otx2_sync_mbox_msg(&nic->mbox);
	mutex_unlock(&nic->mbox.lock);
	return err;
}
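
/* Example usage (illustrative; assumes a device named eth0):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 egress matchall skip_sw \
 *       action police rate 1gbit burst 64k conform-exceed drop/pipe
 *
 * This reaches otx2_tc_egress_matchall_install() below and programs the
 * TL4 PIR register through otx2_set_matchall_egress_rate() above.
 */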

static int otx2_tc_validate_flow(struct otx2_nic *nic,
				 struct flow_action *actions,
				 struct netlink_ext_ack *extack)
{
	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	if (!flow_action_has_entries(actions)) {
		NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload supports only 1 policing action");
		return -EINVAL;
	}
	return 0;
}

static int otx2_policer_validate(const struct flow_action *action,
				 const struct flow_action_entry *act,
				 struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}
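
/* The checks above describe the only police configuration this driver
 * offloads (illustrative summary): the exceed action must be "drop", the
 * conform action must be "pipe" or "ok" (and "ok" must be the last action),
 * with no peakrate, avrate or overhead configured, i.e. the
 * "conform-exceed drop/pipe" form shown in the examples in this file.
 */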

static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
					   struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct flow_action_entry *entry;
	int err;

	err = otx2_tc_validate_flow(nic, actions, extack);
	if (err)
		return err;

	if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only one Egress MATCHALL ratelimiter can be offloaded");
		return -ENOMEM;
	}

	entry = &cls->rule->action.entries[0];
	switch (entry->id) {
	case FLOW_ACTION_POLICE:
		err = otx2_policer_validate(&cls->rule->action, entry, extack);
		if (err)
			return err;

		if (entry->police.rate_pkt_ps) {
			NL_SET_ERR_MSG_MOD(extack, "QoS offload does not support packets-per-second policing");
			return -EOPNOTSUPP;
		}
		err = otx2_set_matchall_egress_rate(nic, entry->police.burst,
						    otx2_convert_rate(entry->police.rate_bytes_ps));
		if (err)
			return err;
		nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Only police action is supported with Egress MATCHALL offload");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
					  struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	int err;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	err = otx2_set_matchall_egress_rate(nic, 0, 0);
	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
	return err;
}

static int otx2_tc_act_set_hw_police(struct otx2_nic *nic,
				     struct otx2_tc_flow *node)
{
	int rc;

	mutex_lock(&nic->mbox.lock);

	rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
	if (rc) {
		mutex_unlock(&nic->mbox.lock);
		return rc;
	}

	rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile,
				     node->burst, node->rate, node->is_pps);
	if (rc)
		goto free_leaf;

	rc = cn10k_map_unmap_rq_policer(nic, node->rq, node->leaf_profile, true);
	if (rc)
		goto free_leaf;

	mutex_unlock(&nic->mbox.lock);

	return 0;

free_leaf:
	if (cn10k_free_leaf_profile(nic, node->leaf_profile))
		netdev_err(nic->netdev,
			   "Unable to free leaf bandwidth profile(%d)\n",
			   node->leaf_profile);
	mutex_unlock(&nic->mbox.lock);
	return rc;
}

static int otx2_tc_act_set_police(struct otx2_nic *nic,
				  struct otx2_tc_flow *node,
				  struct flow_cls_offload *f,
				  u64 rate, u32 burst, u32 mark,
				  struct npc_install_flow_req *req, bool pps)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct otx2_hw *hw = &nic->hw;
	int rq_idx, rc;

	rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
	if (rq_idx >= hw->rx_queues) {
		NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
		return -EINVAL;
	}

	req->match_id = mark & 0xFFFFULL;
	req->index = rq_idx;
	req->op = NIX_RX_ACTIONOP_UCAST;

	node->is_act_police = true;
	node->rq = rq_idx;
	node->burst = burst;
	node->rate = rate;
	node->is_pps = pps;

	rc = otx2_tc_act_set_hw_police(nic, node);
	if (!rc)
		set_bit(rq_idx, &nic->rq_bmap);

	return rc;
}
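
/* Each offloaded police action claims one receive queue from rq_bmap and
 * attaches a CN10K bandwidth profile to it. Example usage (illustrative;
 * assumes a CN10K device named eth0):
 *
 *   tc filter add dev eth0 ingress protocol ip flower dst_ip 10.0.0.2 \
 *       skip_sw action police rate 100mbit burst 32k conform-exceed drop/pipe
 */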

static int otx2_tc_update_mcast(struct otx2_nic *nic,
				struct npc_install_flow_req *req,
				struct netlink_ext_ack *extack,
				struct otx2_tc_flow *node,
				struct nix_mcast_grp_update_req *ureq,
				u8 num_intf)
{
	struct nix_mcast_grp_update_req *grp_update_req;
	struct nix_mcast_grp_create_req *creq;
	struct nix_mcast_grp_create_rsp *crsp;
	u32 grp_index;
	int rc;

	mutex_lock(&nic->mbox.lock);
	creq = otx2_mbox_alloc_msg_nix_mcast_grp_create(&nic->mbox);
	if (!creq) {
		rc = -ENOMEM;
		goto error;
	}

	creq->dir = NIX_MCAST_INGRESS;
	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to create multicast group");
		goto error;
	}

	crsp = (struct nix_mcast_grp_create_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
								    0, &creq->hdr);
	if (IS_ERR(crsp)) {
		rc = PTR_ERR(crsp);
		goto error;
	}

	grp_index = crsp->mcast_grp_idx;
	grp_update_req = otx2_mbox_alloc_msg_nix_mcast_grp_update(&nic->mbox);
	if (!grp_update_req) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group");
		rc = -ENOMEM;
		goto error;
	}

	ureq->op = NIX_MCAST_OP_ADD_ENTRY;
	ureq->mcast_grp_idx = grp_index;
	ureq->num_mce_entry = num_intf;
	ureq->pcifunc[0] = nic->pcifunc;
	ureq->channel[0] = nic->hw.tx_chan_base;

	ureq->dest_type[0] = NIX_RX_RSS;
	ureq->rq_rss_index[0] = 0;
	memcpy(&ureq->hdr, &grp_update_req->hdr, sizeof(struct mbox_msghdr));
	memcpy(grp_update_req, ureq, sizeof(struct nix_mcast_grp_update_req));

	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group");
		goto error;
	}

	mutex_unlock(&nic->mbox.lock);
	req->op = NIX_RX_ACTIONOP_MCAST;
	req->index = grp_index;
	node->mcast_grp_idx = grp_index;
	return 0;

error:
	mutex_unlock(&nic->mbox.lock);
	return rc;
}

static int otx2_tc_parse_actions(struct otx2_nic *nic,
				 struct flow_action *flow_action,
				 struct npc_install_flow_req *req,
				 struct flow_cls_offload *f,
				 struct otx2_tc_flow *node)
{
	struct nix_mcast_grp_update_req dummy_grp_update_req = { 0 };
	struct netlink_ext_ack *extack = f->common.extack;
	bool pps = false, mcast = false;
	struct flow_action_entry *act;
	struct net_device *target;
	struct otx2_nic *priv;
	u32 burst, mark = 0;
	u8 nr_police = 0;
	u8 num_intf = 1;
	int err, i;
	u64 rate;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
		return -EINVAL;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			req->op = NIX_RX_ACTIONOP_DROP;
			return 0;
		case FLOW_ACTION_ACCEPT:
			req->op = NIX_RX_ACTION_DEFAULT;
			return 0;
		case FLOW_ACTION_REDIRECT_INGRESS:
			target = act->dev;
			priv = netdev_priv(target);
			/* npc_install_flow_req doesn't support passing a target pcifunc */
			if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "can't redirect to other pf/vf");
				return -EOPNOTSUPP;
			}
			req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;

			/* if op is already set; avoid overwriting the same */
			if (!req->op)
				req->op = NIX_RX_ACTION_DEFAULT;
			break;

		case FLOW_ACTION_VLAN_POP:
			req->vtag0_valid = true;
			/* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
			req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
			break;
		case FLOW_ACTION_POLICE:
			/* Ingress ratelimiting is not supported on OcteonTx2 */
			if (is_dev_otx2(nic->pdev)) {
				NL_SET_ERR_MSG_MOD(extack,
					"Ingress policing not supported on this platform");
				return -EOPNOTSUPP;
			}

			err = otx2_policer_validate(flow_action, act, extack);
			if (err)
				return err;

			if (act->police.rate_bytes_ps > 0) {
				rate = act->police.rate_bytes_ps * 8;
				burst = act->police.burst;
			} else if (act->police.rate_pkt_ps > 0) {
				/* The algorithm used to calculate rate
				 * mantissa, exponent values for a given token
				 * rate (token can be byte or packet) requires
				 * token rate to be multiplied by 8.
				 */
				rate = act->police.rate_pkt_ps * 8;
				burst = act->police.burst_pkt;
				pps = true;
			}
			nr_police++;
			break;
		case FLOW_ACTION_MARK:
			mark = act->mark;
			break;

		case FLOW_ACTION_RX_QUEUE_MAPPING:
			req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = act->rx_queue;
			break;

		case FLOW_ACTION_MIRRED_INGRESS:
			target = act->dev;
			priv = netdev_priv(target);
			dummy_grp_update_req.pcifunc[num_intf] = priv->pcifunc;
			dummy_grp_update_req.channel[num_intf] = priv->hw.tx_chan_base;
			dummy_grp_update_req.dest_type[num_intf] = NIX_RX_RSS;
			dummy_grp_update_req.rq_rss_index[num_intf] = 0;
			mcast = true;
			num_intf++;
			break;

		default:
			return -EOPNOTSUPP;
		}
	}

	if (mcast) {
		err = otx2_tc_update_mcast(nic, req, extack, node,
					   &dummy_grp_update_req,
					   num_intf);
		if (err)
			return err;
	}

	if (nr_police > 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "rate limit police offload requires a single action");
		return -EOPNOTSUPP;
	}

	if (nr_police)
		return otx2_tc_act_set_police(nic, node, f, rate, burst,
					      mark, req, pps);

	return 0;
}
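
/* Example usage (illustrative; assumes netdevs eth0 and eth1 that belong to
 * the same PF): mirroring builds a multicast group via otx2_tc_update_mcast()
 * above, while redirect keeps the default RX action with req->vf set:
 *
 *   tc filter add dev eth0 ingress flower dst_mac 01:02:03:04:05:06 \
 *       skip_sw action mirred ingress mirror dev eth1
 *   tc filter add dev eth0 ingress flower dst_mac 0a:0b:0c:0d:0e:0f \
 *       skip_sw action mirred ingress redirect dev eth1
 */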

static int otx2_tc_process_vlan(struct otx2_nic *nic, struct flow_msg *flow_spec,
				struct flow_msg *flow_mask, struct flow_rule *rule,
				struct npc_install_flow_req *req, bool is_inner)
{
	struct flow_match_vlan match;
	u16 vlan_tci, vlan_tci_mask;

	if (is_inner)
		flow_rule_match_cvlan(rule, &match);
	else
		flow_rule_match_vlan(rule, &match);

	if (!eth_type_vlan(match.key->vlan_tpid)) {
		netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
			   ntohs(match.key->vlan_tpid));
		return -EOPNOTSUPP;
	}

	if (!match.mask->vlan_id) {
		struct flow_action_entry *act;
		int i;

		flow_action_for_each(i, act, &rule->action) {
			if (act->id == FLOW_ACTION_DROP) {
				netdev_err(nic->netdev,
					   "vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n",
					   ntohs(match.key->vlan_tpid), match.key->vlan_id);
				return -EOPNOTSUPP;
			}
		}
	}

	if (match.mask->vlan_id ||
	    match.mask->vlan_dei ||
	    match.mask->vlan_priority) {
		vlan_tci = match.key->vlan_id |
			   match.key->vlan_dei << 12 |
			   match.key->vlan_priority << 13;

		vlan_tci_mask = match.mask->vlan_id |
				match.mask->vlan_dei << 12 |
				match.mask->vlan_priority << 13;
		if (is_inner) {
			flow_spec->vlan_itci = htons(vlan_tci);
			flow_mask->vlan_itci = htons(vlan_tci_mask);
			req->features |= BIT_ULL(NPC_INNER_VID);
		} else {
			flow_spec->vlan_tci = htons(vlan_tci);
			flow_mask->vlan_tci = htons(vlan_tci_mask);
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}
	}

	return 0;
}
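
/* Example usage (illustrative; assumes a device named eth0): outer and inner
 * VLAN headers map to NPC_OUTER_VID and NPC_INNER_VID respectively:
 *
 *   tc filter add dev eth0 ingress protocol 802.1Q flower \
 *       vlan_id 100 vlan_prio 3 skip_sw action drop
 */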

static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
				struct flow_cls_offload *f,
				struct npc_install_flow_req *req)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_msg *flow_spec = &req->packet;
	struct flow_msg *flow_mask = &req->mask;
	struct flow_dissector *dissector;
	struct flow_rule *rule;
	u8 ip_proto = 0;

	rule = flow_cls_offload_flow_rule(f);
	dissector = rule->match.dissector;

	if ((dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPSEC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_TCP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) {
		netdev_info(nic->netdev, "unsupported flow used key 0x%llx\n",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);

		/* All EtherTypes can be matched, no hw limitation */
		flow_spec->etype = match.key->n_proto;
		flow_mask->etype = match.mask->n_proto;
		req->features |= BIT_ULL(NPC_ETYPE);

		if (match.mask->ip_proto &&
		    (match.key->ip_proto != IPPROTO_TCP &&
		     match.key->ip_proto != IPPROTO_UDP &&
		     match.key->ip_proto != IPPROTO_SCTP &&
		     match.key->ip_proto != IPPROTO_ICMP &&
		     match.key->ip_proto != IPPROTO_ESP &&
		     match.key->ip_proto != IPPROTO_AH &&
		     match.key->ip_proto != IPPROTO_ICMPV6)) {
			netdev_info(nic->netdev,
				    "ip_proto=0x%x not supported\n",
				    match.key->ip_proto);
			return -EOPNOTSUPP;
		}
		if (match.mask->ip_proto)
			ip_proto = match.key->ip_proto;

		if (ip_proto == IPPROTO_UDP)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (ip_proto == IPPROTO_TCP)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else if (ip_proto == IPPROTO_SCTP)
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		else if (ip_proto == IPPROTO_ICMP)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
		else if (ip_proto == IPPROTO_ICMPV6)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
		else if (ip_proto == IPPROTO_ESP)
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		else if (ip_proto == IPPROTO_AH)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;
		u32 val;

		flow_rule_match_control(rule, &match);
		if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
			NL_SET_ERR_MSG_MOD(extack, "HW doesn't support frag first/later");
			return -EOPNOTSUPP;
		}

		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
			val = match.key->flags & FLOW_DIS_IS_FRAGMENT;
			if (ntohs(flow_spec->etype) == ETH_P_IP) {
				flow_spec->ip_flag = val ? IPV4_FLAG_MORE : 0;
				flow_mask->ip_flag = IPV4_FLAG_MORE;
				req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
			} else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
				flow_spec->next_header = val ?
							 IPPROTO_FRAGMENT : 0;
				flow_mask->next_header = 0xff;
				req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
			} else {
				NL_SET_ERR_MSG_MOD(extack, "flow-type should be either IPv4 or IPv6");
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		if (!is_zero_ether_addr(match.mask->src)) {
			NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
			return -EOPNOTSUPP;
		}

		if (!is_zero_ether_addr(match.mask->dst)) {
			ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst);
			ether_addr_copy(flow_mask->dmac,
					(u8 *)&match.mask->dst);
			req->features |= BIT_ULL(NPC_DMAC);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPSEC)) {
		struct flow_match_ipsec match;

		flow_rule_match_ipsec(rule, &match);
		if (!match.mask->spi) {
			NL_SET_ERR_MSG_MOD(extack, "spi index not specified");
			return -EOPNOTSUPP;
		}
		if (ip_proto != IPPROTO_ESP &&
		    ip_proto != IPPROTO_AH) {
			NL_SET_ERR_MSG_MOD(extack,
					   "SPI index is valid only for ESP/AH proto");
			return -EOPNOTSUPP;
		}

		flow_spec->spi = match.key->spi;
		flow_mask->spi = match.mask->spi;
		req->features |= BIT_ULL(NPC_IPSEC_SPI);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
		    match.mask->tos) {
			NL_SET_ERR_MSG_MOD(extack, "tos not supported");
			return -EOPNOTSUPP;
		}
		if (match.mask->ttl) {
			NL_SET_ERR_MSG_MOD(extack, "ttl not supported");
			return -EOPNOTSUPP;
		}
		flow_spec->tos = match.key->tos;
		flow_mask->tos = match.mask->tos;
		req->features |= BIT_ULL(NPC_TOS);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		int ret;

		ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, false);
		if (ret)
			return ret;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		int ret;

		ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, true);
		if (ret)
			return ret;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);

		flow_spec->ip4dst = match.key->dst;
		flow_mask->ip4dst = match.mask->dst;
		req->features |= BIT_ULL(NPC_DIP_IPV4);

		flow_spec->ip4src = match.key->src;
		flow_mask->ip4src = match.mask->src;
		req->features |= BIT_ULL(NPC_SIP_IPV4);
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);

		if (ipv6_addr_loopback(&match.key->dst) ||
		    ipv6_addr_loopback(&match.key->src)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Flow matching IPv6 loopback addr not supported");
			return -EOPNOTSUPP;
		}

		if (!ipv6_addr_any(&match.mask->dst)) {
			memcpy(&flow_spec->ip6dst,
			       (struct in6_addr *)&match.key->dst,
			       sizeof(flow_spec->ip6dst));
			memcpy(&flow_mask->ip6dst,
			       (struct in6_addr *)&match.mask->dst,
			       sizeof(flow_spec->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		if (!ipv6_addr_any(&match.mask->src)) {
			memcpy(&flow_spec->ip6src,
			       (struct in6_addr *)&match.key->src,
			       sizeof(flow_spec->ip6src));
			memcpy(&flow_mask->ip6src,
			       (struct in6_addr *)&match.mask->src,
			       sizeof(flow_spec->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);

		flow_spec->dport = match.key->dst;
		flow_mask->dport = match.mask->dst;

		if (flow_mask->dport) {
			if (ip_proto == IPPROTO_UDP)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (ip_proto == IPPROTO_TCP)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else if (ip_proto == IPPROTO_SCTP)
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}

		flow_spec->sport = match.key->src;
		flow_mask->sport = match.mask->src;

		if (flow_mask->sport) {
			if (ip_proto == IPPROTO_UDP)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (ip_proto == IPPROTO_TCP)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else if (ip_proto == IPPROTO_SCTP)
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp match;

		flow_rule_match_tcp(rule, &match);

		flow_spec->tcp_flags = match.key->flags;
		flow_mask->tcp_flags = match.mask->flags;
		req->features |= BIT_ULL(NPC_TCP_FLAGS);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_match_mpls match;
		u8 bit;

		flow_rule_match_mpls(rule, &match);

		if (match.mask->used_lses & OTX2_UNSUPP_LSE_DEPTH) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported LSE depth for MPLS match offload");
			return -EOPNOTSUPP;
		}

		for_each_set_bit(bit, (unsigned long *)&match.mask->used_lses,
				 FLOW_DIS_MPLS_MAX) {
			/* check if any of the fields LABEL,TC,BOS are set */
			if (*((u32 *)&match.mask->ls[bit]) &
			    OTX2_FLOWER_MASK_MPLS_NON_TTL) {
				/* Hardware will capture 4 byte MPLS header into
				 * two fields NPC_MPLSX_LBTCBOS and NPC_MPLSX_TTL.
				 * Derive the associated NPC key based on header
				 * index and offset.
				 */

				req->features |= BIT_ULL(NPC_MPLS1_LBTCBOS +
							 2 * bit);
				flow_spec->mpls_lse[bit] =
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB,
						   match.key->ls[bit].mpls_label) |
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC,
						   match.key->ls[bit].mpls_tc) |
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS,
						   match.key->ls[bit].mpls_bos);

				flow_mask->mpls_lse[bit] =
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB,
						   match.mask->ls[bit].mpls_label) |
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC,
						   match.mask->ls[bit].mpls_tc) |
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS,
						   match.mask->ls[bit].mpls_bos);
			}

			if (match.mask->ls[bit].mpls_ttl) {
				req->features |= BIT_ULL(NPC_MPLS1_TTL +
							 2 * bit);
				flow_spec->mpls_lse[bit] |=
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL,
						   match.key->ls[bit].mpls_ttl);
				flow_mask->mpls_lse[bit] |=
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL,
						   match.mask->ls[bit].mpls_ttl);
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_match_icmp match;

		flow_rule_match_icmp(rule, &match);

		flow_spec->icmp_type = match.key->type;
		flow_mask->icmp_type = match.mask->type;
		req->features |= BIT_ULL(NPC_TYPE_ICMP);

		flow_spec->icmp_code = match.key->code;
		flow_mask->icmp_code = match.mask->code;
		req->features |= BIT_ULL(NPC_CODE_ICMP);
	}
	return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}
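
/* Example usage (illustrative; assumes a device named eth0): a rule combining
 * several of the match keys parsed above into a single MCAM entry:
 *
 *   tc filter add dev eth0 ingress protocol ip flower ip_proto tcp \
 *       dst_ip 10.0.0.2 dst_port 80 skip_sw action drop
 */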

static void otx2_destroy_tc_flow_list(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_tc_flow *iter, *tmp;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list_tc, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
}

static struct otx2_tc_flow *otx2_tc_get_entry_by_cookie(struct otx2_flow_config *flow_cfg,
							unsigned long cookie)
{
	struct otx2_tc_flow *tmp;

	list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
		if (tmp->cookie == cookie)
			return tmp;
	}

	return NULL;
}

static struct otx2_tc_flow *otx2_tc_get_entry_by_index(struct otx2_flow_config *flow_cfg,
						       int index)
{
	struct otx2_tc_flow *tmp;
	int i = 0;

	list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
		if (i == index)
			return tmp;
		i++;
	}

	return NULL;
}

static void otx2_tc_del_from_flow_list(struct otx2_flow_config *flow_cfg,
				       struct otx2_tc_flow *node)
{
	struct list_head *pos, *n;
	struct otx2_tc_flow *tmp;

	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		tmp = list_entry(pos, struct otx2_tc_flow, list);
		if (node == tmp) {
			list_del(&node->list);
			return;
		}
	}
}

static int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg,
				    struct otx2_tc_flow *node)
{
	struct list_head *pos, *n;
	struct otx2_tc_flow *tmp;
	int index = 0;

	/* If the flow list is empty then add the new node */
	if (list_empty(&flow_cfg->flow_list_tc)) {
		list_add(&node->list, &flow_cfg->flow_list_tc);
		return index;
	}

	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		tmp = list_entry(pos, struct otx2_tc_flow, list);
		if (node->prio < tmp->prio)
			break;
		index++;
	}

	list_add(&node->list, pos->prev);
	return index;
}

static int otx2_add_mcam_flow_entry(struct otx2_nic *nic, struct npc_install_flow_req *req)
{
	struct npc_install_flow_req *tmp_req;
	int err;

	mutex_lock(&nic->mbox.lock);
	tmp_req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!tmp_req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	memcpy(tmp_req, req, sizeof(struct npc_install_flow_req));
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to install MCAM flow entry %d\n",
			   req->entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	mutex_unlock(&nic->mbox.lock);
	return 0;
}

static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry, u16 *cntr_val)
{
	struct npc_delete_flow_rsp *rsp;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
			   entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	if (cntr_val) {
		rsp = (struct npc_delete_flow_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
								      0, &req->hdr);
		if (IS_ERR(rsp)) {
			netdev_err(nic->netdev, "Failed to get MCAM delete response for entry %d\n",
				   entry);
			mutex_unlock(&nic->mbox.lock);
			return -EFAULT;
		}

		*cntr_val = rsp->cntr_val;
	}

	mutex_unlock(&nic->mbox.lock);
	return 0;
}

static int otx2_tc_update_mcam_table_del_req(struct otx2_nic *nic,
					     struct otx2_flow_config *flow_cfg,
					     struct otx2_tc_flow *node)
{
	struct list_head *pos, *n;
	struct otx2_tc_flow *tmp;
	int i = 0, index = 0;
	u16 cntr_val = 0;

	/* Find and delete the entry from the list and re-install
	 * all the entries from beginning to the index of the
	 * deleted entry to higher mcam indexes.
	 */
	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		tmp = list_entry(pos, struct otx2_tc_flow, list);
		if (node == tmp) {
			list_del(&tmp->list);
			break;
		}

		otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
		tmp->entry++;
		tmp->req.entry = tmp->entry;
		tmp->req.cntr_val = cntr_val;
		index++;
	}

	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		if (i == index)
			break;

		tmp = list_entry(pos, struct otx2_tc_flow, list);
		otx2_add_mcam_flow_entry(nic, &tmp->req);
		i++;
	}

	return 0;
}

static int otx2_tc_update_mcam_table_add_req(struct otx2_nic *nic,
					     struct otx2_flow_config *flow_cfg,
					     struct otx2_tc_flow *node)
{
	int mcam_idx = flow_cfg->max_flows - flow_cfg->nr_flows - 1;
	struct otx2_tc_flow *tmp;
	int list_idx, i;
	u16 cntr_val = 0;

	/* Find the index of the entry(list_idx) whose priority
	 * is greater than the new entry and re-install all
	 * the entries from beginning to list_idx to higher
	 * mcam indexes.
	 */
	list_idx = otx2_tc_add_to_flow_list(flow_cfg, node);
	for (i = 0; i < list_idx; i++) {
		tmp = otx2_tc_get_entry_by_index(flow_cfg, i);
		if (!tmp)
			return -ENOMEM;

		otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
		tmp->entry = flow_cfg->flow_ent[mcam_idx];
		tmp->req.entry = tmp->entry;
		tmp->req.cntr_val = cntr_val;
		otx2_add_mcam_flow_entry(nic, &tmp->req);
		mcam_idx++;
	}

	return mcam_idx;
}
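
/* Illustrative walk-through (assuming a lower MCAM entry index means a higher
 * match priority): with max_flows = 8 and two flows of prio 10 and 30 sitting
 * in flow_ent[6] and flow_ent[7], adding a prio 20 flow gives list_idx = 1
 * and an initial mcam_idx of 5. The prio 10 flow is re-installed at
 * flow_ent[5], the new flow takes the returned flow_ent[6], and prio 30 stays
 * at flow_ent[7], so MCAM order again follows tc priority order.
 */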

static int otx2_tc_update_mcam_table(struct otx2_nic *nic,
				     struct otx2_flow_config *flow_cfg,
				     struct otx2_tc_flow *node,
				     bool add_req)
{
	if (add_req)
		return otx2_tc_update_mcam_table_add_req(nic, flow_cfg, node);

	return otx2_tc_update_mcam_table_del_req(nic, flow_cfg, node);
}

static int otx2_tc_del_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct nix_mcast_grp_destroy_req *grp_destroy_req;
	struct otx2_tc_flow *flow_node;
	int err;

	flow_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
	if (!flow_node) {
		netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
			   tc_flow_cmd->cookie);
		return -EINVAL;
	}

	if (flow_node->is_act_police) {
		__clear_bit(flow_node->rq, &nic->rq_bmap);

		if (nic->flags & OTX2_FLAG_INTF_DOWN)
			goto free_mcam_flow;

		mutex_lock(&nic->mbox.lock);

		err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
						 flow_node->leaf_profile, false);
		if (err)
			netdev_err(nic->netdev,
				   "Unmapping RQ %d & profile %d failed\n",
				   flow_node->rq, flow_node->leaf_profile);

		err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile);
		if (err)
			netdev_err(nic->netdev,
				   "Unable to free leaf bandwidth profile(%d)\n",
				   flow_node->leaf_profile);

		mutex_unlock(&nic->mbox.lock);
	}
	/* Remove the multicast/mirror related nodes */
	if (flow_node->mcast_grp_idx != MCAST_INVALID_GRP) {
		mutex_lock(&nic->mbox.lock);
		grp_destroy_req = otx2_mbox_alloc_msg_nix_mcast_grp_destroy(&nic->mbox);
		grp_destroy_req->mcast_grp_idx = flow_node->mcast_grp_idx;
		otx2_sync_mbox_msg(&nic->mbox);
		mutex_unlock(&nic->mbox.lock);
	}
free_mcam_flow:
	otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
	otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false);
	kfree_rcu(flow_node, rcu);
	flow_cfg->nr_flows--;
	return 0;
}

static int otx2_tc_add_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct otx2_tc_flow *new_node, *old_node;
	struct npc_install_flow_req *req, dummy;
	int rc, err, mcam_idx;

	if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
		return -ENOMEM;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	if (flow_cfg->nr_flows == flow_cfg->max_flows) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Free MCAM entry not available to add the flow");
		return -ENOMEM;
	}

	/* Allocate memory for the new flow and its node */
	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node)
		return -ENOMEM;
	spin_lock_init(&new_node->lock);
	new_node->cookie = tc_flow_cmd->cookie;
	new_node->prio = tc_flow_cmd->common.prio;
	new_node->mcast_grp_idx = MCAST_INVALID_GRP;

	memset(&dummy, 0, sizeof(struct npc_install_flow_req));

	rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy);
	if (rc) {
		kfree_rcu(new_node, rcu);
		return rc;
	}

	/* If a flow exists with the same cookie, delete it */
	old_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
	if (old_node)
		otx2_tc_del_flow(nic, tc_flow_cmd);

	mcam_idx = otx2_tc_update_mcam_table(nic, flow_cfg, new_node, true);
	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		rc = -ENOMEM;
		goto free_leaf;
	}

	memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
	memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
	req->channel = nic->hw.rx_chan_base;
	req->entry = flow_cfg->flow_ent[mcam_idx];
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	new_node->entry = req->entry;

	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
		mutex_unlock(&nic->mbox.lock);
		goto free_leaf;
	}

	mutex_unlock(&nic->mbox.lock);
	memcpy(&new_node->req, req, sizeof(struct npc_install_flow_req));

	flow_cfg->nr_flows++;
	return 0;

free_leaf:
	otx2_tc_del_from_flow_list(flow_cfg, new_node);
	if (new_node->is_act_police) {
		mutex_lock(&nic->mbox.lock);

		err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
						 new_node->leaf_profile, false);
		if (err)
			netdev_err(nic->netdev,
				   "Unmapping RQ %d & profile %d failed\n",
				   new_node->rq, new_node->leaf_profile);
		err = cn10k_free_leaf_profile(nic, new_node->leaf_profile);
		if (err)
			netdev_err(nic->netdev,
				   "Unable to free leaf bandwidth profile(%d)\n",
				   new_node->leaf_profile);

		__clear_bit(new_node->rq, &nic->rq_bmap);

		mutex_unlock(&nic->mbox.lock);
	}
	/* Free the node only after the police state referencing it is torn down */
	kfree_rcu(new_node, rcu);

	return rc;
}

static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
				  struct flow_cls_offload *tc_flow_cmd)
{
	struct npc_mcam_get_stats_req *req;
	struct npc_mcam_get_stats_rsp *rsp;
	struct otx2_tc_flow_stats *stats;
	struct otx2_tc_flow *flow_node;
	int err;

	flow_node = otx2_tc_get_entry_by_cookie(nic->flow_cfg, tc_flow_cmd->cookie);
	if (!flow_node) {
		netdev_info(nic->netdev, "tc flow not found for cookie 0x%lx\n",
			    tc_flow_cmd->cookie);
		return -EINVAL;
	}

	mutex_lock(&nic->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_node->entry;

	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n",
			   req->entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
								 0, &req->hdr);
	if (IS_ERR(rsp)) {
		mutex_unlock(&nic->mbox.lock);
		return PTR_ERR(rsp);
	}

	mutex_unlock(&nic->mbox.lock);

	if (!rsp->stat_ena)
		return -EINVAL;

	stats = &flow_node->stats;

	spin_lock(&flow_node->lock);
	flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0,
			  FLOW_ACTION_HW_STATS_IMMEDIATE);
	stats->pkts = rsp->stat;
	spin_unlock(&flow_node->lock);

	return 0;
}
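
/* Only the MCAM packet counter is maintained, so byte and drop counts are
 * reported as zero above. Example usage (illustrative; assumes a device
 * named eth0):
 *
 *   tc -s filter show dev eth0 ingress
 */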

static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
				    struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return otx2_tc_add_flow(nic, cls_flower);
	case FLOW_CLS_DESTROY:
		return otx2_tc_del_flow(nic, cls_flower);
	case FLOW_CLS_STATS:
		return otx2_tc_get_flow_stats(nic, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
					    struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct flow_action_entry *entry;
	u64 rate;
	int err;

	err = otx2_tc_validate_flow(nic, actions, extack);
	if (err)
		return err;

	if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only one ingress MATCHALL ratelimiter can be offloaded");
		return -ENOMEM;
	}

	entry = &cls->rule->action.entries[0];
	switch (entry->id) {
	case FLOW_ACTION_POLICE:
		/* Ingress ratelimiting is not supported on OcteonTx2 */
		if (is_dev_otx2(nic->pdev)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Ingress policing not supported on this platform");
			return -EOPNOTSUPP;
		}

		err = cn10k_alloc_matchall_ipolicer(nic);
		if (err)
			return err;

		/* Convert to bits per second */
		rate = entry->police.rate_bytes_ps * 8;
		err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate);
		if (err)
			return err;
		nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Only police action supported with Ingress MATCHALL offload");
		return -EOPNOTSUPP;
	}

	return 0;
}
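
/* Example usage (illustrative; assumes a CN10K device named eth0):
 *
 *   tc filter add dev eth0 ingress matchall skip_sw \
 *       action police rate 2gbit burst 128k conform-exceed drop/pipe
 */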

static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic,
					   struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	int err;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	err = cn10k_free_matchall_ipolicer(nic);
	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
	return err;
}

static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic,
					  struct tc_cls_matchall_offload *cls_matchall)
{
	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return otx2_tc_ingress_matchall_install(nic, cls_matchall);
	case TC_CLSMATCHALL_DESTROY:
		return otx2_tc_ingress_matchall_delete(nic, cls_matchall);
	case TC_CLSMATCHALL_STATS:
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;
	bool ntuple;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	ntuple = nic->netdev->features & NETIF_F_NTUPLE;
	switch (type) {
	case TC_SETUP_CLSFLOWER:
		if (ntuple) {
			netdev_warn(nic->netdev,
				    "Can't install TC flower offload rule when NTUPLE is active\n");
			return -EOPNOTSUPP;
		}

		return otx2_setup_tc_cls_flower(nic, type_data);
	case TC_SETUP_CLSMATCHALL:
		return otx2_setup_tc_ingress_matchall(nic, type_data);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic,
					 struct tc_cls_matchall_offload *cls_matchall)
{
	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return otx2_tc_egress_matchall_install(nic, cls_matchall);
	case TC_CLSMATCHALL_DESTROY:
		return otx2_tc_egress_matchall_delete(nic, cls_matchall);
	case TC_CLSMATCHALL_STATS:
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return otx2_setup_tc_egress_matchall(nic, type_data);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static LIST_HEAD(otx2_block_cb_list);

static int otx2_setup_tc_block(struct net_device *netdev,
			       struct flow_block_offload *f)
{
	struct otx2_nic *nic = netdev_priv(netdev);
	flow_setup_cb_t *cb;
	bool ingress;

	if (f->block_shared)
		return -EOPNOTSUPP;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = otx2_setup_tc_block_ingress_cb;
		ingress = true;
	} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = otx2_setup_tc_block_egress_cb;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb,
					  nic, nic, ingress);
}

int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return otx2_setup_tc_block(netdev, type_data);
	case TC_SETUP_QDISC_HTB:
		return otx2_setup_tc_htb(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(otx2_setup_tc);

int otx2_init_tc(struct otx2_nic *nic)
{
	/* Exclude receive queue 0 from being used for police actions */
	set_bit(0, &nic->rq_bmap);

	if (!nic->flow_cfg) {
		netdev_err(nic->netdev,
			   "Can't init TC, nic->flow_cfg is not setup\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(otx2_init_tc);

void otx2_shutdown_tc(struct otx2_nic *nic)
{
	otx2_destroy_tc_flow_list(nic);
}
EXPORT_SYMBOL(otx2_shutdown_tc);

static void otx2_tc_config_ingress_rule(struct otx2_nic *nic,
					struct otx2_tc_flow *node)
{
	struct npc_install_flow_req *req;

	if (otx2_tc_act_set_hw_police(nic, node))
		return;

	mutex_lock(&nic->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!req)
		goto err;

	memcpy(req, &node->req, sizeof(struct npc_install_flow_req));

	if (otx2_sync_mbox_msg(&nic->mbox))
		netdev_err(nic->netdev,
			   "Failed to install MCAM flow entry for ingress rule\n");
err:
	mutex_unlock(&nic->mbox.lock);
}

void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic)
{
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct otx2_tc_flow *node;

	/* If any ingress policer rules exist for the interface then
	 * apply those rules. Ingress policer rules depend on bandwidth
	 * profiles linked to the receive queues. Since no receive queues
	 * exist when interface is down, ingress policer rules are stored
	 * and configured in hardware after all receive queues are allocated
	 * in otx2_open.
	 */
	list_for_each_entry(node, &flow_cfg->flow_list_tc, list) {
		if (node->is_act_police)
			otx2_tc_config_ingress_rule(nic, node);
	}
}
EXPORT_SYMBOL(otx2_tc_apply_ingress_police_rules);