// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <net/ipv6.h>
#include <linux/sort.h>

#include "otx2_common.h"

#define OTX2_DEFAULT_ACTION	0x1

static int otx2_mcam_entry_init(struct otx2_nic *pfvf);

struct otx2_flow {
	struct ethtool_rx_flow_spec flow_spec;
	struct list_head list;
	u32 location;
	u32 entry;
	bool is_vf;
	u8 rss_ctx_id;
#define DMAC_FILTER_RULE		BIT(0)
#define PFC_FLOWCTRL_RULE		BIT(1)
	u16 rule_type;
	int vf;
};

enum dmac_req {
	DMAC_ADDR_UPDATE,
	DMAC_ADDR_DEL
};

static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
{
	devm_kfree(pfvf->dev, flow_cfg->flow_ent);
	flow_cfg->flow_ent = NULL;
	flow_cfg->max_flows = 0;
}

static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	int ent, err;

	if (!flow_cfg->max_flows)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	for (ent = 0; ent < flow_cfg->max_flows; ent++) {
		req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
		if (!req)
			break;

		req->entry = flow_cfg->flow_ent[ent];

		/* Send message to AF to free MCAM entries */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err)
			break;
	}
	mutex_unlock(&pfvf->mbox.lock);
	otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
	return 0;
}

static int mcam_entry_cmp(const void *a, const void *b)
{
	return *(u16 *)a - *(u16 *)b;
}

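/* Allocate @count non-contiguous MCAM entries for ntuple filters. Entries are
 * requested from the AF in chunks (see the request loop below), kept sorted so
 * that ntuple rule locations map 1:1 onto MCAM entries, and, when running on a
 * PF, requested at higher priority than the first default entry (def_ent[0]).
 */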
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int ent, allocated = 0;

	/* Free current ones and allocate new ones with requested count */
	otx2_free_ntuple_mcam_entries(pfvf);

	if (!count)
		return 0;

	flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
						sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->flow_ent) {
		netdev_err(pfvf->netdev,
			   "%s: Unable to allocate memory for flow entries\n",
			    __func__);
		return -ENOMEM;
	}

	mutex_lock(&pfvf->mbox.lock);

	/* At most NPC_MAX_NONCONTIG_ENTRIES MCAM entries can be allocated
	 * in a single request.
	 */
	while (allocated < count) {
		req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
		if (!req)
			goto exit;

		req->contig = false;
		req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
				NPC_MAX_NONCONTIG_ENTRIES : count - allocated;

		/* Allocate higher priority entries for the PF, so that the
		 * VFs' entries end up above the PF's.
		 */
		if (!is_otx2_vf(pfvf->pcifunc)) {
			req->priority = NPC_MCAM_HIGHER_PRIO;
			req->ref_entry = flow_cfg->def_ent[0];
		}

		/* Send message to AF */
		if (otx2_sync_mbox_msg(&pfvf->mbox))
			goto exit;

		rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
			(&pfvf->mbox.mbox, 0, &req->hdr);

		for (ent = 0; ent < rsp->count; ent++)
			flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];

		allocated += rsp->count;

		/* If this request is not fulfilled, no need to send
		 * further requests.
		 */
		if (rsp->count != req->count)
			break;
	}

	/* Multiple MCAM entry alloc requests could result in non-sequential
	 * MCAM entries in the flow_ent[] array. Sort them in an ascending order,
	 * otherwise user installed ntuple filter index and MCAM entry index will
	 * not be in sync.
	 */
	if (allocated)
		sort(&flow_cfg->flow_ent[0], allocated,
		     sizeof(flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);

exit:
	mutex_unlock(&pfvf->mbox.lock);

	flow_cfg->max_flows = allocated;

	if (allocated) {
		pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
		pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
	}

	if (allocated != count)
		netdev_info(pfvf->netdev,
			    "Unable to allocate %d MCAM entries, got only %d\n",
			    count, allocated);
	return allocated;
}
EXPORT_SYMBOL(otx2_alloc_mcam_entries);

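/* Allocate the default MCAM entries (VF VLAN, unicast and RX VLAN filters)
 * and record their offsets inside flow_cfg->def_ent[]:
 *
 *   [0 .. vf_vlan_max_flows - 1]                  VF VLAN entries
 *   [unicast_offset .. +OTX2_MAX_UNICAST_FLOWS-1] unicast MAC filter entries
 *   [rx_vlan_offset]                              RX VLAN offload entry
 *
 * Ntuple filter entries are allocated separately at the end via
 * otx2_alloc_mcam_entries().
 */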
static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_get_field_status_req *freq;
	struct npc_get_field_status_rsp *frsp;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int vf_vlan_max_flows;
	int ent, count;

	vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
	count = OTX2_MAX_UNICAST_FLOWS +
			OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;

	flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
					       sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->def_ent)
		return -ENOMEM;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->contig = false;
	req->count = count;

	/* Send message to AF */
	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
		mutex_unlock(&pfvf->mbox.lock);
		return -EINVAL;
	}

	rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
	       (&pfvf->mbox.mbox, 0, &req->hdr);

	if (rsp->count != req->count) {
		netdev_info(pfvf->netdev,
			    "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n");
		mutex_unlock(&pfvf->mbox.lock);
		devm_kfree(pfvf->dev, flow_cfg->def_ent);
		return 0;
	}

	for (ent = 0; ent < rsp->count; ent++)
		flow_cfg->def_ent[ent] = rsp->entry_list[ent];

	flow_cfg->vf_vlan_offset = 0;
	flow_cfg->unicast_offset = vf_vlan_max_flows;
	flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
					OTX2_MAX_UNICAST_FLOWS;
	pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;

	/* Check if NPC_DMAC field is supported
	 * by the mkex profile before setting VLAN support flag.
	 */
	freq = otx2_mbox_alloc_msg_npc_get_field_status(&pfvf->mbox);
	if (!freq) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	freq->field = NPC_DMAC;
	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
		mutex_unlock(&pfvf->mbox.lock);
		return -EINVAL;
	}

	frsp = (struct npc_get_field_status_rsp *)otx2_mbox_get_rsp
	       (&pfvf->mbox.mbox, 0, &freq->hdr);

	if (frsp->enable) {
		pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
		pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
	}

	pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	/* Allocate entries for Ntuple filters */
	count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
	if (count <= 0) {
		otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
		return 0;
	}

	pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;

	return 0;
}

/* TODO: revisit the size */
#define OTX2_DMAC_FLTR_BITMAP_SZ (4 * 2048 + 32)

int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg;

	pfvf->flow_cfg = devm_kzalloc(pfvf->dev,
				      sizeof(struct otx2_flow_config),
				      GFP_KERNEL);
	if (!pfvf->flow_cfg)
		return -ENOMEM;

	pfvf->flow_cfg->dmacflt_bmap = devm_kcalloc(pfvf->dev,
						    BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ),
						    sizeof(long), GFP_KERNEL);
	if (!pfvf->flow_cfg->dmacflt_bmap)
		return -ENOMEM;

	flow_cfg = pfvf->flow_cfg;
	INIT_LIST_HEAD(&flow_cfg->flow_list);
	INIT_LIST_HEAD(&flow_cfg->flow_list_tc);
	flow_cfg->max_flows = 0;

	return 0;
}
EXPORT_SYMBOL(otx2vf_mcam_flow_init);

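/* PF-side flow init: allocate flow_cfg and the DMAC filter bitmap, reserve
 * the default and ntuple MCAM entries via otx2_mcam_entry_init(), set up the
 * unicast MAC table, and finally query the CGX/RPM block for the number of
 * available hardware DMAC filters via otx2_dmacflt_get_max_cnt().
 */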
int otx2_mcam_flow_init(struct otx2_nic *pf)
{
	int err;

	pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
				    GFP_KERNEL);
	if (!pf->flow_cfg)
		return -ENOMEM;

	pf->flow_cfg->dmacflt_bmap = devm_kcalloc(pf->dev,
						  BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ),
						  sizeof(long), GFP_KERNEL);
	if (!pf->flow_cfg->dmacflt_bmap)
		return -ENOMEM;

	INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
	INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc);

	/* Allocate bare minimum number of MCAM entries needed for
	 * unicast and ntuple filters.
	 */
	err = otx2_mcam_entry_init(pf);
	if (err)
		return err;

	/* Check if MCAM entries were allocated */
	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return 0;

	pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
					* OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
	if (!pf->mac_table)
		return -ENOMEM;

	otx2_dmacflt_get_max_cnt(pf);

	/* DMAC filters are not allocated */
	if (!pf->flow_cfg->dmacflt_max_flows)
		return 0;

	pf->flow_cfg->bmap_to_dmacindex =
			devm_kzalloc(pf->dev, sizeof(u32) *
				     pf->flow_cfg->dmacflt_max_flows,
				     GFP_KERNEL);

	if (!pf->flow_cfg->bmap_to_dmacindex)
		return -ENOMEM;

	pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;

	return 0;
}

void otx2_mcam_flow_del(struct otx2_nic *pf)
{
	otx2_destroy_mcam_flows(pf);
}
EXPORT_SYMBOL(otx2_mcam_flow_del);

/* On success adds an MCAM entry.
 * On failure enable promiscuous mode.
 */
static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
{
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	struct npc_install_flow_req *req;
	int err, i;

	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return -ENOMEM;

	/* No free MCAM entries, or the UC list is larger than what was allotted */
	if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
		return -ENOMEM;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* Unicast offset starts at 32; entries 0..31 are used for ntuple */
	for (i = 0; i <  OTX2_MAX_UNICAST_FLOWS; i++) {
		if (pf->mac_table[i].inuse)
			continue;
		ether_addr_copy(pf->mac_table[i].addr, mac);
		pf->mac_table[i].inuse = true;
		pf->mac_table[i].mcam_entry =
			flow_cfg->def_ent[i + flow_cfg->unicast_offset];
		req->entry =  pf->mac_table[i].mcam_entry;
		break;
	}

	ether_addr_copy(req->packet.dmac, mac);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);

	if (!bitmap_empty(pf->flow_cfg->dmacflt_bmap,
			  pf->flow_cfg->dmacflt_max_flows))
		netdev_warn(netdev,
			    "Add %pM to CGX/RPM DMAC filters list as well\n",
			    mac);

	return otx2_do_add_macfilter(pf, mac);
}

static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
				       int *mcam_entry)
{
	int i;

	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (!pf->mac_table[i].inuse)
			continue;

		if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
			*mcam_entry = pf->mac_table[i].mcam_entry;
			pf->mac_table[i].inuse = false;
			return true;
		}
	}
	return false;
}

int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct npc_delete_flow_req *req;
	int err, mcam_entry;

	/* Check whether an MCAM entry exists for the given MAC */
	if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
		return 0;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}
	req->entry = mcam_entry;
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location)
			return iter;
	}

	return NULL;
}

static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	struct list_head *head = &pfvf->flow_cfg->flow_list;
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location > flow->location)
			break;
		head = &iter->list;
	}

	list_add(&flow->list, head);
}

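/* Report the total number of rule locations visible to ethtool. When DMAC
 * filter rules are in use (or all ntuple entries are consumed), locations
 * beyond max_flows map to CGX/RPM DMAC filter slots, so the DMAC filter
 * capacity is reported on top of the ntuple capacity.
 */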
int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
{
	if (!flow_cfg)
		return 0;

	if (flow_cfg->nr_flows == flow_cfg->max_flows ||
	    !bitmap_empty(flow_cfg->dmacflt_bmap,
			  flow_cfg->dmacflt_max_flows))
		return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
	else
		return flow_cfg->max_flows;
}
EXPORT_SYMBOL(otx2_get_maxflows);

int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		  u32 location)
{
	struct otx2_flow *iter;

	if (location >= otx2_get_maxflows(pfvf->flow_cfg))
		return -EINVAL;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location) {
			nfc->fs = iter->flow_spec;
			nfc->rss_context = iter->rss_ctx_id;
			return 0;
		}
	}

	return -ENOENT;
}

int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		       u32 *rule_locs)
{
	u32 rule_cnt = nfc->rule_cnt;
	u32 location = 0;
	int idx = 0;
	int err = 0;

	nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
	while ((!err || err == -ENOENT) && idx < rule_cnt) {
		err = otx2_get_flow(pfvf, nfc, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	nfc->rule_cnt = rule_cnt;

	return err;
}

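/* Translate the IPv4 portion of an ethtool flow spec into an NPC install
 * request: for each field with a non-zero mask, copy the header value and
 * mask into req->packet/req->mask and set the matching NPC_* feature bit.
 * A zero mask means "don't care" and the field is left unmatched.
 */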
static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IP_USER_FLOW:
		if (ipv4_usr_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_usr_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_usr_mask->tos) {
			pkt->tos = ipv4_usr_hdr->tos;
			pmask->tos = ipv4_usr_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_usr_mask->proto) {
			switch (ipv4_usr_hdr->proto) {
			case IPPROTO_ICMP:
				req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
				break;
			case IPPROTO_TCP:
				req->features |= BIT_ULL(NPC_IPPROTO_TCP);
				break;
			case IPPROTO_UDP:
				req->features |= BIT_ULL(NPC_IPPROTO_UDP);
				break;
			case IPPROTO_SCTP:
				req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
				break;
			case IPPROTO_AH:
				req->features |= BIT_ULL(NPC_IPPROTO_AH);
				break;
			case IPPROTO_ESP:
				req->features |= BIT_ULL(NPC_IPPROTO_ESP);
				break;
			default:
				return -EOPNOTSUPP;
			}
		}
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ipv4_l4_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_l4_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_l4_mask->tos) {
			pkt->tos = ipv4_l4_hdr->tos;
			pmask->tos = ipv4_l4_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv4_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ah_esp_mask->ip4src) {
			memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ah_esp_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ah_esp_mask->tos) {
			pkt->tos = ah_esp_hdr->tos;
			pmask->tos = ah_esp_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if (ah_esp_mask->spi & ah_esp_hdr->spi)
			return -EOPNOTSUPP;

		if (flow_type == AH_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IPV6_USER_FLOW:
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		if (ipv6_usr_hdr->l4_proto == IPPROTO_FRAGMENT) {
			pkt->next_header = ipv6_usr_hdr->l4_proto;
			pmask->next_header = ipv6_usr_mask->l4_proto;
			req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
		}
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		if (ipv6_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv6_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
			memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
			memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
		    (ah_esp_mask->tclass & ah_esp_hdr->tclass))
			return -EOPNOTSUPP;

		if (flow_type == AH_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

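/* Build an NPC install request from a generic ethtool flow spec. L2 fields
 * are handled here, L3/L4 fields are delegated to the IPv4/IPv6 helpers
 * above, and the FLOW_EXT/FLOW_MAC_EXT extensions add VLAN and DMAC
 * matching. As an illustrative example, a rule such as
 *
 *   ethtool -U <netdev> flow-type tcp4 dst-port 80 action 2
 *
 * ends up setting NPC_ETYPE, NPC_IPPROTO_TCP and NPC_DPORT_TCP in
 * req->features before the request is sent to the AF.
 */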
static int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
			      struct npc_install_flow_req *req)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;
	u32 flow_type;
	int ret;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
	switch (flow_type) {
	/* bits not set in mask are don't care */
	case ETHER_FLOW:
		if (!is_zero_ether_addr(eth_mask->h_source)) {
			ether_addr_copy(pkt->smac, eth_hdr->h_source);
			ether_addr_copy(pmask->smac, eth_mask->h_source);
			req->features |= BIT_ULL(NPC_SMAC);
		}
		if (!is_zero_ether_addr(eth_mask->h_dest)) {
			ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
			ether_addr_copy(pmask->dmac, eth_mask->h_dest);
			req->features |= BIT_ULL(NPC_DMAC);
		}
		if (eth_hdr->h_proto) {
			memcpy(&pkt->etype, &eth_hdr->h_proto,
			       sizeof(pkt->etype));
			memcpy(&pmask->etype, &eth_mask->h_proto,
			       sizeof(pmask->etype));
			req->features |= BIT_ULL(NPC_ETYPE);
		}
		break;
	case IP_USER_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	case IPV6_USER_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (fsp->flow_type & FLOW_EXT) {
		u16 vlan_etype;

		if (fsp->m_ext.vlan_etype) {
			/* Partial masks not supported */
			if (be16_to_cpu(fsp->m_ext.vlan_etype) != 0xFFFF)
				return -EINVAL;

			vlan_etype = be16_to_cpu(fsp->h_ext.vlan_etype);

			/* A drop rule with vlan_etype == 802.1Q
			 * and vlan_id == 0 is not supported
			 */
			if (vlan_etype == ETH_P_8021Q && !fsp->m_ext.vlan_tci &&
			    fsp->ring_cookie == RX_CLS_FLOW_DISC)
				return -EINVAL;

			/* Only ETH_P_8021Q and ETH_P_8021AD types supported */
			if (vlan_etype != ETH_P_8021Q &&
			    vlan_etype != ETH_P_8021AD)
				return -EINVAL;

			memcpy(&pkt->vlan_etype, &fsp->h_ext.vlan_etype,
			       sizeof(pkt->vlan_etype));
			memcpy(&pmask->vlan_etype, &fsp->m_ext.vlan_etype,
			       sizeof(pmask->vlan_etype));

			if (vlan_etype == ETH_P_8021Q)
				req->features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG);
			else
				req->features |= BIT_ULL(NPC_VLAN_ETYPE_STAG);
		}

		if (fsp->m_ext.vlan_tci) {
			memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
			       sizeof(pkt->vlan_tci));
			memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
			       sizeof(pmask->vlan_tci));
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}

		if (fsp->m_ext.data[1]) {
			if (flow_type == IP_USER_FLOW) {
				if (be32_to_cpu(fsp->h_ext.data[1]) != IPV4_FLAG_MORE)
					return -EINVAL;

				pkt->ip_flag = be32_to_cpu(fsp->h_ext.data[1]);
				pmask->ip_flag = be32_to_cpu(fsp->m_ext.data[1]);
				req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
			} else if (fsp->h_ext.data[1] ==
					cpu_to_be32(OTX2_DEFAULT_ACTION)) {
				/* Not Drop/Direct to queue but use action
				 * in default entry
				 */
				req->op = NIX_RX_ACTION_DEFAULT;
			}
		}
	}

	if (fsp->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fsp->m_ext.h_dest)) {
		ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
		ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
		req->features |= BIT_ULL(NPC_DMAC);
	}

	if (!req->features)
		return -EOPNOTSUPP;

	return 0;
}

static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
					struct ethtool_rx_flow_spec *fsp)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	u64 ring_cookie = fsp->ring_cookie;
	u32 flow_type;

	if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
		return false;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);

	/* CGX/RPM block DMAC filtering is configured for whitelisting,
	 * so check for an action other than DROP.
	 */
	if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
	    !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
		if (is_zero_ether_addr(eth_mask->h_dest) &&
		    is_valid_ether_addr(eth_hdr->h_dest))
			return true;
	}

	return false;
}

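/* Install one ntuple rule in the NPC MCAM. The ethtool ring_cookie encodes
 * the action: RX_CLS_FLOW_DISC means drop, otherwise the low bits select the
 * destination queue and the VF field (VF index + 1) redirects the flow to a
 * VF. Rules carrying FLOW_RSS steer to an RSS context instead of a single
 * queue, and VLAN-priority rules may additionally be tracked as PFC rules
 * when DCB is enabled.
 */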
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	u64 ring_cookie = flow->flow_spec.ring_cookie;
#ifdef CONFIG_DCB
	int vlan_prio, qidx, pfc_rule = 0;
#endif
	struct npc_install_flow_req *req;
	int err, vf = 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_prepare_flow_request(&flow->flow_spec, req);
	if (err) {
		/* free the allocated msg above */
		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	req->entry = flow->entry;
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	req->channel = pfvf->hw.rx_chan_base;
	if (ring_cookie == RX_CLS_FLOW_DISC) {
		req->op = NIX_RX_ACTIONOP_DROP;
	} else {
		/* change to unicast only if action of default entry is not
		 * requested by user
		 */
		if (flow->flow_spec.flow_type & FLOW_RSS) {
			req->op = NIX_RX_ACTIONOP_RSS;
			req->index = flow->rss_ctx_id;
			req->flow_key_alg = pfvf->hw.flowkey_alg_idx;
		} else {
			req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = ethtool_get_flow_spec_ring(ring_cookie);
		}
		vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
		if (vf > pci_num_vf(pfvf->pdev)) {
			mutex_unlock(&pfvf->mbox.lock);
			return -EINVAL;
		}

#ifdef CONFIG_DCB
		/* Identify PFC rule if PFC enabled and ntuple rule is vlan */
		if (!vf && (req->features & BIT_ULL(NPC_OUTER_VID)) &&
		    pfvf->pfc_en && req->op != NIX_RX_ACTIONOP_RSS) {
			vlan_prio = ntohs(req->packet.vlan_tci) &
				    ntohs(req->mask.vlan_tci);

			/* Get the priority */
			vlan_prio >>= 13;
			flow->rule_type |= PFC_FLOWCTRL_RULE;
			/* Check if PFC enabled for this priority */
			if (pfvf->pfc_en & BIT(vlan_prio)) {
				pfc_rule = true;
				qidx = req->index;
			}
		}
#endif
	}

	/* ethtool ring_cookie has (VF + 1) for VF */
	if (vf) {
		req->vf = vf;
		flow->is_vf = true;
		flow->vf = vf;
	}

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);

#ifdef CONFIG_DCB
	if (!err && pfc_rule)
		otx2_update_bpid_in_rqctx(pfvf, vlan_prio, qidx, true);
#endif

	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
				    struct otx2_flow *flow)
{
	struct otx2_flow *pf_mac;
	struct ethhdr *eth_hdr;

	pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
	if (!pf_mac)
		return -ENOMEM;

	pf_mac->entry = 0;
	pf_mac->rule_type |= DMAC_FILTER_RULE;
	pf_mac->location = pfvf->flow_cfg->max_flows;
	memcpy(&pf_mac->flow_spec, &flow->flow_spec,
	       sizeof(struct ethtool_rx_flow_spec));
	pf_mac->flow_spec.location = pf_mac->location;

	/* Copy PF mac address */
	eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
	ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);

	/* Install DMAC filter with PF mac address */
	otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);

	otx2_add_flow_to_list(pfvf, pf_mac);
	pfvf->flow_cfg->nr_flows++;
	set_bit(0, pfvf->flow_cfg->dmacflt_bmap);

	return 0;
}

int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	struct otx2_flow *flow;
	struct ethhdr *eth_hdr;
	bool new = false;
	int err = 0;
	u64 vf_num;
	u32 ring;

	if (!flow_cfg->max_flows) {
		netdev_err(pfvf->netdev,
			   "Ntuple rule count is 0, allocate and retry\n");
		return -EINVAL;
	}

	ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return -ENOMEM;

	/* The number of queues on a VF can be greater or less than the PF's,
	 * so there is no need to check the queue count when the PF is
	 * installing a rule for one of its VFs. Below are the expected vf_num
	 * values for the various ethtool commands.
	 *
	 * e.g.
	 * 1. ethtool -U <netdev> ... action -1  ==> vf_num:255
	 * 2. ethtool -U <netdev> ... action <queue_num>  ==> vf_num:0
	 * 3. ethtool -U <netdev> ... vf <vf_idx> queue <queue_num>  ==>
	 *    vf_num:vf_idx+1
	 */
	vf_num = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
	if (!is_otx2_vf(pfvf->pcifunc) && !vf_num &&
	    ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	if (fsp->location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, fsp->location);
	if (!flow) {
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->location = fsp->location;
		flow->entry = flow_cfg->flow_ent[flow->location];
		new = true;
	}
	/* struct copy */
	flow->flow_spec = *fsp;

	if (fsp->flow_type & FLOW_RSS)
		flow->rss_ctx_id = nfc->rss_context;

	if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
		eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* Sync dmac filter table with updated fields */
		if (flow->rule_type & DMAC_FILTER_RULE)
			return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
						   flow->entry);

		if (bitmap_full(flow_cfg->dmacflt_bmap,
				flow_cfg->dmacflt_max_flows)) {
			netdev_warn(pfvf->netdev,
				    "Can't insert the rule %d as max allowed dmac filters are %d\n",
				    flow->location +
				    flow_cfg->dmacflt_max_flows,
				    flow_cfg->dmacflt_max_flows);
			err = -EINVAL;
			if (new)
				kfree(flow);
			return err;
		}

		/* Install PF mac address to DMAC filter list */
		if (!test_bit(0, flow_cfg->dmacflt_bmap))
			otx2_add_flow_with_pfmac(pfvf, flow);

		flow->rule_type |= DMAC_FILTER_RULE;
		flow->entry = find_first_zero_bit(flow_cfg->dmacflt_bmap,
						  flow_cfg->dmacflt_max_flows);
		fsp->location = flow_cfg->max_flows + flow->entry;
		flow->flow_spec.location = fsp->location;
		flow->location = fsp->location;

		set_bit(flow->entry, flow_cfg->dmacflt_bmap);
		otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);

	} else {
		if (flow->location >= pfvf->flow_cfg->max_flows) {
			netdev_warn(pfvf->netdev,
				    "Can't insert non dmac ntuple rule at %d, allowed range %d-0\n",
				    flow->location,
				    flow_cfg->max_flows - 1);
			err = -EINVAL;
		} else {
			err = otx2_add_flow_msg(pfvf, flow);
		}
	}

	if (err) {
		if (err == MBOX_MSG_INVALID)
			err = -EINVAL;
		if (new)
			kfree(flow);
		return err;
	}

	/* add the new flow installed to list */
	if (new) {
		otx2_add_flow_to_list(pfvf, flow);
		flow_cfg->nr_flows++;
	}

	if (flow->is_vf)
		netdev_info(pfvf->netdev,
			    "Make sure that VF's queue number is within its queue limit\n");
	return 0;
}

static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
{
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;
	if (all)
		req->all = 1;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

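/* Update or remove the DMAC filter rule that carries the PF's own MAC
 * address (always kept in DMAC filter slot 0). On DMAC_ADDR_DEL the rule is
 * also dropped from the flow list; on DMAC_ADDR_UPDATE it is refreshed with
 * the current netdev MAC.
 */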
static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;
	bool found = false;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if ((iter->rule_type & DMAC_FILTER_RULE) && iter->entry == 0) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			if (req == DMAC_ADDR_DEL) {
				otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
						    0);
				clear_bit(0, pfvf->flow_cfg->dmacflt_bmap);
				found = true;
			} else {
				ether_addr_copy(eth_hdr->h_dest,
						pfvf->netdev->dev_addr);

				otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
			}
			break;
		}
	}

	if (found) {
		list_del(&iter->list);
		kfree(iter);
		pfvf->flow_cfg->nr_flows--;
	}
}

int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_flow *flow;
	int err;

	if (location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, location);
	if (!flow)
		return -ENOENT;

	if (flow->rule_type & DMAC_FILTER_RULE) {
		struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* user not allowed to remove dmac filter with interface mac */
		if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
			return -EPERM;

		err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
					  flow->entry);
		clear_bit(flow->entry, flow_cfg->dmacflt_bmap);
		/* If all DMAC filters are removed, delete the MAC filter with
		 * the interface MAC address and configure the CGX/RPM block
		 * in promiscuous mode.
		 */
		if (bitmap_weight(flow_cfg->dmacflt_bmap,
				  flow_cfg->dmacflt_max_flows) == 1)
			otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
	} else {
#ifdef CONFIG_DCB
		if (flow->rule_type & PFC_FLOWCTRL_RULE)
			otx2_update_bpid_in_rqctx(pfvf, 0,
						  flow->flow_spec.ring_cookie,
						  false);
#endif

		err = otx2_remove_flow_msg(pfvf, flow->entry, false);
	}

	if (err)
		return err;

	list_del(&flow->list);
	kfree(flow);
	flow_cfg->nr_flows--;

	return 0;
}

void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
{
	struct otx2_flow *flow, *tmp;
	int err;

	list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
		if (flow->rss_ctx_id != ctx_id)
			continue;
		err = otx2_remove_flow(pfvf, flow->location);
		if (err)
			netdev_warn(pfvf->netdev,
				    "Can't delete the rule %d associated with this rss group err:%d",
				    flow->location, err);
	}
}

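/* Tear down all ntuple rules in one shot. Because the ntuple MCAM entries are
 * kept sorted, a single delete request covering [flow_ent[0],
 * flow_ent[max_flows - 1]] removes the whole range, after which the software
 * flow list is freed.
 */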
int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return 0;

	if (!flow_cfg->max_flows)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->start = flow_cfg->flow_ent[0];
	req->end   = flow_cfg->flow_ent[flow_cfg->max_flows - 1];
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
	return err;
}

int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return 0;

	/* remove all flows */
	err = otx2_remove_flow_msg(pfvf, 0, true);
	if (err)
		return err;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->all = 1;
	/* Send message to AF to free MCAM entries */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_install_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	req->intf = NIX_INTF_RX;
	ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->channel = pfvf->hw.rx_chan_base;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
	req->vtag0_valid = true;
	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

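/* Toggle RX VLAN offload: install (or delete) the dedicated RX VLAN MCAM
 * entry and then tell the NIX block, via a vtag config message, whether to
 * strip and capture the outer VLAN tag on received packets.
 */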
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
{
	struct nix_vtag_config *req;
	struct mbox_msghdr *rsp_hdr;
	int err;

	/* Don't have enough MCAM entries */
	if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
		return -ENOMEM;

	if (enable) {
		err = otx2_install_rxvlan_offload_flow(pf);
		if (err)
			return err;
	} else {
		err = otx2_delete_rxvlan_offload_flow(pf);
		if (err)
			return err;
	}

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* Configure VLAN tag strip, capture and size */
	req->vtag_size = VTAGSIZE_T4;
	req->cfg_type = 1; /* rx vlan cfg */
	req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
	req->rx.strip_vtag = enable;
	req->rx.capture_vtag = enable;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err) {
		mutex_unlock(&pf->mbox.lock);
		return err;
	}

	rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp_hdr)) {
		mutex_unlock(&pf->mbox.lock);
		return PTR_ERR(rsp_hdr);
	}

	mutex_unlock(&pf->mbox.lock);
	return rsp_hdr->rc;
}

void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;

	list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
		if (iter->rule_type & DMAC_FILTER_RULE) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			otx2_dmacflt_add(pf, eth_hdr->h_dest,
					 iter->entry);
		}
	}
}

void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
{
	otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
}