1// SPDX-License-Identifier: GPL-2.0
2/* Marvell RVU Admin Function driver
3 *
4 * Copyright (C) 2022 Marvell.
5 *
6 */
7
8#include <linux/bitfield.h>
9#include <linux/module.h>
10#include <linux/pci.h>
11#include <linux/firmware.h>
12#include <linux/stddef.h>
13#include <linux/debugfs.h>
14
15#include "rvu_struct.h"
16#include "rvu_reg.h"
17#include "rvu.h"
18#include "npc.h"
19#include "cgx.h"
20#include "rvu_npc_fs.h"
21#include "rvu_npc_hash.h"
22
static u64 rvu_npc_wide_extract(const u64 input[], size_t start_bit,
				size_t width_bits)
{
	/* Extract a width_bits wide field starting at start_bit from the
	 * multi-word bit vector 'input' (bit 0 of input[0] is the overall
	 * LSB).  The 128-bit intermediate keeps the mask computation
	 * well-defined even when width_bits == 64 (a 64-bit shift by 64
	 * would be undefined behavior).
	 */
	const u64 mask = ~(u64)((~(__uint128_t)0) << width_bits);
	const size_t msb = start_bit + width_bits - 1;
	const size_t lword = start_bit >> 6;	/* word holding the field LSB */
	const size_t uword = msb >> 6;		/* word holding the field MSB */
	size_t lbits;
	u64 hi, lo;

	/* Fast path: field fully contained in a single 64-bit word */
	if (lword == uword)
		return (input[lword] >> (start_bit & 63)) & mask;

	/* Field straddles two adjacent words: stitch upper and lower parts */
	lbits = 64 - (start_bit & 63);	/* number of bits taken from lword */
	hi = input[uword];
	lo = (input[lword] >> (start_bit & 63));
	return ((hi << lbits) | lo) & mask;
}
41
42static void rvu_npc_lshift_key(u64 *key, size_t key_bit_len)
43{
44	u64 prev_orig_word = 0;
45	u64 cur_orig_word = 0;
46	size_t extra = key_bit_len % 64;
47	size_t max_idx = key_bit_len / 64;
48	size_t i;
49
50	if (extra)
51		max_idx++;
52
53	for (i = 0; i < max_idx; i++) {
54		cur_orig_word = key[i];
55		key[i] = key[i] << 1;
56		key[i] |= ((prev_orig_word >> 63) & 0x1);
57		prev_orig_word = cur_orig_word;
58	}
59}
60
61static u32 rvu_npc_toeplitz_hash(const u64 *data, u64 *key, size_t data_bit_len,
62				 size_t key_bit_len)
63{
64	u32 hash_out = 0;
65	u64 temp_data = 0;
66	int i;
67
68	for (i = data_bit_len - 1; i >= 0; i--) {
69		temp_data = (data[i / 64]);
70		temp_data = temp_data >> (i % 64);
71		temp_data &= 0x1;
72		if (temp_data)
73			hash_out ^= (u32)(rvu_npc_wide_extract(key, key_bit_len - 32, 32));
74
75		rvu_npc_lshift_key(key, key_bit_len);
76	}
77
78	return hash_out;
79}
80
/* Compute the 32-bit field hash over 128 bits of extracted layer data
 * using the 159-bit Toeplitz key built from the AF secret key registers.
 * @ldata: two 64-bit words of layer data (ldata[0] = low word).
 * @rsp: field hash info (secret key, per-interface masks and control).
 * @intf: NPC interface (RX/TX).
 * @hash_idx: hash slot index (0 or 1).
 * Return: hash masked and offset-adjusted per HASHX_CTRL.
 */
u32 npc_field_hash_calc(u64 *ldata, struct npc_get_field_hash_info_rsp rsp,
			u8 intf, u8 hash_idx)
{
	u64 hash_key[3];
	u64 data_padded[2];
	u32 field_hash;

	/* Pack the three secret-key registers into a contiguous key:
	 * key[2] in the low bits, key[1] shifted in at bit 31, key[0] on
	 * top (31/33-bit split across the 64-bit word boundaries).
	 */
	hash_key[0] = rsp.secret_key[1] << 31;
	hash_key[0] |= rsp.secret_key[2];
	hash_key[1] = rsp.secret_key[1] >> 33;
	hash_key[1] |= rsp.secret_key[0] << 31;
	hash_key[2] = rsp.secret_key[0] >> 33;

	/* Only the bits selected by the per-interface hash mask are hashed */
	data_padded[0] = rsp.hash_mask[intf][hash_idx][0] & ldata[0];
	data_padded[1] = rsp.hash_mask[intf][hash_idx][1] & ldata[1];
	field_hash = rvu_npc_toeplitz_hash(data_padded, hash_key, 128, 159);

	/* HASHX_CTRL: bits 63:32 = result mask, bits 31:0 = additive offset */
	field_hash &= FIELD_GET(GENMASK(63, 32), rsp.hash_ctrl[intf][hash_idx]);
	field_hash += FIELD_GET(GENMASK(31, 0), rsp.hash_ctrl[intf][hash_idx]);
	return field_hash;
}
102
/* Read the current KEX LD config for (intf, lid, lt, ld) and return a
 * rebuilt config with hash extraction enabled, preserving the existing
 * header offset and key offset fields.
 */
static u64 npc_update_use_hash(struct rvu *rvu, int blkaddr,
			       u8 intf, int lid, int lt, int ld)
{
	u8 hdr, key;
	u64 cfg;

	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld));
	hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
	key = FIELD_GET(NPC_KEY_OFFSET, cfg);

	/* Update use_hash(bit-20) to 'true' and
	 * bytesm1(bit-16:19) to '0x3' in KEX_LD_CFG
	 */
	cfg = KEX_LD_CFG_USE_HASH(0x1, 0x03,
				  hdr, 0x1, 0x0, key);

	return cfg;
}
121
/* Program KEX and HASH configuration for every hash-enabled field of an
 * RX interface.  Silently returns on TX interfaces; bails out once all
 * NPC_MAX_HASH hardware hash slots are consumed.
 */
static void npc_program_mkex_hash_rx(struct rvu *rvu, int blkaddr,
				     u8 intf)
{
	struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
	int lid, lt, ld, hash_cnt = 0;

	if (is_npc_intf_tx(intf))
		return;

	/* Program HASH_CFG */
	for (lid = 0; lid < NPC_MAX_LID; lid++) {
		for (lt = 0; lt < NPC_MAX_LT; lt++) {
			for (ld = 0; ld < NPC_MAX_LD; ld++) {
				if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
					u64 cfg;

					/* Hardware provides only NPC_MAX_HASH slots */
					if (hash_cnt == NPC_MAX_HASH)
						return;

					cfg = npc_update_use_hash(rvu, blkaddr,
								  intf, lid, lt, ld);
					/* Set updated KEX configuration */
					SET_KEX_LD(intf, lid, lt, ld, cfg);
					/* Set HASH configuration */
					SET_KEX_LD_HASH(intf, ld,
							mkex_hash->hash[intf][ld]);
					SET_KEX_LD_HASH_MASK(intf, ld, 0,
							     mkex_hash->hash_mask[intf][ld][0]);
					SET_KEX_LD_HASH_MASK(intf, ld, 1,
							     mkex_hash->hash_mask[intf][ld][1]);
					SET_KEX_LD_HASH_CTRL(intf, ld,
							     mkex_hash->hash_ctrl[intf][ld]);

					hash_cnt++;
				}
			}
		}
	}
}
161
162static void npc_program_mkex_hash_tx(struct rvu *rvu, int blkaddr,
163				     u8 intf)
164{
165	struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
166	int lid, lt, ld, hash_cnt = 0;
167
168	if (is_npc_intf_rx(intf))
169		return;
170
171	/* Program HASH_CFG */
172	for (lid = 0; lid < NPC_MAX_LID; lid++) {
173		for (lt = 0; lt < NPC_MAX_LT; lt++) {
174			for (ld = 0; ld < NPC_MAX_LD; ld++)
175				if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
176					u64 cfg;
177
178					if (hash_cnt == NPC_MAX_HASH)
179						return;
180
181					cfg = npc_update_use_hash(rvu, blkaddr,
182								  intf, lid, lt, ld);
183					/* Set updated KEX configuration */
184					SET_KEX_LD(intf, lid, lt, ld, cfg);
185					/* Set HASH configuration */
186					SET_KEX_LD_HASH(intf, ld,
187							mkex_hash->hash[intf][ld]);
188					SET_KEX_LD_HASH_MASK(intf, ld, 0,
189							     mkex_hash->hash_mask[intf][ld][0]);
190					SET_KEX_LD_HASH_MASK(intf, ld, 1,
191							     mkex_hash->hash_mask[intf][ld][1]);
192					SET_KEX_LD_HASH_CTRL(intf, ld,
193							     mkex_hash->hash_ctrl[intf][ld]);
194					hash_cnt++;
195				}
196		}
197	}
198}
199
200void npc_config_secret_key(struct rvu *rvu, int blkaddr)
201{
202	struct hw_cap *hwcap = &rvu->hw->cap;
203	struct rvu_hwinfo *hw = rvu->hw;
204	u8 intf;
205
206	if (!hwcap->npc_hash_extract)
207		return;
208
209	for (intf = 0; intf < hw->npc_intfs; intf++) {
210		rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf),
211			    RVU_NPC_HASH_SECRET_KEY0);
212		rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf),
213			    RVU_NPC_HASH_SECRET_KEY1);
214		rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf),
215			    RVU_NPC_HASH_SECRET_KEY2);
216	}
217}
218
/* Entry point: decide which IPv6 SIP/DIP extractors should be hashed,
 * then program the RX and TX hash configuration for every interface.
 */
void npc_program_mkex_hash(struct rvu *rvu, int blkaddr)
{
	struct npc_mcam_kex_hash *mh = rvu->kpu.mkex_hash;
	struct hw_cap *hwcap = &rvu->hw->cap;
	u8 intf, ld, hdr_offset, byte_len;
	struct rvu_hwinfo *hw = rvu->hw;
	u64 cfg;

	/* Check if hardware supports hash extraction */
	if (!hwcap->npc_hash_extract)
		return;

	/* Check if IPv6 source/destination address
	 * should be hash enabled.
	 * Hashing reduces 128bit SIP/DIP fields to 32bit
	 * so that 224 bit X2 key can be used for IPv6 based filters as well,
	 * which in turn results in more number of MCAM entries available for
	 * use.
	 *
	 * Hashing of IPV6 SIP/DIP is enabled in below scenarios
	 * 1. If the silicon variant supports hashing feature
	 * 2. If the number of bytes of IP addr being extracted is 4 bytes ie
	 *    32bit. The assumption here is that if user wants 8bytes of LSB of
	 *    IP addr or full 16 bytes then his intention is not to use 32bit
	 *    hash.
	 */
	for (intf = 0; intf < hw->npc_intfs; intf++) {
		for (ld = 0; ld < NPC_MAX_LD; ld++) {
			cfg = rvu_read64(rvu, blkaddr,
					 NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf,
								       NPC_LID_LC,
								       NPC_LT_LC_IP6,
								       ld));
			hdr_offset = FIELD_GET(NPC_HDR_OFFSET, cfg);
			byte_len = FIELD_GET(NPC_BYTESM, cfg);
			/* Hashing of IPv6 source/destination address should be
			 * enabled if,
			 * hdr_offset == 8 (offset of source IPv6 address) or
			 * hdr_offset == 24 (offset of destination IPv6)
			 * address) and the number of byte to be
			 * extracted is 4. As per hardware configuration
			 * byte_len should be == actual byte_len - 1.
			 * Hence byte_len is checked against 3 but not 4.
			 */
			if ((hdr_offset == 8 || hdr_offset == 24) && byte_len == 3)
				mh->lid_lt_ld_hash_en[intf][NPC_LID_LC][NPC_LT_LC_IP6][ld] = true;
		}
	}

	/* Update hash configuration if the field is hash enabled */
	for (intf = 0; intf < hw->npc_intfs; intf++) {
		npc_program_mkex_hash_rx(rvu, blkaddr, intf);
		npc_program_mkex_hash_tx(rvu, blkaddr, intf);
	}
}
274
/* For every hardware hash slot on @intf that is enabled with layer-type
 * matching, replace the 128-bit IPv6 SIP/DIP portion of the MCAM entry
 * with its 32-bit field hash, and preserve the original packet/mask
 * values in @opkt/@omask for later retrieval.
 */
void npc_update_field_hash(struct rvu *rvu, u8 intf,
			   struct mcam_entry *entry,
			   int blkaddr,
			   u64 features,
			   struct flow_msg *pkt,
			   struct flow_msg *mask,
			   struct flow_msg *opkt,
			   struct flow_msg *omask)
{
	struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
	struct npc_get_field_hash_info_req req;
	struct npc_get_field_hash_info_rsp rsp;
	u64 ldata[2], cfg;
	u32 field_hash;
	u8 hash_idx;

	if (!rvu->hw->cap.npc_hash_extract) {
		dev_dbg(rvu->dev, "%s: Field hash extract feature is not supported\n", __func__);
		return;
	}

	/* Fetch secret key, hash masks and control words for this intf */
	req.intf = intf;
	rvu_mbox_handler_npc_get_field_hash_info(rvu, &req, &rsp);

	for (hash_idx = 0; hash_idx < NPC_MAX_HASH; hash_idx++) {
		cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_CFG(intf, hash_idx));
		/* Bits 11 and 12 gate the slot: LID enable and LT enable */
		if ((cfg & BIT_ULL(11)) && (cfg & BIT_ULL(12))) {
			u8 lid = (cfg & GENMASK_ULL(10, 8)) >> 8;
			u8 ltype = (cfg & GENMASK_ULL(7, 4)) >> 4;
			u8 ltype_mask = cfg & GENMASK_ULL(3, 0);

			if (mkex_hash->lid_lt_ld_hash_en[intf][lid][ltype][hash_idx]) {
				switch (ltype & ltype_mask) {
				/* If hash extract enabled is supported for IPv6 then
				 * 128 bit IPv6 source and destination addressed
				 * is hashed to 32 bit value.
				 */
				case NPC_LT_LC_IP6:
					/* ld[0] == hash_idx[0] == Source IPv6
					 * ld[1] == hash_idx[1] == Destination IPv6
					 */
					if ((features & BIT_ULL(NPC_SIP_IPV6)) && !hash_idx) {
						u32 src_ip[IPV6_WORDS];

						/* ldata[1] holds the upper 64 bits of the address */
						be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);
						ldata[1] = (u64)src_ip[0] << 32 | src_ip[1];
						ldata[0] = (u64)src_ip[2] << 32 | src_ip[3];
						field_hash = npc_field_hash_calc(ldata,
										 rsp,
										 intf,
										 hash_idx);
						npc_update_entry(rvu, NPC_SIP_IPV6, entry,
								 field_hash, 0,
								 GENMASK(31, 0), 0, intf);
						/* Keep originals so debug/dump paths can show them */
						memcpy(&opkt->ip6src, &pkt->ip6src,
						       sizeof(pkt->ip6src));
						memcpy(&omask->ip6src, &mask->ip6src,
						       sizeof(mask->ip6src));
					} else if ((features & BIT_ULL(NPC_DIP_IPV6)) && hash_idx) {
						u32 dst_ip[IPV6_WORDS];

						be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);
						ldata[1] = (u64)dst_ip[0] << 32 | dst_ip[1];
						ldata[0] = (u64)dst_ip[2] << 32 | dst_ip[3];
						field_hash = npc_field_hash_calc(ldata,
										 rsp,
										 intf,
										 hash_idx);
						npc_update_entry(rvu, NPC_DIP_IPV6, entry,
								 field_hash, 0,
								 GENMASK(31, 0), 0, intf);
						memcpy(&opkt->ip6dst, &pkt->ip6dst,
						       sizeof(pkt->ip6dst));
						memcpy(&omask->ip6dst, &mask->ip6dst,
						       sizeof(mask->ip6dst));
					}

					break;
				}
			}
		}
	}
}
358
/* Mailbox handler: report the hash secret key, per-interface hash masks
 * and hash control words to the requester.
 * Return: 0 on success, -EINVAL if the NPC block is absent.
 */
int rvu_mbox_handler_npc_get_field_hash_info(struct rvu *rvu,
					     struct npc_get_field_hash_info_req *req,
					     struct npc_get_field_hash_info_rsp *rsp)
{
	u64 *secret_key = rsp->secret_key;
	u8 intf = req->intf;
	int i, j, blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0) {
		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
		return -EINVAL;
	}

	/* Secret key registers are read for the requested interface */
	secret_key[0] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf));
	secret_key[1] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf));
	secret_key[2] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf));

	/* Hash masks are reported for both RX and TX regardless of req->intf */
	for (i = 0; i < NPC_MAX_HASH; i++) {
		for (j = 0; j < NPC_MAX_HASH_MASK; j++) {
			rsp->hash_mask[NIX_INTF_RX][i][j] =
				GET_KEX_LD_HASH_MASK(NIX_INTF_RX, i, j);
			rsp->hash_mask[NIX_INTF_TX][i][j] =
				GET_KEX_LD_HASH_MASK(NIX_INTF_TX, i, j);
		}
	}

	for (i = 0; i < NPC_MAX_INTF; i++)
		for (j = 0; j < NPC_MAX_HASH; j++)
			rsp->hash_ctrl[i][j] = GET_KEX_LD_HASH_CTRL(i, j);

	return 0;
}
392
393/**
394 *	rvu_exact_prepare_mdata - Make mdata for mcam entry
395 *	@mac: MAC address
396 *	@chan: Channel number.
397 *	@ctype: Channel Type.
398 *	@mask: LDATA mask.
399 *	Return: Meta data
400 */
401static u64 rvu_exact_prepare_mdata(u8 *mac, u16 chan, u16 ctype, u64 mask)
402{
403	u64 ldata = ether_addr_to_u64(mac);
404
405	/* Please note that mask is 48bit which excludes chan and ctype.
406	 * Increase mask bits if we need to include them as well.
407	 */
408	ldata |= ((u64)chan << 48);
409	ldata |= ((u64)ctype  << 60);
410	ldata &= mask;
411	ldata = ldata << 2;
412
413	return ldata;
414}
415
/**
 *      rvu_exact_calculate_hash - calculate hash index to mem table.
 *	@rvu: resource virtualization unit.
 *	@chan: Channel number
 *	@ctype: Channel type.
 *	@mac: MAC address
 *	@mask: HASH mask.
 *	@table_depth: Depth of table.
 *	Return: Hash value
 */
static u32 rvu_exact_calculate_hash(struct rvu *rvu, u16 chan, u16 ctype, u8 *mac,
				    u64 mask, u32 table_depth)
{
	struct npc_exact_table *table = rvu->hw->table;
	u64 hash_key[2];
	u64 key_in[2];
	u64 ldata;
	u32 hash;

	/* Use the same secret key constants the hardware was programmed
	 * with (rvu_exact_config_secret_key) so the software hash matches
	 * the hardware lookup.  Only KEY0 and KEY2 feed the 95-bit key.
	 */
	key_in[0] = RVU_NPC_HASH_SECRET_KEY0;
	key_in[1] = RVU_NPC_HASH_SECRET_KEY2;

	/* Pack into a contiguous key: KEY2 in the low bits, KEY0 spliced
	 * in starting at bit 31 (31/33-bit split across the two words).
	 */
	hash_key[0] = key_in[0] << 31;
	hash_key[0] |= key_in[1];
	hash_key[1] = key_in[0] >> 33;

	ldata = rvu_exact_prepare_mdata(mac, chan, ctype, mask);

	/* NOTE(review): the labels "hash_key0"/"hash_key2" print
	 * hash_key[1] and hash_key[0] respectively, i.e. the packed key
	 * words, not the raw SECRET_KEY registers — confirm if relied on.
	 */
	dev_dbg(rvu->dev, "%s: ldata=0x%llx hash_key0=0x%llx hash_key2=0x%llx\n", __func__,
		ldata, hash_key[1], hash_key[0]);
	hash = rvu_npc_toeplitz_hash(&ldata, (u64 *)hash_key, 64, 95);

	/* Fold the 32-bit hash into the mem table index range */
	hash &= table->mem_table.hash_mask;
	hash += table->mem_table.hash_offset;
	dev_dbg(rvu->dev, "%s: hash=%x\n", __func__,  hash);

	return hash;
}
454
455/**
456 *      rvu_npc_exact_alloc_mem_table_entry - find free entry in 4 way table.
457 *      @rvu: resource virtualization unit.
458 *	@way: Indicate way to table.
459 *	@index: Hash index to 4 way table.
460 *	@hash: Hash value.
461 *
462 *	Searches 4 way table using hash index. Returns 0 on success.
463 *	Return: 0 upon success.
464 */
465static int rvu_npc_exact_alloc_mem_table_entry(struct rvu *rvu, u8 *way,
466					       u32 *index, unsigned int hash)
467{
468	struct npc_exact_table *table;
469	int depth, i;
470
471	table = rvu->hw->table;
472	depth = table->mem_table.depth;
473
474	/* Check all the 4 ways for a free slot. */
475	mutex_lock(&table->lock);
476	for (i = 0; i <  table->mem_table.ways; i++) {
477		if (test_bit(hash + i * depth, table->mem_table.bmap))
478			continue;
479
480		set_bit(hash + i * depth, table->mem_table.bmap);
481		mutex_unlock(&table->lock);
482
483		dev_dbg(rvu->dev, "%s: mem table entry alloc success (way=%d index=%d)\n",
484			__func__, i, hash);
485
486		*way = i;
487		*index = hash;
488		return 0;
489	}
490	mutex_unlock(&table->lock);
491
492	dev_dbg(rvu->dev, "%s: No space in 4 way exact way, weight=%u\n", __func__,
493		bitmap_weight(table->mem_table.bmap, table->mem_table.depth));
494	return -ENOSPC;
495}
496
/**
 *	rvu_npc_exact_free_id - Free seq id from bitmap.
 *	@rvu: Resource virtualization unit.
 *	@seq_id: Sequence identifier to be freed.
 */
static void rvu_npc_exact_free_id(struct rvu *rvu, u32 seq_id)
{
	struct npc_exact_table *table;

	table = rvu->hw->table;
	/* Bitmap is shared; clear under the table lock */
	mutex_lock(&table->lock);
	clear_bit(seq_id, table->id_bmap);
	mutex_unlock(&table->lock);
	dev_dbg(rvu->dev, "%s: freed id %d\n", __func__, seq_id);
}
512
513/**
514 *	rvu_npc_exact_alloc_id - Alloc seq id from bitmap.
515 *	@rvu: Resource virtualization unit.
516 *	@seq_id: Sequence identifier.
517 *	Return: True or false.
518 */
519static bool rvu_npc_exact_alloc_id(struct rvu *rvu, u32 *seq_id)
520{
521	struct npc_exact_table *table;
522	u32 idx;
523
524	table = rvu->hw->table;
525
526	mutex_lock(&table->lock);
527	idx = find_first_zero_bit(table->id_bmap, table->tot_ids);
528	if (idx == table->tot_ids) {
529		mutex_unlock(&table->lock);
530		dev_err(rvu->dev, "%s: No space in id bitmap (%d)\n",
531			__func__, table->tot_ids);
532
533		return false;
534	}
535
536	/* Mark bit map to indicate that slot is used.*/
537	set_bit(idx, table->id_bmap);
538	mutex_unlock(&table->lock);
539
540	*seq_id = idx;
541	dev_dbg(rvu->dev, "%s: Allocated id (%d)\n", __func__, *seq_id);
542
543	return true;
544}
545
/**
 *      rvu_npc_exact_alloc_cam_table_entry - find free slot in fully associative table.
 *      @rvu: resource virtualization unit.
 *	@index: Index to exact CAM table.
 *	Return: 0 upon success; else error number.
 */
static int rvu_npc_exact_alloc_cam_table_entry(struct rvu *rvu, int *index)
{
	struct npc_exact_table *table;
	u32 idx;

	table = rvu->hw->table;

	/* Find and claim a free CAM slot under the table lock */
	mutex_lock(&table->lock);
	idx = find_first_zero_bit(table->cam_table.bmap, table->cam_table.depth);
	if (idx == table->cam_table.depth) {
		mutex_unlock(&table->lock);
		dev_info(rvu->dev, "%s: No space in exact cam table, weight=%u\n", __func__,
			 bitmap_weight(table->cam_table.bmap, table->cam_table.depth));
		return -ENOSPC;
	}

	/* Mark bit map to indicate that slot is used.*/
	set_bit(idx, table->cam_table.bmap);
	mutex_unlock(&table->lock);

	*index = idx;
	dev_dbg(rvu->dev, "%s: cam table entry alloc success (index=%d)\n",
		__func__, idx);
	return 0;
}
577
578/**
579 *	rvu_exact_prepare_table_entry - Data for exact match table entry.
580 *	@rvu: Resource virtualization unit.
581 *	@enable: Enable/Disable entry
582 *	@ctype: Software defined channel type. Currently set as 0.
583 *	@chan: Channel number.
584 *	@mac_addr: Destination mac address.
585 *	Return: mdata for exact match table.
586 */
587static u64 rvu_exact_prepare_table_entry(struct rvu *rvu, bool enable,
588					 u8 ctype, u16 chan, u8 *mac_addr)
589
590{
591	u64 ldata = ether_addr_to_u64(mac_addr);
592
593	/* Enable or disable */
594	u64 mdata = FIELD_PREP(GENMASK_ULL(63, 63), enable ? 1 : 0);
595
596	/* Set Ctype */
597	mdata |= FIELD_PREP(GENMASK_ULL(61, 60), ctype);
598
599	/* Set chan */
600	mdata |= FIELD_PREP(GENMASK_ULL(59, 48), chan);
601
602	/* MAC address */
603	mdata |= FIELD_PREP(GENMASK_ULL(47, 0), ldata);
604
605	return mdata;
606}
607
608/**
609 *	rvu_exact_config_secret_key - Configure secret key.
610 *	@rvu: Resource virtualization unit.
611 */
612static void rvu_exact_config_secret_key(struct rvu *rvu)
613{
614	int blkaddr;
615
616	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
617	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET0(NIX_INTF_RX),
618		    RVU_NPC_HASH_SECRET_KEY0);
619
620	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET1(NIX_INTF_RX),
621		    RVU_NPC_HASH_SECRET_KEY1);
622
623	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET2(NIX_INTF_RX),
624		    RVU_NPC_HASH_SECRET_KEY2);
625}
626
/**
 *	rvu_exact_config_search_key - Configure search key
 *	@rvu: Resource virtualization unit.
 *
 *	Extract ETH_ALEN bytes at header offset 0 from layer A (the DMAC)
 *	with layer-type matching disabled.
 */
static void rvu_exact_config_search_key(struct rvu *rvu)
{
	int blkaddr;
	u64 reg_val;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);

	/* HDR offset */
	reg_val = FIELD_PREP(GENMASK_ULL(39, 32), 0);

	/* BYTESM1, number of bytes - 1 */
	reg_val |= FIELD_PREP(GENMASK_ULL(18, 16), ETH_ALEN - 1);

	/* Enable LID and set LID to  NPC_LID_LA */
	reg_val |= FIELD_PREP(GENMASK_ULL(11, 11), 1);
	reg_val |= FIELD_PREP(GENMASK_ULL(10, 8),  NPC_LID_LA);

	/* Clear layer type based extraction */

	/* Disable LT_EN */
	reg_val |= FIELD_PREP(GENMASK_ULL(12, 12), 0);

	/* Set LTYPE_MATCH to 0 */
	reg_val |= FIELD_PREP(GENMASK_ULL(7, 4), 0);

	/* Set LTYPE_MASK to 0 */
	reg_val |= FIELD_PREP(GENMASK_ULL(3, 0), 0);

	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_CFG(NIX_INTF_RX), reg_val);
}
661
/**
 *	rvu_exact_config_result_ctrl - Set exact table hash control
 *	@rvu: Resource virtualization unit.
 *	@depth: Depth of Exact match table.
 *
 *	Sets mask and offset for hash for mem table.
 */
static void rvu_exact_config_result_ctrl(struct rvu *rvu, uint32_t depth)
{
	int blkaddr;
	u64 reg = 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);

	/* Set mask. Note that depth is a power of 2 */
	rvu->hw->table->mem_table.hash_mask = (depth - 1);
	reg |= FIELD_PREP(GENMASK_ULL(42, 32), (depth - 1));

	/* Set offset as 0 */
	rvu->hw->table->mem_table.hash_offset = 0;
	reg |= FIELD_PREP(GENMASK_ULL(10, 0), 0);

	/* Set reg for RX */
	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_RESULT_CTL(NIX_INTF_RX), reg);
	/* Store hash mask and offset for s/w algorithm */
}
688
/**
 *	rvu_exact_config_table_mask - Set exact table mask.
 *	@rvu: Resource virtualization unit.
 */
static void rvu_exact_config_table_mask(struct rvu *rvu)
{
	int blkaddr;
	u64 mask = 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);

	/* Don't use Ctype */
	mask |= FIELD_PREP(GENMASK_ULL(61, 60), 0);

	/* Set chan */
	mask |= GENMASK_ULL(59, 48);

	/* Full ldata */
	mask |= GENMASK_ULL(47, 0);

	/* Store mask for s/w hash calculation */
	rvu->hw->table->mem_table.mask = mask;

	/* Set mask for RX.*/
	rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_MASK(NIX_INTF_RX), mask);
}
715
716/**
717 *      rvu_npc_exact_get_max_entries - Get total number of entries in table.
718 *      @rvu: resource virtualization unit.
719 *	Return: Maximum table entries possible.
720 */
721u32 rvu_npc_exact_get_max_entries(struct rvu *rvu)
722{
723	struct npc_exact_table *table;
724
725	table = rvu->hw->table;
726	return table->tot_ids;
727}
728
729/**
730 *      rvu_npc_exact_has_match_table - Checks support for exact match.
731 *      @rvu: resource virtualization unit.
732 *	Return: True if exact match table is supported/enabled.
733 */
734bool rvu_npc_exact_has_match_table(struct rvu *rvu)
735{
736	return  rvu->hw->cap.npc_exact_match_enabled;
737}
738
739/**
740 *      __rvu_npc_exact_find_entry_by_seq_id - find entry by id
741 *      @rvu: resource virtualization unit.
742 *	@seq_id: Sequence identifier.
743 *
744 *	Caller should acquire the lock.
745 *	Return: Pointer to table entry.
746 */
747static struct npc_exact_table_entry *
748__rvu_npc_exact_find_entry_by_seq_id(struct rvu *rvu, u32 seq_id)
749{
750	struct npc_exact_table *table = rvu->hw->table;
751	struct npc_exact_table_entry *entry = NULL;
752	struct list_head *lhead;
753
754	lhead = &table->lhead_gbl;
755
756	/* traverse to find the matching entry */
757	list_for_each_entry(entry, lhead, glist) {
758		if (entry->seq_id != seq_id)
759			continue;
760
761		return entry;
762	}
763
764	return NULL;
765}
766
/**
 *      rvu_npc_exact_add_to_list - Add entry to list
 *      @rvu: resource virtualization unit.
 *	@opc_type: OPCODE to select MEM/CAM table.
 *	@ways: MEM table ways.
 *	@index: Index in MEM/CAM table.
 *	@cgx_id: CGX identifier.
 *	@lmac_id: LMAC identifier.
 *	@mac_addr: MAC address.
 *	@chan: Channel number.
 *	@ctype: Channel Type.
 *	@seq_id: Sequence identifier
 *	@cmd: True if function is called by ethtool cmd
 *	@mcam_idx: NPC mcam index of DMAC entry in NPC mcam.
 *	@pcifunc: pci function
 *	Return: 0 upon success.
 */
static int rvu_npc_exact_add_to_list(struct rvu *rvu, enum npc_exact_opc_type opc_type, u8 ways,
				     u32 index, u8 cgx_id, u8 lmac_id, u8 *mac_addr, u16 chan,
				     u8 ctype, u32 *seq_id, bool cmd, u32 mcam_idx, u16 pcifunc)
{
	struct npc_exact_table_entry *entry, *tmp, *iter;
	struct npc_exact_table *table = rvu->hw->table;
	struct list_head *lhead, *pprev;

	/* NOTE(review): WARN_ON only logs; execution continues even if
	 * 'ways' is out of range — confirm callers always pass valid ways.
	 */
	WARN_ON(ways >= NPC_EXACT_TBL_MAX_WAYS);

	if (!rvu_npc_exact_alloc_id(rvu, seq_id)) {
		dev_err(rvu->dev, "%s: Generate seq id failed\n", __func__);
		return -EFAULT;
	}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		/* Roll back the id allocated above */
		rvu_npc_exact_free_id(rvu, *seq_id);
		dev_err(rvu->dev, "%s: Memory allocation failed\n", __func__);
		return -ENOMEM;
	}

	mutex_lock(&table->lock);
	/* Pick the per-table list and bump the matching counter */
	switch (opc_type) {
	case NPC_EXACT_OPC_CAM:
		lhead = &table->lhead_cam_tbl_entry;
		table->cam_tbl_entry_cnt++;
		break;

	case NPC_EXACT_OPC_MEM:
		lhead = &table->lhead_mem_tbl_entry[ways];
		table->mem_tbl_entry_cnt++;
		break;

	default:
		/* Unknown table: undo allocation and id reservation */
		mutex_unlock(&table->lock);
		kfree(entry);
		rvu_npc_exact_free_id(rvu, *seq_id);

		dev_err(rvu->dev, "%s: Unknown opc type%d\n", __func__, opc_type);
		return  -EINVAL;
	}

	/* Add to global list */
	INIT_LIST_HEAD(&entry->glist);
	list_add_tail(&entry->glist, &table->lhead_gbl);
	INIT_LIST_HEAD(&entry->list);
	entry->index = index;
	entry->ways = ways;
	entry->opc_type = opc_type;

	entry->pcifunc = pcifunc;

	ether_addr_copy(entry->mac, mac_addr);
	entry->chan = chan;
	entry->ctype = ctype;
	entry->cgx_id = cgx_id;
	entry->lmac_id = lmac_id;

	entry->seq_id = *seq_id;

	entry->mcam_idx = mcam_idx;
	entry->cmd = cmd;

	pprev = lhead;

	/* Insert entry in ascending order of index */
	list_for_each_entry_safe(iter, tmp, lhead, list) {
		if (index < iter->index)
			break;

		pprev = &iter->list;
	}

	/* Add to each table list */
	list_add(&entry->list, pprev);
	mutex_unlock(&table->lock);
	return 0;
}
863
/**
 *	rvu_npc_exact_mem_table_write - Wrapper for register write
 *	@rvu: resource virtualization unit.
 *	@blkaddr: Block address
 *	@ways: ways for MEM table.
 *	@index: Index in MEM
 *	@mdata: Meta data to be written to register.
 */
static void rvu_npc_exact_mem_table_write(struct rvu *rvu, int blkaddr, u8 ways,
					  u32 index, u64 mdata)
{
	/* One 64-bit register write covers the whole entry */
	rvu_write64(rvu, blkaddr, NPC_AF_EXACT_MEM_ENTRY(ways, index), mdata);
}
877
/**
 *	rvu_npc_exact_cam_table_write - Wrapper for register write
 *	@rvu: resource virtualization unit.
 *	@blkaddr: Block address
 *	@index: Index in MEM
 *	@mdata: Meta data to be written to register.
 */
static void rvu_npc_exact_cam_table_write(struct rvu *rvu, int blkaddr,
					  u32 index, u64 mdata)
{
	/* One 64-bit register write covers the whole entry */
	rvu_write64(rvu, blkaddr, NPC_AF_EXACT_CAM_ENTRY(index), mdata);
}
890
/**
 *      rvu_npc_exact_dealloc_table_entry - dealloc table entry
 *      @rvu: resource virtualization unit.
 *	@opc_type: OPCODE for selection of table(MEM or CAM)
 *	@ways: ways if opc_type is MEM table.
 *	@index: Index of MEM or CAM table.
 *	Return: 0 upon success.
 */
static int rvu_npc_exact_dealloc_table_entry(struct rvu *rvu, enum npc_exact_opc_type opc_type,
					     u8 ways, u32 index)
{
	int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	struct npc_exact_table *table;
	u8 null_dmac[6] = { 0 };
	int depth;

	/* Prepare entry with all fields set to zero */
	u64 null_mdata = rvu_exact_prepare_table_entry(rvu, false, 0, 0, null_dmac);

	table = rvu->hw->table;
	depth = table->mem_table.depth;

	mutex_lock(&table->lock);

	switch (opc_type) {
	case NPC_EXACT_OPC_CAM:

		/* Check whether entry is used already */
		if (!test_bit(index, table->cam_table.bmap)) {
			mutex_unlock(&table->lock);
			dev_err(rvu->dev, "%s: Trying to free an unused entry ways=%d index=%d\n",
				__func__, ways, index);
			return -EINVAL;
		}

		/* Zero the hardware entry before releasing the slot */
		rvu_npc_exact_cam_table_write(rvu, blkaddr, index, null_mdata);
		clear_bit(index, table->cam_table.bmap);
		break;

	case NPC_EXACT_OPC_MEM:

		/* Check whether entry is used already */
		if (!test_bit(index + ways * depth, table->mem_table.bmap)) {
			mutex_unlock(&table->lock);
			dev_err(rvu->dev, "%s: Trying to free an unused entry index=%d\n",
				__func__, index);
			return -EINVAL;
		}

		rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, null_mdata);
		clear_bit(index + ways * depth, table->mem_table.bmap);
		break;

	default:
		/* NOTE(review): -ENOSPC is an odd code for an invalid opc
		 * type (-EINVAL would be expected) — callers may rely on it.
		 */
		mutex_unlock(&table->lock);
		dev_err(rvu->dev, "%s: invalid opc type %d", __func__, opc_type);
		return -ENOSPC;
	}

	mutex_unlock(&table->lock);

	dev_dbg(rvu->dev, "%s: Successfully deleted entry (index=%d, ways=%d opc_type=%d\n",
		__func__, index,  ways, opc_type);

	return 0;
}
957
/**
 *	rvu_npc_exact_alloc_table_entry - Allocate an entry
 *      @rvu: resource virtualization unit.
 *	@mac: MAC address.
 *	@chan: Channel number.
 *	@ctype: Channel Type.
 *	@index: Index of MEM table or CAM table.
 *	@ways: Ways. Only valid for MEM table.
 *	@opc_type: OPCODE to select table (MEM or CAM)
 *
 *	Try allocating a slot from MEM table. If all 4 ways
 *	slot are full for a hash index, check availability in
 *	32-entry CAM table for allocation.
 *	Return: 0 upon success.
 */
static int rvu_npc_exact_alloc_table_entry(struct rvu *rvu,  char *mac, u16 chan, u8 ctype,
					   u32 *index, u8 *ways, enum npc_exact_opc_type *opc_type)
{
	struct npc_exact_table *table;
	unsigned int hash;
	int err;

	table = rvu->hw->table;

	/* Check in 4-ways mem entry for free slot */
	hash =  rvu_exact_calculate_hash(rvu, chan, ctype, mac, table->mem_table.mask,
					 table->mem_table.depth);
	err = rvu_npc_exact_alloc_mem_table_entry(rvu, ways, index, hash);
	if (!err) {
		*opc_type = NPC_EXACT_OPC_MEM;
		dev_dbg(rvu->dev, "%s: inserted in 4 ways hash table ways=%d, index=%d\n",
			__func__, *ways, *index);
		return 0;
	}

	dev_dbg(rvu->dev, "%s: failed to insert in 4 ways hash table\n", __func__);

	/* ways is 0 for cam table */
	*ways = 0;
	err = rvu_npc_exact_alloc_cam_table_entry(rvu, index);
	if (!err) {
		*opc_type = NPC_EXACT_OPC_CAM;
		dev_dbg(rvu->dev, "%s: inserted in fully associative hash table index=%u\n",
			__func__, *index);
		return 0;
	}

	dev_err(rvu->dev, "%s: failed to insert in fully associative hash table\n", __func__);
	return -ENOSPC;
}
1008
/**
 *	rvu_npc_exact_save_drop_rule_chan_and_mask - Save drop rules info in data base.
 *      @rvu: resource virtualization unit.
 *	@drop_mcam_idx: Drop rule index in NPC mcam.
 *	@chan_val: Channel value.
 *	@chan_mask: Channel Mask.
 *	@pcifunc: pcifunc of interface.
 *	Return: True upon success.
 */
static bool rvu_npc_exact_save_drop_rule_chan_and_mask(struct rvu *rvu, int drop_mcam_idx,
						       u64 chan_val, u64 chan_mask, u16 pcifunc)
{
	struct npc_exact_table *table;
	int i;

	table = rvu->hw->table;

	/* Entries are packed at the front of the map: the first invalid
	 * slot (if any) is where a new entry goes.  A valid slot with the
	 * same chan value AND mask is a duplicate -> fail.
	 */
	for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
		if (!table->drop_rule_map[i].valid)
			break;

		if (table->drop_rule_map[i].chan_val != (u16)chan_val)
			continue;

		if (table->drop_rule_map[i].chan_mask != (u16)chan_mask)
			continue;

		return false;
	}

	/* Map full: no free slot left */
	if (i == NPC_MCAM_DROP_RULE_MAX)
		return false;

	table->drop_rule_map[i].drop_rule_idx = drop_mcam_idx;
	table->drop_rule_map[i].chan_val = (u16)chan_val;
	table->drop_rule_map[i].chan_mask = (u16)chan_mask;
	table->drop_rule_map[i].pcifunc = pcifunc;
	table->drop_rule_map[i].valid = true;
	return true;
}
1049
1050/**
1051 *	rvu_npc_exact_calc_drop_rule_chan_and_mask - Calculate Channel number and mask.
1052 *      @rvu: resource virtualization unit.
1053 *	@intf_type: Interface type (SDK, LBK or CGX)
1054 *	@cgx_id: CGX identifier.
1055 *	@lmac_id: LAMC identifier.
1056 *	@val: Channel number.
1057 *	@mask: Channel mask.
1058 *	Return: True upon success.
1059 */
1060static bool rvu_npc_exact_calc_drop_rule_chan_and_mask(struct rvu *rvu, u8 intf_type,
1061						       u8 cgx_id, u8 lmac_id,
1062						       u64 *val, u64 *mask)
1063{
1064	u16 chan_val, chan_mask;
1065
1066	/* No support for SDP and LBK */
1067	if (intf_type != NIX_INTF_TYPE_CGX)
1068		return false;
1069
1070	chan_val = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
1071	chan_mask = 0xfff;
1072
1073	if (val)
1074		*val = chan_val;
1075
1076	if (mask)
1077		*mask = chan_mask;
1078
1079	return true;
1080}
1081
1082/**
1083 *	rvu_npc_exact_drop_rule_to_pcifunc - Retrieve pcifunc
1084 *      @rvu: resource virtualization unit.
1085 *	@drop_rule_idx: Drop rule index in NPC mcam.
1086 *
1087 *	Debugfs (exact_drop_cnt) entry displays pcifunc for interface
1088 *	by retrieving the pcifunc value from data base.
1089 *	Return: Drop rule index.
1090 */
u16 rvu_npc_exact_drop_rule_to_pcifunc(struct rvu *rvu, u32 drop_rule_idx)
{
	struct npc_exact_table *table;
	int i;

	table = rvu->hw->table;

	/* Valid entries are packed from index 0; the first invalid slot
	 * marks the end of the map, so scanning stops there.
	 */
	for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
		if (!table->drop_rule_map[i].valid)
			break;

		if (table->drop_rule_map[i].drop_rule_idx != drop_rule_idx)
			continue;

		return table->drop_rule_map[i].pcifunc;
	}

	/* NOTE(review): this message also fires when the map simply has no
	 * matching entry (early break on an invalid slot), not only when the
	 * whole map was scanned. The -1 return wraps to 0xffff because the
	 * return type is u16.
	 */
	dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
		__func__, drop_rule_idx);
	return -1;
}
1112
1113/**
1114 *	rvu_npc_exact_get_drop_rule_info - Get drop rule information.
1115 *      @rvu: resource virtualization unit.
1116 *	@intf_type: Interface type (CGX, SDP or LBK)
1117 *	@cgx_id: CGX identifier.
1118 *	@lmac_id: LMAC identifier.
1119 *	@drop_mcam_idx: NPC mcam drop rule index.
1120 *	@val: Channel value.
1121 *	@mask: Channel mask.
1122 *	@pcifunc: pcifunc of interface corresponding to the drop rule.
1123 *	Return: True upon success.
1124 */
static bool rvu_npc_exact_get_drop_rule_info(struct rvu *rvu, u8 intf_type, u8 cgx_id,
					     u8 lmac_id, u32 *drop_mcam_idx, u64 *val,
					     u64 *mask, u16 *pcifunc)
{
	struct npc_exact_table *table;
	u64 chan_val, chan_mask;
	bool rc;
	int i;

	table = rvu->hw->table;

	/* Only CGX interfaces have drop rules installed. */
	if (intf_type != NIX_INTF_TYPE_CGX) {
		dev_err(rvu->dev, "%s: No drop rule for LBK/SDP mode\n", __func__);
		return false;
	}

	/* Derive the channel value/mask for this cgx/lmac pair. */
	rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, intf_type, cgx_id,
							lmac_id, &chan_val, &chan_mask);
	if (!rc)
		return false;

	/* Scan the packed drop-rule map; match on channel value only.
	 * NOTE(review): chan_mask is computed above but never compared here —
	 * confirm whether matching on chan_val alone is intended.
	 */
	for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
		if (!table->drop_rule_map[i].valid)
			break;

		if (table->drop_rule_map[i].chan_val != (u16)chan_val)
			continue;

		/* Optional outputs; caller may pass NULL for any of them. */
		if (val)
			*val = table->drop_rule_map[i].chan_val;
		if (mask)
			*mask = table->drop_rule_map[i].chan_mask;
		if (pcifunc)
			*pcifunc = table->drop_rule_map[i].pcifunc;

		*drop_mcam_idx = i;
		return true;
	}

	/* NOTE(review): in this branch *drop_mcam_idx was never written by
	 * this function, so the printed value is whatever the caller passed
	 * in (often uninitialized) — confirm intent.
	 */
	if (i == NPC_MCAM_DROP_RULE_MAX) {
		dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
			__func__, *drop_mcam_idx);
		return false;
	}

	/* Loop broke on an invalid slot before finding a match. */
	dev_err(rvu->dev, "%s: Could not retrieve for cgx=%d, lmac=%d\n",
		__func__, cgx_id, lmac_id);
	return false;
}
1174
1175/**
1176 *	__rvu_npc_exact_cmd_rules_cnt_update - Update number dmac rules against a drop rule.
1177 *      @rvu: resource virtualization unit.
1178 *	@drop_mcam_idx: NPC mcam drop rule index.
1179 *	@val: +1 or -1.
1180 *	@enable_or_disable_cam: If no exact match rules against a drop rule, disable it.
1181 *
1182 *	when first exact match entry against a drop rule is added, enable_or_disable_cam
1183 *	is set to true. When last exact match entry against a drop rule is deleted,
1184 *	enable_or_disable_cam is set to true.
1185 *	Return: Number of rules
1186 */
1187static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_idx,
1188						int val, bool *enable_or_disable_cam)
1189{
1190	struct npc_exact_table *table;
1191	u16 *cnt, old_cnt;
1192	bool promisc;
1193
1194	table = rvu->hw->table;
1195	promisc = table->promisc_mode[drop_mcam_idx];
1196
1197	cnt = &table->cnt_cmd_rules[drop_mcam_idx];
1198	old_cnt = *cnt;
1199
1200	*cnt += val;
1201
1202	if (!enable_or_disable_cam)
1203		goto done;
1204
1205	*enable_or_disable_cam = false;
1206
1207	if (promisc)
1208		goto done;
1209
1210	/* If all rules are deleted and not already in promisc mode;
1211	 * disable cam
1212	 */
1213	if (!*cnt && val < 0) {
1214		*enable_or_disable_cam = true;
1215		goto done;
1216	}
1217
1218	/* If rule got added and not already in promisc mode; enable cam */
1219	if (!old_cnt && val > 0) {
1220		*enable_or_disable_cam = true;
1221		goto done;
1222	}
1223
1224done:
1225	return *cnt;
1226}
1227
1228/**
1229 *      rvu_npc_exact_del_table_entry_by_id - Delete and free table entry.
1230 *      @rvu: resource virtualization unit.
1231 *	@seq_id: Sequence identifier of the entry.
1232 *
1233 *	Deletes entry from linked lists and free up slot in HW MEM or CAM
1234 *	table.
1235 *	Return: 0 upon success.
1236 */
static int rvu_npc_exact_del_table_entry_by_id(struct rvu *rvu, u32 seq_id)
{
	struct npc_exact_table_entry *entry = NULL;
	struct npc_exact_table *table;
	bool disable_cam = false;
	u32 drop_mcam_idx = -1;
	int *cnt;
	bool rc;

	table = rvu->hw->table;

	mutex_lock(&table->lock);

	/* Lookup for entry which needs to be updated */
	entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, seq_id);
	if (!entry) {
		dev_dbg(rvu->dev, "%s: failed to find entry for id=%d\n", __func__, seq_id);
		mutex_unlock(&table->lock);
		return -ENODATA;
	}

	/* Pick the occupancy counter matching where the entry lives. */
	cnt = (entry->opc_type == NPC_EXACT_OPC_CAM) ? &table->cam_tbl_entry_cnt :
				&table->mem_tbl_entry_cnt;

	/* delete from lists */
	list_del_init(&entry->list);
	list_del_init(&entry->glist);

	(*cnt)--;

	rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, entry->cgx_id,
					      entry->lmac_id, &drop_mcam_idx, NULL, NULL, NULL);
	if (!rc) {
		/* NOTE(review): at this point the entry has already been
		 * unlinked and the counter decremented, but the entry memory
		 * and its HW slot are not released — confirm this leak is
		 * acceptable on this error path.
		 */
		dev_dbg(rvu->dev, "%s: failed to retrieve drop info for id=0x%x\n",
			__func__, seq_id);
		mutex_unlock(&table->lock);
		return -ENODATA;
	}

	/* Only ethtool-installed (cmd) entries participate in the
	 * drop-rule reference count.
	 */
	if (entry->cmd)
		__rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, -1, &disable_cam);

	/* No dmac filter rules; disable drop on hit rule */
	if (disable_cam) {
		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
		dev_dbg(rvu->dev, "%s: Disabling mcam idx %d\n",
			__func__, drop_mcam_idx);
	}

	mutex_unlock(&table->lock);

	/* Release the HW slot (CAM or MEM) and the sequence id, then free
	 * the software entry. The entry is still safe to read here because
	 * it was unlinked while the lock was held.
	 */
	rvu_npc_exact_dealloc_table_entry(rvu, entry->opc_type, entry->ways, entry->index);

	rvu_npc_exact_free_id(rvu, seq_id);

	dev_dbg(rvu->dev, "%s: delete entry success for id=0x%x, mca=%pM\n",
		__func__, seq_id, entry->mac);
	kfree(entry);

	return 0;
}
1298
1299/**
1300 *      rvu_npc_exact_add_table_entry - Adds a table entry
1301 *      @rvu: resource virtualization unit.
1302 *	@cgx_id: cgx identifier.
1303 *	@lmac_id: lmac identifier.
1304 *	@mac: MAC address.
1305 *	@chan: Channel number.
1306 *	@ctype: Channel Type.
1307 *	@seq_id: Sequence number.
1308 *	@cmd: Whether it is invoked by ethtool cmd.
1309 *	@mcam_idx: NPC mcam index corresponding to MAC
1310 *	@pcifunc: PCI func.
1311 *
1312 *	Creates a new exact match table entry in either CAM or
1313 *	MEM table.
1314 *	Return: 0 upon success.
1315 */
static int rvu_npc_exact_add_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id, u8 *mac,
					 u16 chan, u8 ctype, u32 *seq_id, bool cmd,
					 u32 mcam_idx, u16 pcifunc)
{
	int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	enum npc_exact_opc_type opc_type;
	bool enable_cam = false;
	u32 drop_mcam_idx;
	u32 index;
	u64 mdata;
	bool rc;
	int err;
	u8 ways;

	/* Channel type is forced to 0 regardless of what the caller passed. */
	ctype = 0;

	/* Allocate a slot in either the 4-way MEM table or the CAM table;
	 * opc_type tells us which one was chosen.
	 */
	err = rvu_npc_exact_alloc_table_entry(rvu, mac, chan, ctype, &index, &ways, &opc_type);
	if (err) {
		dev_err(rvu->dev, "%s: Could not alloc in exact match table\n", __func__);
		return err;
	}

	/* Write mdata to table */
	mdata = rvu_exact_prepare_table_entry(rvu, true, ctype, chan, mac);

	if (opc_type == NPC_EXACT_OPC_CAM)
		rvu_npc_exact_cam_table_write(rvu, blkaddr, index, mdata);
	else
		rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index,  mdata);

	/* Insert entry to linked list */
	err = rvu_npc_exact_add_to_list(rvu, opc_type, ways, index, cgx_id, lmac_id,
					mac, chan, ctype, seq_id, cmd, mcam_idx, pcifunc);
	if (err) {
		rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
		dev_err(rvu->dev, "%s: could not add to exact match table\n", __func__);
		return err;
	}

	rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
					      &drop_mcam_idx, NULL, NULL, NULL);
	if (!rc) {
		/* NOTE(review): the list entry created just above is not
		 * removed on this path — only the HW slot is deallocated.
		 * Confirm whether the stale list entry is intended.
		 */
		rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
		dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
			__func__, cgx_id, lmac_id);
		return -EINVAL;
	}

	/* Only ethtool-installed (cmd) entries count toward the drop rule. */
	if (cmd)
		__rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 1, &enable_cam);

	/* First command rule; enable drop on hit rule */
	if (enable_cam) {
		rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, true);
		dev_dbg(rvu->dev, "%s: Enabling mcam idx %d\n",
			__func__, drop_mcam_idx);
	}

	dev_dbg(rvu->dev,
		"%s: Successfully added entry (index=%d, dmac=%pM, ways=%d opc_type=%d\n",
		__func__, index, mac, ways, opc_type);

	return 0;
}
1380
1381/**
1382 *      rvu_npc_exact_update_table_entry - Update exact match table.
1383 *      @rvu: resource virtualization unit.
1384 *	@cgx_id: CGX identifier.
1385 *	@lmac_id: LMAC identifier.
1386 *	@old_mac: Existing MAC address entry.
1387 *	@new_mac: New MAC address entry.
1388 *	@seq_id: Sequence identifier of the entry.
1389 *
1390 *	Updates MAC address of an entry. If entry is in MEM table, new
1391 *	hash value may not match with old one.
1392 *	Return: 0 upon success.
1393 */
static int rvu_npc_exact_update_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id,
					    u8 *old_mac, u8 *new_mac, u32 *seq_id)
{
	int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	struct npc_exact_table_entry *entry;
	struct npc_exact_table *table;
	u32 hash_index;
	u64 mdata;

	table = rvu->hw->table;

	mutex_lock(&table->lock);

	/* Lookup for entry which needs to be updated */
	entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, *seq_id);
	if (!entry) {
		mutex_unlock(&table->lock);
		dev_dbg(rvu->dev,
			"%s: failed to find entry for cgx_id=%d lmac_id=%d old_mac=%pM\n",
			__func__, cgx_id, lmac_id, old_mac);
		return -ENODATA;
	}

	/* If entry is in mem table and new hash index is different than old
	 * hash index, we cannot update the entry. Fail in these scenarios.
	 */
	if (entry->opc_type == NPC_EXACT_OPC_MEM) {
		hash_index =  rvu_exact_calculate_hash(rvu, entry->chan, entry->ctype,
						       new_mac, table->mem_table.mask,
						       table->mem_table.depth);
		if (hash_index != entry->index) {
			dev_dbg(rvu->dev,
				"%s: Update failed due to index mismatch(new=0x%x, old=%x)\n",
				__func__, hash_index, entry->index);
			mutex_unlock(&table->lock);
			return -EINVAL;
		}
	}

	/* Rewrite the HW slot in place with the new MAC; channel and ctype
	 * are carried over from the existing entry.
	 */
	mdata = rvu_exact_prepare_table_entry(rvu, true, entry->ctype, entry->chan, new_mac);

	if (entry->opc_type == NPC_EXACT_OPC_MEM)
		rvu_npc_exact_mem_table_write(rvu, blkaddr, entry->ways, entry->index, mdata);
	else
		rvu_npc_exact_cam_table_write(rvu, blkaddr, entry->index, mdata);

	/* Update entry fields */
	ether_addr_copy(entry->mac, new_mac);
	*seq_id = entry->seq_id;

	dev_dbg(rvu->dev,
		"%s: Successfully updated entry (index=%d, dmac=%pM, ways=%d opc_type=%d\n",
		__func__, entry->index, entry->mac, entry->ways, entry->opc_type);

	dev_dbg(rvu->dev, "%s: Successfully updated entry (old mac=%pM new_mac=%pM\n",
		__func__, old_mac, new_mac);

	mutex_unlock(&table->lock);
	return 0;
}
1454
1455/**
1456 *	rvu_npc_exact_promisc_disable - Disable promiscuous mode.
1457 *      @rvu: resource virtualization unit.
1458 *	@pcifunc: pcifunc
1459 *
1460 *	Drop rule is against each PF. We dont support DMAC filter for
1461 *	VF.
1462 *	Return: 0 upon success
1463 */
1464
1465int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
1466{
1467	struct npc_exact_table *table;
1468	int pf = rvu_get_pf(pcifunc);
1469	u8 cgx_id, lmac_id;
1470	u32 drop_mcam_idx;
1471	bool *promisc;
1472	bool rc;
1473
1474	table = rvu->hw->table;
1475
1476	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1477	rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
1478					      &drop_mcam_idx, NULL, NULL, NULL);
1479	if (!rc) {
1480		dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
1481			__func__, cgx_id, lmac_id);
1482		return -EINVAL;
1483	}
1484
1485	mutex_lock(&table->lock);
1486	promisc = &table->promisc_mode[drop_mcam_idx];
1487
1488	if (!*promisc) {
1489		mutex_unlock(&table->lock);
1490		dev_dbg(rvu->dev, "%s: Err Already promisc mode disabled (cgx=%d lmac=%d)\n",
1491			__func__, cgx_id, lmac_id);
1492		return LMAC_AF_ERR_INVALID_PARAM;
1493	}
1494	*promisc = false;
1495	mutex_unlock(&table->lock);
1496
1497	/* Enable drop rule */
1498	rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX,
1499					   true);
1500
1501	dev_dbg(rvu->dev, "%s: disabled  promisc mode (cgx=%d lmac=%d)\n",
1502		__func__, cgx_id, lmac_id);
1503	return 0;
1504}
1505
1506/**
1507 *	rvu_npc_exact_promisc_enable - Enable promiscuous mode.
1508 *      @rvu: resource virtualization unit.
1509 *	@pcifunc: pcifunc.
1510 *	Return: 0 upon success
1511 */
1512int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
1513{
1514	struct npc_exact_table *table;
1515	int pf = rvu_get_pf(pcifunc);
1516	u8 cgx_id, lmac_id;
1517	u32 drop_mcam_idx;
1518	bool *promisc;
1519	bool rc;
1520
1521	table = rvu->hw->table;
1522
1523	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1524	rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
1525					      &drop_mcam_idx, NULL, NULL, NULL);
1526	if (!rc) {
1527		dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
1528			__func__, cgx_id, lmac_id);
1529		return -EINVAL;
1530	}
1531
1532	mutex_lock(&table->lock);
1533	promisc = &table->promisc_mode[drop_mcam_idx];
1534
1535	if (*promisc) {
1536		mutex_unlock(&table->lock);
1537		dev_dbg(rvu->dev, "%s: Already in promisc mode (cgx=%d lmac=%d)\n",
1538			__func__, cgx_id, lmac_id);
1539		return LMAC_AF_ERR_INVALID_PARAM;
1540	}
1541	*promisc = true;
1542	mutex_unlock(&table->lock);
1543
1544	/*  disable drop rule */
1545	rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX,
1546					   false);
1547
1548	dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d)\n",
1549		__func__, cgx_id, lmac_id);
1550	return 0;
1551}
1552
1553/**
1554 *	rvu_npc_exact_mac_addr_reset - Delete PF mac address.
1555 *      @rvu: resource virtualization unit.
1556 *	@req: Reset request
1557 *	@rsp: Reset response.
1558 *	Return: 0 upon success
1559 */
1560int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
1561				 struct msg_rsp *rsp)
1562{
1563	int pf = rvu_get_pf(req->hdr.pcifunc);
1564	u32 seq_id = req->index;
1565	struct rvu_pfvf *pfvf;
1566	u8 cgx_id, lmac_id;
1567	int rc;
1568
1569	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1570
1571	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
1572
1573	rc = rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
1574	if (rc) {
1575		/* TODO: how to handle this error case ? */
1576		dev_err(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__, pfvf->mac_addr, pf);
1577		return 0;
1578	}
1579
1580	dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d success (seq_id=%u)\n",
1581		__func__, pfvf->mac_addr, pf, seq_id);
1582	return 0;
1583}
1584
1585/**
1586 *	rvu_npc_exact_mac_addr_update - Update mac address field with new value.
1587 *      @rvu: resource virtualization unit.
1588 *	@req: Update request.
1589 *	@rsp: Update response.
1590 *	Return: 0 upon success
1591 */
int rvu_npc_exact_mac_addr_update(struct rvu *rvu,
				  struct cgx_mac_addr_update_req *req,
				  struct cgx_mac_addr_update_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct npc_exact_table_entry *entry;
	struct npc_exact_table *table;
	struct rvu_pfvf *pfvf;
	u32 seq_id, mcam_idx;
	u8 old_mac[ETH_ALEN];
	u8 cgx_id, lmac_id;
	int rc;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	dev_dbg(rvu->dev, "%s: Update request for seq_id=%d, mac=%pM\n",
		__func__, req->index, req->mac_addr);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	table = rvu->hw->table;

	mutex_lock(&table->lock);

	/* Lookup for entry which needs to be updated */
	entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, req->index);
	if (!entry) {
		dev_err(rvu->dev, "%s: failed to find entry for id=0x%x\n", __func__, req->index);
		mutex_unlock(&table->lock);
		return LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED;
	}
	/* Snapshot fields under the lock; the delete/re-add fallback below
	 * runs without the lock held.
	 */
	ether_addr_copy(old_mac, entry->mac);
	seq_id = entry->seq_id;
	mcam_idx = entry->mcam_idx;
	mutex_unlock(&table->lock);

	/* Fast path: update the entry in place. This fails for a MEM-table
	 * entry whose new MAC hashes to a different index.
	 */
	rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id,  old_mac,
					      req->mac_addr, &seq_id);
	if (!rc) {
		rsp->index = seq_id;
		dev_dbg(rvu->dev, "%s  mac:%pM (pfvf:%pM default:%pM) update to PF=%d success\n",
			__func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf);
		ether_addr_copy(pfvf->mac_addr, req->mac_addr);
		return 0;
	}

	/* Try deleting and adding it again */
	rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
	if (rc) {
		/* This could be a new entry */
		dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__,
			pfvf->mac_addr, pf);
	}

	/* Re-add with the new MAC; seq_id is refreshed by the add. */
	rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
					   pfvf->rx_chan_base, 0, &seq_id, true,
					   mcam_idx, req->hdr.pcifunc);
	if (rc) {
		dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n", __func__,
			req->mac_addr, pf);
		return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
	}

	rsp->index = seq_id;
	dev_dbg(rvu->dev,
		"%s MAC (new:%pM, old=%pM default:%pM) del and add to PF=%d success (seq_id=%u)\n",
		__func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf, seq_id);

	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
	return 0;
}
1666
1667/**
1668 *	rvu_npc_exact_mac_addr_add - Adds MAC address to exact match table.
1669 *      @rvu: resource virtualization unit.
1670 *	@req: Add request.
1671 *	@rsp: Add response.
1672 *	Return: 0 upon success
1673 */
1674int rvu_npc_exact_mac_addr_add(struct rvu *rvu,
1675			       struct cgx_mac_addr_add_req *req,
1676			       struct cgx_mac_addr_add_rsp *rsp)
1677{
1678	int pf = rvu_get_pf(req->hdr.pcifunc);
1679	struct rvu_pfvf *pfvf;
1680	u8 cgx_id, lmac_id;
1681	int rc = 0;
1682	u32 seq_id;
1683
1684	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1685	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
1686
1687	rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
1688					   pfvf->rx_chan_base, 0, &seq_id,
1689					   true, -1, req->hdr.pcifunc);
1690
1691	if (!rc) {
1692		rsp->index = seq_id;
1693		dev_dbg(rvu->dev, "%s MAC (%pM) add to PF=%d success (seq_id=%u)\n",
1694			__func__, req->mac_addr, pf, seq_id);
1695		return 0;
1696	}
1697
1698	dev_err(rvu->dev, "%s MAC (%pM) add to PF=%d failed\n", __func__,
1699		req->mac_addr, pf);
1700	return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
1701}
1702
1703/**
1704 *	rvu_npc_exact_mac_addr_del - Delete DMAC filter
1705 *      @rvu: resource virtualization unit.
1706 *	@req: Delete request.
1707 *	@rsp: Delete response.
1708 *	Return: 0 upon success
1709 */
1710int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
1711			       struct cgx_mac_addr_del_req *req,
1712			       struct msg_rsp *rsp)
1713{
1714	int pf = rvu_get_pf(req->hdr.pcifunc);
1715	int rc;
1716
1717	rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
1718	if (!rc) {
1719		dev_dbg(rvu->dev, "%s del to PF=%d success (seq_id=%u)\n",
1720			__func__, pf, req->index);
1721		return 0;
1722	}
1723
1724	dev_err(rvu->dev, "%s del to PF=%d failed (seq_id=%u)\n",
1725		__func__,  pf, req->index);
1726	return LMAC_AF_ERR_EXACT_MATCH_TBL_DEL_FAILED;
1727}
1728
1729/**
1730 *	rvu_npc_exact_mac_addr_set - Add PF mac address to dmac filter.
1731 *      @rvu: resource virtualization unit.
1732 *	@req: Set request.
1733 *	@rsp: Set response.
1734 *	Return: 0 upon success
1735 */
int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req,
			       struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u32 seq_id = req->index;
	struct rvu_pfvf *pfvf;
	u8 cgx_id, lmac_id;
	u32 mcam_idx = -1;
	int rc, nixlf;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	/* Direct PF array access (not rvu_get_pfvf()) — this mailbox is for
	 * PFs only, so the VF bits of pcifunc are not considered here.
	 */
	pfvf = &rvu->pf[pf];

	/* If table does not have an entry; both update entry and del table entry API
	 * below fails. Those are not failure conditions.
	 */
	rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, pfvf->mac_addr,
					      req->mac_addr, &seq_id);
	if (!rc) {
		rsp->index = seq_id;
		ether_addr_copy(pfvf->mac_addr, req->mac_addr);
		ether_addr_copy(rsp->mac_addr, req->mac_addr);
		dev_dbg(rvu->dev, "%s MAC (%pM) update to PF=%d success\n",
			__func__, req->mac_addr, pf);
		return 0;
	}

	/* Try deleting and adding it again */
	rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
	if (rc) {
		dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n",
			__func__, pfvf->mac_addr, pf);
	}

	/* find mcam entry if exist */
	rc = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, NULL);
	if (!rc) {
		mcam_idx = npc_get_nixlf_mcam_index(&rvu->hw->mcam, req->hdr.pcifunc,
						    nixlf, NIXLF_UCAST_ENTRY);
	}

	/* Fresh add; mcam_idx stays -1 when no NIX LF/mcam entry exists. */
	rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
					   pfvf->rx_chan_base, 0, &seq_id,
					   true, mcam_idx, req->hdr.pcifunc);
	if (rc) {
		dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n",
			__func__, req->mac_addr, pf);
		return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
	}

	rsp->index = seq_id;
	ether_addr_copy(rsp->mac_addr, req->mac_addr);
	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
	dev_dbg(rvu->dev,
		"%s MAC (%pM) del and add to PF=%d success (seq_id=%u)\n",
		__func__, req->mac_addr, pf, seq_id);
	return 0;
}
1795
1796/**
1797 *	rvu_npc_exact_can_disable_feature - Check if feature can be disabled.
1798 *      @rvu: resource virtualization unit.
1799 *	Return: True if exact match feature is supported.
1800 */
1801bool rvu_npc_exact_can_disable_feature(struct rvu *rvu)
1802{
1803	struct npc_exact_table *table = rvu->hw->table;
1804	bool empty;
1805
1806	if (!rvu->hw->cap.npc_exact_match_enabled)
1807		return false;
1808
1809	mutex_lock(&table->lock);
1810	empty = list_empty(&table->lhead_gbl);
1811	mutex_unlock(&table->lock);
1812
1813	return empty;
1814}
1815
1816/**
1817 *	rvu_npc_exact_disable_feature - Disable feature.
1818 *      @rvu: resource virtualization unit.
1819 */
void rvu_npc_exact_disable_feature(struct rvu *rvu)
{
	/* Clear the capability flag; subsequent exact-match paths are
	 * skipped based on this flag.
	 */
	rvu->hw->cap.npc_exact_match_enabled = false;
}
1824
1825/**
1826 *	rvu_npc_exact_reset - Delete and free all entry which match pcifunc.
1827 *      @rvu: resource virtualization unit.
1828 *	@pcifunc: PCI func to match.
1829 */
void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc)
{
	struct npc_exact_table *table = rvu->hw->table;
	struct npc_exact_table_entry *tmp, *iter;
	u32 seq_id;

	mutex_lock(&table->lock);
	list_for_each_entry_safe(iter, tmp, &table->lhead_gbl, glist) {
		if (pcifunc != iter->pcifunc)
			continue;

		seq_id = iter->seq_id;
		dev_dbg(rvu->dev, "%s: resetting pcifun=%d seq_id=%u\n", __func__,
			pcifunc, seq_id);

		/* NOTE(review): the lock is dropped so the delete helper can
		 * take it itself, but list_for_each_entry_safe only protects
		 * against deleting the current node — a concurrent deletion
		 * of 'tmp' while unlocked would leave the cursor stale.
		 * Confirm callers serialize access to this list.
		 */
		mutex_unlock(&table->lock);
		rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
		mutex_lock(&table->lock);
	}
	mutex_unlock(&table->lock);
}
1851
1852/**
1853 *      rvu_npc_exact_init - initialize exact match table
1854 *      @rvu: resource virtualization unit.
1855 *
1856 *	Initialize HW and SW resources to manage 4way-2K table and fully
1857 *	associative 32-entry mcam table.
1858 *	Return: 0 upon success.
1859 */
int rvu_npc_exact_init(struct rvu *rvu)
{
	u64 bcast_mcast_val, bcast_mcast_mask;
	struct npc_exact_table *table;
	u64 exact_val, exact_mask;
	u64 chan_val, chan_mask;
	u8 cgx_id, lmac_id;
	u32 *drop_mcam_idx;
	u16 max_lmac_cnt;
	u64 npc_const3;
	int table_size;
	int blkaddr;
	u16 pcifunc;
	int err, i;
	u64 cfg;
	bool rc;

	/* Read NPC_AF_CONST3 to check whether exact match
	 * functionality is present.
	 */
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0) {
		dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
		return -EINVAL;
	}

	/* Check exact match feature is supported */
	npc_const3 = rvu_read64(rvu, blkaddr, NPC_AF_CONST3);
	if (!(npc_const3 & BIT_ULL(62)))
		return 0;

	/* Check if kex profile has enabled EXACT match nibble */
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
	if (!(cfg & NPC_EXACT_NIBBLE_HIT))
		return 0;

	/* Set capability to true */
	rvu->hw->cap.npc_exact_match_enabled = true;

	/* NOTE(review): table is kzalloc'd but never freed on the error
	 * returns below (the bitmaps are devm-managed, the table itself is
	 * not) — confirm whether teardown elsewhere covers this.
	 */
	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	dev_dbg(rvu->dev, "%s: Memory allocation for table success\n", __func__);
	rvu->hw->table = table;

	/* Read table size, ways and depth */
	table->mem_table.ways = FIELD_GET(GENMASK_ULL(19, 16), npc_const3);
	table->mem_table.depth = FIELD_GET(GENMASK_ULL(15, 0), npc_const3);
	table->cam_table.depth = FIELD_GET(GENMASK_ULL(31, 24), npc_const3);

	dev_dbg(rvu->dev, "%s: NPC exact match 4way_2k table(ways=%d, depth=%d)\n",
		__func__,  table->mem_table.ways, table->cam_table.depth);

	/* The hash masking below requires the table depth to be a power
	 * of 2; reject anything else.
	 */
	if ((table->mem_table.depth & (table->mem_table.depth - 1)) != 0) {
		dev_err(rvu->dev,
			"%s: NPC exact match 4way_2k table depth(%d) is not square of 2\n",
			__func__,  table->mem_table.depth);
		return -EINVAL;
	}

	table_size = table->mem_table.depth * table->mem_table.ways;

	/* Allocate bitmap for 4way 2K table */
	table->mem_table.bmap = devm_bitmap_zalloc(rvu->dev, table_size,
						   GFP_KERNEL);
	if (!table->mem_table.bmap)
		return -ENOMEM;

	dev_dbg(rvu->dev, "%s: Allocated bitmap for 4way 2K entry table\n", __func__);

	/* Allocate bitmap for 32 entry mcam */
	table->cam_table.bmap = devm_bitmap_zalloc(rvu->dev, 32, GFP_KERNEL);

	if (!table->cam_table.bmap)
		return -ENOMEM;

	dev_dbg(rvu->dev, "%s: Allocated bitmap for 32 entry cam\n", __func__);

	/* Sequence ids span both tables: one id per MEM slot plus one per
	 * CAM slot.
	 */
	table->tot_ids = table_size + table->cam_table.depth;
	table->id_bmap = devm_bitmap_zalloc(rvu->dev, table->tot_ids,
					    GFP_KERNEL);

	if (!table->id_bmap)
		return -ENOMEM;

	dev_dbg(rvu->dev, "%s: Allocated bitmap for id map (total=%d)\n",
		__func__, table->tot_ids);

	/* Initialize list heads for npc_exact_table entries.
	 * This entry is used by debugfs to show entries in
	 * exact match table.
	 */
	for (i = 0; i < NPC_EXACT_TBL_MAX_WAYS; i++)
		INIT_LIST_HEAD(&table->lhead_mem_tbl_entry[i]);

	INIT_LIST_HEAD(&table->lhead_cam_tbl_entry);
	INIT_LIST_HEAD(&table->lhead_gbl);

	mutex_init(&table->lock);

	/* Program the HW: hash secret key, search key layout, table mask
	 * and result control.
	 */
	rvu_exact_config_secret_key(rvu);
	rvu_exact_config_search_key(rvu);

	rvu_exact_config_table_mask(rvu);
	rvu_exact_config_result_ctrl(rvu, table->mem_table.depth);

	/* - No drop rule for LBK
	 * - Drop rules for SDP and each LMAC.
	 */
	exact_val = !NPC_EXACT_RESULT_HIT;
	exact_mask = NPC_EXACT_RESULT_HIT;

	/* nibble - 3	2  1   0
	 *	   L3B L3M L2B L2M
	 */
	bcast_mcast_val = 0b0000;
	bcast_mcast_mask = 0b0011;

	/* Install SDP drop rule */
	drop_mcam_idx = &table->num_drop_rules;

	max_lmac_cnt = rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx +
		       PF_CGXMAP_BASE;

	/* Install one drop rule per mapped PF/LMAC. */
	for (i = PF_CGXMAP_BASE; i < max_lmac_cnt; i++) {
		/* 0xFF marks a PF with no CGX/LMAC mapping. */
		if (rvu->pf2cgxlmac_map[i] == 0xFF)
			continue;

		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[i], &cgx_id, &lmac_id);

		rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, NIX_INTF_TYPE_CGX, cgx_id,
								lmac_id, &chan_val, &chan_mask);
		if (!rc) {
			dev_err(rvu->dev,
				"%s: failed, info chan_val=0x%llx chan_mask=0x%llx rule_id=%d\n",
				__func__, chan_val, chan_mask, *drop_mcam_idx);
			return -EINVAL;
		}

		/* Filter rules are only for PF */
		pcifunc = RVU_PFFUNC(i, 0);

		dev_dbg(rvu->dev,
			"%s:Drop rule cgx=%d lmac=%d chan(val=0x%llx, mask=0x%llx\n",
			__func__, cgx_id, lmac_id, chan_val, chan_mask);

		rc = rvu_npc_exact_save_drop_rule_chan_and_mask(rvu, table->num_drop_rules,
								chan_val, chan_mask, pcifunc);
		if (!rc) {
			dev_err(rvu->dev,
				"%s: failed to set drop info for cgx=%d, lmac=%d, chan=%llx\n",
				__func__, cgx_id, lmac_id, chan_val);
			return -EINVAL;
		}

		err = npc_install_mcam_drop_rule(rvu, *drop_mcam_idx,
						 &table->counter_idx[*drop_mcam_idx],
						 chan_val, chan_mask,
						 exact_val, exact_mask,
						 bcast_mcast_val, bcast_mcast_mask);
		if (err) {
			dev_err(rvu->dev,
				"failed to configure drop rule (cgx=%d lmac=%d)\n",
				cgx_id, lmac_id);
			return err;
		}

		(*drop_mcam_idx)++;
	}

	dev_info(rvu->dev, "initialized exact match table successfully\n");
	return 0;
}
2037