1// SPDX-License-Identifier: GPL-2.0
2/* Marvell RVU Admin Function driver
3 *
4 * Copyright (C) 2019 Marvell.
5 *
6 */
7
8#ifdef CONFIG_DEBUG_FS
9
10#include <linux/fs.h>
11#include <linux/debugfs.h>
12#include <linux/module.h>
13#include <linux/pci.h>
14
15#include "rvu_struct.h"
16#include "rvu_reg.h"
17#include "rvu.h"
18#include "cgx.h"
19#include "lmac_common.h"
20#include "npc.h"
21#include "rvu_npc_hash.h"
22#include "mcs.h"
23
24#define DEBUGFS_DIR_NAME "octeontx2"
25
26enum {
27	CGX_STAT0,
28	CGX_STAT1,
29	CGX_STAT2,
30	CGX_STAT3,
31	CGX_STAT4,
32	CGX_STAT5,
33	CGX_STAT6,
34	CGX_STAT7,
35	CGX_STAT8,
36	CGX_STAT9,
37	CGX_STAT10,
38	CGX_STAT11,
39	CGX_STAT12,
40	CGX_STAT13,
41	CGX_STAT14,
42	CGX_STAT15,
43	CGX_STAT16,
44	CGX_STAT17,
45	CGX_STAT18,
46};
47
48/* NIX TX stats */
49enum nix_stat_lf_tx {
50	TX_UCAST	= 0x0,
51	TX_BCAST	= 0x1,
52	TX_MCAST	= 0x2,
53	TX_DROP		= 0x3,
54	TX_OCTS		= 0x4,
55	TX_STATS_ENUM_LAST,
56};
57
58/* NIX RX stats */
59enum nix_stat_lf_rx {
60	RX_OCTS		= 0x0,
61	RX_UCAST	= 0x1,
62	RX_BCAST	= 0x2,
63	RX_MCAST	= 0x3,
64	RX_DROP		= 0x4,
65	RX_DROP_OCTS	= 0x5,
66	RX_FCS		= 0x6,
67	RX_ERR		= 0x7,
68	RX_DRP_BCAST	= 0x8,
69	RX_DRP_MCAST	= 0x9,
70	RX_DRP_L3BCAST	= 0xa,
71	RX_DRP_L3MCAST	= 0xb,
72	RX_STATS_ENUM_LAST,
73};
74
75static char *cgx_rx_stats_fields[] = {
76	[CGX_STAT0]	= "Received packets",
77	[CGX_STAT1]	= "Octets of received packets",
78	[CGX_STAT2]	= "Received PAUSE packets",
79	[CGX_STAT3]	= "Received PAUSE and control packets",
80	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
81	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
82	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
83	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
84	[CGX_STAT8]	= "Error packets",
85	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
86	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
87	[CGX_STAT11]	= "NCSI-bound packets dropped",
88	[CGX_STAT12]	= "NCSI-bound octets dropped",
89};
90
91static char *cgx_tx_stats_fields[] = {
92	[CGX_STAT0]	= "Packets dropped due to excessive collisions",
93	[CGX_STAT1]	= "Packets dropped due to excessive deferral",
94	[CGX_STAT2]	= "Multiple collisions before successful transmission",
95	[CGX_STAT3]	= "Single collisions before successful transmission",
96	[CGX_STAT4]	= "Total octets sent on the interface",
97	[CGX_STAT5]	= "Total frames sent on the interface",
98	[CGX_STAT6]	= "Packets sent with an octet count < 64",
99	[CGX_STAT7]	= "Packets sent with an octet count == 64",
100	[CGX_STAT8]	= "Packets sent with an octet count of 65-127",
101	[CGX_STAT9]	= "Packets sent with an octet count of 128-255",
102	[CGX_STAT10]	= "Packets sent with an octet count of 256-511",
103	[CGX_STAT11]	= "Packets sent with an octet count of 512-1023",
104	[CGX_STAT12]	= "Packets sent with an octet count of 1024-1518",
105	[CGX_STAT13]	= "Packets sent with an octet count of > 1518",
106	[CGX_STAT14]	= "Packets sent to a broadcast DMAC",
107	[CGX_STAT15]	= "Packets sent to the multicast DMAC",
108	[CGX_STAT16]	= "Transmit underflow and were truncated",
109	[CGX_STAT17]	= "Control/PAUSE packets sent",
110};
111
112static char *rpm_rx_stats_fields[] = {
113	"Octets of received packets",
114	"Octets of received packets with out error",
115	"Received packets with alignment errors",
116	"Control/PAUSE packets received",
117	"Packets received with Frame too long Errors",
118	"Packets received with a1nrange length Errors",
119	"Received packets",
120	"Packets received with FrameCheckSequenceErrors",
121	"Packets received with VLAN header",
122	"Error packets",
123	"Packets received with unicast DMAC",
124	"Packets received with multicast DMAC",
125	"Packets received with broadcast DMAC",
126	"Dropped packets",
127	"Total frames received on interface",
128	"Packets received with an octet count < 64",
129	"Packets received with an octet count == 64",
130	"Packets received with an octet count of 65-127",
131	"Packets received with an octet count of 128-255",
132	"Packets received with an octet count of 256-511",
133	"Packets received with an octet count of 512-1023",
134	"Packets received with an octet count of 1024-1518",
135	"Packets received with an octet count of > 1518",
136	"Oversized Packets",
137	"Jabber Packets",
138	"Fragmented Packets",
139	"CBFC(class based flow control) pause frames received for class 0",
140	"CBFC pause frames received for class 1",
141	"CBFC pause frames received for class 2",
142	"CBFC pause frames received for class 3",
143	"CBFC pause frames received for class 4",
144	"CBFC pause frames received for class 5",
145	"CBFC pause frames received for class 6",
146	"CBFC pause frames received for class 7",
147	"CBFC pause frames received for class 8",
148	"CBFC pause frames received for class 9",
149	"CBFC pause frames received for class 10",
150	"CBFC pause frames received for class 11",
151	"CBFC pause frames received for class 12",
152	"CBFC pause frames received for class 13",
153	"CBFC pause frames received for class 14",
154	"CBFC pause frames received for class 15",
155	"MAC control packets received",
156};
157
158static char *rpm_tx_stats_fields[] = {
159	"Total octets sent on the interface",
160	"Total octets transmitted OK",
161	"Control/Pause frames sent",
162	"Total frames transmitted OK",
163	"Total frames sent with VLAN header",
164	"Error Packets",
165	"Packets sent to unicast DMAC",
166	"Packets sent to the multicast DMAC",
167	"Packets sent to a broadcast DMAC",
168	"Packets sent with an octet count == 64",
169	"Packets sent with an octet count of 65-127",
170	"Packets sent with an octet count of 128-255",
171	"Packets sent with an octet count of 256-511",
172	"Packets sent with an octet count of 512-1023",
173	"Packets sent with an octet count of 1024-1518",
174	"Packets sent with an octet count of > 1518",
175	"CBFC(class based flow control) pause frames transmitted for class 0",
176	"CBFC pause frames transmitted for class 1",
177	"CBFC pause frames transmitted for class 2",
178	"CBFC pause frames transmitted for class 3",
179	"CBFC pause frames transmitted for class 4",
180	"CBFC pause frames transmitted for class 5",
181	"CBFC pause frames transmitted for class 6",
182	"CBFC pause frames transmitted for class 7",
183	"CBFC pause frames transmitted for class 8",
184	"CBFC pause frames transmitted for class 9",
185	"CBFC pause frames transmitted for class 10",
186	"CBFC pause frames transmitted for class 11",
187	"CBFC pause frames transmitted for class 12",
188	"CBFC pause frames transmitted for class 13",
189	"CBFC pause frames transmitted for class 14",
190	"CBFC pause frames transmitted for class 15",
191	"MAC control packets sent",
192	"Total frames sent on the interface"
193};
194
195enum cpt_eng_type {
196	CPT_AE_TYPE = 1,
197	CPT_SE_TYPE = 2,
198	CPT_IE_TYPE = 3,
199};
200
201#define rvu_dbg_NULL NULL
202#define rvu_dbg_open_NULL NULL
203
204#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
205static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
206{ \
207	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
208} \
209static const struct file_operations rvu_dbg_##name##_fops = { \
210	.owner		= THIS_MODULE, \
211	.open		= rvu_dbg_open_##name, \
212	.read		= seq_read, \
213	.write		= rvu_dbg_##write_op, \
214	.llseek		= seq_lseek, \
215	.release	= single_release, \
216}
217
218#define RVU_DEBUG_FOPS(name, read_op, write_op) \
219static const struct file_operations rvu_dbg_##name##_fops = { \
220	.owner = THIS_MODULE, \
221	.open = simple_open, \
222	.read = rvu_dbg_##read_op, \
223	.write = rvu_dbg_##write_op \
224}
225
226static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
227
228static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir)
229{
230	struct mcs *mcs = filp->private;
231	struct mcs_port_stats stats;
232	int lmac;
233
234	seq_puts(filp, "\n port stats\n");
235	mutex_lock(&mcs->stats_lock);
236	for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) {
237		mcs_get_port_stats(mcs, &stats, lmac, dir);
238		seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt);
239		seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt);
240
241		if (dir == MCS_RX && mcs->hw->mcs_blks > 1)
242			seq_printf(filp, "port%d: Preempt error: %lld\n", lmac,
243				   stats.preempt_err_cnt);
244		if (dir == MCS_TX)
245			seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac,
246				   stats.sectag_insert_err_cnt);
247	}
248	mutex_unlock(&mcs->stats_lock);
249	return 0;
250}
251
252static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused)
253{
254	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX);
255}
256
257RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL);
258
259static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused)
260{
261	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX);
262}
263
264RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL);
265
266static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir)
267{
268	struct mcs *mcs = filp->private;
269	struct mcs_sa_stats stats;
270	struct rsrc_bmap *map;
271	int sa_id;
272
273	if (dir == MCS_TX) {
274		map = &mcs->tx.sa;
275		mutex_lock(&mcs->stats_lock);
276		for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
277			seq_puts(filp, "\n TX SA stats\n");
278			mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX);
279			seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id,
280				   stats.pkt_encrypt_cnt);
281
282			seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id,
283				   stats.pkt_protected_cnt);
284		}
285		mutex_unlock(&mcs->stats_lock);
286		return 0;
287	}
288
289	/* RX stats */
290	map = &mcs->rx.sa;
291	mutex_lock(&mcs->stats_lock);
292	for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
293		seq_puts(filp, "\n RX SA stats\n");
294		mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX);
295		seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt);
296		seq_printf(filp, "sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt);
297		seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt);
298		seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt);
299		seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt);
300	}
301	mutex_unlock(&mcs->stats_lock);
302	return 0;
303}
304
305static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused)
306{
307	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX);
308}
309
310RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL);
311
312static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused)
313{
314	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX);
315}
316
317RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL);
318
319static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused)
320{
321	struct mcs *mcs = filp->private;
322	struct mcs_sc_stats stats;
323	struct rsrc_bmap *map;
324	int sc_id;
325
326	map = &mcs->tx.sc;
327	seq_puts(filp, "\n SC stats\n");
328
329	mutex_lock(&mcs->stats_lock);
330	for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
331		mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX);
332		seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
333		seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt);
334		seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt);
335
336		if (mcs->hw->mcs_blks == 1) {
337			seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id,
338				   stats.octet_encrypt_cnt);
339			seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id,
340				   stats.octet_protected_cnt);
341		}
342	}
343	mutex_unlock(&mcs->stats_lock);
344	return 0;
345}
346
347RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, mcs_tx_sc_stats_display, NULL);
348
349static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused)
350{
351	struct mcs *mcs = filp->private;
352	struct mcs_sc_stats stats;
353	struct rsrc_bmap *map;
354	int sc_id;
355
356	map = &mcs->rx.sc;
357	seq_puts(filp, "\n SC stats\n");
358
359	mutex_lock(&mcs->stats_lock);
360	for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
361		mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX);
362		seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
363		seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt);
364		seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt);
365		seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt);
366		seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt);
367		seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt);
368
369		if (mcs->hw->mcs_blks > 1) {
370			seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt);
371			seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt);
372		}
373		if (mcs->hw->mcs_blks == 1) {
374			seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id,
375				   stats.octet_decrypt_cnt);
376			seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id,
377				   stats.octet_validate_cnt);
378		}
379	}
380	mutex_unlock(&mcs->stats_lock);
381	return 0;
382}
383
384RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL);
385
386static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir)
387{
388	struct mcs *mcs = filp->private;
389	struct mcs_flowid_stats stats;
390	struct rsrc_bmap *map;
391	int flow_id;
392
393	seq_puts(filp, "\n Flowid stats\n");
394
395	if (dir == MCS_RX)
396		map = &mcs->rx.flow_ids;
397	else
398		map = &mcs->tx.flow_ids;
399
400	mutex_lock(&mcs->stats_lock);
401	for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) {
402		mcs_get_flowid_stats(mcs, &stats, flow_id, dir);
403		seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt);
404	}
405	mutex_unlock(&mcs->stats_lock);
406	return 0;
407}
408
409static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused)
410{
411	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX);
412}
413
414RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL);
415
416static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused)
417{
418	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX);
419}
420
421RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL);
422
423static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused)
424{
425	struct mcs *mcs = filp->private;
426	struct mcs_secy_stats stats;
427	struct rsrc_bmap *map;
428	int secy_id;
429
430	map = &mcs->tx.secy;
431	seq_puts(filp, "\n MCS TX secy stats\n");
432
433	mutex_lock(&mcs->stats_lock);
434	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
435		mcs_get_tx_secy_stats(mcs, &stats, secy_id);
436		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
437		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
438			   stats.ctl_pkt_bcast_cnt);
439		seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
440			   stats.ctl_pkt_mcast_cnt);
441		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
442			   stats.ctl_pkt_ucast_cnt);
443		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
444		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
445			   stats.unctl_pkt_bcast_cnt);
446		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
447			   stats.unctl_pkt_mcast_cnt);
448		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
449			   stats.unctl_pkt_ucast_cnt);
450		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
451		seq_printf(filp, "secy%d: Octet encrypted: %lld\n", secy_id,
452			   stats.octet_encrypted_cnt);
453		seq_printf(filp, "secy%d: octet protected: %lld\n", secy_id,
454			   stats.octet_protected_cnt);
455		seq_printf(filp, "secy%d: Pkts on active sa: %lld\n", secy_id,
456			   stats.pkt_noactivesa_cnt);
457		seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt);
458		seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt);
459	}
460	mutex_unlock(&mcs->stats_lock);
461	return 0;
462}
463
464RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL);
465
466static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused)
467{
468	struct mcs *mcs = filp->private;
469	struct mcs_secy_stats stats;
470	struct rsrc_bmap *map;
471	int secy_id;
472
473	map = &mcs->rx.secy;
474	seq_puts(filp, "\n MCS secy stats\n");
475
476	mutex_lock(&mcs->stats_lock);
477	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
478		mcs_get_rx_secy_stats(mcs, &stats, secy_id);
479		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
480		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
481			   stats.ctl_pkt_bcast_cnt);
482		seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
483			   stats.ctl_pkt_mcast_cnt);
484		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
485			   stats.ctl_pkt_ucast_cnt);
486		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
487		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
488			   stats.unctl_pkt_bcast_cnt);
489		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
490			   stats.unctl_pkt_mcast_cnt);
491		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
492			   stats.unctl_pkt_ucast_cnt);
493		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
494		seq_printf(filp, "secy%d: Octet decrypted: %lld\n", secy_id,
495			   stats.octet_decrypted_cnt);
496		seq_printf(filp, "secy%d: octet validated: %lld\n", secy_id,
497			   stats.octet_validated_cnt);
498		seq_printf(filp, "secy%d: Pkts on disable port: %lld\n", secy_id,
499			   stats.pkt_port_disabled_cnt);
500		seq_printf(filp, "secy%d: Pkts with badtag: %lld\n", secy_id, stats.pkt_badtag_cnt);
501		seq_printf(filp, "secy%d: Pkts with no SA(sectag.tci.c=0): %lld\n", secy_id,
502			   stats.pkt_nosa_cnt);
503		seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
504			   stats.pkt_nosaerror_cnt);
505		seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
506			   stats.pkt_tagged_ctl_cnt);
507		seq_printf(filp, "secy%d: Untaged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt);
508		seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt);
509		if (mcs->hw->mcs_blks > 1)
510			seq_printf(filp, "secy%d: pkts notag: %lld\n", secy_id,
511				   stats.pkt_notag_cnt);
512	}
513	mutex_unlock(&mcs->stats_lock);
514	return 0;
515}
516
517RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL);
518
519static void rvu_dbg_mcs_init(struct rvu *rvu)
520{
521	struct mcs *mcs;
522	char dname[10];
523	int i;
524
525	if (!rvu->mcs_blk_cnt)
526		return;
527
528	rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root);
529
530	for (i = 0; i < rvu->mcs_blk_cnt; i++) {
531		mcs = mcs_get_pdata(i);
532
533		sprintf(dname, "mcs%d", i);
534		rvu->rvu_dbg.mcs = debugfs_create_dir(dname,
535						      rvu->rvu_dbg.mcs_root);
536
537		rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs);
538
539		debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs,
540				    &rvu_dbg_mcs_rx_flowid_stats_fops);
541
542		debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs,
543				    &rvu_dbg_mcs_rx_secy_stats_fops);
544
545		debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs,
546				    &rvu_dbg_mcs_rx_sc_stats_fops);
547
548		debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs,
549				    &rvu_dbg_mcs_rx_sa_stats_fops);
550
551		debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs,
552				    &rvu_dbg_mcs_rx_port_stats_fops);
553
554		rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", rvu->rvu_dbg.mcs);
555
556		debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs,
557				    &rvu_dbg_mcs_tx_flowid_stats_fops);
558
559		debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs,
560				    &rvu_dbg_mcs_tx_secy_stats_fops);
561
562		debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs,
563				    &rvu_dbg_mcs_tx_sc_stats_fops);
564
565		debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs,
566				    &rvu_dbg_mcs_tx_sa_stats_fops);
567
568		debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs,
569				    &rvu_dbg_mcs_tx_port_stats_fops);
570	}
571}
572
573#define LMT_MAPTBL_ENTRY_SIZE 16
574/* Dump LMTST map table */
575static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
576					       char __user *buffer,
577					       size_t count, loff_t *ppos)
578{
579	struct rvu *rvu = filp->private_data;
580	u64 lmt_addr, val, tbl_base;
581	int pf, vf, num_vfs, hw_vfs;
582	void __iomem *lmt_map_base;
583	int buf_size = 10240;
584	size_t off = 0;
585	int index = 0;
586	char *buf;
587	int ret;
588
589	/* don't allow partial reads */
590	if (*ppos != 0)
591		return 0;
592
593	buf = kzalloc(buf_size, GFP_KERNEL);
594	if (!buf)
595		return -ENOMEM;
596
597	tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
598
599	lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
600	if (!lmt_map_base) {
601		dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
602		kfree(buf);
603		return false;
604	}
605
606	off +=	scnprintf(&buf[off], buf_size - 1 - off,
607			  "\n\t\t\t\t\tLmtst Map Table Entries");
608	off +=	scnprintf(&buf[off], buf_size - 1 - off,
609			  "\n\t\t\t\t\t=======================");
610	off +=	scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
611	off +=	scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
612	off +=	scnprintf(&buf[off], buf_size - 1 - off,
613			  "Lmtline Base (word 0)\t\t");
614	off +=	scnprintf(&buf[off], buf_size - 1 - off,
615			  "Lmt Map Entry (word 1)");
616	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
617	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
618		off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d  \t\t\t",
619				    pf);
620
621		index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
622		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
623				 (tbl_base + index));
624		lmt_addr = readq(lmt_map_base + index);
625		off += scnprintf(&buf[off], buf_size - 1 - off,
626				 " 0x%016llx\t\t", lmt_addr);
627		index += 8;
628		val = readq(lmt_map_base + index);
629		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
630				 val);
631		/* Reading num of VFs per PF */
632		rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
633		for (vf = 0; vf < num_vfs; vf++) {
634			index = (pf * rvu->hw->total_vfs * 16) +
635				((vf + 1)  * LMT_MAPTBL_ENTRY_SIZE);
636			off += scnprintf(&buf[off], buf_size - 1 - off,
637					    "PF%d:VF%d  \t\t", pf, vf);
638			off += scnprintf(&buf[off], buf_size - 1 - off,
639					 " 0x%llx\t\t", (tbl_base + index));
640			lmt_addr = readq(lmt_map_base + index);
641			off += scnprintf(&buf[off], buf_size - 1 - off,
642					 " 0x%016llx\t\t", lmt_addr);
643			index += 8;
644			val = readq(lmt_map_base + index);
645			off += scnprintf(&buf[off], buf_size - 1 - off,
646					 " 0x%016llx\n", val);
647		}
648	}
649	off +=	scnprintf(&buf[off], buf_size - 1 - off, "\n");
650
651	ret = min(off, count);
652	if (copy_to_user(buffer, buf, ret))
653		ret = -EFAULT;
654	kfree(buf);
655
656	iounmap(lmt_map_base);
657	if (ret < 0)
658		return ret;
659
660	*ppos = ret;
661	return ret;
662}
663
664RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
665
666static void get_lf_str_list(struct rvu_block block, int pcifunc,
667			    char *lfs)
668{
669	int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;
670
671	for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
672		if (lf >= block.lf.max)
673			break;
674
675		if (block.fn_map[lf] != pcifunc)
676			continue;
677
678		if (lf == prev_lf + 1) {
679			prev_lf = lf;
680			seq = 1;
681			continue;
682		}
683
684		if (seq)
685			len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
686		else
687			len += (len ? sprintf(lfs + len, ",%d", lf) :
688				      sprintf(lfs + len, "%d", lf));
689
690		prev_lf = lf;
691		seq = 0;
692	}
693
694	if (seq)
695		len += sprintf(lfs + len, "-%d", prev_lf);
696
697	lfs[len] = '\0';
698}
699
700static int get_max_column_width(struct rvu *rvu)
701{
702	int index, pf, vf, lf_str_size = 12, buf_size = 256;
703	struct rvu_block block;
704	u16 pcifunc;
705	char *buf;
706
707	buf = kzalloc(buf_size, GFP_KERNEL);
708	if (!buf)
709		return -ENOMEM;
710
711	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
712		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
713			pcifunc = pf << 10 | vf;
714			if (!pcifunc)
715				continue;
716
717			for (index = 0; index < BLK_COUNT; index++) {
718				block = rvu->hw->block[index];
719				if (!strlen(block.name))
720					continue;
721
722				get_lf_str_list(block, pcifunc, buf);
723				if (lf_str_size <= strlen(buf))
724					lf_str_size = strlen(buf) + 1;
725			}
726		}
727	}
728
729	kfree(buf);
730	return lf_str_size;
731}
732
733/* Dumps current provisioning status of all RVU block LFs */
734static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
735					  char __user *buffer,
736					  size_t count, loff_t *ppos)
737{
738	int index, off = 0, flag = 0, len = 0, i = 0;
739	struct rvu *rvu = filp->private_data;
740	int bytes_not_copied = 0;
741	struct rvu_block block;
742	int pf, vf, pcifunc;
743	int buf_size = 2048;
744	int lf_str_size;
745	char *lfs;
746	char *buf;
747
748	/* don't allow partial reads */
749	if (*ppos != 0)
750		return 0;
751
752	buf = kzalloc(buf_size, GFP_KERNEL);
753	if (!buf)
754		return -ENOMEM;
755
756	/* Get the maximum width of a column */
757	lf_str_size = get_max_column_width(rvu);
758
759	lfs = kzalloc(lf_str_size, GFP_KERNEL);
760	if (!lfs) {
761		kfree(buf);
762		return -ENOMEM;
763	}
764	off +=	scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
765			  "pcifunc");
766	for (index = 0; index < BLK_COUNT; index++)
767		if (strlen(rvu->hw->block[index].name)) {
768			off += scnprintf(&buf[off], buf_size - 1 - off,
769					 "%-*s", lf_str_size,
770					 rvu->hw->block[index].name);
771		}
772
773	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
774	bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
775	if (bytes_not_copied)
776		goto out;
777
778	i++;
779	*ppos += off;
780	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
781		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
782			off = 0;
783			flag = 0;
784			pcifunc = pf << 10 | vf;
785			if (!pcifunc)
786				continue;
787
788			if (vf) {
789				sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
790				off = scnprintf(&buf[off],
791						buf_size - 1 - off,
792						"%-*s", lf_str_size, lfs);
793			} else {
794				sprintf(lfs, "PF%d", pf);
795				off = scnprintf(&buf[off],
796						buf_size - 1 - off,
797						"%-*s", lf_str_size, lfs);
798			}
799
800			for (index = 0; index < BLK_COUNT; index++) {
801				block = rvu->hw->block[index];
802				if (!strlen(block.name))
803					continue;
804				len = 0;
805				lfs[len] = '\0';
806				get_lf_str_list(block, pcifunc, lfs);
807				if (strlen(lfs))
808					flag = 1;
809
810				off += scnprintf(&buf[off], buf_size - 1 - off,
811						 "%-*s", lf_str_size, lfs);
812			}
813			if (flag) {
814				off +=	scnprintf(&buf[off],
815						  buf_size - 1 - off, "\n");
816				bytes_not_copied = copy_to_user(buffer +
817								(i * off),
818								buf, off);
819				if (bytes_not_copied)
820					goto out;
821
822				i++;
823				*ppos += off;
824			}
825		}
826	}
827
828out:
829	kfree(lfs);
830	kfree(buf);
831	if (bytes_not_copied)
832		return -EFAULT;
833
834	return *ppos;
835}
836
837RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
838
839static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
840{
841	struct rvu *rvu = filp->private;
842	struct pci_dev *pdev = NULL;
843	struct mac_ops *mac_ops;
844	char cgx[10], lmac[10];
845	struct rvu_pfvf *pfvf;
846	int pf, domain, blkid;
847	u8 cgx_id, lmac_id;
848	u16 pcifunc;
849
850	domain = 2;
851	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
852	/* There can be no CGX devices at all */
853	if (!mac_ops)
854		return 0;
855	seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
856		   mac_ops->name);
857	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
858		if (!is_pf_cgxmapped(rvu, pf))
859			continue;
860
861		pdev =  pci_get_domain_bus_and_slot(domain, pf + 1, 0);
862		if (!pdev)
863			continue;
864
865		cgx[0] = 0;
866		lmac[0] = 0;
867		pcifunc = pf << 10;
868		pfvf = rvu_get_pfvf(rvu, pcifunc);
869
870		if (pfvf->nix_blkaddr == BLKADDR_NIX0)
871			blkid = 0;
872		else
873			blkid = 1;
874
875		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
876				    &lmac_id);
877		sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
878		sprintf(lmac, "LMAC%d", lmac_id);
879		seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
880			   dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
881
882		pci_dev_put(pdev);
883	}
884	return 0;
885}
886
887RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
888
889static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
890				u16 *pcifunc)
891{
892	struct rvu_block *block;
893	struct rvu_hwinfo *hw;
894
895	hw = rvu->hw;
896	block = &hw->block[blkaddr];
897
898	if (lf < 0 || lf >= block->lf.max) {
899		dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
900			 block->lf.max - 1);
901		return false;
902	}
903
904	*pcifunc = block->fn_map[lf];
905	if (!*pcifunc) {
906		dev_warn(rvu->dev,
907			 "This LF is not attached to any RVU PFFUNC\n");
908		return false;
909	}
910	return true;
911}
912
913static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
914{
915	char *buf;
916
917	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
918	if (!buf)
919		return;
920
921	if (!pfvf->aura_ctx) {
922		seq_puts(m, "Aura context is not initialized\n");
923	} else {
924		bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
925					pfvf->aura_ctx->qsize);
926		seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
927		seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
928	}
929
930	if (!pfvf->pool_ctx) {
931		seq_puts(m, "Pool context is not initialized\n");
932	} else {
933		bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
934					pfvf->pool_ctx->qsize);
935		seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
936		seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
937	}
938	kfree(buf);
939}
940
941/* The 'qsize' entry dumps current Aura/Pool context Qsize
942 * and each context's current enable/disable status in a bitmap.
943 */
944static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
945				 int blktype)
946{
947	void (*print_qsize)(struct seq_file *filp,
948			    struct rvu_pfvf *pfvf) = NULL;
949	struct dentry *current_dir;
950	struct rvu_pfvf *pfvf;
951	struct rvu *rvu;
952	int qsize_id;
953	u16 pcifunc;
954	int blkaddr;
955
956	rvu = filp->private;
957	switch (blktype) {
958	case BLKTYPE_NPA:
959		qsize_id = rvu->rvu_dbg.npa_qsize_id;
960		print_qsize = print_npa_qsize;
961		break;
962
963	case BLKTYPE_NIX:
964		qsize_id = rvu->rvu_dbg.nix_qsize_id;
965		print_qsize = print_nix_qsize;
966		break;
967
968	default:
969		return -EINVAL;
970	}
971
972	if (blktype == BLKTYPE_NPA) {
973		blkaddr = BLKADDR_NPA;
974	} else {
975		current_dir = filp->file->f_path.dentry->d_parent;
976		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
977				   BLKADDR_NIX1 : BLKADDR_NIX0);
978	}
979
980	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
981		return -EINVAL;
982
983	pfvf = rvu_get_pfvf(rvu, pcifunc);
984	print_qsize(filp, pfvf);
985
986	return 0;
987}
988
989static ssize_t rvu_dbg_qsize_write(struct file *filp,
990				   const char __user *buffer, size_t count,
991				   loff_t *ppos, int blktype)
992{
993	char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
994	struct seq_file *seqfile = filp->private_data;
995	char *cmd_buf, *cmd_buf_tmp, *subtoken;
996	struct rvu *rvu = seqfile->private;
997	struct dentry *current_dir;
998	int blkaddr;
999	u16 pcifunc;
1000	int ret, lf;
1001
1002	cmd_buf = memdup_user(buffer, count + 1);
1003	if (IS_ERR(cmd_buf))
1004		return -ENOMEM;
1005
1006	cmd_buf[count] = '\0';
1007
1008	cmd_buf_tmp = strchr(cmd_buf, '\n');
1009	if (cmd_buf_tmp) {
1010		*cmd_buf_tmp = '\0';
1011		count = cmd_buf_tmp - cmd_buf + 1;
1012	}
1013
1014	cmd_buf_tmp = cmd_buf;
1015	subtoken = strsep(&cmd_buf, " ");
1016	ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
1017	if (cmd_buf)
1018		ret = -EINVAL;
1019
1020	if (ret < 0 || !strncmp(subtoken, "help", 4)) {
1021		dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
1022		goto qsize_write_done;
1023	}
1024
1025	if (blktype == BLKTYPE_NPA) {
1026		blkaddr = BLKADDR_NPA;
1027	} else {
1028		current_dir = filp->f_path.dentry->d_parent;
1029		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
1030				   BLKADDR_NIX1 : BLKADDR_NIX0);
1031	}
1032
1033	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
1034		ret = -EINVAL;
1035		goto qsize_write_done;
1036	}
1037	if (blktype  == BLKTYPE_NPA)
1038		rvu->rvu_dbg.npa_qsize_id = lf;
1039	else
1040		rvu->rvu_dbg.nix_qsize_id = lf;
1041
1042qsize_write_done:
1043	kfree(cmd_buf_tmp);
1044	return ret ? ret : count;
1045}
1046
1047static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
1048				       const char __user *buffer,
1049				       size_t count, loff_t *ppos)
1050{
1051	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
1052					    BLKTYPE_NPA);
1053}
1054
1055static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
1056{
1057	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
1058}
1059
1060RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
1061
1062/* Dumps given NPA Aura's context */
1063static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
1064{
1065	struct npa_aura_s *aura = &rsp->aura;
1066	struct rvu *rvu = m->private;
1067
1068	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);
1069
1070	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
1071		   aura->ena, aura->pool_caching);
1072	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
1073		   aura->pool_way_mask, aura->avg_con);
1074	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
1075		   aura->pool_drop_ena, aura->aura_drop_ena);
1076	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
1077		   aura->bp_ena, aura->aura_drop);
1078	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
1079		   aura->shift, aura->avg_level);
1080
1081	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
1082		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);
1083
1084	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
1085		   (u64)aura->limit, aura->bp, aura->fc_ena);
1086
1087	if (!is_rvu_otx2(rvu))
1088		seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
1089	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
1090		   aura->fc_up_crossing, aura->fc_stype);
1091	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);
1092
1093	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);
1094
1095	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
1096		   aura->pool_drop, aura->update_time);
1097	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
1098		   aura->err_int, aura->err_int_ena);
1099	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
1100		   aura->thresh_int, aura->thresh_int_ena);
1101	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
1102		   aura->thresh_up, aura->thresh_qint_idx);
1103	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);
1104
1105	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
1106	if (!is_rvu_otx2(rvu))
1107		seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
1108}
1109
1110/* Dumps given NPA Pool's context */
1111static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
1112{
1113	struct npa_pool_s *pool = &rsp->pool;
1114	struct rvu *rvu = m->private;
1115
1116	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);
1117
1118	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
1119		   pool->ena, pool->nat_align);
1120	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
1121		   pool->stack_caching, pool->stack_way_mask);
1122	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
1123		   pool->buf_offset, pool->buf_size);
1124
1125	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
1126		   pool->stack_max_pages, pool->stack_pages);
1127
1128	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);
1129
1130	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
1131		   pool->stack_offset, pool->shift, pool->avg_level);
1132	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
1133		   pool->avg_con, pool->fc_ena, pool->fc_stype);
1134	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
1135		   pool->fc_hyst_bits, pool->fc_up_crossing);
1136	if (!is_rvu_otx2(rvu))
1137		seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
1138	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);
1139
1140	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);
1141
1142	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);
1143
1144	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);
1145
1146	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
1147		   pool->err_int, pool->err_int_ena);
1148	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
1149	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
1150		   pool->thresh_int_ena, pool->thresh_up);
1151	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
1152		   pool->thresh_qint_idx, pool->err_qint_idx);
1153	if (!is_rvu_otx2(rvu))
1154		seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
1155}
1156
1157/* Reads aura/pool's ctx from admin queue */
1158static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
1159{
1160	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
1161	struct npa_aq_enq_req aq_req;
1162	struct npa_aq_enq_rsp rsp;
1163	struct rvu_pfvf *pfvf;
1164	int aura, rc, max_id;
1165	int npalf, id, all;
1166	struct rvu *rvu;
1167	u16 pcifunc;
1168
1169	rvu = m->private;
1170
1171	switch (ctype) {
1172	case NPA_AQ_CTYPE_AURA:
1173		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
1174		id = rvu->rvu_dbg.npa_aura_ctx.id;
1175		all = rvu->rvu_dbg.npa_aura_ctx.all;
1176		break;
1177
1178	case NPA_AQ_CTYPE_POOL:
1179		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
1180		id = rvu->rvu_dbg.npa_pool_ctx.id;
1181		all = rvu->rvu_dbg.npa_pool_ctx.all;
1182		break;
1183	default:
1184		return -EINVAL;
1185	}
1186
1187	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
1188		return -EINVAL;
1189
1190	pfvf = rvu_get_pfvf(rvu, pcifunc);
1191	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
1192		seq_puts(m, "Aura context is not initialized\n");
1193		return -EINVAL;
1194	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
1195		seq_puts(m, "Pool context is not initialized\n");
1196		return -EINVAL;
1197	}
1198
1199	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
1200	aq_req.hdr.pcifunc = pcifunc;
1201	aq_req.ctype = ctype;
1202	aq_req.op = NPA_AQ_INSTOP_READ;
1203	if (ctype == NPA_AQ_CTYPE_AURA) {
1204		max_id = pfvf->aura_ctx->qsize;
1205		print_npa_ctx = print_npa_aura_ctx;
1206	} else {
1207		max_id = pfvf->pool_ctx->qsize;
1208		print_npa_ctx = print_npa_pool_ctx;
1209	}
1210
1211	if (id < 0 || id >= max_id) {
1212		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
1213			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
1214			max_id - 1);
1215		return -EINVAL;
1216	}
1217
1218	if (all)
1219		id = 0;
1220	else
1221		max_id = id + 1;
1222
1223	for (aura = id; aura < max_id; aura++) {
1224		aq_req.aura_id = aura;
1225
1226		/* Skip if queue is uninitialized */
1227		if (ctype == NPA_AQ_CTYPE_POOL && !test_bit(aura, pfvf->pool_bmap))
1228			continue;
1229
1230		seq_printf(m, "======%s : %d=======\n",
1231			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
1232			aq_req.aura_id);
1233		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
1234		if (rc) {
1235			seq_puts(m, "Failed to read context\n");
1236			return -EINVAL;
1237		}
1238		print_npa_ctx(m, &rsp);
1239	}
1240	return 0;
1241}
1242
1243static int write_npa_ctx(struct rvu *rvu, bool all,
1244			 int npalf, int id, int ctype)
1245{
1246	struct rvu_pfvf *pfvf;
1247	int max_id = 0;
1248	u16 pcifunc;
1249
1250	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
1251		return -EINVAL;
1252
1253	pfvf = rvu_get_pfvf(rvu, pcifunc);
1254
1255	if (ctype == NPA_AQ_CTYPE_AURA) {
1256		if (!pfvf->aura_ctx) {
1257			dev_warn(rvu->dev, "Aura context is not initialized\n");
1258			return -EINVAL;
1259		}
1260		max_id = pfvf->aura_ctx->qsize;
1261	} else if (ctype == NPA_AQ_CTYPE_POOL) {
1262		if (!pfvf->pool_ctx) {
1263			dev_warn(rvu->dev, "Pool context is not initialized\n");
1264			return -EINVAL;
1265		}
1266		max_id = pfvf->pool_ctx->qsize;
1267	}
1268
1269	if (id < 0 || id >= max_id) {
1270		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
1271			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
1272			max_id - 1);
1273		return -EINVAL;
1274	}
1275
1276	switch (ctype) {
1277	case NPA_AQ_CTYPE_AURA:
1278		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
1279		rvu->rvu_dbg.npa_aura_ctx.id = id;
1280		rvu->rvu_dbg.npa_aura_ctx.all = all;
1281		break;
1282
1283	case NPA_AQ_CTYPE_POOL:
1284		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
1285		rvu->rvu_dbg.npa_pool_ctx.id = id;
1286		rvu->rvu_dbg.npa_pool_ctx.all = all;
1287		break;
1288	default:
1289		return -EINVAL;
1290	}
1291	return 0;
1292}
1293
1294static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
1295				const char __user *buffer, int *npalf,
1296				int *id, bool *all)
1297{
1298	int bytes_not_copied;
1299	char *cmd_buf_tmp;
1300	char *subtoken;
1301	int ret;
1302
1303	bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
1304	if (bytes_not_copied)
1305		return -EFAULT;
1306
1307	cmd_buf[*count] = '\0';
1308	cmd_buf_tmp = strchr(cmd_buf, '\n');
1309
1310	if (cmd_buf_tmp) {
1311		*cmd_buf_tmp = '\0';
1312		*count = cmd_buf_tmp - cmd_buf + 1;
1313	}
1314
1315	subtoken = strsep(&cmd_buf, " ");
1316	ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
1317	if (ret < 0)
1318		return ret;
1319	subtoken = strsep(&cmd_buf, " ");
1320	if (subtoken && strcmp(subtoken, "all") == 0) {
1321		*all = true;
1322	} else {
1323		ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
1324		if (ret < 0)
1325			return ret;
1326	}
1327	if (cmd_buf)
1328		return -EINVAL;
1329	return ret;
1330}
1331
1332static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
1333				     const char __user *buffer,
1334				     size_t count, loff_t *ppos, int ctype)
1335{
1336	char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
1337					"aura" : "pool";
1338	struct seq_file *seqfp = filp->private_data;
1339	struct rvu *rvu = seqfp->private;
1340	int npalf, id = 0, ret;
1341	bool all = false;
1342
1343	if ((*ppos != 0) || !count)
1344		return -EINVAL;
1345
1346	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1347	if (!cmd_buf)
1348		return count;
1349	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1350				   &npalf, &id, &all);
1351	if (ret < 0) {
1352		dev_info(rvu->dev,
1353			 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
1354			 ctype_string, ctype_string);
1355		goto done;
1356	} else {
1357		ret = write_npa_ctx(rvu, all, npalf, id, ctype);
1358	}
1359done:
1360	kfree(cmd_buf);
1361	return ret ? ret : count;
1362}
1363
1364static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
1365					  const char __user *buffer,
1366					  size_t count, loff_t *ppos)
1367{
1368	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1369				     NPA_AQ_CTYPE_AURA);
1370}
1371
1372static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
1373{
1374	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
1375}
1376
1377RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
1378
1379static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
1380					  const char __user *buffer,
1381					  size_t count, loff_t *ppos)
1382{
1383	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1384				     NPA_AQ_CTYPE_POOL);
1385}
1386
1387static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
1388{
1389	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
1390}
1391
1392RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
1393
1394static void ndc_cache_stats(struct seq_file *s, int blk_addr,
1395			    int ctype, int transaction)
1396{
1397	u64 req, out_req, lat, cant_alloc;
1398	struct nix_hw *nix_hw;
1399	struct rvu *rvu;
1400	int port;
1401
1402	if (blk_addr == BLKADDR_NDC_NPA0) {
1403		rvu = s->private;
1404	} else {
1405		nix_hw = s->private;
1406		rvu = nix_hw->rvu;
1407	}
1408
1409	for (port = 0; port < NDC_MAX_PORT; port++) {
1410		req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
1411						(port, ctype, transaction));
1412		lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
1413						(port, ctype, transaction));
1414		out_req = rvu_read64(rvu, blk_addr,
1415				     NDC_AF_PORTX_RTX_RWX_OSTDN_PC
1416				     (port, ctype, transaction));
1417		cant_alloc = rvu_read64(rvu, blk_addr,
1418					NDC_AF_PORTX_RTX_CANT_ALLOC_PC
1419					(port, transaction));
1420		seq_printf(s, "\nPort:%d\n", port);
1421		seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
1422		seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
1423		seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
1424		seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
1425		seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
1426	}
1427}
1428
1429static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
1430{
1431	seq_puts(s, "\n***** CACHE mode read stats *****\n");
1432	ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
1433	seq_puts(s, "\n***** CACHE mode write stats *****\n");
1434	ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
1435	seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
1436	ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
1437	seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
1438	ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
1439	return 0;
1440}
1441
1442static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
1443{
1444	return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
1445}
1446
1447RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
1448
1449static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
1450{
1451	struct nix_hw *nix_hw;
1452	struct rvu *rvu;
1453	int bank, max_bank;
1454	u64 ndc_af_const;
1455
1456	if (blk_addr == BLKADDR_NDC_NPA0) {
1457		rvu = s->private;
1458	} else {
1459		nix_hw = s->private;
1460		rvu = nix_hw->rvu;
1461	}
1462
1463	ndc_af_const = rvu_read64(rvu, blk_addr, NDC_AF_CONST);
1464	max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
1465	for (bank = 0; bank < max_bank; bank++) {
1466		seq_printf(s, "BANK:%d\n", bank);
1467		seq_printf(s, "\tHits:\t%lld\n",
1468			   (u64)rvu_read64(rvu, blk_addr,
1469			   NDC_AF_BANKX_HIT_PC(bank)));
1470		seq_printf(s, "\tMiss:\t%lld\n",
1471			   (u64)rvu_read64(rvu, blk_addr,
1472			    NDC_AF_BANKX_MISS_PC(bank)));
1473	}
1474	return 0;
1475}
1476
1477static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
1478{
1479	struct nix_hw *nix_hw = filp->private;
1480	int blkaddr = 0;
1481	int ndc_idx = 0;
1482
1483	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1484		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1485	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
1486
1487	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1488}
1489
1490RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
1491
1492static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
1493{
1494	struct nix_hw *nix_hw = filp->private;
1495	int blkaddr = 0;
1496	int ndc_idx = 0;
1497
1498	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1499		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1500	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
1501
1502	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1503}
1504
1505RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
1506
1507static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
1508					     void *unused)
1509{
1510	return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
1511}
1512
1513RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
1514
1515static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
1516						void *unused)
1517{
1518	struct nix_hw *nix_hw = filp->private;
1519	int ndc_idx = NPA0_U;
1520	int blkaddr = 0;
1521
1522	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1523		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1524
1525	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1526}
1527
1528RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
1529
1530static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
1531						void *unused)
1532{
1533	struct nix_hw *nix_hw = filp->private;
1534	int ndc_idx = NPA0_U;
1535	int blkaddr = 0;
1536
1537	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1538		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1539
1540	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1541}
1542
1543RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1544
1545static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1546				   struct nix_cn10k_sq_ctx_s *sq_ctx)
1547{
1548	seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1549		   sq_ctx->ena, sq_ctx->qint_idx);
1550	seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1551		   sq_ctx->substream, sq_ctx->sdp_mcast);
1552	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1553		   sq_ctx->cq, sq_ctx->sqe_way_mask);
1554
1555	seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1556		   sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1557	seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1558		   sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1559	seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1560		   sq_ctx->default_chan, sq_ctx->sqb_count);
1561
1562	seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1563	seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1564	seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1565		   sq_ctx->sqb_aura, sq_ctx->sq_int);
1566	seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1567		   sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1568
1569	seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1570		   sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1571	seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1572		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1573	seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1574		   sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1575	seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1576		   sq_ctx->tail_offset, sq_ctx->smenq_offset);
1577	seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1578		   sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1579
1580	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1581		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
1582	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1583	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1584	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1585	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1586		   sq_ctx->smenq_next_sqb);
1587
1588	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1589
1590	seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1591	seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1592		   sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1593	seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1594		   sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1595	seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1596		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1597
1598	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1599		   (u64)sq_ctx->scm_lso_rem);
1600	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1601	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1602	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1603		   (u64)sq_ctx->dropped_octs);
1604	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1605		   (u64)sq_ctx->dropped_pkts);
1606}
1607
1608/* Dumps given nix_sq's context */
1609static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1610{
1611	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
1612	struct nix_hw *nix_hw = m->private;
1613	struct rvu *rvu = nix_hw->rvu;
1614
1615	if (!is_rvu_otx2(rvu)) {
1616		print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
1617		return;
1618	}
1619	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
1620		   sq_ctx->sqe_way_mask, sq_ctx->cq);
1621	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
1622		   sq_ctx->sdp_mcast, sq_ctx->substream);
1623	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
1624		   sq_ctx->qint_idx, sq_ctx->ena);
1625
1626	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
1627		   sq_ctx->sqb_count, sq_ctx->default_chan);
1628	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
1629		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
1630	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
1631		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);
1632
1633	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
1634		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
1635	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
1636		   sq_ctx->sq_int, sq_ctx->sqb_aura);
1637	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);
1638
1639	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1640		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
1641	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
1642		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
1643	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
1644		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
1645	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
1646		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
1647	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
1648		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1649	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
1650		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);
1651
1652	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1653	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1654	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1655	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1656		   sq_ctx->smenq_next_sqb);
1657
1658	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1659
1660	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
1661		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1662	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
1663		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
1664	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
1665		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
1666	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);
1667
1668	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1669		   (u64)sq_ctx->scm_lso_rem);
1670	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1671	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1672	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1673		   (u64)sq_ctx->dropped_octs);
1674	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1675		   (u64)sq_ctx->dropped_pkts);
1676}
1677
1678static void print_nix_cn10k_rq_ctx(struct seq_file *m,
1679				   struct nix_cn10k_rq_ctx_s *rq_ctx)
1680{
1681	seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1682		   rq_ctx->ena, rq_ctx->sso_ena);
1683	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1684		   rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
1685	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
1686		   rq_ctx->cq, rq_ctx->lenerr_dis);
1687	seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
1688		   rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
1689	seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
1690		   rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
1691	seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
1692		   rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
1693	seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
1694
1695	seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1696		   rq_ctx->spb_aura, rq_ctx->lpb_aura);
1697	seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
1698	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1699		   rq_ctx->sso_grp, rq_ctx->sso_tt);
1700	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
1701		   rq_ctx->pb_caching, rq_ctx->wqe_caching);
1702	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1703		   rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
1704	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
1705		   rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
1706	seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
1707		   rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
1708
1709	seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
1710	seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
1711	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
1712	seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
1713		   rq_ctx->wqe_skip, rq_ctx->spb_ena);
1714	seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
1715		   rq_ctx->lpb_sizem1, rq_ctx->first_skip);
1716	seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
1717		   rq_ctx->later_skip, rq_ctx->xqe_imm_size);
1718	seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
1719		   rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
1720
1721	seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
1722		   rq_ctx->xqe_drop, rq_ctx->xqe_pass);
1723	seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
1724		   rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
1725	seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
1726		   rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
1727	seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
1728		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1729
1730	seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
1731		   rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
1732	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
1733		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
1734	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
1735		   rq_ctx->rq_int, rq_ctx->rq_int_ena);
1736	seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
1737
1738	seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
1739		   rq_ctx->ltag, rq_ctx->good_utag);
1740	seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
1741		   rq_ctx->bad_utag, rq_ctx->flow_tagw);
1742	seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
1743		   rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
1744	seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
1745		   rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
1746	seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
1747
1748	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1749	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1750	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1751	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1752	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1753}
1754
1755/* Dumps given nix_rq's context */
1756static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1757{
1758	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
1759	struct nix_hw *nix_hw = m->private;
1760	struct rvu *rvu = nix_hw->rvu;
1761
1762	if (!is_rvu_otx2(rvu)) {
1763		print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
1764		return;
1765	}
1766
1767	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
1768		   rq_ctx->wqe_aura, rq_ctx->substream);
1769	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1770		   rq_ctx->cq, rq_ctx->ena_wqwd);
1771	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1772		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
1773	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);
1774
1775	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1776		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
1777	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
1778		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
1779	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1780		   rq_ctx->pb_caching, rq_ctx->sso_tt);
1781	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1782		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
1783	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);
1784
1785	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
1786		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
1787	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
1788		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
1789	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
1790		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
1791	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
1792		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
1793	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);
1794
1795	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
1796		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
1797	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
1798		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1799	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
1800		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
1801	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
1802		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);
1803
1804	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
1805		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
1806	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
1807		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
1808	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
1809		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
1810	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);
1811
1812	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
1813		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
1814	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
1815		   rq_ctx->good_utag, rq_ctx->ltag);
1816
1817	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1818	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1819	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1820	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1821	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1822}
1823
1824/* Dumps given nix_cq's context */
1825static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1826{
1827	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
1828	struct nix_hw *nix_hw = m->private;
1829	struct rvu *rvu = nix_hw->rvu;
1830
1831	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
1832
1833	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
1834	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
1835		   cq_ctx->avg_con, cq_ctx->cint_idx);
1836	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
1837		   cq_ctx->cq_err, cq_ctx->qint_idx);
1838	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
1839		   cq_ctx->bpid, cq_ctx->bp_ena);
1840
1841	if (!is_rvu_otx2(rvu)) {
1842		seq_printf(m, "W1: lbpid_high \t\t\t0x%03x\n", cq_ctx->lbpid_high);
1843		seq_printf(m, "W1: lbpid_med \t\t\t0x%03x\n", cq_ctx->lbpid_med);
1844		seq_printf(m, "W1: lbpid_low \t\t\t0x%03x\n", cq_ctx->lbpid_low);
1845		seq_printf(m, "(W1: lbpid) \t\t\t0x%03x\n",
1846			   cq_ctx->lbpid_high << 6 | cq_ctx->lbpid_med << 3 |
1847			   cq_ctx->lbpid_low);
1848		seq_printf(m, "W1: lbp_ena \t\t\t\t%d\n\n", cq_ctx->lbp_ena);
1849	}
1850
1851	seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
1852		   cq_ctx->update_time, cq_ctx->avg_level);
1853	seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
1854		   cq_ctx->head, cq_ctx->tail);
1855
1856	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
1857		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
1858	seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
1859		   cq_ctx->qsize, cq_ctx->caching);
1860	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
1861		   cq_ctx->substream, cq_ctx->ena);
1862	if (!is_rvu_otx2(rvu)) {
1863		seq_printf(m, "W3: lbp_frac \t\t\t%d\n", cq_ctx->lbp_frac);
1864		seq_printf(m, "W3: cpt_drop_err_en \t\t\t%d\n",
1865			   cq_ctx->cpt_drop_err_en);
1866	}
1867	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
1868		   cq_ctx->drop_ena, cq_ctx->drop);
1869	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
1870}
1871
1872static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
1873					 void *unused, int ctype)
1874{
1875	void (*print_nix_ctx)(struct seq_file *filp,
1876			      struct nix_aq_enq_rsp *rsp) = NULL;
1877	struct nix_hw *nix_hw = filp->private;
1878	struct rvu *rvu = nix_hw->rvu;
1879	struct nix_aq_enq_req aq_req;
1880	struct nix_aq_enq_rsp rsp;
1881	char *ctype_string = NULL;
1882	int qidx, rc, max_id = 0;
1883	struct rvu_pfvf *pfvf;
1884	int nixlf, id, all;
1885	u16 pcifunc;
1886
1887	switch (ctype) {
1888	case NIX_AQ_CTYPE_CQ:
1889		nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
1890		id = rvu->rvu_dbg.nix_cq_ctx.id;
1891		all = rvu->rvu_dbg.nix_cq_ctx.all;
1892		break;
1893
1894	case NIX_AQ_CTYPE_SQ:
1895		nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
1896		id = rvu->rvu_dbg.nix_sq_ctx.id;
1897		all = rvu->rvu_dbg.nix_sq_ctx.all;
1898		break;
1899
1900	case NIX_AQ_CTYPE_RQ:
1901		nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
1902		id = rvu->rvu_dbg.nix_rq_ctx.id;
1903		all = rvu->rvu_dbg.nix_rq_ctx.all;
1904		break;
1905
1906	default:
1907		return -EINVAL;
1908	}
1909
1910	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1911		return -EINVAL;
1912
1913	pfvf = rvu_get_pfvf(rvu, pcifunc);
1914	if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
1915		seq_puts(filp, "SQ context is not initialized\n");
1916		return -EINVAL;
1917	} else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
1918		seq_puts(filp, "RQ context is not initialized\n");
1919		return -EINVAL;
1920	} else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
1921		seq_puts(filp, "CQ context is not initialized\n");
1922		return -EINVAL;
1923	}
1924
1925	if (ctype == NIX_AQ_CTYPE_SQ) {
1926		max_id = pfvf->sq_ctx->qsize;
1927		ctype_string = "sq";
1928		print_nix_ctx = print_nix_sq_ctx;
1929	} else if (ctype == NIX_AQ_CTYPE_RQ) {
1930		max_id = pfvf->rq_ctx->qsize;
1931		ctype_string = "rq";
1932		print_nix_ctx = print_nix_rq_ctx;
1933	} else if (ctype == NIX_AQ_CTYPE_CQ) {
1934		max_id = pfvf->cq_ctx->qsize;
1935		ctype_string = "cq";
1936		print_nix_ctx = print_nix_cq_ctx;
1937	}
1938
1939	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1940	aq_req.hdr.pcifunc = pcifunc;
1941	aq_req.ctype = ctype;
1942	aq_req.op = NIX_AQ_INSTOP_READ;
1943	if (all)
1944		id = 0;
1945	else
1946		max_id = id + 1;
1947	for (qidx = id; qidx < max_id; qidx++) {
1948		aq_req.qidx = qidx;
1949		seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
1950			   ctype_string, nixlf, aq_req.qidx);
1951		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
1952		if (rc) {
1953			seq_puts(filp, "Failed to read the context\n");
1954			return -EINVAL;
1955		}
1956		print_nix_ctx(filp, &rsp);
1957	}
1958	return 0;
1959}
1960
1961static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
1962			       int id, int ctype, char *ctype_string,
1963			       struct seq_file *m)
1964{
1965	struct nix_hw *nix_hw = m->private;
1966	struct rvu_pfvf *pfvf;
1967	int max_id = 0;
1968	u16 pcifunc;
1969
1970	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1971		return -EINVAL;
1972
1973	pfvf = rvu_get_pfvf(rvu, pcifunc);
1974
1975	if (ctype == NIX_AQ_CTYPE_SQ) {
1976		if (!pfvf->sq_ctx) {
1977			dev_warn(rvu->dev, "SQ context is not initialized\n");
1978			return -EINVAL;
1979		}
1980		max_id = pfvf->sq_ctx->qsize;
1981	} else if (ctype == NIX_AQ_CTYPE_RQ) {
1982		if (!pfvf->rq_ctx) {
1983			dev_warn(rvu->dev, "RQ context is not initialized\n");
1984			return -EINVAL;
1985		}
1986		max_id = pfvf->rq_ctx->qsize;
1987	} else if (ctype == NIX_AQ_CTYPE_CQ) {
1988		if (!pfvf->cq_ctx) {
1989			dev_warn(rvu->dev, "CQ context is not initialized\n");
1990			return -EINVAL;
1991		}
1992		max_id = pfvf->cq_ctx->qsize;
1993	}
1994
1995	if (id < 0 || id >= max_id) {
1996		dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
1997			 ctype_string, max_id - 1);
1998		return -EINVAL;
1999	}
2000	switch (ctype) {
2001	case NIX_AQ_CTYPE_CQ:
2002		rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
2003		rvu->rvu_dbg.nix_cq_ctx.id = id;
2004		rvu->rvu_dbg.nix_cq_ctx.all = all;
2005		break;
2006
2007	case NIX_AQ_CTYPE_SQ:
2008		rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
2009		rvu->rvu_dbg.nix_sq_ctx.id = id;
2010		rvu->rvu_dbg.nix_sq_ctx.all = all;
2011		break;
2012
2013	case NIX_AQ_CTYPE_RQ:
2014		rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
2015		rvu->rvu_dbg.nix_rq_ctx.id = id;
2016		rvu->rvu_dbg.nix_rq_ctx.all = all;
2017		break;
2018	default:
2019		return -EINVAL;
2020	}
2021	return 0;
2022}
2023
2024static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
2025					   const char __user *buffer,
2026					   size_t count, loff_t *ppos,
2027					   int ctype)
2028{
2029	struct seq_file *m = filp->private_data;
2030	struct nix_hw *nix_hw = m->private;
2031	struct rvu *rvu = nix_hw->rvu;
2032	char *cmd_buf, *ctype_string;
2033	int nixlf, id = 0, ret;
2034	bool all = false;
2035
2036	if ((*ppos != 0) || !count)
2037		return -EINVAL;
2038
2039	switch (ctype) {
2040	case NIX_AQ_CTYPE_SQ:
2041		ctype_string = "sq";
2042		break;
2043	case NIX_AQ_CTYPE_RQ:
2044		ctype_string = "rq";
2045		break;
2046	case NIX_AQ_CTYPE_CQ:
2047		ctype_string = "cq";
2048		break;
2049	default:
2050		return -EINVAL;
2051	}
2052
2053	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
2054
2055	if (!cmd_buf)
2056		return count;
2057
2058	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
2059				   &nixlf, &id, &all);
2060	if (ret < 0) {
2061		dev_info(rvu->dev,
2062			 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
2063			 ctype_string, ctype_string);
2064		goto done;
2065	} else {
2066		ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
2067					  ctype_string, m);
2068	}
2069done:
2070	kfree(cmd_buf);
2071	return ret ? ret : count;
2072}
2073
2074static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
2075					const char __user *buffer,
2076					size_t count, loff_t *ppos)
2077{
2078	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
2079					    NIX_AQ_CTYPE_SQ);
2080}
2081
2082static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
2083{
2084	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
2085}
2086
2087RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
2088
2089static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
2090					const char __user *buffer,
2091					size_t count, loff_t *ppos)
2092{
2093	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
2094					    NIX_AQ_CTYPE_RQ);
2095}
2096
2097static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void  *unused)
2098{
2099	return rvu_dbg_nix_queue_ctx_display(filp, unused,  NIX_AQ_CTYPE_RQ);
2100}
2101
2102RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
2103
2104static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
2105					const char __user *buffer,
2106					size_t count, loff_t *ppos)
2107{
2108	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
2109					    NIX_AQ_CTYPE_CQ);
2110}
2111
2112static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
2113{
2114	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
2115}
2116
2117RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
2118
2119static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
2120				 unsigned long *bmap, char *qtype)
2121{
2122	char *buf;
2123
2124	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2125	if (!buf)
2126		return;
2127
2128	bitmap_print_to_pagebuf(false, buf, bmap, qsize);
2129	seq_printf(filp, "%s context count : %d\n", qtype, qsize);
2130	seq_printf(filp, "%s context ena/dis bitmap : %s\n",
2131		   qtype, buf);
2132	kfree(buf);
2133}
2134
2135static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
2136{
2137	if (!pfvf->cq_ctx)
2138		seq_puts(filp, "cq context is not initialized\n");
2139	else
2140		print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
2141				     "cq");
2142
2143	if (!pfvf->rq_ctx)
2144		seq_puts(filp, "rq context is not initialized\n");
2145	else
2146		print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
2147				     "rq");
2148
2149	if (!pfvf->sq_ctx)
2150		seq_puts(filp, "sq context is not initialized\n");
2151	else
2152		print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
2153				     "sq");
2154}
2155
2156static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
2157				       const char __user *buffer,
2158				       size_t count, loff_t *ppos)
2159{
2160	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
2161				   BLKTYPE_NIX);
2162}
2163
2164static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
2165{
2166	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
2167}
2168
2169RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
2170
2171static void print_band_prof_ctx(struct seq_file *m,
2172				struct nix_bandprof_s *prof)
2173{
2174	char *str;
2175
2176	switch (prof->pc_mode) {
2177	case NIX_RX_PC_MODE_VLAN:
2178		str = "VLAN";
2179		break;
2180	case NIX_RX_PC_MODE_DSCP:
2181		str = "DSCP";
2182		break;
2183	case NIX_RX_PC_MODE_GEN:
2184		str = "Generic";
2185		break;
2186	case NIX_RX_PC_MODE_RSVD:
2187		str = "Reserved";
2188		break;
2189	}
2190	seq_printf(m, "W0: pc_mode\t\t%s\n", str);
2191	str = (prof->icolor == 3) ? "Color blind" :
2192		(prof->icolor == 0) ? "Green" :
2193		(prof->icolor == 1) ? "Yellow" : "Red";
2194	seq_printf(m, "W0: icolor\t\t%s\n", str);
2195	seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
2196	seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
2197	seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
2198	seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
2199	seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
2200	seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
2201	seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
2202	seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
2203
2204	seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
2205	str = (prof->lmode == 0) ? "byte" : "packet";
2206	seq_printf(m, "W1: lmode\t\t%s\n", str);
2207	seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
2208	seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
2209	seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
2210	seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
2211	str = (prof->gc_action == 0) ? "PASS" :
2212		(prof->gc_action == 1) ? "DROP" : "RED";
2213	seq_printf(m, "W1: gc_action\t\t%s\n", str);
2214	str = (prof->yc_action == 0) ? "PASS" :
2215		(prof->yc_action == 1) ? "DROP" : "RED";
2216	seq_printf(m, "W1: yc_action\t\t%s\n", str);
2217	str = (prof->rc_action == 0) ? "PASS" :
2218		(prof->rc_action == 1) ? "DROP" : "RED";
2219	seq_printf(m, "W1: rc_action\t\t%s\n", str);
2220	seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
2221	seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
2222	seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
2223
2224	seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
2225	seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
2226	seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
2227	seq_printf(m, "W4: green_pkt_pass\t%lld\n",
2228		   (u64)prof->green_pkt_pass);
2229	seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
2230		   (u64)prof->yellow_pkt_pass);
2231	seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
2232	seq_printf(m, "W7: green_octs_pass\t%lld\n",
2233		   (u64)prof->green_octs_pass);
2234	seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
2235		   (u64)prof->yellow_octs_pass);
2236	seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
2237	seq_printf(m, "W10: green_pkt_drop\t%lld\n",
2238		   (u64)prof->green_pkt_drop);
2239	seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
2240		   (u64)prof->yellow_pkt_drop);
2241	seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
2242	seq_printf(m, "W13: green_octs_drop\t%lld\n",
2243		   (u64)prof->green_octs_drop);
2244	seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
2245		   (u64)prof->yellow_octs_drop);
2246	seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
2247	seq_puts(m, "==============================\n");
2248}
2249
2250static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
2251{
2252	struct nix_hw *nix_hw = m->private;
2253	struct nix_cn10k_aq_enq_req aq_req;
2254	struct nix_cn10k_aq_enq_rsp aq_rsp;
2255	struct rvu *rvu = nix_hw->rvu;
2256	struct nix_ipolicer *ipolicer;
2257	int layer, prof_idx, idx, rc;
2258	u16 pcifunc;
2259	char *str;
2260
2261	/* Ingress policers do not exist on all platforms */
2262	if (!nix_hw->ipolicer)
2263		return 0;
2264
2265	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
2266		if (layer == BAND_PROF_INVAL_LAYER)
2267			continue;
2268		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
2269			(layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";
2270
2271		seq_printf(m, "\n%s bandwidth profiles\n", str);
2272		seq_puts(m, "=======================\n");
2273
2274		ipolicer = &nix_hw->ipolicer[layer];
2275
2276		for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
2277			if (is_rsrc_free(&ipolicer->band_prof, idx))
2278				continue;
2279
2280			prof_idx = (idx & 0x3FFF) | (layer << 14);
2281			rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
2282						 0x00, NIX_AQ_CTYPE_BANDPROF,
2283						 prof_idx);
2284			if (rc) {
2285				dev_err(rvu->dev,
2286					"%s: Failed to fetch context of %s profile %d, err %d\n",
2287					__func__, str, idx, rc);
2288				return 0;
2289			}
2290			seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
2291			pcifunc = ipolicer->pfvf_map[idx];
2292			if (!(pcifunc & RVU_PFVF_FUNC_MASK))
2293				seq_printf(m, "Allocated to :: PF %d\n",
2294					   rvu_get_pf(pcifunc));
2295			else
2296				seq_printf(m, "Allocated to :: PF %d VF %d\n",
2297					   rvu_get_pf(pcifunc),
2298					   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2299			print_band_prof_ctx(m, &aq_rsp.prof);
2300		}
2301	}
2302	return 0;
2303}
2304
2305RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
2306
2307static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
2308{
2309	struct nix_hw *nix_hw = m->private;
2310	struct nix_ipolicer *ipolicer;
2311	int layer;
2312	char *str;
2313
2314	/* Ingress policers do not exist on all platforms */
2315	if (!nix_hw->ipolicer)
2316		return 0;
2317
2318	seq_puts(m, "\nBandwidth profile resource free count\n");
2319	seq_puts(m, "=====================================\n");
2320	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
2321		if (layer == BAND_PROF_INVAL_LAYER)
2322			continue;
2323		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
2324			(layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
2325
2326		ipolicer = &nix_hw->ipolicer[layer];
2327		seq_printf(m, "%s :: Max: %4d  Free: %4d\n", str,
2328			   ipolicer->band_prof.max,
2329			   rvu_rsrc_free_count(&ipolicer->band_prof));
2330	}
2331	seq_puts(m, "=====================================\n");
2332
2333	return 0;
2334}
2335
2336RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
2337
2338static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
2339{
2340	struct nix_hw *nix_hw;
2341
2342	if (!is_block_implemented(rvu->hw, blkaddr))
2343		return;
2344
2345	if (blkaddr == BLKADDR_NIX0) {
2346		rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
2347		nix_hw = &rvu->hw->nix[0];
2348	} else {
2349		rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
2350						      rvu->rvu_dbg.root);
2351		nix_hw = &rvu->hw->nix[1];
2352	}
2353
2354	debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2355			    &rvu_dbg_nix_sq_ctx_fops);
2356	debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2357			    &rvu_dbg_nix_rq_ctx_fops);
2358	debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2359			    &rvu_dbg_nix_cq_ctx_fops);
2360	debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
2361			    &rvu_dbg_nix_ndc_tx_cache_fops);
2362	debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
2363			    &rvu_dbg_nix_ndc_rx_cache_fops);
2364	debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
2365			    &rvu_dbg_nix_ndc_tx_hits_miss_fops);
2366	debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
2367			    &rvu_dbg_nix_ndc_rx_hits_miss_fops);
2368	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
2369			    &rvu_dbg_nix_qsize_fops);
2370	debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2371			    &rvu_dbg_nix_band_prof_ctx_fops);
2372	debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
2373			    &rvu_dbg_nix_band_prof_rsrc_fops);
2374}
2375
2376static void rvu_dbg_npa_init(struct rvu *rvu)
2377{
2378	rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
2379
2380	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
2381			    &rvu_dbg_npa_qsize_fops);
2382	debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2383			    &rvu_dbg_npa_aura_ctx_fops);
2384	debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2385			    &rvu_dbg_npa_pool_ctx_fops);
2386	debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
2387			    &rvu_dbg_npa_ndc_cache_fops);
2388	debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
2389			    &rvu_dbg_npa_ndc_hits_miss_fops);
2390}
2391
2392#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)				\
2393	({								\
2394		u64 cnt;						\
2395		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
2396					     NIX_STATS_RX, &(cnt));	\
2397		if (!err)						\
2398			seq_printf(s, "%s: %llu\n", name, cnt);		\
2399		cnt;							\
2400	})
2401
2402#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)			\
2403	({								\
2404		u64 cnt;						\
2405		err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),	\
2406					  NIX_STATS_TX, &(cnt));	\
2407		if (!err)						\
2408			seq_printf(s, "%s: %llu\n", name, cnt);		\
2409		cnt;							\
2410	})
2411
2412static int cgx_print_stats(struct seq_file *s, int lmac_id)
2413{
2414	struct cgx_link_user_info linfo;
2415	struct mac_ops *mac_ops;
2416	void *cgxd = s->private;
2417	u64 ucast, mcast, bcast;
2418	int stat = 0, err = 0;
2419	u64 tx_stat, rx_stat;
2420	struct rvu *rvu;
2421
2422	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2423					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2424	if (!rvu)
2425		return -ENODEV;
2426
2427	mac_ops = get_mac_ops(cgxd);
2428	/* There can be no CGX devices at all */
2429	if (!mac_ops)
2430		return 0;
2431
2432	/* Link status */
2433	seq_puts(s, "\n=======Link Status======\n\n");
2434	err = cgx_get_link_info(cgxd, lmac_id, &linfo);
2435	if (err)
2436		seq_puts(s, "Failed to read link status\n");
2437	seq_printf(s, "\nLink is %s %d Mbps\n\n",
2438		   linfo.link_up ? "UP" : "DOWN", linfo.speed);
2439
2440	/* Rx stats */
2441	seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
2442		   mac_ops->name);
2443	ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
2444	if (err)
2445		return err;
2446	mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
2447	if (err)
2448		return err;
2449	bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
2450	if (err)
2451		return err;
2452	seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
2453	PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
2454	if (err)
2455		return err;
2456	PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
2457	if (err)
2458		return err;
2459	PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
2460	if (err)
2461		return err;
2462
2463	/* Tx stats */
2464	seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
2465		   mac_ops->name);
2466	ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
2467	if (err)
2468		return err;
2469	mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
2470	if (err)
2471		return err;
2472	bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
2473	if (err)
2474		return err;
2475	seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
2476	PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
2477	if (err)
2478		return err;
2479	PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
2480	if (err)
2481		return err;
2482
2483	/* Rx stats */
2484	seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
2485	while (stat < mac_ops->rx_stats_cnt) {
2486		err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
2487		if (err)
2488			return err;
2489		if (is_rvu_otx2(rvu))
2490			seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
2491				   rx_stat);
2492		else
2493			seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
2494				   rx_stat);
2495		stat++;
2496	}
2497
2498	/* Tx stats */
2499	stat = 0;
2500	seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
2501	while (stat < mac_ops->tx_stats_cnt) {
2502		err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
2503		if (err)
2504			return err;
2505
2506		if (is_rvu_otx2(rvu))
2507			seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
2508				   tx_stat);
2509		else
2510			seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
2511				   tx_stat);
2512		stat++;
2513	}
2514
2515	return err;
2516}
2517
2518static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
2519{
2520	struct dentry *current_dir;
2521	char *buf;
2522
2523	current_dir = filp->file->f_path.dentry->d_parent;
2524	buf = strrchr(current_dir->d_name.name, 'c');
2525	if (!buf)
2526		return -EINVAL;
2527
2528	return kstrtoint(buf + 1, 10, lmac_id);
2529}
2530
2531static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
2532{
2533	int lmac_id, err;
2534
2535	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
2536	if (!err)
2537		return cgx_print_stats(filp, lmac_id);
2538
2539	return err;
2540}
2541
2542RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
2543
2544static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
2545{
2546	struct pci_dev *pdev = NULL;
2547	void *cgxd = s->private;
2548	char *bcast, *mcast;
2549	u16 index, domain;
2550	u8 dmac[ETH_ALEN];
2551	struct rvu *rvu;
2552	u64 cfg, mac;
2553	int pf;
2554
2555	rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2556					     PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2557	if (!rvu)
2558		return -ENODEV;
2559
2560	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
2561	domain = 2;
2562
2563	pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
2564	if (!pdev)
2565		return 0;
2566
2567	cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
2568	bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
2569	mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
2570
2571	seq_puts(s,
2572		 "PCI dev       RVUPF   BROADCAST  MULTICAST  FILTER-MODE\n");
2573	seq_printf(s, "%s  PF%d  %9s  %9s",
2574		   dev_name(&pdev->dev), pf, bcast, mcast);
2575	if (cfg & CGX_DMAC_CAM_ACCEPT)
2576		seq_printf(s, "%12s\n\n", "UNICAST");
2577	else
2578		seq_printf(s, "%16s\n\n", "PROMISCUOUS");
2579
2580	seq_puts(s, "\nDMAC-INDEX  ADDRESS\n");
2581
2582	for (index = 0 ; index < 32 ; index++) {
2583		cfg = cgx_read_dmac_entry(cgxd, index);
2584		/* Display enabled dmac entries associated with current lmac */
2585		if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
2586		    FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
2587			mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
2588			u64_to_ether_addr(mac, dmac);
2589			seq_printf(s, "%7d     %pM\n", index, dmac);
2590		}
2591	}
2592
2593	pci_dev_put(pdev);
2594	return 0;
2595}
2596
2597static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
2598{
2599	int err, lmac_id;
2600
2601	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
2602	if (!err)
2603		return cgx_print_dmac_flt(filp, lmac_id);
2604
2605	return err;
2606}
2607
2608RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
2609
2610static void rvu_dbg_cgx_init(struct rvu *rvu)
2611{
2612	struct mac_ops *mac_ops;
2613	unsigned long lmac_bmap;
2614	int i, lmac_id;
2615	char dname[20];
2616	void *cgx;
2617
2618	if (!cgx_get_cgxcnt_max())
2619		return;
2620
2621	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
2622	if (!mac_ops)
2623		return;
2624
2625	rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
2626						   rvu->rvu_dbg.root);
2627
2628	for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
2629		cgx = rvu_cgx_pdata(i, rvu);
2630		if (!cgx)
2631			continue;
2632		lmac_bmap = cgx_get_lmac_bmap(cgx);
2633		/* cgx debugfs dir */
2634		sprintf(dname, "%s%d", mac_ops->name, i);
2635		rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
2636						      rvu->rvu_dbg.cgx_root);
2637
2638		for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) {
2639			/* lmac debugfs dir */
2640			sprintf(dname, "lmac%d", lmac_id);
2641			rvu->rvu_dbg.lmac =
2642				debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
2643
2644			debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
2645					    cgx, &rvu_dbg_cgx_stat_fops);
2646			debugfs_create_file("mac_filter", 0600,
2647					    rvu->rvu_dbg.lmac, cgx,
2648					    &rvu_dbg_cgx_dmac_flt_fops);
2649		}
2650	}
2651}
2652
2653/* NPC debugfs APIs */
2654static void rvu_print_npc_mcam_info(struct seq_file *s,
2655				    u16 pcifunc, int blkaddr)
2656{
2657	struct rvu *rvu = s->private;
2658	int entry_acnt, entry_ecnt;
2659	int cntr_acnt, cntr_ecnt;
2660
2661	rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
2662					  &entry_acnt, &entry_ecnt);
2663	rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
2664					    &cntr_acnt, &cntr_ecnt);
2665	if (!entry_acnt && !cntr_acnt)
2666		return;
2667
2668	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
2669		seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
2670			   rvu_get_pf(pcifunc));
2671	else
2672		seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
2673			   rvu_get_pf(pcifunc),
2674			   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2675
2676	if (entry_acnt) {
2677		seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
2678		seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
2679	}
2680	if (cntr_acnt) {
2681		seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
2682		seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
2683	}
2684}
2685
2686static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
2687{
2688	struct rvu *rvu = filp->private;
2689	int pf, vf, numvfs, blkaddr;
2690	struct npc_mcam *mcam;
2691	u16 pcifunc, counters;
2692	u64 cfg;
2693
2694	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2695	if (blkaddr < 0)
2696		return -ENODEV;
2697
2698	mcam = &rvu->hw->mcam;
2699	counters = rvu->hw->npc_counters;
2700
2701	seq_puts(filp, "\nNPC MCAM info:\n");
2702	/* MCAM keywidth on receive and transmit sides */
2703	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
2704	cfg = (cfg >> 32) & 0x07;
2705	seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2706		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2707		   "224bits" : "448bits"));
2708	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
2709	cfg = (cfg >> 32) & 0x07;
2710	seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2711		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2712		   "224bits" : "448bits"));
2713
2714	mutex_lock(&mcam->lock);
2715	/* MCAM entries */
2716	seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
2717	seq_printf(filp, "\t\t Reserved \t: %d\n",
2718		   mcam->total_entries - mcam->bmap_entries);
2719	seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
2720
2721	/* MCAM counters */
2722	seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
2723	seq_printf(filp, "\t\t Reserved \t: %d\n",
2724		   counters - mcam->counters.max);
2725	seq_printf(filp, "\t\t Available \t: %d\n",
2726		   rvu_rsrc_free_count(&mcam->counters));
2727
2728	if (mcam->bmap_entries == mcam->bmap_fcnt) {
2729		mutex_unlock(&mcam->lock);
2730		return 0;
2731	}
2732
2733	seq_puts(filp, "\n\t\t Current allocation\n");
2734	seq_puts(filp, "\t\t====================\n");
2735	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2736		pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2737		rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
2738
2739		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2740		numvfs = (cfg >> 12) & 0xFF;
2741		for (vf = 0; vf < numvfs; vf++) {
2742			pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
2743			rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
2744		}
2745	}
2746
2747	mutex_unlock(&mcam->lock);
2748	return 0;
2749}
2750
2751RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
2752
2753static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
2754					     void *unused)
2755{
2756	struct rvu *rvu = filp->private;
2757	struct npc_mcam *mcam;
2758	int blkaddr;
2759
2760	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2761	if (blkaddr < 0)
2762		return -ENODEV;
2763
2764	mcam = &rvu->hw->mcam;
2765
2766	seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
2767	seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
2768		   rvu_read64(rvu, blkaddr,
2769			      NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
2770
2771	return 0;
2772}
2773
2774RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
2775
2776#define RVU_DBG_PRINT_MPLS_TTL(pkt, mask)                                     \
2777do {									      \
2778	seq_printf(s, "%ld ", FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, pkt));     \
2779	seq_printf(s, "mask 0x%lx\n",                                         \
2780		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, mask));               \
2781} while (0)                                                                   \
2782
2783#define RVU_DBG_PRINT_MPLS_LBTCBOS(_pkt, _mask)                               \
2784do {									      \
2785	typeof(_pkt) (pkt) = (_pkt);					      \
2786	typeof(_mask) (mask) = (_mask);                                       \
2787	seq_printf(s, "%ld %ld %ld\n",                                        \
2788		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, pkt),                  \
2789		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, pkt),                  \
2790		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, pkt));                \
2791	seq_printf(s, "\tmask 0x%lx 0x%lx 0x%lx\n",                           \
2792		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, mask),                 \
2793		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, mask),                 \
2794		   FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, mask));               \
2795} while (0)                                                                   \
2796
2797static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
2798					struct rvu_npc_mcam_rule *rule)
2799{
2800	u8 bit;
2801
2802	for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
2803		seq_printf(s, "\t%s  ", npc_get_field_name(bit));
2804		switch (bit) {
2805		case NPC_LXMB:
2806			if (rule->lxmb == 1)
2807				seq_puts(s, "\tL2M nibble is set\n");
2808			else
2809				seq_puts(s, "\tL2B nibble is set\n");
2810			break;
2811		case NPC_DMAC:
2812			seq_printf(s, "%pM ", rule->packet.dmac);
2813			seq_printf(s, "mask %pM\n", rule->mask.dmac);
2814			break;
2815		case NPC_SMAC:
2816			seq_printf(s, "%pM ", rule->packet.smac);
2817			seq_printf(s, "mask %pM\n", rule->mask.smac);
2818			break;
2819		case NPC_ETYPE:
2820			seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
2821			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
2822			break;
2823		case NPC_OUTER_VID:
2824			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
2825			seq_printf(s, "mask 0x%x\n",
2826				   ntohs(rule->mask.vlan_tci));
2827			break;
2828		case NPC_INNER_VID:
2829			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_itci));
2830			seq_printf(s, "mask 0x%x\n",
2831				   ntohs(rule->mask.vlan_itci));
2832			break;
2833		case NPC_TOS:
2834			seq_printf(s, "%d ", rule->packet.tos);
2835			seq_printf(s, "mask 0x%x\n", rule->mask.tos);
2836			break;
2837		case NPC_SIP_IPV4:
2838			seq_printf(s, "%pI4 ", &rule->packet.ip4src);
2839			seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
2840			break;
2841		case NPC_DIP_IPV4:
2842			seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
2843			seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
2844			break;
2845		case NPC_SIP_IPV6:
2846			seq_printf(s, "%pI6 ", rule->packet.ip6src);
2847			seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
2848			break;
2849		case NPC_DIP_IPV6:
2850			seq_printf(s, "%pI6 ", rule->packet.ip6dst);
2851			seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
2852			break;
2853		case NPC_IPFRAG_IPV6:
2854			seq_printf(s, "0x%x ", rule->packet.next_header);
2855			seq_printf(s, "mask 0x%x\n", rule->mask.next_header);
2856			break;
2857		case NPC_IPFRAG_IPV4:
2858			seq_printf(s, "0x%x ", rule->packet.ip_flag);
2859			seq_printf(s, "mask 0x%x\n", rule->mask.ip_flag);
2860			break;
2861		case NPC_SPORT_TCP:
2862		case NPC_SPORT_UDP:
2863		case NPC_SPORT_SCTP:
2864			seq_printf(s, "%d ", ntohs(rule->packet.sport));
2865			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
2866			break;
2867		case NPC_DPORT_TCP:
2868		case NPC_DPORT_UDP:
2869		case NPC_DPORT_SCTP:
2870			seq_printf(s, "%d ", ntohs(rule->packet.dport));
2871			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
2872			break;
2873		case NPC_TCP_FLAGS:
2874			seq_printf(s, "%d ", rule->packet.tcp_flags);
2875			seq_printf(s, "mask 0x%x\n", rule->mask.tcp_flags);
2876			break;
2877		case NPC_IPSEC_SPI:
2878			seq_printf(s, "0x%x ", ntohl(rule->packet.spi));
2879			seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi));
2880			break;
2881		case NPC_MPLS1_LBTCBOS:
2882			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[0],
2883						   rule->mask.mpls_lse[0]);
2884			break;
2885		case NPC_MPLS1_TTL:
2886			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[0],
2887					       rule->mask.mpls_lse[0]);
2888			break;
2889		case NPC_MPLS2_LBTCBOS:
2890			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[1],
2891						   rule->mask.mpls_lse[1]);
2892			break;
2893		case NPC_MPLS2_TTL:
2894			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[1],
2895					       rule->mask.mpls_lse[1]);
2896			break;
2897		case NPC_MPLS3_LBTCBOS:
2898			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[2],
2899						   rule->mask.mpls_lse[2]);
2900			break;
2901		case NPC_MPLS3_TTL:
2902			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[2],
2903					       rule->mask.mpls_lse[2]);
2904			break;
2905		case NPC_MPLS4_LBTCBOS:
2906			RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[3],
2907						   rule->mask.mpls_lse[3]);
2908			break;
2909		case NPC_MPLS4_TTL:
2910			RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[3],
2911					       rule->mask.mpls_lse[3]);
2912			break;
2913		case NPC_TYPE_ICMP:
2914			seq_printf(s, "%d ", rule->packet.icmp_type);
2915			seq_printf(s, "mask 0x%x\n", rule->mask.icmp_type);
2916			break;
2917		case NPC_CODE_ICMP:
2918			seq_printf(s, "%d ", rule->packet.icmp_code);
2919			seq_printf(s, "mask 0x%x\n", rule->mask.icmp_code);
2920			break;
2921		default:
2922			seq_puts(s, "\n");
2923			break;
2924		}
2925	}
2926}
2927
2928static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
2929					 struct rvu_npc_mcam_rule *rule)
2930{
2931	if (is_npc_intf_tx(rule->intf)) {
2932		switch (rule->tx_action.op) {
2933		case NIX_TX_ACTIONOP_DROP:
2934			seq_puts(s, "\taction: Drop\n");
2935			break;
2936		case NIX_TX_ACTIONOP_UCAST_DEFAULT:
2937			seq_puts(s, "\taction: Unicast to default channel\n");
2938			break;
2939		case NIX_TX_ACTIONOP_UCAST_CHAN:
2940			seq_printf(s, "\taction: Unicast to channel %d\n",
2941				   rule->tx_action.index);
2942			break;
2943		case NIX_TX_ACTIONOP_MCAST:
2944			seq_puts(s, "\taction: Multicast\n");
2945			break;
2946		case NIX_TX_ACTIONOP_DROP_VIOL:
2947			seq_puts(s, "\taction: Lockdown Violation Drop\n");
2948			break;
2949		default:
2950			break;
2951		}
2952	} else {
2953		switch (rule->rx_action.op) {
2954		case NIX_RX_ACTIONOP_DROP:
2955			seq_puts(s, "\taction: Drop\n");
2956			break;
2957		case NIX_RX_ACTIONOP_UCAST:
2958			seq_printf(s, "\taction: Direct to queue %d\n",
2959				   rule->rx_action.index);
2960			break;
2961		case NIX_RX_ACTIONOP_RSS:
2962			seq_puts(s, "\taction: RSS\n");
2963			break;
2964		case NIX_RX_ACTIONOP_UCAST_IPSEC:
2965			seq_puts(s, "\taction: Unicast ipsec\n");
2966			break;
2967		case NIX_RX_ACTIONOP_MCAST:
2968			seq_puts(s, "\taction: Multicast\n");
2969			break;
2970		default:
2971			break;
2972		}
2973	}
2974}
2975
2976static const char *rvu_dbg_get_intf_name(int intf)
2977{
2978	switch (intf) {
2979	case NIX_INTFX_RX(0):
2980		return "NIX0_RX";
2981	case NIX_INTFX_RX(1):
2982		return "NIX1_RX";
2983	case NIX_INTFX_TX(0):
2984		return "NIX0_TX";
2985	case NIX_INTFX_TX(1):
2986		return "NIX1_TX";
2987	default:
2988		break;
2989	}
2990
2991	return "unknown";
2992}
2993
2994static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
2995{
2996	struct rvu_npc_mcam_rule *iter;
2997	struct rvu *rvu = s->private;
2998	struct npc_mcam *mcam;
2999	int pf, vf = -1;
3000	bool enabled;
3001	int blkaddr;
3002	u16 target;
3003	u64 hits;
3004
3005	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3006	if (blkaddr < 0)
3007		return 0;
3008
3009	mcam = &rvu->hw->mcam;
3010
3011	mutex_lock(&mcam->lock);
3012	list_for_each_entry(iter, &mcam->mcam_rules, list) {
3013		pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
3014		seq_printf(s, "\n\tInstalled by: PF%d ", pf);
3015
3016		if (iter->owner & RVU_PFVF_FUNC_MASK) {
3017			vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
3018			seq_printf(s, "VF%d", vf);
3019		}
3020		seq_puts(s, "\n");
3021
3022		seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
3023						    "RX" : "TX");
3024		seq_printf(s, "\tinterface: %s\n",
3025			   rvu_dbg_get_intf_name(iter->intf));
3026		seq_printf(s, "\tmcam entry: %d\n", iter->entry);
3027
3028		rvu_dbg_npc_mcam_show_flows(s, iter);
3029		if (is_npc_intf_rx(iter->intf)) {
3030			target = iter->rx_action.pf_func;
3031			pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
3032			seq_printf(s, "\tForward to: PF%d ", pf);
3033
3034			if (target & RVU_PFVF_FUNC_MASK) {
3035				vf = (target & RVU_PFVF_FUNC_MASK) - 1;
3036				seq_printf(s, "VF%d", vf);
3037			}
3038			seq_puts(s, "\n");
3039			seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
3040			seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
3041		}
3042
3043		rvu_dbg_npc_mcam_show_action(s, iter);
3044
3045		enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
3046		seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");
3047
3048		if (!iter->has_cntr)
3049			continue;
3050		seq_printf(s, "\tcounter: %d\n", iter->cntr);
3051
3052		hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
3053		seq_printf(s, "\thits: %lld\n", hits);
3054	}
3055	mutex_unlock(&mcam->lock);
3056
3057	return 0;
3058}
3059
3060RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
3061
3062static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused)
3063{
3064	struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 };
3065	struct npc_exact_table_entry *cam_entry;
3066	struct npc_exact_table *table;
3067	struct rvu *rvu = s->private;
3068	int i, j;
3069
3070	u8 bitmap = 0;
3071
3072	table = rvu->hw->table;
3073
3074	mutex_lock(&table->lock);
3075
3076	/* Check if there is at least one entry in mem table */
3077	if (!table->mem_tbl_entry_cnt)
3078		goto dump_cam_table;
3079
3080	/* Print table headers */
3081	seq_puts(s, "\n\tExact Match MEM Table\n");
3082	seq_puts(s, "Index\t");
3083
3084	for (i = 0; i < table->mem_table.ways; i++) {
3085		mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i],
3086							struct npc_exact_table_entry, list);
3087
3088		seq_printf(s, "Way-%d\t\t\t\t\t", i);
3089	}
3090
3091	seq_puts(s, "\n");
3092	for (i = 0; i < table->mem_table.ways; i++)
3093		seq_puts(s, "\tChan  MAC                     \t");
3094
3095	seq_puts(s, "\n\n");
3096
3097	/* Print mem table entries */
3098	for (i = 0; i < table->mem_table.depth; i++) {
3099		bitmap = 0;
3100		for (j = 0; j < table->mem_table.ways; j++) {
3101			if (!mem_entry[j])
3102				continue;
3103
3104			if (mem_entry[j]->index != i)
3105				continue;
3106
3107			bitmap |= BIT(j);
3108		}
3109
3110		/* No valid entries */
3111		if (!bitmap)
3112			continue;
3113
3114		seq_printf(s, "%d\t", i);
3115		for (j = 0; j < table->mem_table.ways; j++) {
3116			if (!(bitmap & BIT(j))) {
3117				seq_puts(s, "nil\t\t\t\t\t");
3118				continue;
3119			}
3120
3121			seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan,
3122				   mem_entry[j]->mac);
3123			mem_entry[j] = list_next_entry(mem_entry[j], list);
3124		}
3125		seq_puts(s, "\n");
3126	}
3127
3128dump_cam_table:
3129
3130	if (!table->cam_tbl_entry_cnt)
3131		goto done;
3132
3133	seq_puts(s, "\n\tExact Match CAM Table\n");
3134	seq_puts(s, "index\tchan\tMAC\n");
3135
3136	/* Traverse cam table entries */
3137	list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) {
3138		seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan,
3139			   cam_entry->mac);
3140	}
3141
3142done:
3143	mutex_unlock(&table->lock);
3144	return 0;
3145}
3146
3147RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL);
3148
3149static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused)
3150{
3151	struct npc_exact_table *table;
3152	struct rvu *rvu = s->private;
3153	int i;
3154
3155	table = rvu->hw->table;
3156
3157	seq_puts(s, "\n\tExact Table Info\n");
3158	seq_printf(s, "Exact Match Feature : %s\n",
3159		   rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disable");
3160	if (!rvu->hw->cap.npc_exact_match_enabled)
3161		return 0;
3162
3163	seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n");
3164	for (i = 0; i < table->num_drop_rules; i++)
3165		seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]);
3166
3167	seq_puts(s, "\nMcam Index\tPromisc Mode Status\n");
3168	for (i = 0; i < table->num_drop_rules; i++)
3169		seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? "on" : "off");
3170
3171	seq_puts(s, "\n\tMEM Table Info\n");
3172	seq_printf(s, "Ways : %d\n", table->mem_table.ways);
3173	seq_printf(s, "Depth : %d\n", table->mem_table.depth);
3174	seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask);
3175	seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask);
3176	seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset);
3177
3178	seq_puts(s, "\n\tCAM Table Info\n");
3179	seq_printf(s, "Depth : %d\n", table->cam_table.depth);
3180
3181	return 0;
3182}
3183
3184RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL);
3185
3186static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused)
3187{
3188	struct npc_exact_table *table;
3189	struct rvu *rvu = s->private;
3190	struct npc_key_field *field;
3191	u16 chan, pcifunc;
3192	int blkaddr, i;
3193	u64 cfg, cam1;
3194	char *str;
3195
3196	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3197	table = rvu->hw->table;
3198
3199	field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN];
3200
3201	seq_puts(s, "\n\t Exact Hit on drop status\n");
3202	seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n");
3203
3204	for (i = 0; i < table->num_drop_rules; i++) {
3205		pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i);
3206		cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0));
3207
3208		/* channel will be always in keyword 0 */
3209		cam1 = rvu_read64(rvu, blkaddr,
3210				  NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1));
3211		chan = field->kw_mask[0] & cam1;
3212
3213		str = (cfg & 1) ? "enabled" : "disabled";
3214
3215		seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i,
3216			   rvu_read64(rvu, blkaddr,
3217				      NPC_AF_MATCH_STATX(table->counter_idx[i])),
3218			   chan, str);
3219	}
3220
3221	return 0;
3222}
3223
3224RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL);
3225
3226static void rvu_dbg_npc_init(struct rvu *rvu)
3227{
3228	rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
3229
3230	debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
3231			    &rvu_dbg_npc_mcam_info_fops);
3232	debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
3233			    &rvu_dbg_npc_mcam_rules_fops);
3234
3235	debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
3236			    &rvu_dbg_npc_rx_miss_act_fops);
3237
3238	if (!rvu->hw->cap.npc_exact_match_enabled)
3239		return;
3240
3241	debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu,
3242			    &rvu_dbg_npc_exact_entries_fops);
3243
3244	debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu,
3245			    &rvu_dbg_npc_exact_info_fops);
3246
3247	debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu,
3248			    &rvu_dbg_npc_exact_drop_cnt_fops);
3249
3250}
3251
3252static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
3253{
3254	struct cpt_ctx *ctx = filp->private;
3255	u64 busy_sts = 0, free_sts = 0;
3256	u32 e_min = 0, e_max = 0, e, i;
3257	u16 max_ses, max_ies, max_aes;
3258	struct rvu *rvu = ctx->rvu;
3259	int blkaddr = ctx->blkaddr;
3260	u64 reg;
3261
3262	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
3263	max_ses = reg & 0xffff;
3264	max_ies = (reg >> 16) & 0xffff;
3265	max_aes = (reg >> 32) & 0xffff;
3266
3267	switch (eng_type) {
3268	case CPT_AE_TYPE:
3269		e_min = max_ses + max_ies;
3270		e_max = max_ses + max_ies + max_aes;
3271		break;
3272	case CPT_SE_TYPE:
3273		e_min = 0;
3274		e_max = max_ses;
3275		break;
3276	case CPT_IE_TYPE:
3277		e_min = max_ses;
3278		e_max = max_ses + max_ies;
3279		break;
3280	default:
3281		return -EINVAL;
3282	}
3283
3284	for (e = e_min, i = 0; e < e_max; e++, i++) {
3285		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
3286		if (reg & 0x1)
3287			busy_sts |= 1ULL << i;
3288
3289		if (reg & 0x2)
3290			free_sts |= 1ULL << i;
3291	}
3292	seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
3293	seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);
3294
3295	return 0;
3296}
3297
3298static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
3299{
3300	return cpt_eng_sts_display(filp, CPT_AE_TYPE);
3301}
3302
3303RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
3304
3305static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
3306{
3307	return cpt_eng_sts_display(filp, CPT_SE_TYPE);
3308}
3309
3310RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
3311
3312static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
3313{
3314	return cpt_eng_sts_display(filp, CPT_IE_TYPE);
3315}
3316
3317RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
3318
3319static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
3320{
3321	struct cpt_ctx *ctx = filp->private;
3322	u16 max_ses, max_ies, max_aes;
3323	struct rvu *rvu = ctx->rvu;
3324	int blkaddr = ctx->blkaddr;
3325	u32 e_max, e;
3326	u64 reg;
3327
3328	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
3329	max_ses = reg & 0xffff;
3330	max_ies = (reg >> 16) & 0xffff;
3331	max_aes = (reg >> 32) & 0xffff;
3332
3333	e_max = max_ses + max_ies + max_aes;
3334
3335	seq_puts(filp, "===========================================\n");
3336	for (e = 0; e < e_max; e++) {
3337		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
3338		seq_printf(filp, "CPT Engine[%u] Group Enable   0x%02llx\n", e,
3339			   reg & 0xff);
3340		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
3341		seq_printf(filp, "CPT Engine[%u] Active Info    0x%llx\n", e,
3342			   reg);
3343		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
3344		seq_printf(filp, "CPT Engine[%u] Control        0x%llx\n", e,
3345			   reg);
3346		seq_puts(filp, "===========================================\n");
3347	}
3348	return 0;
3349}
3350
3351RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
3352
3353static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
3354{
3355	struct cpt_ctx *ctx = filp->private;
3356	int blkaddr = ctx->blkaddr;
3357	struct rvu *rvu = ctx->rvu;
3358	struct rvu_block *block;
3359	struct rvu_hwinfo *hw;
3360	u64 reg;
3361	u32 lf;
3362
3363	hw = rvu->hw;
3364	block = &hw->block[blkaddr];
3365	if (!block->lf.bmap)
3366		return -ENODEV;
3367
3368	seq_puts(filp, "===========================================\n");
3369	for (lf = 0; lf < block->lf.max; lf++) {
3370		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
3371		seq_printf(filp, "CPT Lf[%u] CTL          0x%llx\n", lf, reg);
3372		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
3373		seq_printf(filp, "CPT Lf[%u] CTL2         0x%llx\n", lf, reg);
3374		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
3375		seq_printf(filp, "CPT Lf[%u] PTR_CTL      0x%llx\n", lf, reg);
3376		reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
3377				(lf << block->lfshift));
3378		seq_printf(filp, "CPT Lf[%u] CFG          0x%llx\n", lf, reg);
3379		seq_puts(filp, "===========================================\n");
3380	}
3381	return 0;
3382}
3383
3384RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
3385
3386static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
3387{
3388	struct cpt_ctx *ctx = filp->private;
3389	struct rvu *rvu = ctx->rvu;
3390	int blkaddr = ctx->blkaddr;
3391	u64 reg0, reg1;
3392
3393	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
3394	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
3395	seq_printf(filp, "CPT_AF_FLTX_INT:       0x%llx 0x%llx\n", reg0, reg1);
3396	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
3397	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
3398	seq_printf(filp, "CPT_AF_PSNX_EXE:       0x%llx 0x%llx\n", reg0, reg1);
3399	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
3400	seq_printf(filp, "CPT_AF_PSNX_LF:        0x%llx\n", reg0);
3401	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
3402	seq_printf(filp, "CPT_AF_RVU_INT:        0x%llx\n", reg0);
3403	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
3404	seq_printf(filp, "CPT_AF_RAS_INT:        0x%llx\n", reg0);
3405	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
3406	seq_printf(filp, "CPT_AF_EXE_ERR_INFO:   0x%llx\n", reg0);
3407
3408	return 0;
3409}
3410
3411RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
3412
3413static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
3414{
3415	struct cpt_ctx *ctx = filp->private;
3416	struct rvu *rvu = ctx->rvu;
3417	int blkaddr = ctx->blkaddr;
3418	u64 reg;
3419
3420	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
3421	seq_printf(filp, "CPT instruction requests   %llu\n", reg);
3422	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
3423	seq_printf(filp, "CPT instruction latency    %llu\n", reg);
3424	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
3425	seq_printf(filp, "CPT NCB read requests      %llu\n", reg);
3426	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
3427	seq_printf(filp, "CPT NCB read latency       %llu\n", reg);
3428	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
3429	seq_printf(filp, "CPT read requests caused by UC fills   %llu\n", reg);
3430	reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
3431	seq_printf(filp, "CPT active cycles pc       %llu\n", reg);
3432	reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
3433	seq_printf(filp, "CPT clock count pc         %llu\n", reg);
3434
3435	return 0;
3436}
3437
3438RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
3439
3440static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
3441{
3442	struct cpt_ctx *ctx;
3443
3444	if (!is_block_implemented(rvu->hw, blkaddr))
3445		return;
3446
3447	if (blkaddr == BLKADDR_CPT0) {
3448		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
3449		ctx = &rvu->rvu_dbg.cpt_ctx[0];
3450		ctx->blkaddr = BLKADDR_CPT0;
3451		ctx->rvu = rvu;
3452	} else {
3453		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
3454						      rvu->rvu_dbg.root);
3455		ctx = &rvu->rvu_dbg.cpt_ctx[1];
3456		ctx->blkaddr = BLKADDR_CPT1;
3457		ctx->rvu = rvu;
3458	}
3459
3460	debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
3461			    &rvu_dbg_cpt_pc_fops);
3462	debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3463			    &rvu_dbg_cpt_ae_sts_fops);
3464	debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3465			    &rvu_dbg_cpt_se_sts_fops);
3466	debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3467			    &rvu_dbg_cpt_ie_sts_fops);
3468	debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
3469			    &rvu_dbg_cpt_engines_info_fops);
3470	debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
3471			    &rvu_dbg_cpt_lfs_info_fops);
3472	debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
3473			    &rvu_dbg_cpt_err_info_fops);
3474}
3475
3476static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
3477{
3478	if (!is_rvu_otx2(rvu))
3479		return "cn10k";
3480	else
3481		return "octeontx2";
3482}
3483
3484void rvu_dbg_init(struct rvu *rvu)
3485{
3486	rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
3487
3488	debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
3489			    &rvu_dbg_rsrc_status_fops);
3490
3491	if (!is_rvu_otx2(rvu))
3492		debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
3493				    rvu, &rvu_dbg_lmtst_map_table_fops);
3494
3495	if (!cgx_get_cgxcnt_max())
3496		goto create;
3497
3498	if (is_rvu_otx2(rvu))
3499		debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
3500				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
3501	else
3502		debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
3503				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
3504
3505create:
3506	rvu_dbg_npa_init(rvu);
3507	rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
3508
3509	rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
3510	rvu_dbg_cgx_init(rvu);
3511	rvu_dbg_npc_init(rvu);
3512	rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
3513	rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
3514	rvu_dbg_mcs_init(rvu);
3515}
3516
3517void rvu_dbg_exit(struct rvu *rvu)
3518{
3519	debugfs_remove_recursive(rvu->rvu_dbg.root);
3520}
3521
3522#endif /* CONFIG_DEBUG_FS */
3523