1// SPDX-License-Identifier: GPL-2.0+
2/* Copyright (c) 2018-2019 Hisilicon Limited. */
3
4#include <linux/device.h>
5#include <linux/sched/clock.h>
6
7#include "hclge_debugfs.h"
8#include "hclge_err.h"
9#include "hclge_main.h"
10#include "hclge_regs.h"
11#include "hclge_tm.h"
12#include "hnae3.h"
13
14static const char * const state_str[] = { "off", "on" };
15static const char * const hclge_mac_state_str[] = {
16	"TO_ADD", "TO_DEL", "ACTIVE"
17};
18
19static const char * const tc_map_mode_str[] = { "PRIO", "DSCP" };
20
21static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
22	{ .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
23	  .dfx_msg = &hclge_dbg_bios_common_reg[0],
24	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
25		       .offset = HCLGE_DBG_DFX_BIOS_OFFSET,
26		       .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
27	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
28	  .dfx_msg = &hclge_dbg_ssu_reg_0[0],
29	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
30		       .offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
31		       .cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
32	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
33	  .dfx_msg = &hclge_dbg_ssu_reg_1[0],
34	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
35		       .offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
36		       .cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
37	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
38	  .dfx_msg = &hclge_dbg_ssu_reg_2[0],
39	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
40		       .offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
41		       .cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
42	{ .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
43	  .dfx_msg = &hclge_dbg_igu_egu_reg[0],
44	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
45		       .offset = HCLGE_DBG_DFX_IGU_OFFSET,
46		       .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
47	{ .cmd = HNAE3_DBG_CMD_REG_RPU,
48	  .dfx_msg = &hclge_dbg_rpu_reg_0[0],
49	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
50		       .offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
51		       .cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
52	{ .cmd = HNAE3_DBG_CMD_REG_RPU,
53	  .dfx_msg = &hclge_dbg_rpu_reg_1[0],
54	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
55		       .offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
56		       .cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
57	{ .cmd = HNAE3_DBG_CMD_REG_NCSI,
58	  .dfx_msg = &hclge_dbg_ncsi_reg[0],
59	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
60		       .offset = HCLGE_DBG_DFX_NCSI_OFFSET,
61		       .cmd = HCLGE_OPC_DFX_NCSI_REG } },
62	{ .cmd = HNAE3_DBG_CMD_REG_RTC,
63	  .dfx_msg = &hclge_dbg_rtc_reg[0],
64	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
65		       .offset = HCLGE_DBG_DFX_RTC_OFFSET,
66		       .cmd = HCLGE_OPC_DFX_RTC_REG } },
67	{ .cmd = HNAE3_DBG_CMD_REG_PPP,
68	  .dfx_msg = &hclge_dbg_ppp_reg[0],
69	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
70		       .offset = HCLGE_DBG_DFX_PPP_OFFSET,
71		       .cmd = HCLGE_OPC_DFX_PPP_REG } },
72	{ .cmd = HNAE3_DBG_CMD_REG_RCB,
73	  .dfx_msg = &hclge_dbg_rcb_reg[0],
74	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
75		       .offset = HCLGE_DBG_DFX_RCB_OFFSET,
76		       .cmd = HCLGE_OPC_DFX_RCB_REG } },
77	{ .cmd = HNAE3_DBG_CMD_REG_TQP,
78	  .dfx_msg = &hclge_dbg_tqp_reg[0],
79	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
80		       .offset = HCLGE_DBG_DFX_TQP_OFFSET,
81		       .cmd = HCLGE_OPC_DFX_TQP_REG } },
82};
83
84/* Make sure that len(name) + interval >= maxlen(item data) + 2. For example,
85 * if name = "pkt_num" (len: 7) and the item data is a u32 printed with "%u"
86 * (max len: 10), the interval should be at least 5.
87 */
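/* Illustrative check of the rule above: the "DWRR" column (name len: 4,
 * interval: 2) gives 4 + 2 = 6, which is enough for a u8 weight printed
 * with "%3u" (3 + 2 = 5); a u32 printed with "%u" (max len: 10) under the
 * same 4-character name would need an interval of at least 8.
 */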
88static void hclge_dbg_fill_content(char *content, u16 len,
89				   const struct hclge_dbg_item *items,
90				   const char **result, u16 size)
91{
92#define HCLGE_DBG_LINE_END_LEN	2
93	char *pos = content;
94	u16 item_len;
95	u16 i;
96
97	if (!len) {
98		return;
99	} else if (len <= HCLGE_DBG_LINE_END_LEN) {
100		*pos++ = '\0';
101		return;
102	}
103
104	memset(content, ' ', len);
105	len -= HCLGE_DBG_LINE_END_LEN;
106
107	for (i = 0; i < size; i++) {
108		item_len = strlen(items[i].name) + items[i].interval;
109		if (len < item_len)
110			break;
111
112		if (result) {
113			if (item_len < strlen(result[i]))
114				break;
115			memcpy(pos, result[i], strlen(result[i]));
116		} else {
117			memcpy(pos, items[i].name, strlen(items[i].name));
118		}
119		pos += item_len;
120		len -= item_len;
121	}
122	*pos++ = '\n';
123	*pos++ = '\0';
124}
125
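/* Render a printable function id: index 0 is the PF, index N (N > 0) is
 * printed as "vf<N-1>". The caller supplies the destination buffer, e.g. the
 * HCLGE_DBG_ID_LEN sized array used by hclge_dbg_dump_fd_counter().
 */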
126static char *hclge_dbg_get_func_id_str(char *buf, u8 id)
127{
128	if (id)
129		sprintf(buf, "vf%u", id - 1U);
130	else
131		sprintf(buf, "pf");
132
133	return buf;
134}
135
136static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset,
137				    u32 *bd_num)
138{
139	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
140	int entries_per_desc;
141	int index;
142	int ret;
143
144	ret = hclge_query_bd_num_cmd_send(hdev, desc);
145	if (ret) {
146		dev_err(&hdev->pdev->dev,
147			"failed to get dfx bd_num, offset = %d, ret = %d\n",
148			offset, ret);
149		return ret;
150	}
151
152	entries_per_desc = ARRAY_SIZE(desc[0].data);
153	index = offset % entries_per_desc;
154
155	*bd_num = le32_to_cpu(desc[offset / entries_per_desc].data[index]);
156	if (!(*bd_num)) {
157		dev_err(&hdev->pdev->dev, "The value of dfx bd_num is 0!\n");
158		return -EINVAL;
159	}
160
161	return 0;
162}
163
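/* Build a chain of @bd_num descriptors for @cmd: data[0] of the first
 * descriptor carries @index, every descriptor but the last is flagged with
 * HCLGE_COMM_CMD_FLAG_NEXT, and the whole chain is sent as one command.
 * @desc_src must provide room for @bd_num descriptors.
 */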
164static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
165			      struct hclge_desc *desc_src,
166			      int index, int bd_num,
167			      enum hclge_opcode_type cmd)
168{
169	struct hclge_desc *desc = desc_src;
170	int ret, i;
171
172	hclge_cmd_setup_basic_desc(desc, cmd, true);
173	desc->data[0] = cpu_to_le32(index);
174
175	for (i = 1; i < bd_num; i++) {
176		desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
177		desc++;
178		hclge_cmd_setup_basic_desc(desc, cmd, true);
179	}
180
181	ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
182	if (ret)
183		dev_err(&hdev->pdev->dev,
184			"cmd(0x%x) send fail, ret = %d\n", cmd, ret);
185	return ret;
186}
187
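/* Dump the per-TQP DFX registers described by @reg_info: first print an
 * "itemN = <name>" legend and an "itemN" header row, then one row of
 * register values for each TQP of the PF (vport[0].alloc_tqps queues).
 */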
188static int
189hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
190		       const struct hclge_dbg_reg_type_info *reg_info,
191		       char *buf, int len, int *pos)
192{
193	const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
194	const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
195	struct hclge_desc *desc_src;
196	u32 index, entry, i, cnt;
197	int bd_num, min_num, ret;
198	struct hclge_desc *desc;
199
200	ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
201	if (ret)
202		return ret;
203
204	desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
205	if (!desc_src)
206		return -ENOMEM;
207
208	min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
209
210	for (i = 0, cnt = 0; i < min_num; i++, dfx_message++)
211		*pos += scnprintf(buf + *pos, len - *pos, "item%u = %s\n",
212				  cnt++, dfx_message->message);
213
214	for (i = 0; i < cnt; i++)
215		*pos += scnprintf(buf + *pos, len - *pos, "item%u\t", i);
216
217	*pos += scnprintf(buf + *pos, len - *pos, "\n");
218
219	for (index = 0; index < hdev->vport[0].alloc_tqps; index++) {
220		dfx_message = reg_info->dfx_msg;
221		desc = desc_src;
222		ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num,
223					 reg_msg->cmd);
224		if (ret)
225			break;
226
227		for (i = 0; i < min_num; i++, dfx_message++) {
228			entry = i % HCLGE_DESC_DATA_LEN;
229			if (i > 0 && !entry)
230				desc++;
231
232			*pos += scnprintf(buf + *pos, len - *pos, "%#x\t",
233					  le32_to_cpu(desc->data[entry]));
234		}
235		*pos += scnprintf(buf + *pos, len - *pos, "\n");
236	}
237
238	kfree(desc_src);
239	return ret;
240}
241
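/* Dump the common (non-TQP) DFX registers described by @reg_info as
 * "name: value" lines; entries whose flag is zero in the DFX message table
 * are skipped.
 */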
242static int
243hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
244			  const struct hclge_dbg_reg_type_info *reg_info,
245			  char *buf, int len, int *pos)
246{
247	const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
248	const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
249	struct hclge_desc *desc_src;
250	int bd_num, min_num, ret;
251	struct hclge_desc *desc;
252	u32 entry, i;
253
254	ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
255	if (ret)
256		return ret;
257
258	desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
259	if (!desc_src)
260		return -ENOMEM;
261
262	desc = desc_src;
263
264	ret = hclge_dbg_cmd_send(hdev, desc, 0, bd_num, reg_msg->cmd);
265	if (ret) {
266		kfree(desc_src);
267		return ret;
268	}
269
270	min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
271
272	for (i = 0; i < min_num; i++, dfx_message++) {
273		entry = i % HCLGE_DESC_DATA_LEN;
274		if (i > 0 && !entry)
275			desc++;
276		if (!dfx_message->flag)
277			continue;
278
279		*pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
280				  dfx_message->message,
281				  le32_to_cpu(desc->data[entry]));
282	}
283
284	kfree(desc_src);
285	return 0;
286}
287
288static const struct hclge_dbg_status_dfx_info hclge_dbg_mac_en_status[] = {
289	{HCLGE_MAC_TX_EN_B,  "mac_trans_en"},
290	{HCLGE_MAC_RX_EN_B,  "mac_rcv_en"},
291	{HCLGE_MAC_PAD_TX_B, "pad_trans_en"},
292	{HCLGE_MAC_PAD_RX_B, "pad_rcv_en"},
293	{HCLGE_MAC_1588_TX_B, "1588_trans_en"},
294	{HCLGE_MAC_1588_RX_B, "1588_rcv_en"},
295	{HCLGE_MAC_APP_LP_B,  "mac_app_loop_en"},
296	{HCLGE_MAC_LINE_LP_B, "mac_line_loop_en"},
297	{HCLGE_MAC_FCS_TX_B,  "mac_fcs_tx_en"},
298	{HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, "mac_rx_oversize_truncate_en"},
299	{HCLGE_MAC_RX_FCS_STRIP_B, "mac_rx_fcs_strip_en"},
300	{HCLGE_MAC_RX_FCS_B, "mac_rx_fcs_en"},
301	{HCLGE_MAC_TX_UNDER_MIN_ERR_B, "mac_tx_under_min_err_en"},
302	{HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, "mac_tx_oversize_truncate_en"}
303};
304
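/* Read HCLGE_OPC_CONFIG_MAC_MODE and report each enable bit listed in
 * hclge_dbg_mac_en_status[] as a "name: value" line.
 */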
305static int  hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
306					     int len, int *pos)
307{
308	struct hclge_config_mac_mode_cmd *req;
309	struct hclge_desc desc;
310	u32 loop_en, i, offset;
311	int ret;
312
313	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
314
315	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
316	if (ret) {
317		dev_err(&hdev->pdev->dev,
318			"failed to dump mac enable status, ret = %d\n", ret);
319		return ret;
320	}
321
322	req = (struct hclge_config_mac_mode_cmd *)desc.data;
323	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
324
325	for (i = 0; i < ARRAY_SIZE(hclge_dbg_mac_en_status); i++) {
326		offset = hclge_dbg_mac_en_status[i].offset;
327		*pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
328				  hclge_dbg_mac_en_status[i].message,
329				  hnae3_get_bit(loop_en, offset));
330	}
331
332	return 0;
333}
334
335static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf,
336					 int len, int *pos)
337{
338	struct hclge_config_max_frm_size_cmd *req;
339	struct hclge_desc desc;
340	int ret;
341
342	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true);
343
344	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
345	if (ret) {
346		dev_err(&hdev->pdev->dev,
347			"failed to dump mac frame size, ret = %d\n", ret);
348		return ret;
349	}
350
351	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
352
353	*pos += scnprintf(buf + *pos, len - *pos, "max_frame_size: %u\n",
354			  le16_to_cpu(req->max_frm_size));
355	*pos += scnprintf(buf + *pos, len - *pos, "min_frame_size: %u\n",
356			  req->min_frm_size);
357
358	return 0;
359}
360
361static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf,
362					   int len, int *pos)
363{
364#define HCLGE_MAC_SPEED_SHIFT	0
365#define HCLGE_MAC_SPEED_MASK	GENMASK(5, 0)
366#define HCLGE_MAC_DUPLEX_SHIFT	7
367
368	struct hclge_config_mac_speed_dup_cmd *req;
369	struct hclge_desc desc;
370	int ret;
371
372	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true);
373
374	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
375	if (ret) {
376		dev_err(&hdev->pdev->dev,
377			"failed to dump mac speed duplex, ret = %d\n", ret);
378		return ret;
379	}
380
381	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
382
383	*pos += scnprintf(buf + *pos, len - *pos, "speed: %#lx\n",
384			  hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
385					  HCLGE_MAC_SPEED_SHIFT));
386	*pos += scnprintf(buf + *pos, len - *pos, "duplex: %#x\n",
387			  hnae3_get_bit(req->speed_dup,
388					HCLGE_MAC_DUPLEX_SHIFT));
389	return 0;
390}
391
392static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
393{
394	int pos = 0;
395	int ret;
396
397	ret = hclge_dbg_dump_mac_enable_status(hdev, buf, len, &pos);
398	if (ret)
399		return ret;
400
401	ret = hclge_dbg_dump_mac_frame_size(hdev, buf, len, &pos);
402	if (ret)
403		return ret;
404
405	return hclge_dbg_dump_mac_speed_duplex(hdev, buf, len, &pos);
406}
407
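/* For each qset, issue HCLGE_OPC_QSET_DFX_STS and decode the low byte of
 * desc.data[1] as a bitmap: bits 0-3 are the roce_qset_mask, nic_qset_mask,
 * qset_shaping_pass and qset_bp_status columns.
 */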
408static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
409				   int *pos)
410{
411	struct hclge_dbg_bitmap_cmd req;
412	struct hclge_desc desc;
413	u16 qset_id, qset_num;
414	int ret;
415
416	ret = hclge_tm_get_qset_num(hdev, &qset_num);
417	if (ret)
418		return ret;
419
420	*pos += scnprintf(buf + *pos, len - *pos,
421			  "qset_id  roce_qset_mask  nic_qset_mask  qset_shaping_pass  qset_bp_status\n");
422	for (qset_id = 0; qset_id < qset_num; qset_id++) {
423		ret = hclge_dbg_cmd_send(hdev, &desc, qset_id, 1,
424					 HCLGE_OPC_QSET_DFX_STS);
425		if (ret)
426			return ret;
427
428		req.bitmap = (u8)le32_to_cpu(desc.data[1]);
429
430		*pos += scnprintf(buf + *pos, len - *pos,
431				  "%04u           %#x            %#x             %#x               %#x\n",
432				  qset_id, req.bit0, req.bit1, req.bit2,
433				  req.bit3);
434	}
435
436	return 0;
437}
438
439static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
440				  int *pos)
441{
442	struct hclge_dbg_bitmap_cmd req;
443	struct hclge_desc desc;
444	u8 pri_id, pri_num;
445	int ret;
446
447	ret = hclge_tm_get_pri_num(hdev, &pri_num);
448	if (ret)
449		return ret;
450
451	*pos += scnprintf(buf + *pos, len - *pos,
452			  "pri_id  pri_mask  pri_cshaping_pass  pri_pshaping_pass\n");
453	for (pri_id = 0; pri_id < pri_num; pri_id++) {
454		ret = hclge_dbg_cmd_send(hdev, &desc, pri_id, 1,
455					 HCLGE_OPC_PRI_DFX_STS);
456		if (ret)
457			return ret;
458
459		req.bitmap = (u8)le32_to_cpu(desc.data[1]);
460
461		*pos += scnprintf(buf + *pos, len - *pos,
462				  "%03u       %#x           %#x                %#x\n",
463				  pri_id, req.bit0, req.bit1, req.bit2);
464	}
465
466	return 0;
467}
468
469static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
470				 int *pos)
471{
472	struct hclge_dbg_bitmap_cmd req;
473	struct hclge_desc desc;
474	u8 pg_id;
475	int ret;
476
477	*pos += scnprintf(buf + *pos, len - *pos,
478			  "pg_id  pg_mask  pg_cshaping_pass  pg_pshaping_pass\n");
479	for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
480		ret = hclge_dbg_cmd_send(hdev, &desc, pg_id, 1,
481					 HCLGE_OPC_PG_DFX_STS);
482		if (ret)
483			return ret;
484
485		req.bitmap = (u8)le32_to_cpu(desc.data[1]);
486
487		*pos += scnprintf(buf + *pos, len - *pos,
488				  "%03u      %#x           %#x               %#x\n",
489				  pg_id, req.bit0, req.bit1, req.bit2);
490	}
491
492	return 0;
493}
494
495static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
496				    int *pos)
497{
498	struct hclge_desc desc;
499	u16 nq_id;
500	int ret;
501
502	*pos += scnprintf(buf + *pos, len - *pos,
503			  "nq_id  sch_nic_queue_cnt  sch_roce_queue_cnt\n");
504	for (nq_id = 0; nq_id < hdev->num_tqps; nq_id++) {
505		ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
506					 HCLGE_OPC_SCH_NQ_CNT);
507		if (ret)
508			return ret;
509
510		*pos += scnprintf(buf + *pos, len - *pos, "%04u           %#x",
511				  nq_id, le32_to_cpu(desc.data[1]));
512
513		ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
514					 HCLGE_OPC_SCH_RQ_CNT);
515		if (ret)
516			return ret;
517
518		*pos += scnprintf(buf + *pos, len - *pos,
519				  "               %#x\n",
520				  le32_to_cpu(desc.data[1]));
521	}
522
523	return 0;
524}
525
526static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
527				   int *pos)
528{
529	struct hclge_dbg_bitmap_cmd req;
530	struct hclge_desc desc;
531	u8 port_id = 0;
532	int ret;
533
534	ret = hclge_dbg_cmd_send(hdev, &desc, port_id, 1,
535				 HCLGE_OPC_PORT_DFX_STS);
536	if (ret)
537		return ret;
538
539	req.bitmap = (u8)le32_to_cpu(desc.data[1]);
540
541	*pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
542			 req.bit0);
543	*pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
544			 req.bit1);
545
546	return 0;
547}
548
549static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
550				 int *pos)
551{
552	struct hclge_desc desc[2];
553	u8 port_id = 0;
554	int ret;
555
556	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
557				 HCLGE_OPC_TM_INTERNAL_CNT);
558	if (ret)
559		return ret;
560
561	*pos += scnprintf(buf + *pos, len - *pos, "SCH_NIC_NUM: %#x\n",
562			  le32_to_cpu(desc[0].data[1]));
563	*pos += scnprintf(buf + *pos, len - *pos, "SCH_ROCE_NUM: %#x\n",
564			  le32_to_cpu(desc[0].data[2]));
565
566	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 2,
567				 HCLGE_OPC_TM_INTERNAL_STS);
568	if (ret)
569		return ret;
570
571	*pos += scnprintf(buf + *pos, len - *pos, "pri_bp: %#x\n",
572			  le32_to_cpu(desc[0].data[1]));
573	*pos += scnprintf(buf + *pos, len - *pos, "fifo_dfx_info: %#x\n",
574			  le32_to_cpu(desc[0].data[2]));
575	*pos += scnprintf(buf + *pos, len - *pos,
576			  "sch_roce_fifo_afull_gap: %#x\n",
577			  le32_to_cpu(desc[0].data[3]));
578	*pos += scnprintf(buf + *pos, len - *pos,
579			  "tx_private_waterline: %#x\n",
580			  le32_to_cpu(desc[0].data[4]));
581	*pos += scnprintf(buf + *pos, len - *pos, "tm_bypass_en: %#x\n",
582			  le32_to_cpu(desc[0].data[5]));
583	*pos += scnprintf(buf + *pos, len - *pos, "SSU_TM_BYPASS_EN: %#x\n",
584			  le32_to_cpu(desc[1].data[0]));
585	*pos += scnprintf(buf + *pos, len - *pos, "SSU_RESERVE_CFG: %#x\n",
586			  le32_to_cpu(desc[1].data[1]));
587
588	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER)
589		return 0;
590
591	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
592				 HCLGE_OPC_TM_INTERNAL_STS_1);
593	if (ret)
594		return ret;
595
596	*pos += scnprintf(buf + *pos, len - *pos, "TC_MAP_SEL: %#x\n",
597			  le32_to_cpu(desc[0].data[1]));
598	*pos += scnprintf(buf + *pos, len - *pos, "IGU_PFC_PRI_EN: %#x\n",
599			  le32_to_cpu(desc[0].data[2]));
600	*pos += scnprintf(buf + *pos, len - *pos, "MAC_PFC_PRI_EN: %#x\n",
601			  le32_to_cpu(desc[0].data[3]));
602	*pos += scnprintf(buf + *pos, len - *pos, "IGU_PRI_MAP_TC_CFG: %#x\n",
603			  le32_to_cpu(desc[0].data[4]));
604	*pos += scnprintf(buf + *pos, len - *pos,
605			  "IGU_TX_PRI_MAP_TC_CFG: %#x\n",
606			  le32_to_cpu(desc[0].data[5]));
607
608	return 0;
609}
610
611static int hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *buf, int len)
612{
613	int pos = 0;
614	int ret;
615
616	ret = hclge_dbg_dump_dcb_qset(hdev, buf, len, &pos);
617	if (ret)
618		return ret;
619
620	ret = hclge_dbg_dump_dcb_pri(hdev, buf, len, &pos);
621	if (ret)
622		return ret;
623
624	ret = hclge_dbg_dump_dcb_pg(hdev, buf, len, &pos);
625	if (ret)
626		return ret;
627
628	ret = hclge_dbg_dump_dcb_queue(hdev, buf, len, &pos);
629	if (ret)
630		return ret;
631
632	ret = hclge_dbg_dump_dcb_port(hdev, buf, len, &pos);
633	if (ret)
634		return ret;
635
636	return hclge_dbg_dump_dcb_tm(hdev, buf, len, &pos);
637}
638
639static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev,
640				  enum hnae3_dbg_cmd cmd, char *buf, int len)
641{
642	const struct hclge_dbg_reg_type_info *reg_info;
643	int pos = 0, ret = 0;
644	int i;
645
646	for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
647		reg_info = &hclge_dbg_reg_info[i];
648		if (cmd == reg_info->cmd) {
649			if (cmd == HNAE3_DBG_CMD_REG_TQP)
650				return hclge_dbg_dump_reg_tqp(hdev, reg_info,
651							      buf, len, &pos);
652
653			ret = hclge_dbg_dump_reg_common(hdev, reg_info, buf,
654							len, &pos);
655			if (ret)
656				break;
657		}
658	}
659
660	return ret;
661}
662
663static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
664{
665	struct hclge_ets_tc_weight_cmd *ets_weight;
666	struct hclge_desc desc;
667	char *sch_mode_str;
668	int pos = 0;
669	int ret;
670	u8 i;
671
672	if (!hnae3_dev_dcb_supported(hdev)) {
673		dev_err(&hdev->pdev->dev,
674			"only DCB-capable devices support dumping tc\n");
675		return -EOPNOTSUPP;
676	}
677
678	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
679	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
680	if (ret) {
681		dev_err(&hdev->pdev->dev, "failed to get tc weight, ret = %d\n",
682			ret);
683		return ret;
684	}
685
686	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
687
688	pos += scnprintf(buf + pos, len - pos, "enabled tc number: %u\n",
689			 hdev->tm_info.num_tc);
690	pos += scnprintf(buf + pos, len - pos, "weight_offset: %u\n",
691			 ets_weight->weight_offset);
692
693	pos += scnprintf(buf + pos, len - pos, "TC    MODE  WEIGHT\n");
694	for (i = 0; i < HNAE3_MAX_TC; i++) {
695		sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp";
696		pos += scnprintf(buf + pos, len - pos, "%u     %4s    %3u\n",
697				 i, sch_mode_str, ets_weight->tc_weight[i]);
698	}
699
700	return 0;
701}
702
703static const struct hclge_dbg_item tm_pg_items[] = {
704	{ "ID", 2 },
705	{ "PRI_MAP", 2 },
706	{ "MODE", 2 },
707	{ "DWRR", 2 },
708	{ "C_IR_B", 2 },
709	{ "C_IR_U", 2 },
710	{ "C_IR_S", 2 },
711	{ "C_BS_B", 2 },
712	{ "C_BS_S", 2 },
713	{ "C_FLAG", 2 },
714	{ "C_RATE(Mbps)", 2 },
715	{ "P_IR_B", 2 },
716	{ "P_IR_U", 2 },
717	{ "P_IR_S", 2 },
718	{ "P_BS_B", 2 },
719	{ "P_BS_S", 2 },
720	{ "P_FLAG", 2 },
721	{ "P_RATE(Mbps)", 0 }
722};
723
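/* Fill the seven shaper columns (IR_B, IR_U, IR_S, BS_B, BS_S, FLAG and
 * RATE) from one shaper parameter set, advancing *index past the columns
 * that were written.
 */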
724static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para,
725					  char **result, u8 *index)
726{
727	sprintf(result[(*index)++], "%3u", para->ir_b);
728	sprintf(result[(*index)++], "%3u", para->ir_u);
729	sprintf(result[(*index)++], "%3u", para->ir_s);
730	sprintf(result[(*index)++], "%3u", para->bs_b);
731	sprintf(result[(*index)++], "%3u", para->bs_s);
732	sprintf(result[(*index)++], "%3u", para->flag);
733	sprintf(result[(*index)++], "%6u", para->rate);
734}
735
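/* Dump one row per PG: priority bit map, schedule mode, DWRR weight and the
 * parameters of both the C and P shapers. @data_str must provide
 * ARRAY_SIZE(tm_pg_items) buffers of HCLGE_DBG_DATA_STR_LEN bytes each, see
 * hclge_dbg_dump_tm_pg().
 */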
736static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
737				  char *buf, int len)
738{
739	struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
740	char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str;
741	u8 pg_id, sch_mode, weight, pri_bit_map, i, j;
742	char content[HCLGE_DBG_TM_INFO_LEN];
743	int pos = 0;
744	int ret;
745
746	for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) {
747		result[i] = data_str;
748		data_str += HCLGE_DBG_DATA_STR_LEN;
749	}
750
751	hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
752			       NULL, ARRAY_SIZE(tm_pg_items));
753	pos += scnprintf(buf + pos, len - pos, "%s", content);
754
755	for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
756		ret = hclge_tm_get_pg_to_pri_map(hdev, pg_id, &pri_bit_map);
757		if (ret)
758			return ret;
759
760		ret = hclge_tm_get_pg_sch_mode(hdev, pg_id, &sch_mode);
761		if (ret)
762			return ret;
763
764		ret = hclge_tm_get_pg_weight(hdev, pg_id, &weight);
765		if (ret)
766			return ret;
767
768		ret = hclge_tm_get_pg_shaper(hdev, pg_id,
769					     HCLGE_OPC_TM_PG_C_SHAPPING,
770					     &c_shaper_para);
771		if (ret)
772			return ret;
773
774		ret = hclge_tm_get_pg_shaper(hdev, pg_id,
775					     HCLGE_OPC_TM_PG_P_SHAPPING,
776					     &p_shaper_para);
777		if (ret)
778			return ret;
779
780		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
781				       "sp";
782
783		j = 0;
784		sprintf(result[j++], "%02u", pg_id);
785		sprintf(result[j++], "0x%02x", pri_bit_map);
786		sprintf(result[j++], "%4s", sch_mode_str);
787		sprintf(result[j++], "%3u", weight);
788		hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
789		hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
790
791		hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
792				       (const char **)result,
793				       ARRAY_SIZE(tm_pg_items));
794		pos += scnprintf(buf + pos, len - pos, "%s", content);
795	}
796
797	return 0;
798}
799
800static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
801{
802	char *data_str;
803	int ret;
804
805	data_str = kcalloc(ARRAY_SIZE(tm_pg_items),
806			   HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL);
807	if (!data_str)
808		return -ENOMEM;
809
810	ret = __hclge_dbg_dump_tm_pg(hdev, data_str, buf, len);
811
812	kfree(data_str);
813
814	return ret;
815}
816
817static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev,  char *buf, int len)
818{
819	struct hclge_tm_shaper_para shaper_para;
820	int pos = 0;
821	int ret;
822
823	ret = hclge_tm_get_port_shaper(hdev, &shaper_para);
824	if (ret)
825		return ret;
826
827	pos += scnprintf(buf + pos, len - pos,
828			 "IR_B  IR_U  IR_S  BS_B  BS_S  FLAG  RATE(Mbps)\n");
829	pos += scnprintf(buf + pos, len - pos,
830			 "%3u   %3u   %3u   %3u   %3u     %1u   %6u\n",
831			 shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
832			 shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
833			 shaper_para.rate);
834
835	return 0;
836}
837
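/* Dump the back-pressure to qset mapping for @tc_id: each group holds a
 * 32-bit qset bitmap and every printed row concatenates 8 groups, i.e.
 * covers 256 qsets (hence the "group_id * 256" index column).
 */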
838static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id,
839					 char *buf, int len)
840{
841	u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
842	struct hclge_bp_to_qs_map_cmd *map;
843	struct hclge_desc desc;
844	int pos = 0;
845	u8 group_id;
846	u8 grp_num;
847	u16 i = 0;
848	int ret;
849
850	grp_num = hdev->num_tqps <= HCLGE_TQP_MAX_SIZE_DEV_V2 ?
851		  HCLGE_BP_GRP_NUM : HCLGE_BP_EXT_GRP_NUM;
852	map = (struct hclge_bp_to_qs_map_cmd *)desc.data;
853	for (group_id = 0; group_id < grp_num; group_id++) {
854		hclge_cmd_setup_basic_desc(&desc,
855					   HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
856					   true);
857		map->tc_id = tc_id;
858		map->qs_group_id = group_id;
859		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
860		if (ret) {
861			dev_err(&hdev->pdev->dev,
862				"failed to get bp to qset map, ret = %d\n",
863				ret);
864			return ret;
865		}
866
867		qset_mapping[group_id] = le32_to_cpu(map->qs_bit_map);
868	}
869
870	pos += scnprintf(buf + pos, len - pos, "INDEX | TM BP QSET MAPPING:\n");
871	for (group_id = 0; group_id < grp_num / 8; group_id++) {
872		pos += scnprintf(buf + pos, len - pos,
873			 "%04d  | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
874			 group_id * 256, qset_mapping[i + 7],
875			 qset_mapping[i + 6], qset_mapping[i + 5],
876			 qset_mapping[i + 4], qset_mapping[i + 3],
877			 qset_mapping[i + 2], qset_mapping[i + 1],
878			 qset_mapping[i]);
879		i += 8;
880	}
881
882	return pos;
883}
884
885static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len)
886{
887	u16 queue_id;
888	u16 qset_id;
889	u8 link_vld;
890	int pos = 0;
891	u8 pri_id;
892	u8 tc_id;
893	int ret;
894
895	for (queue_id = 0; queue_id < hdev->num_tqps; queue_id++) {
896		ret = hclge_tm_get_q_to_qs_map(hdev, queue_id, &qset_id);
897		if (ret)
898			return ret;
899
900		ret = hclge_tm_get_qset_map_pri(hdev, qset_id, &pri_id,
901						&link_vld);
902		if (ret)
903			return ret;
904
905		ret = hclge_tm_get_q_to_tc(hdev, queue_id, &tc_id);
906		if (ret)
907			return ret;
908
909		pos += scnprintf(buf + pos, len - pos,
910				 "QUEUE_ID   QSET_ID   PRI_ID   TC_ID\n");
911		pos += scnprintf(buf + pos, len - pos,
912				 "%04u        %4u       %3u      %2u\n",
913				 queue_id, qset_id, pri_id, tc_id);
914
915		if (!hnae3_dev_dcb_supported(hdev))
916			continue;
917
918		ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, buf + pos,
919						    len - pos);
920		if (ret < 0)
921			return ret;
922		pos += ret;
923
924		pos += scnprintf(buf + pos, len - pos, "\n");
925	}
926
927	return 0;
928}
929
930static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
931{
932	struct hclge_tm_nodes_cmd *nodes;
933	struct hclge_desc desc;
934	int pos = 0;
935	int ret;
936
937	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
938	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
939	if (ret) {
940		dev_err(&hdev->pdev->dev,
941			"failed to dump tm nodes, ret = %d\n", ret);
942		return ret;
943	}
944
945	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
946
947	pos += scnprintf(buf + pos, len - pos, "       BASE_ID  MAX_NUM\n");
948	pos += scnprintf(buf + pos, len - pos, "PG      %4u      %4u\n",
949			 nodes->pg_base_id, nodes->pg_num);
950	pos += scnprintf(buf + pos, len - pos, "PRI     %4u      %4u\n",
951			 nodes->pri_base_id, nodes->pri_num);
952	pos += scnprintf(buf + pos, len - pos, "QSET    %4u      %4u\n",
953			 le16_to_cpu(nodes->qset_base_id),
954			 le16_to_cpu(nodes->qset_num));
955	pos += scnprintf(buf + pos, len - pos, "QUEUE   %4u      %4u\n",
956			 le16_to_cpu(nodes->queue_base_id),
957			 le16_to_cpu(nodes->queue_num));
958
959	return 0;
960}
961
962static const struct hclge_dbg_item tm_pri_items[] = {
963	{ "ID", 4 },
964	{ "MODE", 2 },
965	{ "DWRR", 2 },
966	{ "C_IR_B", 2 },
967	{ "C_IR_U", 2 },
968	{ "C_IR_S", 2 },
969	{ "C_BS_B", 2 },
970	{ "C_BS_S", 2 },
971	{ "C_FLAG", 2 },
972	{ "C_RATE(Mbps)", 2 },
973	{ "P_IR_B", 2 },
974	{ "P_IR_U", 2 },
975	{ "P_IR_S", 2 },
976	{ "P_BS_B", 2 },
977	{ "P_BS_S", 2 },
978	{ "P_FLAG", 2 },
979	{ "P_RATE(Mbps)", 0 }
980};
981
982static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
983{
984	struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
985	char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str;
986	char content[HCLGE_DBG_TM_INFO_LEN];
987	u8 pri_num, sch_mode, weight, i, j;
988	char *data_str;
989	int pos, ret;
990
991	ret = hclge_tm_get_pri_num(hdev, &pri_num);
992	if (ret)
993		return ret;
994
995	data_str = kcalloc(ARRAY_SIZE(tm_pri_items), HCLGE_DBG_DATA_STR_LEN,
996			   GFP_KERNEL);
997	if (!data_str)
998		return -ENOMEM;
999
1000	for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++)
1001		result[i] = &data_str[i * HCLGE_DBG_DATA_STR_LEN];
1002
1003	hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
1004			       NULL, ARRAY_SIZE(tm_pri_items));
1005	pos = scnprintf(buf, len, "%s", content);
1006
1007	for (i = 0; i < pri_num; i++) {
1008		ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
1009		if (ret)
1010			goto out;
1011
1012		ret = hclge_tm_get_pri_weight(hdev, i, &weight);
1013		if (ret)
1014			goto out;
1015
1016		ret = hclge_tm_get_pri_shaper(hdev, i,
1017					      HCLGE_OPC_TM_PRI_C_SHAPPING,
1018					      &c_shaper_para);
1019		if (ret)
1020			goto out;
1021
1022		ret = hclge_tm_get_pri_shaper(hdev, i,
1023					      HCLGE_OPC_TM_PRI_P_SHAPPING,
1024					      &p_shaper_para);
1025		if (ret)
1026			goto out;
1027
1028		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
1029			       "sp";
1030
1031		j = 0;
1032		sprintf(result[j++], "%04u", i);
1033		sprintf(result[j++], "%4s", sch_mode_str);
1034		sprintf(result[j++], "%3u", weight);
1035		hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
1036		hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
1037		hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
1038				       (const char **)result,
1039				       ARRAY_SIZE(tm_pri_items));
1040		pos += scnprintf(buf + pos, len - pos, "%s", content);
1041	}
1042
1043out:
1044	kfree(data_str);
1045	return ret;
1046}
1047
1048static const struct hclge_dbg_item tm_qset_items[] = {
1049	{ "ID", 4 },
1050	{ "MAP_PRI", 2 },
1051	{ "LINK_VLD", 2 },
1052	{ "MODE", 2 },
1053	{ "DWRR", 2 },
1054	{ "IR_B", 2 },
1055	{ "IR_U", 2 },
1056	{ "IR_S", 2 },
1057	{ "BS_B", 2 },
1058	{ "BS_S", 2 },
1059	{ "FLAG", 2 },
1060	{ "RATE(Mbps)", 0 }
1061};
1062
1063static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
1064{
1065	char data_str[ARRAY_SIZE(tm_qset_items)][HCLGE_DBG_DATA_STR_LEN];
1066	char *result[ARRAY_SIZE(tm_qset_items)], *sch_mode_str;
1067	u8 priority, link_vld, sch_mode, weight;
1068	struct hclge_tm_shaper_para shaper_para;
1069	char content[HCLGE_DBG_TM_INFO_LEN];
1070	u16 qset_num, i;
1071	int ret, pos;
1072	u8 j;
1073
1074	ret = hclge_tm_get_qset_num(hdev, &qset_num);
1075	if (ret)
1076		return ret;
1077
1078	for (i = 0; i < ARRAY_SIZE(tm_qset_items); i++)
1079		result[i] = &data_str[i][0];
1080
1081	hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
1082			       NULL, ARRAY_SIZE(tm_qset_items));
1083	pos = scnprintf(buf, len, "%s", content);
1084
1085	for (i = 0; i < qset_num; i++) {
1086		ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);
1087		if (ret)
1088			return ret;
1089
1090		ret = hclge_tm_get_qset_sch_mode(hdev, i, &sch_mode);
1091		if (ret)
1092			return ret;
1093
1094		ret = hclge_tm_get_qset_weight(hdev, i, &weight);
1095		if (ret)
1096			return ret;
1097
1098		ret = hclge_tm_get_qset_shaper(hdev, i, &shaper_para);
1099		if (ret)
1100			return ret;
1101
1102		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
1103			       "sp";
1104
1105		j = 0;
1106		sprintf(result[j++], "%04u", i);
1107		sprintf(result[j++], "%4u", priority);
1108		sprintf(result[j++], "%4u", link_vld);
1109		sprintf(result[j++], "%4s", sch_mode_str);
1110		sprintf(result[j++], "%3u", weight);
1111		hclge_dbg_fill_shaper_content(&shaper_para, result, &j);
1112
1113		hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
1114				       (const char **)result,
1115				       ARRAY_SIZE(tm_qset_items));
1116		pos += scnprintf(buf + pos, len - pos, "%s", content);
1117	}
1118
1119	return 0;
1120}
1121
1122static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
1123					int len)
1124{
1125	struct hclge_cfg_pause_param_cmd *pause_param;
1126	struct hclge_desc desc;
1127	int pos = 0;
1128	int ret;
1129
1130	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
1131	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1132	if (ret) {
1133		dev_err(&hdev->pdev->dev,
1134			"failed to dump qos pause, ret = %d\n", ret);
1135		return ret;
1136	}
1137
1138	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
1139
1140	pos += scnprintf(buf + pos, len - pos, "pause_trans_gap: 0x%x\n",
1141			 pause_param->pause_trans_gap);
1142	pos += scnprintf(buf + pos, len - pos, "pause_trans_time: 0x%x\n",
1143			 le16_to_cpu(pause_param->pause_trans_time));
1144	return 0;
1145}
1146
1147#define HCLGE_DBG_TC_MASK		0x0F
1148
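/* Priority-to-TC mappings are packed two per byte, 4 bits each: priority i
 * sits in byte i / 2, in the low nibble for even i and the high nibble for
 * odd i. For example (illustrative values), a byte of 0x31 maps its even
 * priority to TC 1 and its odd priority to TC 3.
 */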
1149static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
1150				      int len)
1151{
1152#define HCLGE_DBG_TC_BIT_WIDTH		4
1153
1154	struct hclge_qos_pri_map_cmd *pri_map;
1155	struct hclge_desc desc;
1156	int pos = 0;
1157	u8 *pri_tc;
1158	u8 tc, i;
1159	int ret;
1160
1161	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);
1162	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1163	if (ret) {
1164		dev_err(&hdev->pdev->dev,
1165			"failed to dump qos pri map, ret = %d\n", ret);
1166		return ret;
1167	}
1168
1169	pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
1170
1171	pos += scnprintf(buf + pos, len - pos, "vlan_to_pri: 0x%x\n",
1172			 pri_map->vlan_pri);
1173	pos += scnprintf(buf + pos, len - pos, "PRI  TC\n");
1174
1175	pri_tc = (u8 *)pri_map;
1176	for (i = 0; i < HNAE3_MAX_TC; i++) {
1177		tc = pri_tc[i >> 1] >> ((i & 1) * HCLGE_DBG_TC_BIT_WIDTH);
1178		tc &= HCLGE_DBG_TC_MASK;
1179		pos += scnprintf(buf + pos, len - pos, "%u     %u\n", i, tc);
1180	}
1181
1182	return 0;
1183}
1184
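/* Dump the tc map mode and, when the mode is DSCP, the DSCP -> priority ->
 * TC table; in PRIO mode only the mode line is printed.
 */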
1185static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
1186				       int len)
1187{
1188	struct hnae3_knic_private_info *kinfo = &hdev->vport[0].nic.kinfo;
1189	struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
1190	u8 *req0 = (u8 *)desc[0].data;
1191	u8 *req1 = (u8 *)desc[1].data;
1192	u8 dscp_tc[HNAE3_MAX_DSCP];
1193	int pos, ret;
1194	u8 i, j;
1195
1196	pos = scnprintf(buf, len, "tc map mode: %s\n",
1197			tc_map_mode_str[kinfo->tc_map_mode]);
1198
1199	if (kinfo->tc_map_mode != HNAE3_TC_MAP_MODE_DSCP)
1200		return 0;
1201
1202	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, true);
1203	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1204	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, true);
1205	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
1206	if (ret) {
1207		dev_err(&hdev->pdev->dev,
1208			"failed to dump qos dscp map, ret = %d\n", ret);
1209		return ret;
1210	}
1211
1212	pos += scnprintf(buf + pos, len - pos, "\nDSCP  PRIO  TC\n");
1213
1214	/* The low 32 dscp settings use bd0, the high 32 dscp settings use bd1 */
1215	for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
1216		j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
1217		/* Each dscp setting takes 4 bits, so each byte holds two dscp
1218		 * settings
1219		 */
1220		dscp_tc[i] = req0[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
1221		dscp_tc[j] = req1[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
1222		dscp_tc[i] &= HCLGE_DBG_TC_MASK;
1223		dscp_tc[j] &= HCLGE_DBG_TC_MASK;
1224	}
1225
1226	for (i = 0; i < HNAE3_MAX_DSCP; i++) {
1227		if (kinfo->dscp_prio[i] == HNAE3_PRIO_ID_INVALID)
1228			continue;
1229
1230		pos += scnprintf(buf + pos, len - pos, " %2u    %u    %u\n",
1231				 i, kinfo->dscp_prio[i], dscp_tc[i]);
1232	}
1233
1234	return 0;
1235}
1236
1237static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
1238{
1239	struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
1240	struct hclge_desc desc;
1241	int pos = 0;
1242	int i, ret;
1243
1244	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
1245	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1246	if (ret) {
1247		dev_err(&hdev->pdev->dev,
1248			"failed to dump tx buf, ret = %d\n", ret);
1249		return ret;
1250	}
1251
1252	tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1253	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1254		pos += scnprintf(buf + pos, len - pos,
1255				 "tx_packet_buf_tc_%d: 0x%x\n", i,
1256				 le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
1257
1258	return pos;
1259}
1260
1261static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf,
1262					  int len)
1263{
1264	struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
1265	struct hclge_desc desc;
1266	int pos = 0;
1267	int i, ret;
1268
1269	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
1270	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1271	if (ret) {
1272		dev_err(&hdev->pdev->dev,
1273			"failed to dump rx priv buf, ret = %d\n", ret);
1274		return ret;
1275	}
1276
1277	pos += scnprintf(buf + pos, len - pos, "\n");
1278
1279	rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
1280	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1281		pos += scnprintf(buf + pos, len - pos,
1282				 "rx_packet_buf_tc_%d: 0x%x\n", i,
1283				 le16_to_cpu(rx_buf_cmd->buf_num[i]));
1284
1285	pos += scnprintf(buf + pos, len - pos, "rx_share_buf: 0x%x\n",
1286			 le16_to_cpu(rx_buf_cmd->shared_buf));
1287
1288	return pos;
1289}
1290
1291static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf,
1292					   int len)
1293{
1294	struct hclge_rx_com_wl *rx_com_wl;
1295	struct hclge_desc desc;
1296	int pos = 0;
1297	int ret;
1298
1299	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
1300	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1301	if (ret) {
1302		dev_err(&hdev->pdev->dev,
1303			"failed to dump rx common wl, ret = %d\n", ret);
1304		return ret;
1305	}
1306
1307	rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
1308	pos += scnprintf(buf + pos, len - pos, "\n");
1309	pos += scnprintf(buf + pos, len - pos,
1310			 "rx_com_wl: high: 0x%x, low: 0x%x\n",
1311			 le16_to_cpu(rx_com_wl->com_wl.high),
1312			 le16_to_cpu(rx_com_wl->com_wl.low));
1313
1314	return pos;
1315}
1316
1317static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf,
1318					    int len)
1319{
1320	struct hclge_rx_com_wl *rx_packet_cnt;
1321	struct hclge_desc desc;
1322	int pos = 0;
1323	int ret;
1324
1325	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
1326	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1327	if (ret) {
1328		dev_err(&hdev->pdev->dev,
1329			"failed to dump rx global pkt cnt, ret = %d\n", ret);
1330		return ret;
1331	}
1332
1333	rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
1334	pos += scnprintf(buf + pos, len - pos,
1335			 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
1336			 le16_to_cpu(rx_packet_cnt->com_wl.high),
1337			 le16_to_cpu(rx_packet_cnt->com_wl.low));
1338
1339	return pos;
1340}
1341
1342static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
1343					     int len)
1344{
1345	struct hclge_rx_priv_wl_buf *rx_priv_wl;
1346	struct hclge_desc desc[2];
1347	int pos = 0;
1348	int i, ret;
1349
1350	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
1351	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1352	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
1353	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1354	if (ret) {
1355		dev_err(&hdev->pdev->dev,
1356			"failed to dump rx priv wl buf, ret = %d\n", ret);
1357		return ret;
1358	}
1359
1360	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
1361	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1362		pos += scnprintf(buf + pos, len - pos,
1363			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
1364			 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
1365			 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
1366
1367	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
1368	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1369		pos += scnprintf(buf + pos, len - pos,
1370			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
1371			 i + HCLGE_TC_NUM_ONE_DESC,
1372			 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
1373			 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
1374
1375	return pos;
1376}
1377
1378static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
1379						  char *buf, int len)
1380{
1381	struct hclge_rx_com_thrd *rx_com_thrd;
1382	struct hclge_desc desc[2];
1383	int pos = 0;
1384	int i, ret;
1385
1386	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
1387	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1388	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
1389	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1390	if (ret) {
1391		dev_err(&hdev->pdev->dev,
1392			"failed to dump rx common threshold, ret = %d\n", ret);
1393		return ret;
1394	}
1395
1396	pos += scnprintf(buf + pos, len - pos, "\n");
1397	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
1398	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1399		pos += scnprintf(buf + pos, len - pos,
1400			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
1401			 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
1402			 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
1403
1404	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
1405	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1406		pos += scnprintf(buf + pos, len - pos,
1407			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
1408			 i + HCLGE_TC_NUM_ONE_DESC,
1409			 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
1410			 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
1411
1412	return pos;
1413}
1414
1415static int hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev, char *buf,
1416				      int len)
1417{
1418	int pos = 0;
1419	int ret;
1420
1421	ret = hclge_dbg_dump_tx_buf_cfg(hdev, buf + pos, len - pos);
1422	if (ret < 0)
1423		return ret;
1424	pos += ret;
1425
1426	ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, buf + pos, len - pos);
1427	if (ret < 0)
1428		return ret;
1429	pos += ret;
1430
1431	ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, buf + pos, len - pos);
1432	if (ret < 0)
1433		return ret;
1434	pos += ret;
1435
1436	ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, buf + pos, len - pos);
1437	if (ret < 0)
1438		return ret;
1439	pos += ret;
1440
1441	pos += scnprintf(buf + pos, len - pos, "\n");
1442	if (!hnae3_dev_dcb_supported(hdev))
1443		return 0;
1444
1445	ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, buf + pos, len - pos);
1446	if (ret < 0)
1447		return ret;
1448	pos += ret;
1449
1450	ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, buf + pos,
1451						     len - pos);
1452	if (ret < 0)
1453		return ret;
1454
1455	return 0;
1456}
1457
1458static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
1459{
1460	struct hclge_mac_ethertype_idx_rd_cmd *req0;
1461	struct hclge_desc desc;
1462	u32 msg_egress_port;
1463	int pos = 0;
1464	int ret, i;
1465
1466	pos += scnprintf(buf + pos, len - pos,
1467			 "entry  mac_addr          mask  ether  ");
1468	pos += scnprintf(buf + pos, len - pos,
1469			 "mask  vlan  mask  i_map  i_dir  e_type  ");
1470	pos += scnprintf(buf + pos, len - pos, "pf_id  vf_id  q_id  drop\n");
1471
1472	for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
1473		hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
1474					   true);
1475		req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
1476		req0->index = cpu_to_le16(i);
1477
1478		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1479		if (ret) {
1480			dev_err(&hdev->pdev->dev,
1481				"failed to dump manage table, ret = %d\n", ret);
1482			return ret;
1483		}
1484
1485		if (!req0->resp_code)
1486			continue;
1487
1488		pos += scnprintf(buf + pos, len - pos, "%02u     %pM ",
1489				 le16_to_cpu(req0->index), req0->mac_addr);
1490
1491		pos += scnprintf(buf + pos, len - pos,
1492				 "%x     %04x   %x     %04x  ",
1493				 !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
1494				 le16_to_cpu(req0->ethter_type),
1495				 !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
1496				 le16_to_cpu(req0->vlan_tag) &
1497				 HCLGE_DBG_MNG_VLAN_TAG);
1498
1499		pos += scnprintf(buf + pos, len - pos,
1500				 "%x     %02x     %02x     ",
1501				 !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
1502				 req0->i_port_bitmap, req0->i_port_direction);
1503
1504		msg_egress_port = le16_to_cpu(req0->egress_port);
1505		pos += scnprintf(buf + pos, len - pos,
1506				 "%x       %x      %02x     %04x  %x\n",
1507				 !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
1508				 msg_egress_port & HCLGE_DBG_MNG_PF_ID,
1509				 (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
1510				 le16_to_cpu(req0->egress_queue),
1511				 !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
1512	}
1513
1514	return 0;
1515}
1516
1517#define HCLGE_DBG_TCAM_BUF_SIZE 256
1518
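/* Read the x or y half (selected by @sel_x) of the FD TCAM entry at
 * @tcam_msg.loc and format it into @tcam_buf (HCLGE_DBG_TCAM_BUF_SIZE
 * bytes): the 13 key words are spread over three descriptors (2 + 6 + 5)
 * and printed one per line.
 */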
1519static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
1520				  char *tcam_buf,
1521				  struct hclge_dbg_tcam_msg tcam_msg)
1522{
1523	struct hclge_fd_tcam_config_1_cmd *req1;
1524	struct hclge_fd_tcam_config_2_cmd *req2;
1525	struct hclge_fd_tcam_config_3_cmd *req3;
1526	struct hclge_desc desc[3];
1527	int pos = 0;
1528	int ret, i;
1529	__le32 *req;
1530
1531	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
1532	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1533	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
1534	desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1535	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);
1536
1537	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
1538	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
1539	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
1540
1541	req1->stage  = tcam_msg.stage;
1542	req1->xy_sel = sel_x ? 1 : 0;
1543	req1->index  = cpu_to_le32(tcam_msg.loc);
1544
1545	ret = hclge_cmd_send(&hdev->hw, desc, 3);
1546	if (ret)
1547		return ret;
1548
1549	pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1550			 "read result tcam key %s(%u):\n", sel_x ? "x" : "y",
1551			 tcam_msg.loc);
1552
1553	/* tcam_data0 ~ tcam_data1 */
1554	req = (__le32 *)req1->tcam_data;
1555	for (i = 0; i < 2; i++)
1556		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1557				 "%08x\n", le32_to_cpu(*req++));
1558
1559	/* tcam_data2 ~ tcam_data7 */
1560	req = (__le32 *)req2->tcam_data;
1561	for (i = 0; i < 6; i++)
1562		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1563				 "%08x\n", le32_to_cpu(*req++));
1564
1565	/* tcam_data8 ~ tcam_data12 */
1566	req = (__le32 *)req3->tcam_data;
1567	for (i = 0; i < 5; i++)
1568		pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1569				 "%08x\n", le32_to_cpu(*req++));
1570
1571	return ret;
1572}
1573
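/* Collect the location of every FD rule under fd_rule_lock. Returns the
 * number of rules found, or -EINVAL when the list is empty or its length
 * does not match hclge_fd_rule_num.
 */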
1574static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
1575{
1576	struct hclge_fd_rule *rule;
1577	struct hlist_node *node;
1578	int cnt = 0;
1579
1580	spin_lock_bh(&hdev->fd_rule_lock);
1581	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
1582		rule_locs[cnt] = rule->location;
1583		cnt++;
1584	}
1585	spin_unlock_bh(&hdev->fd_rule_lock);
1586
1587	if (cnt != hdev->hclge_fd_rule_num || cnt == 0)
1588		return -EINVAL;
1589
1590	return cnt;
1591}
1592
1593static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
1594{
1595	u32 rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
1596	struct hclge_dbg_tcam_msg tcam_msg;
1597	int i, ret, rule_cnt;
1598	u16 *rule_locs;
1599	char *tcam_buf;
1600	int pos = 0;
1601
1602	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
1603		dev_err(&hdev->pdev->dev,
1604			"only FD-capable devices support dumping fd tcam\n");
1605		return -EOPNOTSUPP;
1606	}
1607
1608	if (!hdev->hclge_fd_rule_num || !rule_num)
1609		return 0;
1610
1611	rule_locs = kcalloc(rule_num, sizeof(u16), GFP_KERNEL);
1612	if (!rule_locs)
1613		return -ENOMEM;
1614
1615	tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL);
1616	if (!tcam_buf) {
1617		kfree(rule_locs);
1618		return -ENOMEM;
1619	}
1620
1621	rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
1622	if (rule_cnt < 0) {
1623		ret = rule_cnt;
1624		dev_err(&hdev->pdev->dev,
1625			"failed to get rule number, ret = %d\n", ret);
1626		goto out;
1627	}
1628
1629	ret = 0;
1630	for (i = 0; i < rule_cnt; i++) {
1631		tcam_msg.stage = HCLGE_FD_STAGE_1;
1632		tcam_msg.loc = rule_locs[i];
1633
1634		ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, tcam_msg);
1635		if (ret) {
1636			dev_err(&hdev->pdev->dev,
1637				"failed to get fd tcam key x, ret = %d\n", ret);
1638			goto out;
1639		}
1640
1641		pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
1642
1643		ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, tcam_msg);
1644		if (ret) {
1645			dev_err(&hdev->pdev->dev,
1646				"failed to get fd tcam key y, ret = %d\n", ret);
1647			goto out;
1648		}
1649
1650		pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
1651	}
1652
1653out:
1654	kfree(tcam_buf);
1655	kfree(rule_locs);
1656	return ret;
1657}
1658
1659static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
1660{
1661	u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
1662	struct hclge_fd_ad_cnt_read_cmd *req;
1663	char str_id[HCLGE_DBG_ID_LEN];
1664	struct hclge_desc desc;
1665	int pos = 0;
1666	int ret;
1667	u64 cnt;
1668	u8 i;
1669
1670	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
1671		return -EOPNOTSUPP;
1672
1673	pos += scnprintf(buf + pos, len - pos,
1674			 "func_id\thit_times\n");
1675
1676	for (i = 0; i < func_num; i++) {
1677		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_CNT_OP, true);
1678		req = (struct hclge_fd_ad_cnt_read_cmd *)desc.data;
1679		req->index = cpu_to_le16(i);
1680		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1681		if (ret) {
1682			dev_err(&hdev->pdev->dev, "failed to get fd counter, ret = %d\n",
1683				ret);
1684			return ret;
1685		}
1686		cnt = le64_to_cpu(req->cnt);
1687		hclge_dbg_get_func_id_str(str_id, i);
1688		pos += scnprintf(buf + pos, len - pos,
1689				 "%s\t%llu\n", str_id, cnt);
1690	}
1691
1692	return 0;
1693}
1694
1695static const struct hclge_dbg_status_dfx_info hclge_dbg_rst_info[] = {
1696	{HCLGE_MISC_VECTOR_REG_BASE, "vector0 interrupt enable status"},
1697	{HCLGE_MISC_RESET_STS_REG,   "reset interrupt source"},
1698	{HCLGE_MISC_VECTOR_INT_STS,  "reset interrupt status"},
1699	{HCLGE_RAS_PF_OTHER_INT_STS_REG, "RAS interrupt status"},
1700	{HCLGE_GLOBAL_RESET_REG,  "hardware reset status"},
1701	{HCLGE_NIC_CSQ_DEPTH_REG, "handshake status"},
1702	{HCLGE_FUN_RST_ING, "function reset status"}
1703};
1704
1705int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
1706{
1707	u32 i, offset;
1708	int pos = 0;
1709
1710	pos += scnprintf(buf + pos, len - pos, "PF reset count: %u\n",
1711			 hdev->rst_stats.pf_rst_cnt);
1712	pos += scnprintf(buf + pos, len - pos, "FLR reset count: %u\n",
1713			 hdev->rst_stats.flr_rst_cnt);
1714	pos += scnprintf(buf + pos, len - pos, "GLOBAL reset count: %u\n",
1715			 hdev->rst_stats.global_rst_cnt);
1716	pos += scnprintf(buf + pos, len - pos, "IMP reset count: %u\n",
1717			 hdev->rst_stats.imp_rst_cnt);
1718	pos += scnprintf(buf + pos, len - pos, "reset done count: %u\n",
1719			 hdev->rst_stats.reset_done_cnt);
1720	pos += scnprintf(buf + pos, len - pos, "HW reset done count: %u\n",
1721			 hdev->rst_stats.hw_reset_done_cnt);
1722	pos += scnprintf(buf + pos, len - pos, "reset count: %u\n",
1723			 hdev->rst_stats.reset_cnt);
1724	pos += scnprintf(buf + pos, len - pos, "reset fail count: %u\n",
1725			 hdev->rst_stats.reset_fail_cnt);
1726
1727	for (i = 0; i < ARRAY_SIZE(hclge_dbg_rst_info); i++) {
1728		offset = hclge_dbg_rst_info[i].offset;
1729		pos += scnprintf(buf + pos, len - pos, "%s: 0x%x\n",
1730				 hclge_dbg_rst_info[i].message,
1731				 hclge_read_dev(&hdev->hw, offset));
1732	}
1733
1734	pos += scnprintf(buf + pos, len - pos, "hdev state: 0x%lx\n",
1735			 hdev->state);
1736
1737	return 0;
1738}
1739
1740static int hclge_dbg_dump_serv_info(struct hclge_dev *hdev, char *buf, int len)
1741{
1742	unsigned long rem_nsec;
1743	int pos = 0;
1744	u64 lc;
1745
1746	lc = local_clock();
1747	rem_nsec = do_div(lc, HCLGE_BILLION_NANO_SECONDS);
1748
1749	pos += scnprintf(buf + pos, len - pos, "local_clock: [%5lu.%06lu]\n",
1750			 (unsigned long)lc, rem_nsec / 1000);
1751	pos += scnprintf(buf + pos, len - pos, "delta: %u(ms)\n",
1752			 jiffies_to_msecs(jiffies - hdev->last_serv_processed));
1753	pos += scnprintf(buf + pos, len - pos,
1754			 "last_service_task_processed: %lu(jiffies)\n",
1755			 hdev->last_serv_processed);
1756	pos += scnprintf(buf + pos, len - pos, "last_service_task_cnt: %lu\n",
1757			 hdev->serv_processed_cnt);
1758
1759	return 0;
1760}
1761
1762static int hclge_dbg_dump_interrupt(struct hclge_dev *hdev, char *buf, int len)
1763{
1764	int pos = 0;
1765
1766	pos += scnprintf(buf + pos, len - pos, "num_nic_msi: %u\n",
1767			 hdev->num_nic_msi);
1768	pos += scnprintf(buf + pos, len - pos, "num_roce_msi: %u\n",
1769			 hdev->num_roce_msi);
1770	pos += scnprintf(buf + pos, len - pos, "num_msi_used: %u\n",
1771			 hdev->num_msi_used);
1772	pos += scnprintf(buf + pos, len - pos, "num_msi_left: %u\n",
1773			 hdev->num_msi_left);
1774
1775	return 0;
1776}
1777
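/* Print the IMP statistics descriptors as an "offset | data" table, two
 * 32-bit data words per line.
 */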
1778static void hclge_dbg_imp_info_data_print(struct hclge_desc *desc_src,
1779					  char *buf, int len, u32 bd_num)
1780{
1781#define HCLGE_DBG_IMP_INFO_PRINT_OFFSET 0x2
1782
1783	struct hclge_desc *desc_index = desc_src;
1784	u32 offset = 0;
1785	int pos = 0;
1786	u32 i, j;
1787
1788	pos += scnprintf(buf + pos, len - pos, "offset | data\n");
1789
1790	for (i = 0; i < bd_num; i++) {
1791		j = 0;
1792		while (j < HCLGE_DESC_DATA_LEN - 1) {
1793			pos += scnprintf(buf + pos, len - pos, "0x%04x | ",
1794					 offset);
1795			pos += scnprintf(buf + pos, len - pos, "0x%08x  ",
1796					 le32_to_cpu(desc_index->data[j++]));
1797			pos += scnprintf(buf + pos, len - pos, "0x%08x\n",
1798					 le32_to_cpu(desc_index->data[j++]));
1799			offset += sizeof(u32) * HCLGE_DBG_IMP_INFO_PRINT_OFFSET;
1800		}
1801		desc_index++;
1802	}
1803}
1804
1805static int
1806hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
1807{
1808	struct hclge_get_imp_bd_cmd *req;
1809	struct hclge_desc *desc_src;
1810	struct hclge_desc desc;
1811	u32 bd_num;
1812	int ret;
1813
1814	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_STATS_BD, true);
1815
1816	req = (struct hclge_get_imp_bd_cmd *)desc.data;
1817	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1818	if (ret) {
1819		dev_err(&hdev->pdev->dev,
1820			"failed to get imp statistics bd number, ret = %d\n",
1821			ret);
1822		return ret;
1823	}
1824
1825	bd_num = le32_to_cpu(req->bd_num);
1826	if (!bd_num) {
1827		dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n");
1828		return -EINVAL;
1829	}
1830
1831	desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
1832	if (!desc_src)
1833		return -ENOMEM;
1834
1835	ret  = hclge_dbg_cmd_send(hdev, desc_src, 0, bd_num,
1836				  HCLGE_OPC_IMP_STATS_INFO);
1837	if (ret) {
1838		kfree(desc_src);
1839		dev_err(&hdev->pdev->dev,
1840			"failed to get imp statistics, ret = %d\n", ret);
1841		return ret;
1842	}
1843
1844	hclge_dbg_imp_info_data_print(desc_src, buf, len, bd_num);
1845
1846	kfree(desc_src);
1847
1848	return 0;
1849}
1850
1851#define HCLGE_CMD_NCL_CONFIG_BD_NUM	5
1852#define HCLGE_MAX_NCL_CONFIG_LENGTH	16384
1853
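/* Print one batch of NCL configuration words. desc[0].data[0] carries the
 * offset/length request word rather than configuration data, so it is
 * skipped.
 */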
1854static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
1855					char *buf, int len, int *pos)
1856{
1857#define HCLGE_CMD_DATA_NUM		6
1858
1859	int offset = HCLGE_MAX_NCL_CONFIG_LENGTH - *index;
1860	int i, j;
1861
1862	for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
1863		for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
1864			if (i == 0 && j == 0)
1865				continue;
1866
1867			*pos += scnprintf(buf + *pos, len - *pos,
1868					  "0x%04x | 0x%08x\n", offset,
1869					  le32_to_cpu(desc[i].data[j]));
1870
1871			offset += sizeof(u32);
1872			*index -= sizeof(u32);
1873
1874			if (*index <= 0)
1875				return;
1876		}
1877	}
1878}
1879
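/* Read HCLGE_MAX_NCL_CONFIG_LENGTH bytes of NCL configuration in chunks of
 * HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD bytes: the low 16 bits of data0 hold
 * the read offset and the high 16 bits the read length.
 */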
1880static int
1881hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
1882{
1883#define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD	(20 + 24 * 4)
1884
1885	struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
1886	int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
1887	int index = HCLGE_MAX_NCL_CONFIG_LENGTH;
1888	int pos = 0;
1889	u32 data0;
1890	int ret;
1891
1892	pos += scnprintf(buf + pos, len - pos, "offset | data\n");
1893
1894	while (index > 0) {
1895		data0 = HCLGE_MAX_NCL_CONFIG_LENGTH - index;
1896		if (index >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD)
1897			data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16;
1898		else
1899			data0 |= (u32)index << 16;
1900		ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
1901					 HCLGE_OPC_QUERY_NCL_CONFIG);
1902		if (ret)
1903			return ret;
1904
1905		hclge_ncl_config_data_print(desc, &index, buf, len, &pos);
1906	}
1907
1908	return 0;
1909}
1910
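/* Report the current state of the app (MAC), serdes serial/parallel and PHY
 * loopback modes for this MAC.
 */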
1911static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
1912{
1913	struct phy_device *phydev = hdev->hw.mac.phydev;
1914	struct hclge_config_mac_mode_cmd *req_app;
1915	struct hclge_common_lb_cmd *req_common;
1916	struct hclge_desc desc;
1917	u8 loopback_en;
1918	int pos = 0;
1919	int ret;
1920
1921	req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
1922	req_common = (struct hclge_common_lb_cmd *)desc.data;
1923
1924	pos += scnprintf(buf + pos, len - pos, "mac id: %u\n",
1925			 hdev->hw.mac.mac_id);
1926
1927	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
1928	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1929	if (ret) {
1930		dev_err(&hdev->pdev->dev,
1931			"failed to dump app loopback status, ret = %d\n", ret);
1932		return ret;
1933	}
1934
1935	loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
1936				    HCLGE_MAC_APP_LP_B);
1937	pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n",
1938			 state_str[loopback_en]);
1939
1940	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
1941	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1942	if (ret) {
1943		dev_err(&hdev->pdev->dev,
1944			"failed to dump common loopback status, ret = %d\n",
1945			ret);
1946		return ret;
1947	}
1948
	loopback_en = req_common->enable &
			HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B ? 1 : 0;
1950	pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n",
1951			 state_str[loopback_en]);
1952
1953	loopback_en = req_common->enable &
1954			HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
1955	pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n",
1956			 state_str[loopback_en]);
1957
1958	if (phydev) {
1959		loopback_en = phydev->loopback_enabled;
1960		pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
1961				 state_str[loopback_en]);
1962	} else if (hnae3_dev_phy_imp_supported(hdev)) {
		loopback_en = req_common->enable &
			      HCLGE_CMD_GE_PHY_INNER_LOOP_B ? 1 : 0;
1965		pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
1966				 state_str[loopback_en]);
1967	}
1968
1969	return 0;
1970}
1971
/* hclge_dbg_dump_mac_tnl_status: drain and print the MAC TNL interrupt
 * records queued in hdev->mac_tnl_log
 * @hdev: pointer to struct hclge_dev
 * @buf: buffer to write the dump into
 * @len: length of the buffer
 */
1975static int
1976hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev, char *buf, int len)
1977{
1978	struct hclge_mac_tnl_stats stats;
1979	unsigned long rem_nsec;
1980	int pos = 0;
1981
1982	pos += scnprintf(buf + pos, len - pos,
			 "Recently generated mac tnl interrupts:\n");
1984
1985	while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
1986		rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
1987
1988		pos += scnprintf(buf + pos, len - pos,
1989				 "[%07lu.%03lu] status = 0x%x\n",
1990				 (unsigned long)stats.time, rem_nsec / 1000,
1991				 stats.status);
1992	}
1993
1994	return 0;
1995}
1996
1998static const struct hclge_dbg_item mac_list_items[] = {
1999	{ "FUNC_ID", 2 },
2000	{ "MAC_ADDR", 12 },
2001	{ "STATE", 2 },
2002};
2003
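/* Walk the unicast or multicast MAC list of every vport under its
 * mac_list_lock and print the function id, address and node state of each
 * entry.
 */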
2004static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len,
2005				    bool is_unicast)
2006{
2007	char data_str[ARRAY_SIZE(mac_list_items)][HCLGE_DBG_DATA_STR_LEN];
2008	char content[HCLGE_DBG_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
2009	char *result[ARRAY_SIZE(mac_list_items)];
2010	struct hclge_mac_node *mac_node, *tmp;
2011	struct hclge_vport *vport;
2012	struct list_head *list;
2013	u32 func_id;
2014	int pos = 0;
2015	int i;
2016
2017	for (i = 0; i < ARRAY_SIZE(mac_list_items); i++)
2018		result[i] = &data_str[i][0];
2019
2020	pos += scnprintf(buf + pos, len - pos, "%s MAC_LIST:\n",
2021			 is_unicast ? "UC" : "MC");
2022	hclge_dbg_fill_content(content, sizeof(content), mac_list_items,
2023			       NULL, ARRAY_SIZE(mac_list_items));
2024	pos += scnprintf(buf + pos, len - pos, "%s", content);
2025
2026	for (func_id = 0; func_id < hdev->num_alloc_vport; func_id++) {
2027		vport = &hdev->vport[func_id];
2028		list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
2029		spin_lock_bh(&vport->mac_list_lock);
2030		list_for_each_entry_safe(mac_node, tmp, list, node) {
2031			i = 0;
2032			result[i++] = hclge_dbg_get_func_id_str(str_id,
2033								func_id);
2034			sprintf(result[i++], "%pM", mac_node->mac_addr);
2035			sprintf(result[i++], "%5s",
2036				hclge_mac_state_str[mac_node->state]);
2037			hclge_dbg_fill_content(content, sizeof(content),
2038					       mac_list_items,
2039					       (const char **)result,
2040					       ARRAY_SIZE(mac_list_items));
2041			pos += scnprintf(buf + pos, len - pos, "%s", content);
2042		}
2043		spin_unlock_bh(&vport->mac_list_lock);
2044	}
2045}
2046
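/* Dump unicast MAC (UMV) table space usage: the global sizes plus the
 * number of entries used by each function.
 */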
2047static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len)
2048{
2049	u8 func_num = pci_num_vf(hdev->pdev) + 1;
2050	struct hclge_vport *vport;
2051	int pos = 0;
2052	u8 i;
2053
	pos += scnprintf(buf + pos, len - pos, "num_alloc_vport  : %u\n",
			 hdev->num_alloc_vport);
2056	pos += scnprintf(buf + pos, len - pos, "max_umv_size     : %u\n",
2057			 hdev->max_umv_size);
2058	pos += scnprintf(buf + pos, len - pos, "wanted_umv_size  : %u\n",
2059			 hdev->wanted_umv_size);
2060	pos += scnprintf(buf + pos, len - pos, "priv_umv_size    : %u\n",
2061			 hdev->priv_umv_size);
2062
2063	mutex_lock(&hdev->vport_lock);
2064	pos += scnprintf(buf + pos, len - pos, "share_umv_size   : %u\n",
2065			 hdev->share_umv_size);
2066	for (i = 0; i < func_num; i++) {
2067		vport = &hdev->vport[i];
2068		pos += scnprintf(buf + pos, len - pos,
2069				 "vport(%u) used_umv_num : %u\n",
2070				 i, vport->used_umv_num);
2071	}
2072	mutex_unlock(&hdev->vport_lock);
2073
2074	pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num  : %u\n",
2075			 hdev->used_mc_mac_num);
2076
2077	return 0;
2078}
2079
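/* Read the Rx VLAN tag handling (strip/drop/priority-only bits) of one
 * function. The target function is selected through vf_offset and its bit
 * in vf_bitmap.
 */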
2080static int hclge_get_vlan_rx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
2081					 struct hclge_dbg_vlan_cfg *vlan_cfg)
2082{
2083	struct hclge_vport_vtag_rx_cfg_cmd *req;
2084	struct hclge_desc desc;
2085	u16 bmap_index;
2086	u8 rx_cfg;
2087	int ret;
2088
2089	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, true);
2090
2091	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
2092	req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
	bmap_index = (vf_id % HCLGE_VF_NUM_PER_CMD) / HCLGE_VF_NUM_PER_BYTE;
2094	req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
2095
2096	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2097	if (ret) {
2098		dev_err(&hdev->pdev->dev,
2099			"failed to get vport%u rxvlan cfg, ret = %d\n",
2100			vf_id, ret);
2101		return ret;
2102	}
2103
2104	rx_cfg = req->vport_vlan_cfg;
2105	vlan_cfg->strip_tag1 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG1_EN_B);
2106	vlan_cfg->strip_tag2 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG2_EN_B);
2107	vlan_cfg->drop_tag1 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG1_EN_B);
2108	vlan_cfg->drop_tag2 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG2_EN_B);
2109	vlan_cfg->pri_only1 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG1_EN_B);
2110	vlan_cfg->pri_only2 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG2_EN_B);
2111
2112	return 0;
2113}
2114
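/* Read the Tx VLAN tag handling of one function: the default PVID plus the
 * accept/insert/shift tag bits.
 */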
2115static int hclge_get_vlan_tx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
2116					 struct hclge_dbg_vlan_cfg *vlan_cfg)
2117{
2118	struct hclge_vport_vtag_tx_cfg_cmd *req;
2119	struct hclge_desc desc;
2120	u16 bmap_index;
2121	u8 tx_cfg;
2122	int ret;
2123
2124	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, true);
2125	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
2126	req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
	bmap_index = (vf_id % HCLGE_VF_NUM_PER_CMD) / HCLGE_VF_NUM_PER_BYTE;
2128	req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
2129
2130	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2131	if (ret) {
2132		dev_err(&hdev->pdev->dev,
2133			"failed to get vport%u txvlan cfg, ret = %d\n",
2134			vf_id, ret);
2135		return ret;
2136	}
2137
2138	tx_cfg = req->vport_vlan_cfg;
2139	vlan_cfg->pvid = le16_to_cpu(req->def_vlan_tag1);
2140
2141	vlan_cfg->accept_tag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG1_B);
2142	vlan_cfg->accept_tag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG2_B);
2143	vlan_cfg->accept_untag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG1_B);
2144	vlan_cfg->accept_untag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG2_B);
2145	vlan_cfg->insert_tag1 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG1_EN_B);
2146	vlan_cfg->insert_tag2 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG2_EN_B);
2147	vlan_cfg->shift_tag = hnae3_get_bit(tx_cfg, HCLGE_TAG_SHIFT_MODE_EN_B);
2148
2149	return 0;
2150}
2151
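/* Issue a HCLGE_OPC_VLAN_FILTER_CTRL query for the given vlan_type and
 * function and hand the completed descriptor back to the caller.
 */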
2152static int hclge_get_vlan_filter_config_cmd(struct hclge_dev *hdev,
2153					    u8 vlan_type, u8 vf_id,
2154					    struct hclge_desc *desc)
2155{
2156	struct hclge_vlan_filter_ctrl_cmd *req;
2157	int ret;
2158
2159	hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
2160	req = (struct hclge_vlan_filter_ctrl_cmd *)desc->data;
2161	req->vlan_type = vlan_type;
2162	req->vf_id = vf_id;
2163
2164	ret = hclge_cmd_send(&hdev->hw, desc, 1);
2165	if (ret)
2166		dev_err(&hdev->pdev->dev,
2167			"failed to get vport%u vlan filter config, ret = %d.\n",
2168			vf_id, ret);
2169
2170	return ret;
2171}
2172
2173static int hclge_get_vlan_filter_state(struct hclge_dev *hdev, u8 vlan_type,
2174				       u8 vf_id, u8 *vlan_fe)
2175{
2176	struct hclge_vlan_filter_ctrl_cmd *req;
2177	struct hclge_desc desc;
2178	int ret;
2179
2180	ret = hclge_get_vlan_filter_config_cmd(hdev, vlan_type, vf_id, &desc);
2181	if (ret)
2182		return ret;
2183
2184	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
2185	*vlan_fe = req->vlan_fe;
2186
2187	return 0;
2188}
2189
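/* Query whether ingress VLAN filtering is bypassed for a function. When the
 * device lacks the port VLAN bypass capability, *bypass_en is left untouched
 * and 0 is returned.
 */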
2190static int hclge_get_port_vlan_filter_bypass_state(struct hclge_dev *hdev,
2191						   u8 vf_id, u8 *bypass_en)
2192{
2193	struct hclge_port_vlan_filter_bypass_cmd *req;
2194	struct hclge_desc desc;
2195	int ret;
2196
2197	if (!test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
2198		return 0;
2199
2200	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, true);
2201	req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
2202	req->vf_id = vf_id;
2203
2204	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2205	if (ret) {
2206		dev_err(&hdev->pdev->dev,
2207			"failed to get vport%u port vlan filter bypass state, ret = %d.\n",
2208			vf_id, ret);
2209		return ret;
2210	}
2211
2212	*bypass_en = hnae3_get_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B);
2213
2214	return 0;
2215}
2216
2217static const struct hclge_dbg_item vlan_filter_items[] = {
2218	{ "FUNC_ID", 2 },
2219	{ "I_VF_VLAN_FILTER", 2 },
2220	{ "E_VF_VLAN_FILTER", 2 },
2221	{ "PORT_VLAN_FILTER_BYPASS", 0 }
2222};
2223
2224static const struct hclge_dbg_item vlan_offload_items[] = {
2225	{ "FUNC_ID", 2 },
2226	{ "PVID", 4 },
2227	{ "ACCEPT_TAG1", 2 },
2228	{ "ACCEPT_TAG2", 2 },
2229	{ "ACCEPT_UNTAG1", 2 },
2230	{ "ACCEPT_UNTAG2", 2 },
2231	{ "INSERT_TAG1", 2 },
2232	{ "INSERT_TAG2", 2 },
2233	{ "SHIFT_TAG", 2 },
2234	{ "STRIP_TAG1", 2 },
2235	{ "STRIP_TAG2", 2 },
2236	{ "DROP_TAG1", 2 },
2237	{ "DROP_TAG2", 2 },
2238	{ "PRI_ONLY_TAG1", 2 },
2239	{ "PRI_ONLY_TAG2", 0 }
2240};
2241
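/* Dump the port-level VLAN filter state followed by a table with the
 * per-function ingress/egress filter and bypass states.
 */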
2242static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
2243					     int len, int *pos)
2244{
2245	char content[HCLGE_DBG_VLAN_FLTR_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
2246	const char *result[ARRAY_SIZE(vlan_filter_items)];
2247	u8 i, j, vlan_fe, bypass, ingress, egress;
2248	u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
2249	int ret;
2250
2251	ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_PORT, 0,
2252					  &vlan_fe);
2253	if (ret)
2254		return ret;
2255	ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
2256	egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
2257
	*pos += scnprintf(buf + *pos, len - *pos, "I_PORT_VLAN_FILTER: %s\n",
2259			  state_str[ingress]);
2260	*pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n",
2261			  state_str[egress]);
2262
2263	hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items,
2264			       NULL, ARRAY_SIZE(vlan_filter_items));
2265	*pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2266
2267	for (i = 0; i < func_num; i++) {
2268		ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_VF, i,
2269						  &vlan_fe);
2270		if (ret)
2271			return ret;
2272
2273		ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
2274		egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
2275		ret = hclge_get_port_vlan_filter_bypass_state(hdev, i, &bypass);
2276		if (ret)
2277			return ret;
2278		j = 0;
2279		result[j++] = hclge_dbg_get_func_id_str(str_id, i);
2280		result[j++] = state_str[ingress];
2281		result[j++] = state_str[egress];
2282		result[j++] =
2283			test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
2284				 hdev->ae_dev->caps) ? state_str[bypass] : "NA";
2285		hclge_dbg_fill_content(content, sizeof(content),
2286				       vlan_filter_items, result,
2287				       ARRAY_SIZE(vlan_filter_items));
2288		*pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2289	}
2290	*pos += scnprintf(buf + *pos, len - *pos, "\n");
2291
2292	return 0;
2293}
2294
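/* Dump the per-function Tx/Rx VLAN offload settings as one table row per
 * function.
 */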
2295static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
2296					      int len, int *pos)
2297{
2298	char str_id[HCLGE_DBG_ID_LEN], str_pvid[HCLGE_DBG_ID_LEN];
2299	const char *result[ARRAY_SIZE(vlan_offload_items)];
2300	char content[HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN];
2301	u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
2302	struct hclge_dbg_vlan_cfg vlan_cfg;
2303	int ret;
2304	u8 i, j;
2305
2306	hclge_dbg_fill_content(content, sizeof(content), vlan_offload_items,
2307			       NULL, ARRAY_SIZE(vlan_offload_items));
2308	*pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2309
2310	for (i = 0; i < func_num; i++) {
2311		ret = hclge_get_vlan_tx_offload_cfg(hdev, i, &vlan_cfg);
2312		if (ret)
2313			return ret;
2314
2315		ret = hclge_get_vlan_rx_offload_cfg(hdev, i, &vlan_cfg);
2316		if (ret)
2317			return ret;
2318
2319		sprintf(str_pvid, "%u", vlan_cfg.pvid);
2320		j = 0;
2321		result[j++] = hclge_dbg_get_func_id_str(str_id, i);
2322		result[j++] = str_pvid;
2323		result[j++] = state_str[vlan_cfg.accept_tag1];
2324		result[j++] = state_str[vlan_cfg.accept_tag2];
2325		result[j++] = state_str[vlan_cfg.accept_untag1];
2326		result[j++] = state_str[vlan_cfg.accept_untag2];
2327		result[j++] = state_str[vlan_cfg.insert_tag1];
2328		result[j++] = state_str[vlan_cfg.insert_tag2];
2329		result[j++] = state_str[vlan_cfg.shift_tag];
2330		result[j++] = state_str[vlan_cfg.strip_tag1];
2331		result[j++] = state_str[vlan_cfg.strip_tag2];
2332		result[j++] = state_str[vlan_cfg.drop_tag1];
2333		result[j++] = state_str[vlan_cfg.drop_tag2];
2334		result[j++] = state_str[vlan_cfg.pri_only1];
2335		result[j++] = state_str[vlan_cfg.pri_only2];
2336
2337		hclge_dbg_fill_content(content, sizeof(content),
2338				       vlan_offload_items, result,
2339				       ARRAY_SIZE(vlan_offload_items));
2340		*pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2341	}
2342
2343	return 0;
2344}
2345
2346static int hclge_dbg_dump_vlan_config(struct hclge_dev *hdev, char *buf,
2347				      int len)
2348{
2349	int pos = 0;
2350	int ret;
2351
2352	ret = hclge_dbg_dump_vlan_filter_config(hdev, buf, len, &pos);
2353	if (ret)
2354		return ret;
2355
2356	return hclge_dbg_dump_vlan_offload_config(hdev, buf, len, &pos);
2357}
2358
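/* Dump PTP state: enable flags, Rx/Tx timestamp counters and the software
 * and hardware PTP configuration. Note that this dereferences hdev->ptp, so
 * it assumes the PTP block has been initialized.
 */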
2359static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
2360{
2361	struct hclge_ptp *ptp = hdev->ptp;
2362	u32 sw_cfg = ptp->ptp_cfg;
2363	unsigned int tx_start;
2364	unsigned int last_rx;
2365	int pos = 0;
2366	u32 hw_cfg;
2367	int ret;
2368
2369	pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n",
2370			 ptp->info.name);
2371	pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n",
2372			 test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ?
2373			 "yes" : "no");
2374	pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n",
2375			 test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ?
2376			 "yes" : "no");
2377	pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n",
2378			 test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ?
2379			 "yes" : "no");
2380
2381	last_rx = jiffies_to_msecs(ptp->last_rx);
2382	pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n",
2383			 last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
2384	pos += scnprintf(buf + pos, len - pos, "rx count: %lu\n", ptp->rx_cnt);
2385
2386	tx_start = jiffies_to_msecs(ptp->tx_start);
2387	pos += scnprintf(buf + pos, len - pos, "last tx start time: %lu.%lu\n",
2388			 tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
2389	pos += scnprintf(buf + pos, len - pos, "tx count: %lu\n", ptp->tx_cnt);
2390	pos += scnprintf(buf + pos, len - pos, "tx skipped count: %lu\n",
2391			 ptp->tx_skipped);
2392	pos += scnprintf(buf + pos, len - pos, "tx timeout count: %lu\n",
2393			 ptp->tx_timeout);
2394	pos += scnprintf(buf + pos, len - pos, "last tx seqid: %u\n",
2395			 ptp->last_tx_seqid);
2396
2397	ret = hclge_ptp_cfg_qry(hdev, &hw_cfg);
2398	if (ret)
2399		return ret;
2400
2401	pos += scnprintf(buf + pos, len - pos, "sw_cfg: %#x, hw_cfg: %#x\n",
2402			 sw_cfg, hw_cfg);
2403
2404	pos += scnprintf(buf + pos, len - pos, "tx type: %d, rx filter: %d\n",
2405			 ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);
2406
2407	return 0;
2408}
2409
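/* Thin wrappers so the unicast and multicast MAC list dumps match the
 * dbg_dump callback signature used in hclge_dbg_cmd_func[].
 */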
2410static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len)
2411{
2412	hclge_dbg_dump_mac_list(hdev, buf, len, true);
2413
2414	return 0;
2415}
2416
2417static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len)
2418{
2419	hclge_dbg_dump_mac_list(hdev, buf, len, false);
2420
2421	return 0;
2422}
2423
2424static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
2425	{
2426		.cmd = HNAE3_DBG_CMD_TM_NODES,
2427		.dbg_dump = hclge_dbg_dump_tm_nodes,
2428	},
2429	{
2430		.cmd = HNAE3_DBG_CMD_TM_PRI,
2431		.dbg_dump = hclge_dbg_dump_tm_pri,
2432	},
2433	{
2434		.cmd = HNAE3_DBG_CMD_TM_QSET,
2435		.dbg_dump = hclge_dbg_dump_tm_qset,
2436	},
2437	{
2438		.cmd = HNAE3_DBG_CMD_TM_MAP,
2439		.dbg_dump = hclge_dbg_dump_tm_map,
2440	},
2441	{
2442		.cmd = HNAE3_DBG_CMD_TM_PG,
2443		.dbg_dump = hclge_dbg_dump_tm_pg,
2444	},
2445	{
2446		.cmd = HNAE3_DBG_CMD_TM_PORT,
2447		.dbg_dump = hclge_dbg_dump_tm_port,
2448	},
2449	{
2450		.cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
2451		.dbg_dump = hclge_dbg_dump_tc,
2452	},
2453	{
2454		.cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
2455		.dbg_dump = hclge_dbg_dump_qos_pause_cfg,
2456	},
2457	{
2458		.cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
2459		.dbg_dump = hclge_dbg_dump_qos_pri_map,
2460	},
2461	{
2462		.cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
2463		.dbg_dump = hclge_dbg_dump_qos_dscp_map,
2464	},
2465	{
2466		.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
2467		.dbg_dump = hclge_dbg_dump_qos_buf_cfg,
2468	},
2469	{
2470		.cmd = HNAE3_DBG_CMD_MAC_UC,
2471		.dbg_dump = hclge_dbg_dump_mac_uc,
2472	},
2473	{
2474		.cmd = HNAE3_DBG_CMD_MAC_MC,
2475		.dbg_dump = hclge_dbg_dump_mac_mc,
2476	},
2477	{
2478		.cmd = HNAE3_DBG_CMD_MNG_TBL,
2479		.dbg_dump = hclge_dbg_dump_mng_table,
2480	},
2481	{
2482		.cmd = HNAE3_DBG_CMD_LOOPBACK,
2483		.dbg_dump = hclge_dbg_dump_loopback,
2484	},
2485	{
2486		.cmd = HNAE3_DBG_CMD_PTP_INFO,
2487		.dbg_dump = hclge_dbg_dump_ptp_info,
2488	},
2489	{
2490		.cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
2491		.dbg_dump = hclge_dbg_dump_interrupt,
2492	},
2493	{
2494		.cmd = HNAE3_DBG_CMD_RESET_INFO,
2495		.dbg_dump = hclge_dbg_dump_rst_info,
2496	},
2497	{
2498		.cmd = HNAE3_DBG_CMD_IMP_INFO,
2499		.dbg_dump = hclge_dbg_get_imp_stats_info,
2500	},
2501	{
2502		.cmd = HNAE3_DBG_CMD_NCL_CONFIG,
2503		.dbg_dump = hclge_dbg_dump_ncl_config,
2504	},
2505	{
2506		.cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
2507		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2508	},
2509	{
2510		.cmd = HNAE3_DBG_CMD_REG_SSU,
2511		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2512	},
2513	{
2514		.cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
2515		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2516	},
2517	{
2518		.cmd = HNAE3_DBG_CMD_REG_RPU,
2519		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2520	},
2521	{
2522		.cmd = HNAE3_DBG_CMD_REG_NCSI,
2523		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2524	},
2525	{
2526		.cmd = HNAE3_DBG_CMD_REG_RTC,
2527		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2528	},
2529	{
2530		.cmd = HNAE3_DBG_CMD_REG_PPP,
2531		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2532	},
2533	{
2534		.cmd = HNAE3_DBG_CMD_REG_RCB,
2535		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2536	},
2537	{
2538		.cmd = HNAE3_DBG_CMD_REG_TQP,
2539		.dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2540	},
2541	{
2542		.cmd = HNAE3_DBG_CMD_REG_MAC,
2543		.dbg_dump = hclge_dbg_dump_mac,
2544	},
2545	{
2546		.cmd = HNAE3_DBG_CMD_REG_DCB,
2547		.dbg_dump = hclge_dbg_dump_dcb,
2548	},
2549	{
2550		.cmd = HNAE3_DBG_CMD_FD_TCAM,
2551		.dbg_dump = hclge_dbg_dump_fd_tcam,
2552	},
2553	{
2554		.cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
2555		.dbg_dump = hclge_dbg_dump_mac_tnl_status,
2556	},
2557	{
2558		.cmd = HNAE3_DBG_CMD_SERV_INFO,
2559		.dbg_dump = hclge_dbg_dump_serv_info,
2560	},
2561	{
2562		.cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
2563		.dbg_dump = hclge_dbg_dump_vlan_config,
2564	},
2565	{
2566		.cmd = HNAE3_DBG_CMD_FD_COUNTER,
2567		.dbg_dump = hclge_dbg_dump_fd_counter,
2568	},
2569	{
2570		.cmd = HNAE3_DBG_CMD_UMV_INFO,
2571		.dbg_dump = hclge_dbg_dump_umv_info,
2572	},
2573};
2574
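/* Entry point for the debugfs layer: look up the command in
 * hclge_dbg_cmd_func[] and dispatch to its dbg_dump or dbg_dump_reg handler.
 */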
2575int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
2576		       char *buf, int len)
2577{
2578	struct hclge_vport *vport = hclge_get_vport(handle);
2579	const struct hclge_dbg_func *cmd_func;
2580	struct hclge_dev *hdev = vport->back;
2581	u32 i;
2582
2583	for (i = 0; i < ARRAY_SIZE(hclge_dbg_cmd_func); i++) {
2584		if (cmd == hclge_dbg_cmd_func[i].cmd) {
2585			cmd_func = &hclge_dbg_cmd_func[i];
2586			if (cmd_func->dbg_dump)
2587				return cmd_func->dbg_dump(hdev, buf, len);
2588			else
2589				return cmd_func->dbg_dump_reg(hdev, cmd, buf,
2590							      len);
2591		}
2592	}
2593
2594	dev_err(&hdev->pdev->dev, "invalid command(%d)\n", cmd);
2595	return -EINVAL;
2596}
2597