1/*
2 * Copyright (c) 2017-2018 Cavium, Inc.
3 * All rights reserved.
4 *
5 *  Redistribution and use in source and binary forms, with or without
6 *  modification, are permitted provided that the following conditions
7 *  are met:
8 *
9 *  1. Redistributions of source code must retain the above copyright
10 *     notice, this list of conditions and the following disclaimer.
11 *  2. Redistributions in binary form must reproduce the above copyright
12 *     notice, this list of conditions and the following disclaimer in the
13 *     documentation and/or other materials provided with the distribution.
14 *
15 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 *  POSSIBILITY OF SUCH DAMAGE.
26 */
27
28/*
29 * File : ecore_dbg_fw_funcs.c
30 */
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD$");
33
34#include "bcm_osal.h"
35#include "ecore.h"
36#include "ecore_hw.h"
37#include "ecore_mcp.h"
38#include "spad_layout.h"
39#include "nvm_map.h"
40#include "reg_addr.h"
41#include "ecore_hsi_common.h"
42#include "ecore_hsi_debug_tools.h"
43#include "mcp_public.h"
44#include "nvm_map.h"
45#ifndef USE_DBG_BIN_FILE
46#include "ecore_dbg_values.h"
47#endif
48#include "ecore_dbg_fw_funcs.h"
49
/* Memory groups enum.
 * NOTE: the order must be kept in sync with s_mem_group_names below
 * (one name string per group, indexed by this enum).
 */
enum mem_groups {
	MEM_GROUP_PXP_MEM,
	MEM_GROUP_DMAE_MEM,
	MEM_GROUP_CM_MEM,
	MEM_GROUP_QM_MEM,
	MEM_GROUP_DORQ_MEM,
	MEM_GROUP_BRB_RAM,
	MEM_GROUP_BRB_MEM,
	MEM_GROUP_PRS_MEM,
	MEM_GROUP_IOR,
	MEM_GROUP_BTB_RAM,
	MEM_GROUP_CONN_CFC_MEM,
	MEM_GROUP_TASK_CFC_MEM,
	MEM_GROUP_CAU_PI,
	MEM_GROUP_CAU_MEM,
	MEM_GROUP_PXP_ILT,
	MEM_GROUP_TM_MEM,
	MEM_GROUP_SDM_MEM,
	MEM_GROUP_PBUF,
	MEM_GROUP_RAM,
	MEM_GROUP_MULD_MEM,
	MEM_GROUP_BTB_MEM,
	MEM_GROUP_RDIF_CTX,
	MEM_GROUP_TDIF_CTX,
	MEM_GROUP_CFC_MEM,
	MEM_GROUP_IGU_MEM,
	MEM_GROUP_IGU_MSIX,
	MEM_GROUP_CAU_SB,
	MEM_GROUP_BMB_RAM,
	MEM_GROUP_BMB_MEM,
	MEM_GROUPS_NUM	/* Must be last - number of memory groups */
};
83
/* Memory groups names.
 * Indexed by enum mem_groups - the order here must match the enum
 * exactly (MEM_GROUPS_NUM entries).
 */
static const char* s_mem_group_names[] = {
	"PXP_MEM",
	"DMAE_MEM",
	"CM_MEM",
	"QM_MEM",
	"DORQ_MEM",
	"BRB_RAM",
	"BRB_MEM",
	"PRS_MEM",
	"IOR",
	"BTB_RAM",
	"CONN_CFC_MEM",
	"TASK_CFC_MEM",
	"CAU_PI",
	"CAU_MEM",
	"PXP_ILT",
	"TM_MEM",
	"SDM_MEM",
	"PBUF",
	"RAM",
	"MULD_MEM",
	"BTB_MEM",
	"RDIF_CTX",
	"TDIF_CTX",
	"CFC_MEM",
	"IGU_MEM",
	"IGU_MSIX",
	"CAU_SB",
	"BMB_RAM",
	"BMB_MEM",
};
116
117/* Idle check conditions */
118
119#ifndef __PREVENT_COND_ARR__
120
121static u32 cond5(const u32 *r, const u32 *imm) {
122	return (((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]));
123}
124
125static u32 cond7(const u32 *r, const u32 *imm) {
126	return (((r[0] >> imm[0]) & imm[1]) != imm[2]);
127}
128
129static u32 cond6(const u32 *r, const u32 *imm) {
130	return ((r[0] & imm[0]) != imm[1]);
131}
132
133static u32 cond9(const u32 *r, const u32 *imm) {
134	return ((r[0] & imm[0]) >> imm[1]) != (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
135}
136
137static u32 cond10(const u32 *r, const u32 *imm) {
138	return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
139}
140
141static u32 cond4(const u32 *r, const u32 *imm) {
142	return ((r[0] & ~imm[0]) != imm[1]);
143}
144
145static u32 cond0(const u32 *r, const u32 *imm) {
146	return ((r[0] & ~r[1]) != imm[0]);
147}
148
149static u32 cond1(const u32 *r, const u32 *imm) {
150	return (r[0] != imm[0]);
151}
152
153static u32 cond11(const u32 *r, const u32 *imm) {
154	return (r[0] != r[1] && r[2] == imm[0]);
155}
156
157static u32 cond12(const u32 *r, const u32 *imm) {
158	return (r[0] != r[1] && r[2] > imm[0]);
159}
160
161static u32 cond3(const u32 *r, const u32 OSAL_UNUSED *imm) {
162	return (r[0] != r[1]);
163}
164
165static u32 cond13(const u32 *r, const u32 *imm) {
166	return (r[0] & imm[0]);
167}
168
169static u32 cond8(const u32 *r, const u32 *imm) {
170	return (r[0] < (r[1] - imm[0]));
171}
172
173static u32 cond2(const u32 *r, const u32 *imm) {
174	return (r[0] > imm[0]);
175}
176
/* Array of Idle Check conditions, indexed by condition ID:
 * the entry at index N must be condN.
 */
static u32 (*cond_arr[])(const u32 *r, const u32 *imm) = {
	cond0,
	cond1,
	cond2,
	cond3,
	cond4,
	cond5,
	cond6,
	cond7,
	cond8,
	cond9,
	cond10,
	cond11,
	cond12,
	cond13,
};
194
195#endif /* __PREVENT_COND_ARR__ */
196
197/******************************* Data Types **********************************/
198
/* Platform IDs.
 * The order must match the per_platform[] array in struct chip_defs
 * (see the /* ASIC *​/ .. /* FPGA *​/ labels in s_chip_defs).
 */
enum platform_ids {
	PLATFORM_ASIC,
	PLATFORM_EMUL_FULL,
	PLATFORM_EMUL_REDUCED,
	PLATFORM_FPGA,
	MAX_PLATFORM_IDS
};
206
/* Per-platform resource counts for a chip */
struct chip_platform_defs {
	u8 num_ports;	/* number of ports */
	u8 num_pfs;	/* number of physical functions */
	u8 num_vfs;	/* number of virtual functions */
};
212
/* Chip constant definitions */
struct chip_defs {
	const char *name;	/* chip name string (e.g. "bb", "ah", "e5") */
	/* Resource counts per platform, indexed by enum platform_ids */
	struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
};
218
/* Platform constant definitions */
struct platform_defs {
	const char *name;
	/* Factor used to scale delay durations on this platform -
	 * presumably larger for emulation/FPGA; confirm against users.
	 */
	u32 delay_factor;
	/* Threshold (size) above which DMAE is used - TODO confirm units */
	u32 dmae_thresh;
	/* Threshold controlling log verbosity - TODO confirm semantics */
	u32 log_thresh;
};
226
/* Storm constant definitions.
 * Addresses are in bytes, sizes are in quad-regs.
 */
struct storm_defs {
	/* Single-letter storm name (T/M/U/X/Y/P in s_storm_defs) */
	char letter;
	/* Associated SEM block */
	enum block_id block_id;
	/* Debug bus client, per chip */
	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
	/* True if the storm has a VFC */
	bool has_vfc;
	/* SEM fast memory / debug mode register addresses */
	u32 sem_fast_mem_addr;
	u32 sem_frame_mode_addr;
	u32 sem_slow_enable_addr;
	u32 sem_slow_mode_addr;
	u32 sem_slow_mode1_conf_addr;
	u32 sem_sync_dbg_empty_addr;
	u32 sem_slow_dbg_empty_addr;
	/* CM context access register */
	u32 cm_ctx_wr_addr;
	/* (lid size, read address) pairs for the four context types:
	 * connection AG/ST, task AG/ST.
	 */
	u32 cm_conn_ag_ctx_lid_size;
	u32 cm_conn_ag_ctx_rd_addr;
	u32 cm_conn_st_ctx_lid_size;
	u32 cm_conn_st_ctx_rd_addr;
	u32 cm_task_ag_ctx_lid_size;
	u32 cm_task_ag_ctx_rd_addr;
	u32 cm_task_st_ctx_lid_size;
	u32 cm_task_st_ctx_rd_addr;
};
252
/* Block constant definitions */
struct block_defs {
	const char *name;
	/* True if the block exists on the given chip */
	bool exists[MAX_CHIP_IDS];
	bool associated_to_storm;

	/* Valid only if associated_to_storm is true */
	u32 storm_id;
	/* Debug bus client, per chip */
	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
	/* Debug bus control register addresses (0 if none) */
	u32 dbg_select_addr;
	u32 dbg_enable_addr;
	u32 dbg_shift_addr;
	u32 dbg_force_valid_addr;
	u32 dbg_force_frame_addr;
	bool has_reset_bit;

	/* If true, block is taken out of reset before dump */
	bool unreset;
	enum dbg_reset_regs reset_reg;

	/* Bit offset in reset register */
	u8 reset_bit_offset;
};
276
/* Reset register definitions */
struct reset_reg_defs {
	u32 addr;			/* reset register GRC address */
	bool exists[MAX_CHIP_IDS];	/* register exists on the chip */
	u32 unreset_val[MAX_CHIP_IDS];	/* value written to take blocks out of reset */
};
283
/* Debug Bus Constraint operation constant definitions */
struct dbg_bus_constraint_op_defs {
	u8 hw_op_val;	/* operation value as programmed into HW */
	bool is_cyclic;	/* true if the operation is cyclic */
};
289
/* Storm Mode definitions */
struct storm_mode_defs {
	const char *name;	/* mode name string */
	bool is_fast_dbg;	/* true if this is a fast debug mode */
	u8 id_in_hw;		/* mode ID as programmed into HW */
};
296
/* GRC dump parameter definitions */
struct grc_param_defs {
	u32 default_val[MAX_CHIP_IDS];	/* default value, per chip */
	u32 min;			/* minimum legal value */
	u32 max;			/* maximum legal value */
	/* True if the parameter is a preset that selects other params */
	bool is_preset;
	/* Value applied by the "exclude all" preset - TODO confirm */
	u32 exclude_all_preset_val;
	/* Value applied by the "crash" preset - TODO confirm */
	u32 crash_preset_val;
};
305
/* address is in 128b units. Width is in bits. */
struct rss_mem_defs {
	const char *mem_name;		/* memory name for dump headers */
	const char *type_name;		/* entry type name */
	u32 addr;			/* base address, in 128b units */
	u32 entry_width;		/* entry width, in bits */
	u32 num_entries[MAX_CHIP_IDS];	/* number of entries, per chip */
};
314
/* VFC RAM region definitions */
struct vfc_ram_defs {
	const char *mem_name;	/* memory name for dump headers */
	const char *type_name;	/* entry type name */
	u32 base_row;		/* first RAM row of the region */
	u32 num_rows;		/* number of rows in the region */
};
321
/* Big RAM (BRB/BTB/BMB) definitions */
struct big_ram_defs {
	const char *instance_name;		/* RAM instance name */
	enum mem_groups mem_group_id;		/* memory group of the block */
	enum mem_groups ram_mem_group_id;	/* memory group of the RAM itself */
	enum dbg_grc_params grc_param;		/* GRC param controlling its dump */
	u32 addr_reg_addr;			/* indirect address register */
	u32 data_reg_addr;			/* indirect data register */
	u32 is_256b_reg_addr;			/* register holding the 256b-row indication */
	u32 is_256b_bit_offset[MAX_CHIP_IDS];	/* bit offset of that indication, per chip */
	u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
};
333
/* PHY definitions - TBUS registers are addressed indirectly, relative
 * to base_addr.
 */
struct phy_defs {
	const char *phy_name;

	/* PHY base GRC address */
	u32 base_addr;

	/* Relative address of indirect TBUS address register (bits 0..7) */
	u32 tbus_addr_lo_addr;

	/* Relative address of indirect TBUS address register (bits 8..10) */
	u32 tbus_addr_hi_addr;

	/* Relative address of indirect TBUS data register (bits 0..7) */
	u32 tbus_data_lo_addr;

	/* Relative address of indirect TBUS data register (bits 8..11) */
	u32 tbus_data_hi_addr;
};
352
353/******************************** Constants **********************************/
354
/* Maximum number of logical CIDs/TIDs */
#define MAX_LCIDS			320
#define MAX_LTIDS			320

/* IOR dump constants: sets per storm, IORs per set, byte stride per set */
#define NUM_IOR_SETS			2
#define IORS_PER_SET			176
#define IOR_SET_OFFSET(set_id)		((set_id) * 256)

#define BYTES_IN_DWORD			sizeof(u32)
363
/* Cyclic right shift (rotate) of the lowest 'val_width' bits of 'val'
 * by 'amount' bits. The full expansion is parenthesized: the original
 * ended in an unparenthesized "& mask", which binds looser than
 * comparison/equality operators and mis-parses in expressions such as
 * "SHR(...) == x".
 */
#define SHR(val, val_width, amount) \
	((((val) | ((val) << (val_width))) >> (amount)) & \
	 ((1 << (val_width)) - 1))
366
/* In the macros below, size and offset are specified in bits */

/* Number of dwords needed to hold 'size' bits */
#define CEIL_DWORDS(size)		DIV_ROUND_UP(size, 32)

/* Bit offset/size of a field, via the <type>_<field>_OFFSET/SIZE tokens */
#define FIELD_BIT_OFFSET(type, field)	type##_##field##_##OFFSET
#define FIELD_BIT_SIZE(type, field)	type##_##field##_##SIZE

/* Index of the dword containing the field */
#define FIELD_DWORD_OFFSET(type, field) \
	(int)(FIELD_BIT_OFFSET(type, field) / 32)

/* Bit shift of the field within its dword */
#define FIELD_DWORD_SHIFT(type, field)	(FIELD_BIT_OFFSET(type, field) % 32)

/* Bit mask of the field within its dword */
#define FIELD_BIT_MASK(type, field) \
	(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
	 FIELD_DWORD_SHIFT(type, field))

/* Sets the field to 'val' inside dword array 'var'. Wrapped in
 * do/while(0) so the macro expands to a single statement - the original
 * expanded to two ';'-joined statements, which silently breaks inside
 * an unbraced if/else.
 */
#define SET_VAR_FIELD(var, type, field, val) \
	do { \
		var[FIELD_DWORD_OFFSET(type, field)] &= \
			(~FIELD_BIT_MASK(type, field)); \
		var[FIELD_DWORD_OFFSET(type, field)] |= \
			(val) << FIELD_DWORD_SHIFT(type, field); \
	} while (0)
376
/* Writes/reads 'arr_size' dwords to/from the same register address.
 * NOTE(review): both macros rely on a loop variable 'i' that must be
 * declared in the calling scope.
 */
#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) 	for (i = 0; i < (arr_size); i++) 		ecore_wr(dev, ptt, addr, (arr)[i])

#define ARR_REG_RD(dev, ptt, addr, arr, arr_size) 	for (i = 0; i < (arr_size); i++) 		(arr)[i] = ecore_rd(dev, ptt, addr)

/* Compile-time check that an array has the expected element count */
#define CHECK_ARR_SIZE(arr, size) 	OSAL_BUILD_BUG_ON(!(OSAL_ARRAY_SIZE(arr) == size))

#ifndef DWORDS_TO_BYTES
#define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
#endif
#ifndef BYTES_TO_DWORDS
#define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)
#endif

/* extra lines include a signature line + optional latency events line */
#ifndef NUM_DBG_LINES
#define NUM_EXTRA_DBG_LINES(block_desc)		(1 + (block_desc->has_latency_events ? 1 : 0))
#define NUM_DBG_LINES(block_desc)		(block_desc->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
#endif
395
#define USE_DMAE				true
#define PROTECT_WIDE_BUS		true

/* Each RAM line is 2 dwords (64 bits) */
#define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
#define RAM_LINES_TO_BYTES(lines)		DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))

#define REG_DUMP_LEN_SHIFT		24
#define MEM_DUMP_ENTRY_SIZE_DWORDS		BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))

#define IDLE_CHK_RULE_SIZE_DWORDS		BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))

#define IDLE_CHK_RESULT_HDR_DWORDS		BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))

#define IDLE_CHK_RESULT_REG_HDR_DWORDS		BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))

#define IDLE_CHK_MAX_ENTRIES_SIZE	32

/* The sizes and offsets below are specified in bits */
#define VFC_CAM_CMD_STRUCT_SIZE		64
#define VFC_CAM_CMD_ROW_OFFSET		48
#define VFC_CAM_CMD_ROW_SIZE		9
#define VFC_CAM_ADDR_STRUCT_SIZE	16
#define VFC_CAM_ADDR_OP_OFFSET		0
#define VFC_CAM_ADDR_OP_SIZE		4
#define VFC_CAM_RESP_STRUCT_SIZE	256
#define VFC_RAM_ADDR_STRUCT_SIZE	16
#define VFC_RAM_ADDR_OP_OFFSET		0
#define VFC_RAM_ADDR_OP_SIZE		2
#define VFC_RAM_ADDR_ROW_OFFSET		2
#define VFC_RAM_ADDR_ROW_SIZE		10
#define VFC_RAM_RESP_STRUCT_SIZE	256

/* The same structures, in dwords */
#define VFC_CAM_CMD_DWORDS		CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
#define VFC_CAM_ADDR_DWORDS		CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
#define VFC_CAM_RESP_DWORDS		CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
#define VFC_RAM_CMD_DWORDS		VFC_CAM_CMD_DWORDS
#define VFC_RAM_ADDR_DWORDS		CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
#define VFC_RAM_RESP_DWORDS		CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)

#define NUM_VFC_RAM_TYPES		4

#define VFC_CAM_NUM_ROWS		512

#define VFC_OPCODE_CAM_RD		14
#define VFC_OPCODE_RAM_RD		0

#define NUM_RSS_MEM_TYPES		5

#define NUM_BIG_RAM_TYPES		3

/* Each TBUS address yields two bytes, packed two-per-dword pair */
#define NUM_PHY_TBUS_ADDRESSES		2048
#define PHY_DUMP_SIZE_DWORDS		(NUM_PHY_TBUS_ADDRESSES / 2)
448
/* SEM fast-memory debug source enable/disable values, per mode */
#define SEM_FAST_MODE23_SRC_ENABLE_VAL	0x0
#define SEM_FAST_MODE23_SRC_DISABLE_VAL	0x7
#define SEM_FAST_MODE4_SRC_ENABLE_VAL	0x0
#define SEM_FAST_MODE4_SRC_DISABLE_VAL	0x3
#define SEM_FAST_MODE6_SRC_ENABLE_VAL	0x10
#define SEM_FAST_MODE6_SRC_DISABLE_VAL	0x3f

#define SEM_SLOW_MODE1_DATA_ENABLE	0x1

/* Debug bus cycle layout */
#define VALUES_PER_CYCLE		4
#define MAX_CYCLE_VALUES_MASK		((1 << VALUES_PER_CYCLE) - 1)

#define MAX_DWORDS_PER_CYCLE		8

#define HW_ID_BITS			3

#define NUM_CALENDAR_SLOTS		16

/* Debug bus trigger/constraint limits */
#define MAX_TRIGGER_STATES		3
#define TRIGGER_SETS_PER_STATE		2
#define MAX_CONSTRAINTS			4

/* SEM filter enable masks */
#define SEM_FILTER_CID_EN_MASK		0x00b
#define SEM_FILTER_EID_MASK_EN_MASK	0x013
#define SEM_FILTER_EID_RANGE_EN_MASK	0x113

#define CHUNK_SIZE_IN_DWORDS		64
#define CHUNK_SIZE_IN_BYTES		DWORDS_TO_BYTES(CHUNK_SIZE_IN_DWORDS)

/* Internal trace buffer dimensions */
#define INT_BUF_NUM_OF_LINES		192
#define INT_BUF_LINE_SIZE_IN_DWORDS	16
#define INT_BUF_SIZE_IN_DWORDS			(INT_BUF_NUM_OF_LINES * INT_BUF_LINE_SIZE_IN_DWORDS)
#define INT_BUF_SIZE_IN_CHUNKS			(INT_BUF_SIZE_IN_DWORDS / CHUNK_SIZE_IN_DWORDS)

#define PCI_BUF_LINE_SIZE_IN_DWORDS	8
#define PCI_BUF_LINE_SIZE_IN_BYTES		DWORDS_TO_BYTES(PCI_BUF_LINE_SIZE_IN_DWORDS)

/* Debug data target enable masks (PCI / NIG) */
#define TARGET_EN_MASK_PCI		0x3
#define TARGET_EN_MASK_NIG		0x4

#define PCI_REQ_CREDIT			1
#define PCI_PHYS_ADDR_TYPE		0
491
492#define OPAQUE_FID(pci_func)		((pci_func << 4) | 0xff00)
493
/* Byte offset between a reset register and its unreset counterpart */
#define RESET_REG_UNRESET_OFFSET	4

#define PCI_PKT_SIZE_IN_CHUNKS		1
#define PCI_PKT_SIZE_IN_BYTES			(PCI_PKT_SIZE_IN_CHUNKS * CHUNK_SIZE_IN_BYTES)

#define NIG_PKT_SIZE_IN_CHUNKS		4

/* Delays (in ms) used when flushing/stalling the debug bus */
#define FLUSH_DELAY_MS			500
#define STALL_DELAY_MS			500

/* Dummy Ethernet header values for NIG-target debug packets */
#define SRC_MAC_ADDR_LO16		0x0a0b
#define SRC_MAC_ADDR_HI32		0x0c0d0e0f
#define ETH_TYPE			0x1000

#define STATIC_DEBUG_LINE_DWORDS	9

#define NUM_COMMON_GLOBAL_PARAMS	8

/* MCP firmware image indices */
#define FW_IMG_KUKU			0
#define FW_IMG_MAIN			1
#define FW_IMG_L2B			2

#ifndef REG_FIFO_ELEMENT_DWORDS
#define REG_FIFO_ELEMENT_DWORDS		2
#endif
#define REG_FIFO_DEPTH_ELEMENTS		32
#define REG_FIFO_DEPTH_DWORDS			(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)

#ifndef IGU_FIFO_ELEMENT_DWORDS
#define IGU_FIFO_ELEMENT_DWORDS		4
#endif
#define IGU_FIFO_DEPTH_ELEMENTS		64
#define IGU_FIFO_DEPTH_DWORDS			(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)

/* Polling parameters for the semi-sync FIFO */
#define SEMI_SYNC_FIFO_POLLING_DELAY_MS	5
#define SEMI_SYNC_FIFO_POLLING_COUNT	20

#ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS
#define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
#endif
#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
#define PROTECTION_OVERRIDE_DEPTH_DWORDS   	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS 	* PROTECTION_OVERRIDE_ELEMENT_DWORDS)

/* Address of the MCP trace offset/size word in the MCP scratchpad */
#define MCP_SPAD_TRACE_OFFSIZE_ADDR		(MCP_REG_SCRATCH + 	OFFSETOF(struct static_init, sections[SPAD_SECTION_TRACE]))

/* Placeholders used when the FW version/image name is unknown */
#define EMPTY_FW_VERSION_STR		"???_???_???_???"
#define EMPTY_FW_IMAGE_STR		"???????????????"
541
542/***************************** Constant Arrays *******************************/
543
/* A debug data buffer and its size */
struct dbg_array {
	const u32 *ptr;		/* buffer pointer (OSAL_NULL if absent) */
	u32 size_in_dwords;	/* buffer size, in dwords */
};
548
/* Debug arrays */
#ifdef USE_DBG_BIN_FILE
/* When using a binary debug data file the arrays start out empty -
 * presumably populated at runtime from the binary (not shown in this
 * chunk).
 */
static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { { OSAL_NULL } };
#else
/* Statically initialized from ecore_dbg_values.h (included above) */
static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = {
	/* BIN_BUF_DBG_MODE_TREE */
	{ (const u32 *)dbg_modes_tree_buf, OSAL_ARRAY_SIZE(dbg_modes_tree_buf)},

	/* BIN_BUF_DBG_DUMP_REG */
	{ dump_reg, OSAL_ARRAY_SIZE(dump_reg) },

	/* BIN_BUF_DBG_DUMP_MEM */
	{ dump_mem, OSAL_ARRAY_SIZE(dump_mem) },

	/* BIN_BUF_DBG_IDLE_CHK_REGS */
	{ idle_chk_regs, OSAL_ARRAY_SIZE(idle_chk_regs) },

	/* BIN_BUF_DBG_IDLE_CHK_IMMS */
	{ idle_chk_imms, OSAL_ARRAY_SIZE(idle_chk_imms) },

	/* BIN_BUF_DBG_IDLE_CHK_RULES */
	{ idle_chk_rules, OSAL_ARRAY_SIZE(idle_chk_rules) },

	/* BIN_BUF_DBG_IDLE_CHK_PARSING_DATA */
	{ OSAL_NULL, 0 },

	/* BIN_BUF_DBG_ATTN_BLOCKS */
	{ attn_block, OSAL_ARRAY_SIZE(attn_block) },

	/* BIN_BUF_DBG_ATTN_REGSS */
	{ attn_reg, OSAL_ARRAY_SIZE(attn_reg) },

	/* BIN_BUF_DBG_ATTN_INDEXES */
	{ OSAL_NULL, 0 },

	/* BIN_BUF_DBG_ATTN_NAME_OFFSETS */
	{ OSAL_NULL, 0 },

	/* BIN_BUF_DBG_BUS_BLOCKS */
	{ dbg_bus_blocks, OSAL_ARRAY_SIZE(dbg_bus_blocks) },

	/* BIN_BUF_DBG_BUS_LINES */
	{ dbg_bus_lines, OSAL_ARRAY_SIZE(dbg_bus_lines) },

	/* BIN_BUF_DBG_BUS_BLOCKS_USER_DATA */
	{ OSAL_NULL, 0 },

	/* BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS */
	{ OSAL_NULL, 0 },

	/* BIN_BUF_DBG_PARSING_STRINGS */
	{ OSAL_NULL, 0 }
};
#endif
603
/* Chip constant definitions array.
 * The inner array is indexed by enum platform_ids (ASIC, EMUL_FULL,
 * EMUL_REDUCED, FPGA). NOTE(review): FPGA num_pfs is hard-coded to 8
 * for "ah" and "e5" - presumably an FPGA limitation; verify.
 */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
	{ "bb",

		/* ASIC */
		{ { MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },

		/* EMUL_FULL */
		{ MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },

		/* EMUL_REDUCED */
		{ MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB },

		/* FPGA */
		{ MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB } } },

	{ "ah",

		/* ASIC */
		{ { MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },

		/* EMUL_FULL */
		{ MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },

		/* EMUL_REDUCED */
		{ MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2 },

		/* FPGA */
		{ MAX_NUM_PORTS_K2, 8, MAX_NUM_VFS_K2 } } },

	{ "e5",

		/* ASIC */
		{ { MAX_NUM_PORTS_E5, MAX_NUM_PFS_E5, MAX_NUM_VFS_E5 },

		/* EMUL_FULL */
		{ MAX_NUM_PORTS_E5, MAX_NUM_PFS_E5, MAX_NUM_VFS_E5 },

		/* EMUL_REDUCED */
		{ MAX_NUM_PORTS_E5, MAX_NUM_PFS_E5, MAX_NUM_VFS_E5 },

		/* FPGA */
		{ MAX_NUM_PORTS_E5, 8, MAX_NUM_VFS_E5 } } }
};
648
/* Storm constant definitions array.
 * Initializer layout follows struct storm_defs: letter, block_id,
 * dbg_client_id[chip], has_vfc, the seven SEM register addresses,
 * cm_ctx_wr_addr, then four (lid_size, rd_addr) pairs for
 * connection AG/ST and task AG/ST contexts.
 */
static struct storm_defs s_storm_defs[] = {
	/* Tstorm */
	{	'T', BLOCK_TSEM,
		{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT }, true,
		TSEM_REG_FAST_MEMORY,
		TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
		TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
		TCM_REG_CTX_RBC_ACCS,
		4, TCM_REG_AGG_CON_CTX,
		16, TCM_REG_SM_CON_CTX,
		2, TCM_REG_AGG_TASK_CTX,
		4, TCM_REG_SM_TASK_CTX },

	/* Mstorm */
	{	'M', BLOCK_MSEM,
		{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM }, false,
		MSEM_REG_FAST_MEMORY,
		MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
		MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
		MCM_REG_CTX_RBC_ACCS,
		1, MCM_REG_AGG_CON_CTX,
		10, MCM_REG_SM_CON_CTX,
		2, MCM_REG_AGG_TASK_CTX,
		7, MCM_REG_SM_TASK_CTX },

	/* Ustorm */
	{	'U', BLOCK_USEM,
		{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU }, false,
		USEM_REG_FAST_MEMORY,
		USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
		USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
		UCM_REG_CTX_RBC_ACCS,
		2, UCM_REG_AGG_CON_CTX,
		13, UCM_REG_SM_CON_CTX,
		3, UCM_REG_AGG_TASK_CTX,
		3, UCM_REG_SM_TASK_CTX },

	/* Xstorm */
	{	'X', BLOCK_XSEM,
		{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX }, false,
		XSEM_REG_FAST_MEMORY,
		XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
		XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
		XCM_REG_CTX_RBC_ACCS,
		9, XCM_REG_AGG_CON_CTX,
		15, XCM_REG_SM_CON_CTX,
		0, 0,
		0, 0 },

	/* Ystorm */
	{	'Y', BLOCK_YSEM,
		{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY }, false,
		YSEM_REG_FAST_MEMORY,
		YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
		/* NOTE(review): slow_dbg_empty uses the TSEM register here,
		 * not YSEM - matches other ecore/qed sources, but verify.
		 */
		YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
		YCM_REG_CTX_RBC_ACCS,
		2, YCM_REG_AGG_CON_CTX,
		3, YCM_REG_SM_CON_CTX,
		2, YCM_REG_AGG_TASK_CTX,
		12, YCM_REG_SM_TASK_CTX },

	/* Pstorm */
	{	'P', BLOCK_PSEM,
		{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS }, true,
		PSEM_REG_FAST_MEMORY,
		PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
		PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
		PCM_REG_CTX_RBC_ACCS,
		0, 0,
		10, PCM_REG_SM_CON_CTX,
		0, 0,
		0, 0 }
};
729
/* Block definitions array.
 * Initializer layout follows struct block_defs:
 *   name, exists[chip], associated_to_storm, storm_id,
 *   dbg_client_id[chip],
 *   dbg select/enable/shift/force_valid/force_frame addresses
 *   (all 0 when the block has no debug bus client),
 *   has_reset_bit, unreset, reset_reg, reset_bit_offset.
 */

static struct block_defs block_grc_defs = {
	"grc", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN },
	GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
	GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
	GRC_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_UA, 1 };

static struct block_defs block_miscs_defs = {
	"miscs", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_misc_defs = {
	"misc", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_dbu_defs = {
	"dbu", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_pglue_b_defs = {
	"pglue_b", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
	PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
	PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
	PGLUE_B_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 1 };

static struct block_defs block_cnig_defs = {
	"cnig", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW },
	CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
	CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
	CNIG_REG_DBG_FORCE_FRAME_K2_E5,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 0 };

static struct block_defs block_cpmu_defs = {
	"cpmu", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 8 };

static struct block_defs block_ncsi_defs = {
	"ncsi", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
	NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
	NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
	NCSI_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 5 };

static struct block_defs block_opte_defs = {
	"opte", { true, true, false }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 4 };

static struct block_defs block_bmb_defs = {
	"bmb", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB },
	BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
	BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
	BMB_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISCS_PL_UA, 7 };

static struct block_defs block_pcie_defs = {
	"pcie", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
	PCIE_REG_DBG_COMMON_SELECT_K2_E5, PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
	PCIE_REG_DBG_COMMON_SHIFT_K2_E5, PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
	false, false, MAX_DBG_RESET_REGS, 0 };
809
/* See struct block_defs for the initializer field order */

static struct block_defs block_mcp_defs = {
	"mcp", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_mcp2_defs = {
	"mcp2", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
	MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
	MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
	MCP2_REG_DBG_FORCE_FRAME,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_pswhst_defs = {
	"pswhst", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
	PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
	PSWHST_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 0 };

static struct block_defs block_pswhst2_defs = {
	"pswhst2", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
	PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
	PSWHST2_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 0 };

static struct block_defs block_pswrd_defs = {
	"pswrd", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
	PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
	PSWRD_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 2 };

static struct block_defs block_pswrd2_defs = {
	"pswrd2", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
	PSWRD2_REG_DBG_SHIFT,	PSWRD2_REG_DBG_FORCE_VALID,
	PSWRD2_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 2 };

static struct block_defs block_pswwr_defs = {
	"pswwr", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
	PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
	PSWWR_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 3 };

static struct block_defs block_pswwr2_defs = {
	"pswwr2", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISC_PL_HV, 3 };

static struct block_defs block_pswrq_defs = {
	"pswrq", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
	PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
	PSWRQ_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 1 };

static struct block_defs block_pswrq2_defs = {
	"pswrq2", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
	PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
	PSWRQ2_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 1 };

static struct block_defs block_pglcs_defs =	{
	"pglcs", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
	PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
	PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
	PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 2 };

static struct block_defs block_ptu_defs ={
	"ptu", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
	PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
	PTU_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20 };
901
902static struct block_defs block_dmae_defs = {
903	"dmae", { true, true, true }, false, 0,
904	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
905	DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
906	DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
907	DMAE_REG_DBG_FORCE_FRAME,
908	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28 };
909
910static struct block_defs block_tcm_defs = {
911	"tcm", { true, true, true }, true, DBG_TSTORM_ID,
912	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
913	TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
914	TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
915	TCM_REG_DBG_FORCE_FRAME,
916	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5 };
917
918static struct block_defs block_mcm_defs = {
919	"mcm", { true, true, true }, true, DBG_MSTORM_ID,
920	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
921	MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
922	MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
923	MCM_REG_DBG_FORCE_FRAME,
924	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3 };
925
/* Per-HW-block debug definitions (continued from the entries above).
 *
 * NOTE(review): initializers are positional; judging from the values they
 * appear to be: block name, per-chip debug-bus availability, storm
 * association flag + storm id, per-chip debug-bus client id, the five
 * debug-bus register addresses (select, dword-enable, shift, force-valid,
 * force-frame), then reset-related fields ending in a DBG_RESET_REG_* id
 * and a bit offset. Confirm against the struct block_defs declaration.
 */
static struct block_defs block_ucm_defs = {
	"ucm", { true, true, true }, true, DBG_USTORM_ID,
	{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
	UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
	UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
	UCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8 };

static struct block_defs block_xcm_defs = {
	"xcm", { true, true, true }, true, DBG_XSTORM_ID,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
	XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
	XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
	XCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19 };

static struct block_defs block_ycm_defs = {
	"ycm", { true, true, true }, true, DBG_YSTORM_ID,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY },
	YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
	YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
	YCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5 };

static struct block_defs block_pcm_defs = {
	"pcm", { true, true, true }, true, DBG_PSTORM_ID,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
	PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
	PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
	PCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4 };

static struct block_defs block_qm_defs = {
	"qm", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ },
	QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
	QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
	QM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16 };

static struct block_defs block_tm_defs = {
	"tm", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
	TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
	TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
	TM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17 };

static struct block_defs block_dorq_defs = {
	"dorq", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY },
	DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
	DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
	DORQ_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18 };

static struct block_defs block_brb_defs = {
	"brb", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR },
	BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
	BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
	BRB_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0 };

static struct block_defs block_src_defs = {
	"src", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
	SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
	SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
	SRC_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2 };

static struct block_defs block_prs_defs = {
	"prs", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR },
	PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
	PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
	PRS_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1 };

static struct block_defs block_tsdm_defs = {
	"tsdm", { true, true, true }, true, DBG_TSTORM_ID,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
	TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
	TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
	TSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3 };

static struct block_defs block_msdm_defs = {
	"msdm", { true, true, true }, true, DBG_MSTORM_ID,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
	MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
	MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
	MSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6 };

static struct block_defs block_usdm_defs = {
	"usdm", { true, true, true }, true, DBG_USTORM_ID,
	{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
	USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
	USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
	USDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
	};
static struct block_defs block_xsdm_defs = {
	"xsdm", { true, true, true }, true, DBG_XSTORM_ID,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
	XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
	XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
	XSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20 };

static struct block_defs block_ysdm_defs = {
	"ysdm", { true, true, true }, true, DBG_YSTORM_ID,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY },
	YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
	YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
	YSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8 };

static struct block_defs block_psdm_defs = {
	"psdm", { true, true, true }, true, DBG_PSTORM_ID,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
	PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
	PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
	PSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7 };
1053
/* Per-HW-block debug definitions, continued (SEM, RSS and loader blocks).
 * Positional initializers -- field names are in the struct block_defs
 * declaration (elsewhere in the project).
 */
static struct block_defs block_tsem_defs = {
	"tsem", { true, true, true }, true, DBG_TSTORM_ID,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
	TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
	TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
	TSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4 };

static struct block_defs block_msem_defs = {
	"msem", { true, true, true }, true, DBG_MSTORM_ID,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
	MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
	MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
	MSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9 };

static struct block_defs block_usem_defs = {
	"usem", { true, true, true }, true, DBG_USTORM_ID,
	{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
	USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
	USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
	USEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9 };

static struct block_defs block_xsem_defs = {
	"xsem", { true, true, true }, true, DBG_XSTORM_ID,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
	XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
	XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
	XSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21 };

static struct block_defs block_ysem_defs = {
	"ysem", { true, true, true }, true, DBG_YSTORM_ID,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY },
	YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
	YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
	YSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11 };

static struct block_defs block_psem_defs = {
	"psem", { true, true, true }, true, DBG_PSTORM_ID,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
	PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
	PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
	PSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10 };

static struct block_defs block_rss_defs = {
	"rss", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT },
	RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
	RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
	RSS_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18 };

static struct block_defs block_tmld_defs = {
	"tmld", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
	TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
	TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
	TMLD_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13 };

static struct block_defs block_muld_defs = {
	"muld", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU },
	MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
	MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
	MULD_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14 };

/* yuld exists only on BB/K2 (third per-chip flag is false); its debug
 * registers use the _BB_K2 variants accordingly.
 */
static struct block_defs block_yuld_defs = {
	"yuld", { true, true, false }, false, 0,
	{ DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, MAX_DBG_BUS_CLIENTS },
	YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
	YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
	YULD_REG_DBG_FORCE_FRAME_BB_K2,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 15 };

static struct block_defs block_xyld_defs = {
	"xyld", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX },
	XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
	XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
	XYLD_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12 };

/* ptld/ypld are E5-only blocks (only the third per-chip flag is true). */
static struct block_defs block_ptld_defs = {
	"ptld", { false, false, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT },
	PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
	PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
	PTLD_REG_DBG_FORCE_FRAME_E5,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 28 };

static struct block_defs block_ypld_defs = {
	"ypld", { false, false, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS },
	YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
	YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
	YPLD_REG_DBG_FORCE_FRAME_E5,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 27 };
1157
/* Per-HW-block debug definitions, continued (Tx path, DIF, CDU/CFC,
 * interrupt blocks). Positional initializers -- field names are in the
 * struct block_defs declaration (elsewhere in the project).
 */
static struct block_defs block_prm_defs = {
	"prm", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
	PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
	PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
	PRM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21 };

static struct block_defs block_pbf_pb1_defs = {
	"pbf_pb1", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV },
	PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
	PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
	PBF_PB1_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 11 };

static struct block_defs block_pbf_pb2_defs = {
	"pbf_pb2", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV },
	PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
	PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
	PBF_PB2_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 12 };

static struct block_defs block_rpb_defs = {
	"rpb", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
	RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
	RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
	RPB_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13 };

static struct block_defs block_btb_defs = {
	"btb", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV },
	BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
	BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
	BTB_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10 };

static struct block_defs block_pbf_defs = {
	"pbf", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV },
	PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
	PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
	PBF_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15 };

static struct block_defs block_rdif_defs = {
	"rdif", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM },
	RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
	RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
	RDIF_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16 };

static struct block_defs block_tdif_defs = {
	"tdif", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS },
	TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
	TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
	TDIF_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17 };

static struct block_defs block_cdu_defs = {
	"cdu", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
	CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
	CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
	CDU_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23 };

static struct block_defs block_ccfc_defs = {
	"ccfc", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
	CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
	CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
	CCFC_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24 };

static struct block_defs block_tcfc_defs = {
	"tcfc", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF },
	TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
	TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
	TCFC_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25 };

static struct block_defs block_igu_defs = {
	"igu", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
	IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
	IGU_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27 };

static struct block_defs block_cau_defs = {
	"cau", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP },
	CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
	CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
	CAU_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19 };
1261
/* Per-HW-block debug definitions, continued (E5-only blocks, MACs, and
 * blocks with no debug bus). Entries whose five register addresses are all
 * zero have no debug-bus registers wired up (here). Positional
 * initializers -- field names are in the struct block_defs declaration.
 */

/* TODO: add debug bus parameters when E5 RGFS RF is added */
static struct block_defs block_rgfs_defs = {
	"rgfs", { false, false, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29 };

static struct block_defs block_rgsrc_defs = {
	"rgsrc", { false, false, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH },
	RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
	RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
	RGSRC_REG_DBG_FORCE_FRAME_E5,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 30 };

/* TODO: add debug bus parameters when E5 TGFS RF is added */
static struct block_defs block_tgfs_defs = {
	"tgfs", { false, false, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30 };

static struct block_defs block_tgsrc_defs = {
	"tgsrc", { false, false, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV },
	TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
	TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
	TGSRC_REG_DBG_FORCE_FRAME_E5,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 31 };

static struct block_defs block_umac_defs = {
	"umac", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
	UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
	UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
	UMAC_REG_DBG_FORCE_FRAME_K2_E5,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 6 };

static struct block_defs block_xmac_defs = {
	"xmac", { true, false, false }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0	};

static struct block_defs block_dbg_defs = {
	"dbg", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3 };

static struct block_defs block_nig_defs = {
	"nig", { true, true, true }, false, 0,
	{ DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN },
	NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
	NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
	NIG_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0 };

static struct block_defs block_wol_defs = {
	"wol", { false, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
	WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
	WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
	WOL_REG_DBG_FORCE_FRAME_K2_E5,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7 };

static struct block_defs block_bmbn_defs = {
	"bmbn", { false, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB },
	BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
	BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
	BMBN_REG_DBG_FORCE_FRAME_K2_E5,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_ipc_defs = {
	"ipc", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISCS_PL_UA, 8 };

static struct block_defs block_nwm_defs = {
	"nwm", { false, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW },
	NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
	NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
	NWM_REG_DBG_FORCE_FRAME_K2_E5,
	true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0 };

static struct block_defs block_nws_defs = {
	"nws", { false, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW },
	NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
	NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
	NWS_REG_DBG_FORCE_FRAME_K2_E5,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 12 };

static struct block_defs block_ms_defs = {
	"ms", { false, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ },
	MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
	MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
	MS_REG_DBG_FORCE_FRAME_K2_E5,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 13 };

static struct block_defs block_phy_pcie_defs = {
	"phy_pcie", { false, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH },
	PCIE_REG_DBG_COMMON_SELECT_K2_E5, PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
	PCIE_REG_DBG_COMMON_SHIFT_K2_E5, PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_led_defs = {
	"led", { false, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 14 };

static struct block_defs block_avs_wrap_defs = {
	"avs_wrap", { false, true, false }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISCS_PL_UA, 11 };

static struct block_defs block_pxpreqbus_defs = {
	"pxpreqbus", { false, false, false }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_misc_aeu_defs = {
	"misc_aeu", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };

static struct block_defs block_bar0_map_defs = {
	"bar0_map", { true, true, true }, false, 0,
	{ MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS },
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0 };
1403
1404static struct block_defs* s_block_defs[MAX_BLOCK_ID] = {
1405	&block_grc_defs,
1406 	&block_miscs_defs,
1407 	&block_misc_defs,
1408 	&block_dbu_defs,
1409 	&block_pglue_b_defs,
1410 	&block_cnig_defs,
1411 	&block_cpmu_defs,
1412 	&block_ncsi_defs,
1413 	&block_opte_defs,
1414 	&block_bmb_defs,
1415 	&block_pcie_defs,
1416 	&block_mcp_defs,
1417 	&block_mcp2_defs,
1418 	&block_pswhst_defs,
1419 	&block_pswhst2_defs,
1420 	&block_pswrd_defs,
1421 	&block_pswrd2_defs,
1422 	&block_pswwr_defs,
1423 	&block_pswwr2_defs,
1424 	&block_pswrq_defs,
1425 	&block_pswrq2_defs,
1426 	&block_pglcs_defs,
1427 	&block_dmae_defs,
1428 	&block_ptu_defs,
1429 	&block_tcm_defs,
1430 	&block_mcm_defs,
1431 	&block_ucm_defs,
1432 	&block_xcm_defs,
1433 	&block_ycm_defs,
1434 	&block_pcm_defs,
1435 	&block_qm_defs,
1436 	&block_tm_defs,
1437 	&block_dorq_defs,
1438 	&block_brb_defs,
1439 	&block_src_defs,
1440 	&block_prs_defs,
1441 	&block_tsdm_defs,
1442 	&block_msdm_defs,
1443 	&block_usdm_defs,
1444 	&block_xsdm_defs,
1445 	&block_ysdm_defs,
1446 	&block_psdm_defs,
1447 	&block_tsem_defs,
1448 	&block_msem_defs,
1449 	&block_usem_defs,
1450 	&block_xsem_defs,
1451 	&block_ysem_defs,
1452 	&block_psem_defs,
1453 	&block_rss_defs,
1454 	&block_tmld_defs,
1455 	&block_muld_defs,
1456 	&block_yuld_defs,
1457 	&block_xyld_defs,
1458 	&block_ptld_defs,
1459 	&block_ypld_defs,
1460 	&block_prm_defs,
1461 	&block_pbf_pb1_defs,
1462 	&block_pbf_pb2_defs,
1463 	&block_rpb_defs,
1464 	&block_btb_defs,
1465 	&block_pbf_defs,
1466 	&block_rdif_defs,
1467 	&block_tdif_defs,
1468 	&block_cdu_defs,
1469 	&block_ccfc_defs,
1470 	&block_tcfc_defs,
1471 	&block_igu_defs,
1472 	&block_cau_defs,
1473 	&block_rgfs_defs,
1474 	&block_rgsrc_defs,
1475 	&block_tgfs_defs,
1476 	&block_tgsrc_defs,
1477 	&block_umac_defs,
1478 	&block_xmac_defs,
1479 	&block_dbg_defs,
1480 	&block_nig_defs,
1481 	&block_wol_defs,
1482 	&block_bmbn_defs,
1483 	&block_ipc_defs,
1484 	&block_nwm_defs,
1485 	&block_nws_defs,
1486 	&block_ms_defs,
1487 	&block_phy_pcie_defs,
1488 	&block_led_defs,
1489 	&block_avs_wrap_defs,
1490 	&block_pxpreqbus_defs,
1491 	&block_misc_aeu_defs,
1492 	&block_bar0_map_defs,
1493
1494};
1495
/* Constraint operation types, indexed by DBG_BUS_CONSTRAINT_OP_*.
 * NOTE(review): initializers are positional; the first value looks like a
 * HW comparison-operation code and the bool is set exactly for the
 * "C"-suffixed (cyclic?) variants -- confirm field names against the
 * struct dbg_bus_constraint_op_defs declaration.
 */
static struct dbg_bus_constraint_op_defs s_constraint_op_defs[] = {
	/* DBG_BUS_CONSTRAINT_OP_EQ */
	{ 0, false },

	/* DBG_BUS_CONSTRAINT_OP_NE */
	{ 5, false },

	/* DBG_BUS_CONSTRAINT_OP_LT */
	{ 1, false },

	/* DBG_BUS_CONSTRAINT_OP_LTC */
	{ 1, true },

	/* DBG_BUS_CONSTRAINT_OP_LE */
	{ 2, false },

	/* DBG_BUS_CONSTRAINT_OP_LEC */
	{ 2, true },

	/* DBG_BUS_CONSTRAINT_OP_GT */
	{ 4, false },

	/* DBG_BUS_CONSTRAINT_OP_GTC */
	{ 4, true },

	/* DBG_BUS_CONSTRAINT_OP_GE */
	{ 3, false },

	/* DBG_BUS_CONSTRAINT_OP_GEC */
	{ 3, true }
};
1528
/* Human-readable names of the debug-bus recording targets, indexed by
 * DBG_BUS_TARGET_ID_* (internal buffer, network interface, PCI buffer).
 */
static const char* s_dbg_target_names[] = {
	/* DBG_BUS_TARGET_ID_INT_BUF */
	"int-buf",

	/* DBG_BUS_TARGET_ID_NIG */
	"nw",

	/* DBG_BUS_TARGET_ID_PCI */
	"pci-buf"
};
1539
/* Storm debug-bus mode definitions, indexed by DBG_BUS_STORM_MODE_*.
 * NOTE(review): initializers are positional: { name, <bool>, <numeric id> }.
 * The id is presumably the HW mode value and the bool distinguishes the two
 * groups of modes -- confirm against the struct storm_mode_defs declaration.
 */
static struct storm_mode_defs s_storm_mode_defs[] = {
	/* DBG_BUS_STORM_MODE_PRINTF */
	{ "printf", true, 0 },

	/* DBG_BUS_STORM_MODE_PRAM_ADDR */
	{ "pram_addr", true, 1 },

	/* DBG_BUS_STORM_MODE_DRA_RW */
	{ "dra_rw", true, 2 },

	/* DBG_BUS_STORM_MODE_DRA_W */
	{ "dra_w", true, 3 },

	/* DBG_BUS_STORM_MODE_LD_ST_ADDR */
	{ "ld_st_addr", true, 4 },

	/* DBG_BUS_STORM_MODE_DRA_FSM */
	{ "dra_fsm", true, 5 },

	/* DBG_BUS_STORM_MODE_RH */
	{ "rh", true, 6 },

	/* DBG_BUS_STORM_MODE_FOC */
	{ "foc", false, 1 },

	/* DBG_BUS_STORM_MODE_EXT_STORE */
	{ "ext_store", false, 3 }
};
1568
/* Per-platform tuning parameters, indexed by PLATFORM_*.
 * NOTE(review): initializers are positional: { name, <factor>, <n1>, <n2> }.
 * The factor is presumably a delay multiplier (emulation is much slower than
 * ASIC) and the last two values look like size/threshold limits -- confirm
 * against the struct platform_defs declaration.
 */
static struct platform_defs s_platform_defs[] = {
	/* PLATFORM_ASIC */
	{ "asic", 1, 256, 32768 },

	/* PLATFORM_EMUL_FULL */
	{ "emul_full", 2000, 8, 4096 },

	/* PLATFORM_EMUL_REDUCED */
	{ "emul_reduced", 2000, 8, 4096 },

	/* PLATFORM_FPGA */
	{ "fpga", 200, 32, 8192 }
};
1582
/* GRC dump parameter definitions, indexed by DBG_GRC_PARAM_*.
 * NOTE(review): initializers are positional; they appear to be:
 * { default value per chip }, min, max, is-preset flag, and two more values
 * (presumably the values applied by the EXCLUDE_ALL / CRASH presets) --
 * confirm against the struct grc_param_defs declaration.
 */
static struct grc_param_defs s_grc_param_defs[] = {
	/* DBG_GRC_PARAM_DUMP_TSTORM */
	{ { 1, 1, 1 }, 0, 1, false, 1, 1 },

	/* DBG_GRC_PARAM_DUMP_MSTORM */
	{ { 1, 1, 1 }, 0, 1, false, 1, 1 },

	/* DBG_GRC_PARAM_DUMP_USTORM */
	{ { 1, 1, 1 }, 0, 1, false, 1, 1 },

	/* DBG_GRC_PARAM_DUMP_XSTORM */
	{ { 1, 1, 1 }, 0, 1, false, 1, 1 },

	/* DBG_GRC_PARAM_DUMP_YSTORM */
	{ { 1, 1, 1 }, 0, 1, false, 1, 1 },

	/* DBG_GRC_PARAM_DUMP_PSTORM */
	{ { 1, 1, 1 }, 0, 1, false, 1, 1 },

	/* DBG_GRC_PARAM_DUMP_REGS */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_RAM */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_PBUF */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_IOR */
	{ { 0, 0, 0 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_VFC */
	{ { 0, 0, 0 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_CM_CTX */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_ILT */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_RSS */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_CAU */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_QM */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_MCP */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_RESERVED */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_CFC */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_IGU */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_BRB */
	{ { 0, 0, 0 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_BTB */
	{ { 0, 0, 0 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_BMB */
	{ { 0, 0, 0 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_NIG */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_MULD */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_PRS */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_DMAE */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_TM */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_SDM */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_DIF */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_STATIC */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_UNSTALL */
	{ { 0, 0, 0 }, 0, 1, false, 0, 0 },

	/* DBG_GRC_PARAM_NUM_LCIDS */
	{ { MAX_LCIDS, MAX_LCIDS, MAX_LCIDS }, 1, MAX_LCIDS, false, MAX_LCIDS, MAX_LCIDS },

	/* DBG_GRC_PARAM_NUM_LTIDS */
	{ { MAX_LTIDS, MAX_LTIDS, MAX_LTIDS }, 1, MAX_LTIDS, false, MAX_LTIDS, MAX_LTIDS },

	/* DBG_GRC_PARAM_EXCLUDE_ALL */
	{ { 0, 0, 0 }, 0, 1, true, 0, 0 },

	/* DBG_GRC_PARAM_CRASH */
	{ { 0, 0, 0 }, 0, 1, true, 0, 0 },

	/* DBG_GRC_PARAM_PARITY_SAFE */
	{ { 0, 0, 0 }, 0, 1, false, 1, 0 },

	/* DBG_GRC_PARAM_DUMP_CM */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_DUMP_PHY */
	{ { 1, 1, 1 }, 0, 1, false, 0, 1 },

	/* DBG_GRC_PARAM_NO_MCP */
	{ { 0, 0, 0 }, 0, 1, false, 0, 0 },

	/* DBG_GRC_PARAM_NO_FW_VER */
	{ { 0, 0, 0 }, 0, 1, false, 0, 0 }
};
1707
/* RSS memory definitions, one per NUM_RSS_MEM_TYPES.
 * NOTE(review): initializers are positional: { mem name, type name,
 * <offset/address>, <entry width?>, { per-chip entry count } } -- confirm
 * against the struct rss_mem_defs declaration.
 */
static struct rss_mem_defs s_rss_mem_defs[] = {
	{ "rss_mem_cid", "rss_cid", 0, 32,
	{ 256, 320, 512 } },

	{ "rss_mem_key_msb", "rss_key", 1024, 256,
	{ 128, 208, 257 } },

	{ "rss_mem_key_lsb", "rss_key", 2048, 64,
	{ 128, 208, 257 } },

	{ "rss_mem_info", "rss_info", 3072, 16,
	{ 128, 208, 256 } },

	{ "rss_mem_ind", "rss_ind", 4096, 16,
	{ 16384, 26624, 32768 } }
};
1724
/* VFC RAM region definitions, one per NUM_VFC_RAM_TYPES.
 * NOTE(review): positional initializers: { region name, type name,
 * <base row>, <row count> } -- units unclear from here; confirm against the
 * struct vfc_ram_defs declaration.
 */
static struct vfc_ram_defs s_vfc_ram_defs[] = {
	{ "vfc_ram_tt1", "vfc_ram", 0, 512 },
	{ "vfc_ram_mtt2", "vfc_ram", 512, 128 },
	{ "vfc_ram_stt2", "vfc_ram", 640, 32 },
	{ "vfc_ram_ro_vect", "vfc_ram", 672, 32 }
};
1731
/* Big-RAM definitions (BRB/BTB/BMB), one per NUM_BIG_RAM_TYPES.
 * NOTE(review): positional initializers: { instance name, two MEM_GROUP_*
 * ids, the controlling DBG_GRC_PARAM_*, the big-RAM address and data GRC
 * registers, a 256B-enable register, a per-chip array, and a per-chip size
 * array } -- confirm against the struct big_ram_defs declaration.
 */
static struct big_ram_defs s_big_ram_defs[] = {
	{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
	  BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA, MISC_REG_BLOCK_256B_EN, { 0, 0, 0 },
	  { 153600, 180224, 282624 } },

	{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
	  BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA, MISC_REG_BLOCK_256B_EN, { 0, 1, 1 },
	  { 92160, 117760, 168960 } },

	{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
	  BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA, MISCS_REG_BLOCK_256B_EN, { 0, 0, 0 },
	  { 36864, 36864, 36864 } }
};
1745
/* Reset register definitions, indexed by DBG_RESET_REG_*.
 * NOTE(review): positional initializers: { GRC register address, per-chip
 * existence flags, per-chip values (presumably the unreset value to write)
 * } -- confirm against the struct reset_reg_defs declaration.
 */
static struct reset_reg_defs s_reset_regs_defs[] = {
	/* DBG_RESET_REG_MISCS_PL_UA */
	{ MISCS_REG_RESET_PL_UA, { true, true, true }, { 0x0, 0x0, 0x0 } },

	/* DBG_RESET_REG_MISCS_PL_HV */
	{ MISCS_REG_RESET_PL_HV, { true, true, true }, { 0x0, 0x400, 0x600 } },

	/* DBG_RESET_REG_MISCS_PL_HV_2 */
	{ MISCS_REG_RESET_PL_HV_2_K2_E5, { false, true, true }, { 0x0, 0x0, 0x0 } },

	/* DBG_RESET_REG_MISC_PL_UA */
	{ MISC_REG_RESET_PL_UA, { true, true, true }, { 0x0, 0x0, 0x0 } },

	/* DBG_RESET_REG_MISC_PL_HV */
	{ MISC_REG_RESET_PL_HV, { true, true, true }, { 0x0, 0x0, 0x0 } },

	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
	{ MISC_REG_RESET_PL_PDA_VMAIN_1, { true, true, true }, { 0x4404040, 0x4404040, 0x404040 } },

	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
	{ MISC_REG_RESET_PL_PDA_VMAIN_2, { true, true, true }, { 0x7, 0x7c00007, 0x5c08007 } },

	/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
	{ MISC_REG_RESET_PL_PDA_VAUX, { true, true, true }, { 0x2, 0x2, 0x2 } },
};
1771
/* PHY definitions: { name, base/CMU register, tbus address registers
 * (bits 7:0 and 15:8), tbus data registers (low and high bits) } -- field
 * roles inferred from the register names; confirm against struct phy_defs.
 */
static struct phy_defs s_phy_defs[] = {
	{ "nw_phy", NWS_REG_NWS_CMU_K2, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5, PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5 },
	{ "sgmii_phy", MS_REG_MS_CMU_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
	{ "pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
	{ "pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 },
};
1778
/* The order of indexes that should be applied to a PCI buffer line:
 * dwords within a line are swapped pairwise (1,0  3,2  5,4  7,6).
 */
static const u8 s_pci_buf_line_ind[PCI_BUF_LINE_SIZE_IN_DWORDS] = { 1, 0, 3, 2, 5, 4, 7, 6 };
1781
1782/******************************** Variables **********************************/
1783
/* The version of the calling app. Zero means "not set"; ecore_dbg_dev_init()
 * fails with DBG_STATUS_APP_VERSION_NOT_SET while it is zero.
 */
static u32 s_app_ver;
1786
1787/**************************** Private Functions ******************************/
1788
/* Compile-time sanity checks: verifies that each static definition table has
 * exactly one entry per value of its corresponding enum (CHECK_ARR_SIZE is
 * presumably a static-assert-style macro declared elsewhere). The function
 * itself is never meant to do anything at runtime.
 */
static void ecore_static_asserts(void)
{
	CHECK_ARR_SIZE(s_dbg_arrays, MAX_BIN_DBG_BUFFER_TYPE);
	CHECK_ARR_SIZE(s_big_ram_defs, NUM_BIG_RAM_TYPES);
	CHECK_ARR_SIZE(s_vfc_ram_defs, NUM_VFC_RAM_TYPES);
	CHECK_ARR_SIZE(s_rss_mem_defs, NUM_RSS_MEM_TYPES);
	CHECK_ARR_SIZE(s_chip_defs, MAX_CHIP_IDS);
	CHECK_ARR_SIZE(s_platform_defs, MAX_PLATFORM_IDS);
	CHECK_ARR_SIZE(s_storm_defs, MAX_DBG_STORMS);
	CHECK_ARR_SIZE(s_constraint_op_defs, MAX_DBG_BUS_CONSTRAINT_OPS);
	CHECK_ARR_SIZE(s_dbg_target_names, MAX_DBG_BUS_TARGETS);
	CHECK_ARR_SIZE(s_storm_mode_defs, MAX_DBG_BUS_STORM_MODES);
	CHECK_ARR_SIZE(s_grc_param_defs, MAX_DBG_GRC_PARAMS);
	CHECK_ARR_SIZE(s_reset_regs_defs, MAX_DBG_RESET_REGS);
}
1804
1805/* Reads and returns a single dword from the specified unaligned buffer. */
1806static u32 ecore_read_unaligned_dword(u8 *buf)
1807{
1808	u32 dword;
1809
1810	OSAL_MEMCPY((u8 *)&dword, buf, sizeof(dword));
1811	return dword;
1812}
1813
/* Returns the difference in bytes between the specified physical addresses.
 * Assumes that the first address is bigger then the second, and that the
 * difference is a 32-bit value.
 */
static u32 ecore_phys_addr_diff(struct dbg_bus_mem_addr *a,
								struct dbg_bus_mem_addr *b)
{
	/* NOTE(review): when the high dwords differ, this returns b->lo - a->lo
	 * rather than a->lo - b->lo. Given a > b with a 32-bit difference,
	 * unsigned wrap-around of a->lo - b->lo would already yield the correct
	 * result, so the swapped operands look suspicious - confirm against the
	 * original debug tooling before changing.
	 */
	return a->hi == b->hi ? a->lo - b->lo : b->lo - a->lo;
}
1823
1824/* Sets the value of the specified GRC param */
1825static void ecore_grc_set_param(struct ecore_hwfn *p_hwfn,
1826				 enum dbg_grc_params grc_param,
1827				 u32 val)
1828{
1829	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1830
1831	dev_data->grc.param_val[grc_param] = val;
1832}
1833
1834/* Returns the value of the specified GRC param */
1835static u32 ecore_grc_get_param(struct ecore_hwfn *p_hwfn,
1836							   enum dbg_grc_params grc_param)
1837{
1838	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1839
1840	return dev_data->grc.param_val[grc_param];
1841}
1842
1843/* Initializes the GRC parameters */
1844static void ecore_dbg_grc_init_params(struct ecore_hwfn *p_hwfn)
1845{
1846	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1847
1848	if (!dev_data->grc.params_initialized) {
1849		ecore_dbg_grc_set_params_default(p_hwfn);
1850		dev_data->grc.params_initialized = 1;
1851	}
1852}
1853
/* Initializes debug data for the specified device.
 *
 * Detects the chip type (E5/K2/BB) and platform (ASIC/emulation/FPGA),
 * enables the matching mode bits, and initializes the GRC parameters.
 * Idempotent: returns DBG_STATUS_OK immediately once initialized.
 *
 * Returns DBG_STATUS_APP_VERSION_NOT_SET if the calling app did not set
 * its version (s_app_ver), or DBG_STATUS_UNKNOWN_CHIP if neither the chip
 * nor the platform could be identified.
 */
static enum dbg_status ecore_dbg_dev_init(struct ecore_hwfn *p_hwfn,
										  struct ecore_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	if (dev_data->initialized)
		return DBG_STATUS_OK;

	if (!s_app_ver)
		return DBG_STATUS_APP_VERSION_NOT_SET;

	/* Chip detection */
	if (ECORE_IS_E5(p_hwfn->p_dev)) {
		dev_data->chip_id = CHIP_E5;
		dev_data->mode_enable[MODE_E5] = 1;
	}
	else if (ECORE_IS_K2(p_hwfn->p_dev)) {
		dev_data->chip_id = CHIP_K2;
		dev_data->mode_enable[MODE_K2] = 1;
	}
	else if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
		dev_data->chip_id = CHIP_BB;
		dev_data->mode_enable[MODE_BB] = 1;
	}
	else {
		return DBG_STATUS_UNKNOWN_CHIP;
	}

#ifdef ASIC_ONLY
	dev_data->platform_id = PLATFORM_ASIC;
	dev_data->mode_enable[MODE_ASIC] = 1;
#else
	/* Platform detection (ASIC / emulation / FPGA) */
	if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) {
		dev_data->platform_id = PLATFORM_ASIC;
		dev_data->mode_enable[MODE_ASIC] = 1;
	}
	else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		/* Bit 29 of MISCS_REG_ECO_RESERVED distinguishes full from
		 * reduced emulation.
		 */
		if (ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED) & 0x20000000) {
			dev_data->platform_id = PLATFORM_EMUL_FULL;
			dev_data->mode_enable[MODE_EMUL_FULL] = 1;
		}
		else {
			dev_data->platform_id = PLATFORM_EMUL_REDUCED;
			dev_data->mode_enable[MODE_EMUL_REDUCED] = 1;
		}
	}
	else if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		dev_data->platform_id = PLATFORM_FPGA;
		dev_data->mode_enable[MODE_FPGA] = 1;
	}
	else {
		return DBG_STATUS_UNKNOWN_CHIP;
	}
#endif

	/* Initializes the GRC parameters */
	ecore_dbg_grc_init_params(p_hwfn);

	dev_data->use_dmae = USE_DMAE;
	dev_data->num_regs_read = 0;
	dev_data->initialized = 1;

	return DBG_STATUS_OK;
}
1918
1919static struct dbg_bus_block* get_dbg_bus_block_desc(struct ecore_hwfn *p_hwfn,
1920														  enum block_id block_id)
1921{
1922	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1923
1924	return (struct dbg_bus_block *)&dbg_bus_blocks[block_id * MAX_CHIP_IDS + dev_data->chip_id];
1925}
1926
1927/* Returns OSAL_NULL for signature line, latency line and non-existing lines */
1928static struct dbg_bus_line* get_dbg_bus_line_desc(struct ecore_hwfn *p_hwfn,
1929														enum block_id block_id)
1930{
1931	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1932	struct dbg_bus_block_data *block_bus;
1933	struct dbg_bus_block *block_desc;
1934
1935	block_bus = &dev_data->bus.blocks[block_id];
1936	block_desc = get_dbg_bus_block_desc(p_hwfn, block_id);
1937
1938	if (!block_bus->line_num ||
1939		(block_bus->line_num == 1 && block_desc->has_latency_events) ||
1940		block_bus->line_num >= NUM_DBG_LINES(block_desc))
1941		return OSAL_NULL;
1942
1943	return (struct dbg_bus_line *)&dbg_bus_lines[block_desc->lines_offset + block_bus->line_num - NUM_EXTRA_DBG_LINES(block_desc)];
1944}
1945
/* Reads the FW info structure for the specified Storm from the chip,
 * and writes it to the specified fw_info pointer.
 *
 * Both fw_info_location and *fw_info are zeroed first, so on any failure
 * (e.g. a bogus location size) the caller sees an all-zero fw_info.
 */
static void ecore_read_fw_info(struct ecore_hwfn *p_hwfn,
							   struct ecore_ptt *p_ptt,
							   u8 storm_id,
							   struct fw_info *fw_info)
{
	struct storm_defs *storm = &s_storm_defs[storm_id];
	struct fw_info_location fw_info_location;
	u32 addr, i, *dest;

	OSAL_MEMSET(&fw_info_location, 0, sizeof(fw_info_location));
	OSAL_MEMSET(fw_info, 0, sizeof(*fw_info));

	/* Read first the address that points to fw_info location.
	 * The address is located in the last line of the Storm RAM.
	 * The RAM size differs between E5 and BB/K2 chips.
	 */
	addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
		(ECORE_IS_E5(p_hwfn->p_dev) ?
			DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_E5) :
			DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2))
		- sizeof(fw_info_location);

	dest = (u32 *)&fw_info_location;

	/* Read the location struct dword-by-dword from GRC */
	for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location)); i++, addr += BYTES_IN_DWORD)
		dest[i] = ecore_rd(p_hwfn, p_ptt, addr);

	/* Read FW version info from Storm RAM, but only if the advertised
	 * size is sane (non-zero and fits in the caller's struct).
	 */
	if (fw_info_location.size > 0 && fw_info_location.size <= sizeof(*fw_info)) {
		addr = fw_info_location.grc_addr;
		dest = (u32 *)fw_info;
		for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size); i++, addr += BYTES_IN_DWORD)
			dest[i] = ecore_rd(p_hwfn, p_ptt, addr);
	}
}
1983
1984/* Dumps the specified string to the specified buffer.
1985 * Returns the dumped size in bytes.
1986 */
1987static u32 ecore_dump_str(char *dump_buf,
1988						  bool dump,
1989						  const char *str)
1990{
1991	if (dump)
1992		OSAL_STRCPY(dump_buf, str);
1993
1994	return (u32)OSAL_STRLEN(str) + 1;
1995}
1996
1997/* Dumps zeros to align the specified buffer to dwords.
1998 * Returns the dumped size in bytes.
1999 */
2000static u32 ecore_dump_align(char *dump_buf,
2001							bool dump,
2002							u32 byte_offset)
2003{
2004	u8 offset_in_dword, align_size;
2005
2006	offset_in_dword = (u8)(byte_offset & 0x3);
2007	align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
2008
2009	if (dump && align_size)
2010		OSAL_MEMSET(dump_buf, 0, align_size);
2011
2012	return align_size;
2013}
2014
2015/* Writes the specified string param to the specified buffer.
2016 * Returns the dumped size in dwords.
2017 */
2018static u32 ecore_dump_str_param(u32 *dump_buf,
2019								bool dump,
2020								const char *param_name,
2021								const char *param_val)
2022{
2023	char *char_buf = (char *)dump_buf;
2024	u32 offset = 0;
2025
2026	/* Dump param name */
2027	offset += ecore_dump_str(char_buf + offset, dump, param_name);
2028
2029	/* Indicate a string param value */
2030	if (dump)
2031		*(char_buf + offset) = 1;
2032	offset++;
2033
2034	/* Dump param value */
2035	offset += ecore_dump_str(char_buf + offset, dump, param_val);
2036
2037	/* Align buffer to next dword */
2038	offset += ecore_dump_align(char_buf + offset, dump, offset);
2039
2040	return BYTES_TO_DWORDS(offset);
2041}
2042
2043/* Writes the specified numeric param to the specified buffer.
2044 * Returns the dumped size in dwords.
2045 */
2046static u32 ecore_dump_num_param(u32 *dump_buf,
2047								bool dump,
2048								const char *param_name,
2049								u32 param_val)
2050{
2051	char *char_buf = (char *)dump_buf;
2052	u32 offset = 0;
2053
2054	/* Dump param name */
2055	offset += ecore_dump_str(char_buf + offset, dump, param_name);
2056
2057	/* Indicate a numeric param value */
2058	if (dump)
2059		*(char_buf + offset) = 0;
2060	offset++;
2061
2062	/* Align buffer to next dword */
2063	offset += ecore_dump_align(char_buf + offset, dump, offset);
2064
2065	/* Dump param value (and change offset from bytes to dwords) */
2066	offset = BYTES_TO_DWORDS(offset);
2067	if (dump)
2068		*(dump_buf + offset) = param_val;
2069	offset++;
2070
2071	return offset;
2072}
2073
/* Reads the FW version and writes it as a param to the specified buffer.
 * Returns the dumped size in dwords.
 *
 * The version is read from the first Storm whose block is not in reset;
 * if all Storms are in reset (or dump/NO_FW_VER disables the read), the
 * params fall back to EMPTY_FW_VERSION_STR / EMPTY_FW_IMAGE_STR and a
 * zero timestamp (fw_info is zero-initialized).
 */
static u32 ecore_dump_fw_ver_param(struct ecore_hwfn *p_hwfn,
								   struct ecore_ptt *p_ptt,
								   u32 *dump_buf,
								   bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
	char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
	struct fw_info fw_info = { { 0 }, { 0 } };
	u32 offset = 0;

	if (dump && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
		/* Read FW image/version from PRAM in a non-reset SEMI */
		bool found = false;
		u8 storm_id;

		for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found; storm_id++) {
			struct storm_defs *storm = &s_storm_defs[storm_id];

			/* Read FW version/image */
			if (dev_data->block_in_reset[storm->block_id])
				continue;

			/* Read FW info for the current Storm */
			ecore_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);

			/* Create FW version/image strings */
			if (OSAL_SNPRINTF(fw_ver_str, sizeof(fw_ver_str), "%d_%d_%d_%d", fw_info.ver.num.major, fw_info.ver.num.minor, fw_info.ver.num.rev, fw_info.ver.num.eng) < 0)
				DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid FW version string\n");
			switch (fw_info.ver.image_id) {
			case FW_IMG_KUKU: OSAL_STRCPY(fw_img_str, "kuku"); break;
			case FW_IMG_MAIN: OSAL_STRCPY(fw_img_str, "main"); break;
			case FW_IMG_L2B: OSAL_STRCPY(fw_img_str, "l2b"); break;
			default: OSAL_STRCPY(fw_img_str, "unknown"); break;
			}

			found = true;
		}
	}

	/* Dump FW version, image and timestamp */
	offset += ecore_dump_str_param(dump_buf + offset, dump, "fw-version", fw_ver_str);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "fw-image", fw_img_str);
	offset += ecore_dump_num_param(dump_buf + offset, dump, "fw-timestamp", fw_info.ver.timestamp);

	return offset;
}
2124
/* Reads the MFW version and writes it as a param to the specified buffer.
 * Returns the dumped size in dwords.
 *
 * The MFW version is only read on ASIC platforms (and when NO_FW_VER is
 * not set); otherwise the EMPTY_FW_VERSION_STR placeholder is dumped.
 */
static u32 ecore_dump_mfw_ver_param(struct ecore_hwfn *p_hwfn,
									struct ecore_ptt *p_ptt,
									u32 *dump_buf,
									bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;

	if (dump && dev_data->platform_id == PLATFORM_ASIC && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
		u32 public_data_addr, global_section_offsize_addr, global_section_offsize, global_section_addr, mfw_ver;

		/* Find MCP public data GRC address. Needs to be ORed with
		 * MCP_REG_SCRATCH due to a HW bug.
		 */
		public_data_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR) | MCP_REG_SCRATCH;

		/* Find MCP public global section offset */
		global_section_offsize_addr = public_data_addr + OFFSETOF(struct mcp_public_data, sections) + sizeof(offsize_t) * PUBLIC_GLOBAL;
		global_section_offsize = ecore_rd(p_hwfn, p_ptt, global_section_offsize_addr);
		global_section_addr = MCP_REG_SCRATCH + (global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;

		/* Read MFW version from MCP public global section */
		mfw_ver = ecore_rd(p_hwfn, p_ptt, global_section_addr + OFFSETOF(struct public_global, mfw_ver));

		/* Dump MFW version param - the four version fields are packed
		 * one per byte, most significant first.
		 */
		if (OSAL_SNPRINTF(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d", (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16), (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
			DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid MFW version string\n");
	}

	return ecore_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
}
2159
/* Writes a section header to the specified buffer.
 * A section header is encoded as a numeric param whose name is the section
 * name and whose value is the number of section params that follow.
 * Returns the dumped size in dwords.
 */
static u32 ecore_dump_section_hdr(u32 *dump_buf,
								  bool dump,
								  const char *name,
								  u32 num_params)
{
	return ecore_dump_num_param(dump_buf, dump, name, num_params);
}
2170
/* Writes the common global params to the specified buffer.
 * Returns the dumped size in dwords.
 *
 * num_specific_global_params is the number of additional params the caller
 * will append to this "global_params" section; it is added to the section's
 * declared param count so the parser knows how many params to expect.
 */
static u32 ecore_dump_common_global_params(struct ecore_hwfn *p_hwfn,
										   struct ecore_ptt *p_ptt,
										   u32 *dump_buf,
										   bool dump,
										   u8 num_specific_global_params)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 offset = 0;
	u8 num_params;

	/* Dump global params section header */
	num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "global_params", num_params);

	/* Store params: FW/MFW versions, tools version, chip, platform, PF */
	offset += ecore_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
	offset += ecore_dump_mfw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
	offset += ecore_dump_num_param(dump_buf + offset, dump, "tools-version", TOOLS_VERSION);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "chip", s_chip_defs[dev_data->chip_id].name);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "platform", s_platform_defs[dev_data->platform_id].name);
	offset += ecore_dump_num_param(dump_buf + offset, dump, "pci-func", p_hwfn->abs_pf_id);

	return offset;
}
2198
/* Writes the "last" section (including CRC) to the specified buffer at the
 * given offset. Returns the dumped size in dwords.
 *
 * The CRC32 covers everything in dump_buf up to and including the "last"
 * section header, and is stored in the dword that follows it. Note that the
 * CRC is only meaningful when dump is set (otherwise only size is counted).
 */
static u32 ecore_dump_last_section(u32 *dump_buf,
								   u32 offset,
								   bool dump)
{
	u32 start_offset = offset;

	/* Dump CRC section header */
	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "last", 0);

	/* Calculate CRC32 and add it to the dword after the "last" section */
	if (dump)
		*(dump_buf + offset) = ~OSAL_CRC32(0xffffffff, (u8 *)dump_buf, DWORDS_TO_BYTES(offset));

	offset++;

	return offset - start_offset;
}
2219
/* Update blocks reset state: refreshes dev_data->block_in_reset[] for all
 * blocks by sampling the chip's reset registers. A block is considered in
 * reset when it has a reset bit and that bit is CLEAR in its reset register.
 */
static void ecore_update_blocks_reset_state(struct ecore_hwfn *p_hwfn,
											struct ecore_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
	u32 i;

	/* Read reset registers (only those that exist on this chip) */
	for (i = 0; i < MAX_DBG_RESET_REGS; i++)
		if (s_reset_regs_defs[i].exists[dev_data->chip_id])
			reg_val[i] = ecore_rd(p_hwfn, p_ptt, s_reset_regs_defs[i].addr);

	/* Check if blocks are in reset */
	for (i = 0; i < MAX_BLOCK_ID; i++) {
		struct block_defs *block = s_block_defs[i];

		dev_data->block_in_reset[i] = block->has_reset_bit && !(reg_val[block->reset_reg] & (1 << block->reset_bit_offset));
	}
}
2240
2241/* Enable / disable the Debug block */
2242static void ecore_bus_enable_dbg_block(struct ecore_hwfn *p_hwfn,
2243									   struct ecore_ptt *p_ptt,
2244									   bool enable)
2245{
2246	ecore_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
2247}
2248
/* Resets the Debug block by pulsing its reset bit: the bit is cleared
 * (which asserts the reset - see ecore_update_blocks_reset_state) and the
 * original register value is then restored.
 */
static void ecore_bus_reset_dbg_block(struct ecore_hwfn *p_hwfn,
									  struct ecore_ptt *p_ptt)
{
	u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
	struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];

	dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
	old_reset_reg_val = ecore_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
	new_reset_reg_val = old_reset_reg_val & ~(1 << dbg_block->reset_bit_offset);

	ecore_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
	ecore_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
}
2263
/* Sets the Debug Bus framing mode */
static void ecore_bus_set_framing_mode(struct ecore_hwfn *p_hwfn,
									   struct ecore_ptt *p_ptt,
									   enum dbg_bus_frame_modes mode)
{
	ecore_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
}
2270
2271/* Enable / disable Debug Bus clients according to the specified mask
2272 * (1 = enable, 0 = disable).
2273 */
2274static void ecore_bus_enable_clients(struct ecore_hwfn *p_hwfn,
2275									 struct ecore_ptt *p_ptt,
2276									 u32 client_mask)
2277{
2278	ecore_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
2279}
2280
/* Enables the specified Storm for Debug Bus. Assumes a valid Storm ID.
 *
 * Configures the Storm's SEMI for fast or slow debug according to the
 * configured storm mode, then programs the CID and/or event-ID filters
 * (mask-based or range-based) and enables them if requested.
 */
static void ecore_bus_enable_storm(struct ecore_hwfn *p_hwfn,
								   struct ecore_ptt *p_ptt,
								   enum dbg_storms storm_id)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 base_addr, sem_filter_params = 0;
	struct dbg_bus_storm_data *storm_bus;
	struct storm_mode_defs *storm_mode;
	struct storm_defs *storm;

	storm = &s_storm_defs[storm_id];
	storm_bus = &dev_data->bus.storms[storm_id];
	storm_mode = &s_storm_mode_defs[storm_bus->mode];
	base_addr = storm->sem_fast_mem_addr;

	/* Config SEM */
	if (storm_mode->is_fast_dbg) {
		/* Enable fast debug */
		ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST);
		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_MODE, storm_mode->id_in_hw);
		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_ACTIVE, 1);

		/* Enable messages. Must be done after enabling
		 * SEM_FAST_REG_DEBUG_ACTIVE, otherwise messages will
		 * be dropped after the SEMI sync fifo is filled.
		 */
		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DBG_MODE23_SRC_DISABLE, SEM_FAST_MODE23_SRC_ENABLE_VAL);
		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DBG_MODE4_SRC_DISABLE, SEM_FAST_MODE4_SRC_ENABLE_VAL);
		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DBG_MODE6_SRC_DISABLE, SEM_FAST_MODE6_SRC_ENABLE_VAL);
	}
	else {
		/* Enable slow debug */
		ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST);
		ecore_wr(p_hwfn, p_ptt, storm->sem_slow_enable_addr, 1);
		ecore_wr(p_hwfn, p_ptt, storm->sem_slow_mode_addr, storm_mode->id_in_hw);
		ecore_wr(p_hwfn, p_ptt, storm->sem_slow_mode1_conf_addr, SEM_SLOW_MODE1_DATA_ENABLE);
	}

	/* Config SEM cid filter */
	if (storm_bus->cid_filter_en) {
		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_FILTER_CID, storm_bus->cid);
		sem_filter_params |= SEM_FILTER_CID_EN_MASK;
	}

	/* Config SEM eid filter - either an inclusive [min, max] range or a
	 * value+mask pair, depending on eid_range_not_mask.
	 */
	if (storm_bus->eid_filter_en) {
		const union dbg_bus_storm_eid_params *eid_filter = &storm_bus->eid_filter_params;

		if (storm_bus->eid_range_not_mask) {
			ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_RANGE_STRT, eid_filter->range.min);
			ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_RANGE_END, eid_filter->range.max);
			sem_filter_params |= SEM_FILTER_EID_RANGE_EN_MASK;
		}
		else {
			ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_FILTER_EVENT_ID, eid_filter->mask.val);
			ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_EVENT_ID_MASK, ~eid_filter->mask.mask);
			sem_filter_params |= SEM_FILTER_EID_MASK_EN_MASK;
		}
	}

	/* Config accumulaed SEM filter parameters (if any) */
	if (sem_filter_params)
		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_RECORD_FILTER_ENABLE, sem_filter_params);
}
2346
2347/* Disables Debug Bus block inputs */
2348static enum dbg_status ecore_bus_disable_inputs(struct ecore_hwfn *p_hwfn,
2349												struct ecore_ptt *p_ptt,
2350												bool empty_semi_fifos)
2351{
2352	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2353	u8 storm_id, num_fifos_to_empty = MAX_DBG_STORMS;
2354	bool is_fifo_empty[MAX_DBG_STORMS] = { false };
2355	u32 block_id;
2356
2357	/* Disable messages output in all Storms */
2358	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2359		struct storm_defs *storm = &s_storm_defs[storm_id];
2360
2361		if (dev_data->block_in_reset[storm->block_id])
2362			continue;
2363
2364		ecore_wr(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_DBG_MODE23_SRC_DISABLE, SEM_FAST_MODE23_SRC_DISABLE_VAL);
2365		ecore_wr(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_DBG_MODE4_SRC_DISABLE, SEM_FAST_MODE4_SRC_DISABLE_VAL);
2366		ecore_wr(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_DBG_MODE6_SRC_DISABLE, SEM_FAST_MODE6_SRC_DISABLE_VAL);
2367	}
2368
2369	/* Try to empty the SEMI sync fifo. Must be done after messages output
2370	 * were disabled in all Storms.
2371	 */
2372	while (num_fifos_to_empty) {
2373		for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2374			struct storm_defs *storm = &s_storm_defs[storm_id];
2375
2376			if (is_fifo_empty[storm_id])
2377				continue;
2378
2379			/* Check if sync fifo got empty */
2380			if (dev_data->block_in_reset[storm->block_id] || ecore_rd(p_hwfn, p_ptt, storm->sem_sync_dbg_empty_addr)) {
2381				is_fifo_empty[storm_id] = true;
2382				num_fifos_to_empty--;
2383			}
2384		}
2385
2386		/* Check if need to continue polling */
2387		if (num_fifos_to_empty) {
2388			u32 polling_ms = SEMI_SYNC_FIFO_POLLING_DELAY_MS * s_platform_defs[dev_data->platform_id].delay_factor;
2389			u32 polling_count = 0;
2390
2391			if (empty_semi_fifos && polling_count < SEMI_SYNC_FIFO_POLLING_COUNT) {
2392				OSAL_MSLEEP(polling_ms);
2393				polling_count++;
2394			}
2395			else {
2396				DP_NOTICE(p_hwfn, false, "Warning: failed to empty the SEMI sync FIFO. It means that the last few messages from the SEMI could not be sent to the DBG block. This can happen when the DBG block is blocked (e.g. due to a PCI problem).\n");
2397				break;
2398			}
2399		}
2400	}
2401
2402	/* Disable debug in all Storms */
2403	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2404		struct storm_defs *storm = &s_storm_defs[storm_id];
2405		u32 base_addr = storm->sem_fast_mem_addr;
2406
2407		if (dev_data->block_in_reset[storm->block_id])
2408			continue;
2409
2410		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_ACTIVE, 0);
2411		ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_RECORD_FILTER_ENABLE, DBG_BUS_FILTER_TYPE_OFF);
2412		ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_FRAME_MODE_4HW_0ST);
2413		ecore_wr(p_hwfn, p_ptt, storm->sem_slow_enable_addr, 0);
2414	}
2415
2416	/* Disable all clients */
2417	ecore_bus_enable_clients(p_hwfn, p_ptt, 0);
2418
2419	/* Disable all blocks */
2420	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2421		struct block_defs *block = s_block_defs[block_id];
2422
2423		if (block->dbg_client_id[dev_data->chip_id] != MAX_DBG_BUS_CLIENTS && !dev_data->block_in_reset[block_id])
2424			ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
2425	}
2426
2427	/* Disable timestamp */
2428	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_VALID_EN, 0);
2429
2430	/* Disable filters and triggers */
2431	ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ENABLE, DBG_BUS_FILTER_TYPE_OFF);
2432	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_ENABLE, 0);
2433
2434	return DBG_STATUS_OK;
2435}
2436
/* Sets a Debug Bus trigger/filter constraint.
 *
 * Writes one constraint (operation, data value/mask, frame bit/mask, dword
 * offset, range, cyclic and must bits) into either the filter constraint
 * registers or the trigger-state constraint registers, selected by
 * is_filter. For triggers, the register offset is additionally advanced by
 * the current trigger state (dev_data->bus.next_trigger_state - 1).
 */
static void ecore_bus_set_constraint(struct ecore_hwfn *p_hwfn,
									 struct ecore_ptt *p_ptt,
									 bool is_filter,
									 u8 constraint_id,
									 u8 hw_op_val,
									 u32 data_val,
									 u32 data_mask,
									 u8 frame_bit,
									 u8 frame_mask,
									 u16 dword_offset,
									 u16 range,
									 u8 cyclic_bit,
									 u8 must_bit)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 reg_offset = constraint_id * BYTES_IN_DWORD;
	u8 curr_trigger_state;

	/* For trigger only - set register offset according to state */
	if (!is_filter) {
		curr_trigger_state = dev_data->bus.next_trigger_state - 1;
		reg_offset += curr_trigger_state * TRIGGER_SETS_PER_STATE * MAX_CONSTRAINTS * BYTES_IN_DWORD;
	}

	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_OPRTN_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_OPRTN_0) + reg_offset, hw_op_val);
	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_DATA_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_0) + reg_offset, data_val);
	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_DATA_MASK_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_MASK_0) + reg_offset, data_mask);
	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_FRAME_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_0) + reg_offset, frame_bit);
	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_FRAME_MASK_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_MASK_0) + reg_offset, frame_mask);
	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_OFFSET_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_OFFSET_0) + reg_offset, dword_offset);
	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_RANGE_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_RANGE_0) + reg_offset, range);
	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_CYCLIC_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_CYCLIC_0) + reg_offset, cyclic_bit);
	ecore_wr(p_hwfn, p_ptt, (is_filter ? DBG_REG_FILTER_CNSTR_MUST_0 : DBG_REG_TRIGGER_STATE_SET_CNSTR_MUST_0) + reg_offset, must_bit);
}
2472
/* Reads the specified DBG Bus internal buffer range and copy it to the
 * specified buffer. Returns the dumped size in dwords.
 *
 * start_line/end_line are inclusive line indexes. When dump is false, only
 * the required size is returned. The dwords within each line are written to
 * dump_buf in reverse order (index LINE_SIZE-1-i).
 */
static u32 ecore_bus_dump_int_buf_range(struct ecore_hwfn *p_hwfn,
										struct ecore_ptt *p_ptt,
										u32 *dump_buf,
										bool dump,
										u32 start_line,
										u32 end_line)
{
	u32 line, reg_addr, i, offset = 0;

	if (!dump)
		return (end_line - start_line + 1) * INT_BUF_LINE_SIZE_IN_DWORDS;

	for (line = start_line, reg_addr = DBG_REG_INTR_BUFFER + DWORDS_TO_BYTES(start_line * INT_BUF_LINE_SIZE_IN_DWORDS);
		line <= end_line;
		line++, offset += INT_BUF_LINE_SIZE_IN_DWORDS)
		for (i = 0; i < INT_BUF_LINE_SIZE_IN_DWORDS; i++, reg_addr += BYTES_IN_DWORD)
			dump_buf[offset + INT_BUF_LINE_SIZE_IN_DWORDS - 1 - i] = ecore_rd(p_hwfn, p_ptt, reg_addr);

	return offset;
}
2496
/* Reads the DBG Bus internal buffer and copy its contents to a buffer.
 * Returns the dumped size in dwords.
 *
 * Handles buffer wrap-around so that the output is in recording order:
 * if the buffer wrapped, the oldest data (after the write pointer) is
 * dumped first, followed by the data from the buffer start up to the
 * write pointer.
 */
static u32 ecore_bus_dump_int_buf(struct ecore_hwfn *p_hwfn,
								  struct ecore_ptt *p_ptt,
								  u32 *dump_buf,
								  bool dump)
{
	u32 last_written_line, offset = 0;

	last_written_line = ecore_rd(p_hwfn, p_ptt, DBG_REG_INTR_BUFFER_WR_PTR);

	if (ecore_rd(p_hwfn, p_ptt, DBG_REG_WRAP_ON_INT_BUFFER)) {
		/* Internal buffer was wrapped: first dump from write pointer
		 * to buffer end, then dump from buffer start to write pointer.
		 */
		if (last_written_line < INT_BUF_NUM_OF_LINES - 1)
			offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, last_written_line + 1, INT_BUF_NUM_OF_LINES - 1);
		offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, 0, last_written_line);
	}
	else if (last_written_line) {
		/* Internal buffer wasn't wrapped: dump from buffer start until
		 *  write pointer.
		 */
		if (!ecore_rd(p_hwfn, p_ptt, DBG_REG_INTR_BUFFER_RD_PTR))
			offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, 0, last_written_line);
		else
			DP_NOTICE(p_hwfn, true, "Unexpected Debug Bus error: internal buffer read pointer is not zero\n");
	}

	return offset;
}
2529
/* Reads the specified DBG Bus PCI buffer range and copy it to the specified
 * buffer. Returns the dumped size in dwords.
 *
 * start_line/end_line are inclusive line indexes. When dump is false, only
 * the required size is returned. Within each copied line, dwords are
 * reordered according to s_pci_buf_line_ind (adjacent pairs swapped).
 */
static u32 ecore_bus_dump_pci_buf_range(struct ecore_hwfn *p_hwfn,
										u32 *dump_buf,
										bool dump,
										u32 start_line,
										u32 end_line)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 offset = 0;

	/* Extract PCI buffer pointer from virtual address.
	 * NOTE(review): this reinterprets the lo/hi 32-bit halves of
	 * virt_addr as one u64 - assumes their struct layout matches the
	 * host's 64-bit endianness; confirm on big-endian targets.
	 */
	void *virt_addr_lo = &dev_data->bus.pci_buf.virt_addr.lo;
	u32 *pci_buf_start = (u32 *)(osal_uintptr_t)*((u64 *)virt_addr_lo);
	u32 *pci_buf, line, i;

	if (!dump)
		return (end_line - start_line + 1) * PCI_BUF_LINE_SIZE_IN_DWORDS;

	for (line = start_line, pci_buf = pci_buf_start + start_line * PCI_BUF_LINE_SIZE_IN_DWORDS;
	line <= end_line;
		line++, offset += PCI_BUF_LINE_SIZE_IN_DWORDS)
		for (i = 0; i < PCI_BUF_LINE_SIZE_IN_DWORDS; i++, pci_buf++)
			dump_buf[offset + s_pci_buf_line_ind[i]] = *pci_buf;

	return offset;
}
2558
/* Copies the DBG Bus PCI buffer to the specified buffer.
 * Returns the dumped size in dwords.
 *
 * Converts the HW write pointer (a physical address) to a line offset
 * within the buffer, then dumps in recording order, handling wrap-around.
 * Returns 0 if the write pointer is misaligned or out of the buffer's
 * bounds.
 */
static u32 ecore_bus_dump_pci_buf(struct ecore_hwfn *p_hwfn,
								  struct ecore_ptt *p_ptt,
								  u32 *dump_buf,
								  bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 next_wr_byte_offset, next_wr_line_offset;
	struct dbg_bus_mem_addr next_wr_phys_addr;
	u32 pci_buf_size_in_lines, offset = 0;

	pci_buf_size_in_lines = dev_data->bus.pci_buf.size / PCI_BUF_LINE_SIZE_IN_BYTES;

	/* Extract write pointer (physical address) */
	next_wr_phys_addr.lo = ecore_rd(p_hwfn, p_ptt, DBG_REG_EXT_BUFFER_WR_PTR);
	next_wr_phys_addr.hi = ecore_rd(p_hwfn, p_ptt, DBG_REG_EXT_BUFFER_WR_PTR + BYTES_IN_DWORD);

	/* Convert write pointer to offset; bail out on a misaligned or
	 * out-of-range pointer.
	 */
	next_wr_byte_offset = ecore_phys_addr_diff(&next_wr_phys_addr, &dev_data->bus.pci_buf.phys_addr);
	if ((next_wr_byte_offset % PCI_BUF_LINE_SIZE_IN_BYTES) || next_wr_byte_offset > dev_data->bus.pci_buf.size)
		return 0;
	next_wr_line_offset = next_wr_byte_offset / PCI_BUF_LINE_SIZE_IN_BYTES;

	/* PCI buffer wrapped: first dump from write pointer to buffer end. */
	if (ecore_rd(p_hwfn, p_ptt, DBG_REG_WRAP_ON_EXT_BUFFER))
		offset += ecore_bus_dump_pci_buf_range(p_hwfn, dump_buf + offset, dump, next_wr_line_offset, pci_buf_size_in_lines - 1);

	/* Dump from buffer start until write pointer */
	if (next_wr_line_offset)
		offset += ecore_bus_dump_pci_buf_range(p_hwfn, dump_buf + offset, dump, 0, next_wr_line_offset - 1);

	return offset;
}
2594
2595/* Copies the DBG Bus recorded data to the specified buffer.
2596 * Returns the dumped size in dwords.
2597 */
2598static u32 ecore_bus_dump_data(struct ecore_hwfn *p_hwfn,
2599							   struct ecore_ptt *p_ptt,
2600							   u32 *dump_buf,
2601							   bool dump)
2602{
2603	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2604
2605	switch (dev_data->bus.target) {
2606	case DBG_BUS_TARGET_ID_INT_BUF:
2607		return ecore_bus_dump_int_buf(p_hwfn, p_ptt, dump_buf, dump);
2608	case DBG_BUS_TARGET_ID_PCI:
2609		return ecore_bus_dump_pci_buf(p_hwfn, p_ptt, dump_buf, dump);
2610	default:
2611		break;
2612	}
2613
2614	return 0;
2615}
2616
/* Frees the Debug Bus PCI buffer (no-op when no buffer was allocated,
 * i.e. pci_buf.size is 0). Resets pci_buf.size to 0 after freeing.
 */
static void ecore_bus_free_pci_buf(struct ecore_hwfn *p_hwfn)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	dma_addr_t pci_buf_phys_addr;
	void *virt_addr_lo;
	u32 *pci_buf;

	/* Extract PCI buffer pointer from virtual address.
	 * NOTE(review): reinterprets the lo/hi 32-bit halves of virt_addr as
	 * one u64 - assumes the struct layout matches the host's 64-bit
	 * endianness; confirm on big-endian targets.
	 */
	virt_addr_lo = &dev_data->bus.pci_buf.virt_addr.lo;
	pci_buf = (u32 *)(osal_uintptr_t)*((u64 *)virt_addr_lo);

	if (!dev_data->bus.pci_buf.size)
		return;

	OSAL_MEMCPY(&pci_buf_phys_addr, &dev_data->bus.pci_buf.phys_addr, sizeof(pci_buf_phys_addr));

	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, pci_buf, pci_buf_phys_addr, dev_data->bus.pci_buf.size);

	dev_data->bus.pci_buf.size = 0;
}
2638
2639/* Dumps the list of DBG Bus inputs (blocks/Storms) to the specified buffer.
2640 * Returns the dumped size in dwords.
2641 */
2642static u32 ecore_bus_dump_inputs(struct ecore_hwfn *p_hwfn,
2643								 u32 *dump_buf,
2644								 bool dump)
2645{
2646	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2647	char storm_name[8] = "?storm";
2648	u32 block_id, offset = 0;
2649	u8 storm_id;
2650
2651	/* Store storms */
2652	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2653		struct dbg_bus_storm_data *storm_bus = &dev_data->bus.storms[storm_id];
2654		struct storm_defs *storm = &s_storm_defs[storm_id];
2655
2656		if (!dev_data->bus.storms[storm_id].enabled)
2657			continue;
2658
2659		/* Dump section header */
2660		storm_name[0] = storm->letter;
2661		offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_input", 3);
2662		offset += ecore_dump_str_param(dump_buf + offset, dump, "name", storm_name);
2663		offset += ecore_dump_num_param(dump_buf + offset, dump, "id", storm_bus->hw_id);
2664		offset += ecore_dump_str_param(dump_buf + offset, dump, "mode", s_storm_mode_defs[storm_bus->mode].name);
2665	}
2666
2667	/* Store blocks */
2668	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2669		struct dbg_bus_block_data *block_bus = &dev_data->bus.blocks[block_id];
2670		struct block_defs *block = s_block_defs[block_id];
2671
2672		if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
2673			continue;
2674
2675		/* Dump section header */
2676		offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_input", 4);
2677		offset += ecore_dump_str_param(dump_buf + offset, dump, "name", block->name);
2678		offset += ecore_dump_num_param(dump_buf + offset, dump, "line", block_bus->line_num);
2679		offset += ecore_dump_num_param(dump_buf + offset, dump, "en", GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK));
2680		offset += ecore_dump_num_param(dump_buf + offset, dump, "shr", GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));
2681	}
2682
2683	return offset;
2684}
2685
2686/* Dumps the Debug Bus header (params, inputs, data header) to the specified
2687 * buffer. Returns the dumped size in dwords.
2688 */
2689static u32 ecore_bus_dump_hdr(struct ecore_hwfn *p_hwfn,
2690							  struct ecore_ptt *p_ptt,
2691							  u32 *dump_buf,
2692							  bool dump)
2693{
2694	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2695	char hw_id_mask_str[16];
2696	u32 offset = 0;
2697
2698	if (OSAL_SNPRINTF(hw_id_mask_str, sizeof(hw_id_mask_str), "0x%x", dev_data->bus.hw_id_mask) < 0)
2699		DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid HW ID mask\n");
2700
2701	/* Dump global params */
2702	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 5);
2703	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "debug-bus");
2704	offset += ecore_dump_str_param(dump_buf + offset, dump, "wrap-mode", dev_data->bus.one_shot_en ? "one-shot" : "wrap-around");
2705	offset += ecore_dump_num_param(dump_buf + offset, dump, "hw-dwords", dev_data->bus.hw_dwords);
2706	offset += ecore_dump_str_param(dump_buf + offset, dump, "hw-id-mask", hw_id_mask_str);
2707	offset += ecore_dump_str_param(dump_buf + offset, dump, "target", s_dbg_target_names[dev_data->bus.target]);
2708
2709	offset += ecore_bus_dump_inputs(p_hwfn, dump_buf + offset, dump);
2710
2711	if (dev_data->bus.target != DBG_BUS_TARGET_ID_NIG) {
2712		u32 recorded_dwords = 0;
2713
2714		if (dump)
2715			recorded_dwords = ecore_bus_dump_data(p_hwfn, p_ptt, OSAL_NULL, false);
2716
2717		offset += ecore_dump_section_hdr(dump_buf + offset, dump, "bus_data", 1);
2718		offset += ecore_dump_num_param(dump_buf + offset, dump, "size", recorded_dwords);
2719	}
2720
2721	return offset;
2722}
2723
2724static bool ecore_is_mode_match(struct ecore_hwfn *p_hwfn,
2725								u16 *modes_buf_offset)
2726{
2727	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2728	bool arg1, arg2;
2729	u8 tree_val;
2730
2731	/* Get next element from modes tree buffer */
2732	tree_val = ((u8 *)s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr)[(*modes_buf_offset)++];
2733
2734	switch (tree_val) {
2735	case INIT_MODE_OP_NOT:
2736		return !ecore_is_mode_match(p_hwfn, modes_buf_offset);
2737	case INIT_MODE_OP_OR:
2738	case INIT_MODE_OP_AND:
2739		arg1 = ecore_is_mode_match(p_hwfn, modes_buf_offset);
2740		arg2 = ecore_is_mode_match(p_hwfn, modes_buf_offset);
2741		return (tree_val == INIT_MODE_OP_OR) ? (arg1 || arg2) : (arg1 && arg2);
2742	default: return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
2743	}
2744}
2745
2746/* Returns true if the specified entity (indicated by GRC param) should be
2747 * included in the dump, false otherwise.
2748 */
2749static bool ecore_grc_is_included(struct ecore_hwfn *p_hwfn,
2750								  enum dbg_grc_params grc_param)
2751{
2752	return ecore_grc_get_param(p_hwfn, grc_param) > 0;
2753}
2754
2755/* Returns true of the specified Storm should be included in the dump, false
2756 * otherwise.
2757 */
2758static bool ecore_grc_is_storm_included(struct ecore_hwfn *p_hwfn,
2759										enum dbg_storms storm)
2760{
2761	return ecore_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
2762}
2763
2764/* Returns true if the specified memory should be included in the dump, false
2765 * otherwise.
2766 */
2767static bool ecore_grc_is_mem_included(struct ecore_hwfn *p_hwfn,
2768									  enum block_id block_id,
2769									  u8 mem_group_id)
2770{
2771	struct block_defs *block = s_block_defs[block_id];
2772	u8 i;
2773
2774	/* Check Storm match */
2775	if (block->associated_to_storm &&
2776		!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)block->storm_id))
2777		return false;
2778
2779	for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
2780		struct big_ram_defs *big_ram = &s_big_ram_defs[i];
2781
2782		if (mem_group_id == big_ram->mem_group_id || mem_group_id == big_ram->ram_mem_group_id)
2783			return ecore_grc_is_included(p_hwfn, big_ram->grc_param);
2784	}
2785
2786	switch (mem_group_id) {
2787	case MEM_GROUP_PXP_ILT:
2788	case MEM_GROUP_PXP_MEM:
2789		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
2790	case MEM_GROUP_RAM:
2791		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
2792	case MEM_GROUP_PBUF:
2793		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
2794	case MEM_GROUP_CAU_MEM:
2795	case MEM_GROUP_CAU_SB:
2796	case MEM_GROUP_CAU_PI:
2797		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
2798	case MEM_GROUP_QM_MEM:
2799		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
2800	case MEM_GROUP_CFC_MEM:
2801	case MEM_GROUP_CONN_CFC_MEM:
2802	case MEM_GROUP_TASK_CFC_MEM:
2803		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) || ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
2804	case MEM_GROUP_IGU_MEM:
2805	case MEM_GROUP_IGU_MSIX:
2806		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
2807	case MEM_GROUP_MULD_MEM:
2808		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
2809	case MEM_GROUP_PRS_MEM:
2810		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
2811	case MEM_GROUP_DMAE_MEM:
2812		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
2813	case MEM_GROUP_TM_MEM:
2814		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
2815	case MEM_GROUP_SDM_MEM:
2816		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
2817	case MEM_GROUP_TDIF_CTX:
2818	case MEM_GROUP_RDIF_CTX:
2819		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
2820	case MEM_GROUP_CM_MEM:
2821		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
2822	case MEM_GROUP_IOR:
2823		return ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
2824	default:
2825		return true;
2826	}
2827}
2828
2829/* Stalls all Storms */
2830static void ecore_grc_stall_storms(struct ecore_hwfn *p_hwfn,
2831								   struct ecore_ptt *p_ptt,
2832								   bool stall)
2833{
2834	u32 reg_addr;
2835	u8 storm_id;
2836
2837	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2838		if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))
2839			continue;
2840
2841		reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr + SEM_FAST_REG_STALL_0_BB_K2;
2842		ecore_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
2843	}
2844
2845	OSAL_MSLEEP(STALL_DELAY_MS);
2846}
2847
2848/* Takes all blocks out of reset */
2849static void ecore_grc_unreset_blocks(struct ecore_hwfn *p_hwfn,
2850									 struct ecore_ptt *p_ptt)
2851{
2852	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2853	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2854	u32 block_id, i;
2855
2856	/* Fill reset regs values */
2857	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2858		struct block_defs *block = s_block_defs[block_id];
2859
2860		if (block->exists[dev_data->chip_id] && block->has_reset_bit && block->unreset)
2861			reg_val[block->reset_reg] |= (1 << block->reset_bit_offset);
2862	}
2863
2864	/* Write reset registers */
2865	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2866		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2867			continue;
2868
2869		reg_val[i] |= s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
2870
2871		if (reg_val[i])
2872			ecore_wr(p_hwfn, p_ptt, s_reset_regs_defs[i].addr + RESET_REG_UNRESET_OFFSET, reg_val[i]);
2873	}
2874}
2875
2876/* Returns the attention block data of the specified block */
2877static const struct dbg_attn_block_type_data* ecore_get_block_attn_data(enum block_id block_id,
2878																		enum dbg_attn_type attn_type)
2879{
2880	const struct dbg_attn_block *base_attn_block_arr = (const struct dbg_attn_block *)s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
2881
2882	return &base_attn_block_arr[block_id].per_type_data[attn_type];
2883}
2884
2885/* Returns the attention registers of the specified block */
2886static const struct dbg_attn_reg* ecore_get_block_attn_regs(enum block_id block_id,
2887															enum dbg_attn_type attn_type,
2888															u8 *num_attn_regs)
2889{
2890	const struct dbg_attn_block_type_data *block_type_data = ecore_get_block_attn_data(block_id, attn_type);
2891
2892	*num_attn_regs = block_type_data->num_regs;
2893
2894	return &((const struct dbg_attn_reg *)s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->regs_offset];
2895}
2896
2897/* For each block, clear the status of all parities */
2898static void ecore_grc_clear_all_prty(struct ecore_hwfn *p_hwfn,
2899									 struct ecore_ptt *p_ptt)
2900{
2901	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2902	const struct dbg_attn_reg *attn_reg_arr;
2903	u8 reg_idx, num_attn_regs;
2904	u32 block_id;
2905
2906	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2907		if (dev_data->block_in_reset[block_id])
2908			continue;
2909
2910		attn_reg_arr = ecore_get_block_attn_regs((enum block_id)block_id, ATTN_TYPE_PARITY, &num_attn_regs);
2911
2912		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2913			const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
2914			u16 modes_buf_offset;
2915			bool eval_mode;
2916
2917			/* Check mode */
2918			eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
2919			modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
2920
2921			/* If Mode match: clear parity status */
2922			if (!eval_mode || ecore_is_mode_match(p_hwfn, &modes_buf_offset))
2923				ecore_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(reg_data->sts_clr_address));
2924		}
2925	}
2926}
2927
2928/* Dumps GRC registers section header. Returns the dumped size in dwords.
2929 * the following parameters are dumped:
2930 * - count:	 no. of dumped entries
2931 * - split:	 split type
2932 * - id:	 split ID (dumped only if split_id >= 0)
2933 * - param_name: user parameter value (dumped only if param_name != OSAL_NULL
2934 *		 and param_val != OSAL_NULL).
2935 */
2936static u32 ecore_grc_dump_regs_hdr(u32 *dump_buf,
2937								   bool dump,
2938								   u32 num_reg_entries,
2939								   const char *split_type,
2940								   int split_id,
2941								   const char *param_name,
2942								   const char *param_val)
2943{
2944	u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
2945	u32 offset = 0;
2946
2947	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "grc_regs", num_params);
2948	offset += ecore_dump_num_param(dump_buf + offset, dump, "count", num_reg_entries);
2949	offset += ecore_dump_str_param(dump_buf + offset, dump, "split", split_type);
2950	if (split_id >= 0)
2951		offset += ecore_dump_num_param(dump_buf + offset, dump, "id", split_id);
2952	if (param_name && param_val)
2953		offset += ecore_dump_str_param(dump_buf + offset, dump, param_name, param_val);
2954
2955	return offset;
2956}
2957
2958/* Reads the specified registers into the specified buffer.
2959 * The addr and len arguments are specified in dwords.
2960 */
2961void ecore_read_regs(struct ecore_hwfn *p_hwfn,
2962					 struct ecore_ptt *p_ptt,
2963					 u32 *buf,
2964					 u32 addr,
2965					 u32 len)
2966{
2967	u32 i;
2968
2969	for (i = 0; i < len; i++)
2970		buf[i] = ecore_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
2971}
2972
2973/* Dumps the GRC registers in the specified address range.
2974 * Returns the dumped size in dwords.
2975 * The addr and len arguments are specified in dwords.
2976 */
2977static u32 ecore_grc_dump_addr_range(struct ecore_hwfn *p_hwfn,
2978									 struct ecore_ptt *p_ptt,
2979									 u32 *dump_buf,
2980									 bool dump,
2981									 u32 addr,
2982									 u32 len,
2983									 bool wide_bus)
2984{
2985	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2986
2987	if (!dump)
2988		return len;
2989
2990	/* Print log if needed */
2991	dev_data->num_regs_read += len;
2992	if (dev_data->num_regs_read >= s_platform_defs[dev_data->platform_id].log_thresh) {
2993		DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Dumping %d registers...\n", dev_data->num_regs_read);
2994		dev_data->num_regs_read = 0;
2995	}
2996
2997	/* Try reading using DMAE */
2998	if (dev_data->use_dmae && (len >= s_platform_defs[dev_data->platform_id].dmae_thresh || (PROTECT_WIDE_BUS && wide_bus))) {
2999		if (!ecore_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr), (u64)(osal_uintptr_t)(dump_buf), len, OSAL_NULL))
3000			return len;
3001		dev_data->use_dmae = 0;
3002		DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Failed reading from chip using DMAE, using GRC instead\n");
3003	}
3004
3005	/* Read registers */
3006	ecore_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
3007
3008	return len;
3009}
3010
3011/* Dumps GRC registers sequence header. Returns the dumped size in dwords.
3012 * The addr and len arguments are specified in dwords.
3013 */
3014static u32 ecore_grc_dump_reg_entry_hdr(u32 *dump_buf,
3015										bool dump,
3016										u32 addr,
3017										u32 len)
3018{
3019	if (dump)
3020		*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
3021
3022	return 1;
3023}
3024
3025/* Dumps GRC registers sequence. Returns the dumped size in dwords.
3026 * The addr and len arguments are specified in dwords.
3027 */
3028static u32 ecore_grc_dump_reg_entry(struct ecore_hwfn *p_hwfn,
3029									struct ecore_ptt *p_ptt,
3030									u32 *dump_buf,
3031									bool dump,
3032									u32 addr,
3033									u32 len,
3034									bool wide_bus)
3035{
3036	u32 offset = 0;
3037
3038	offset += ecore_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
3039	offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, len, wide_bus);
3040
3041	return offset;
3042}
3043
3044/* Dumps GRC registers sequence with skip cycle.
3045 * Returns the dumped size in dwords.
3046 * - addr:	start GRC address in dwords
3047 * - total_len:	total no. of dwords to dump
3048 * - read_len:	no. consecutive dwords to read
3049 * - skip_len:	no. of dwords to skip (and fill with zeros)
3050 */
3051static u32 ecore_grc_dump_reg_entry_skip(struct ecore_hwfn *p_hwfn,
3052										 struct ecore_ptt *p_ptt,
3053										 u32 *dump_buf,
3054										 bool dump,
3055										 u32 addr,
3056										 u32 total_len,
3057										 u32 read_len,
3058										 u32 skip_len)
3059{
3060	u32 offset = 0, reg_offset = 0;
3061
3062	offset += ecore_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
3063
3064	if (!dump)
3065		return offset + total_len;
3066
3067	while (reg_offset < total_len) {
3068		u32 curr_len = OSAL_MIN_T(u32, read_len, total_len - reg_offset);
3069
3070		offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, curr_len, false);
3071		reg_offset += curr_len;
3072		addr += curr_len;
3073
3074		if (reg_offset < total_len) {
3075			curr_len = OSAL_MIN_T(u32, skip_len, total_len - skip_len);
3076			OSAL_MEMSET(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
3077			offset += curr_len;
3078			reg_offset += curr_len;
3079			addr += curr_len;
3080		}
3081	}
3082
3083	return offset;
3084}
3085
/* Dumps GRC registers entries. Returns the dumped size in dwords.
 * The input array is a sequence of conditional headers, each followed by
 * data_size dwords of register entries; entries are dumped only when the
 * header's mode condition holds and its block is enabled.
 */
static u32 ecore_grc_dump_regs_entries(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct dbg_array input_regs_arr,
				       u32 *dump_buf,
				       bool dump,
				       bool block_enable[MAX_BLOCK_ID],
				       u32 *num_dumped_reg_entries)
{
	u32 i, offset = 0, input_offset = 0;
	bool mode_match = true;

	*num_dumped_reg_entries = 0;

	while (input_offset < input_regs_arr.size_in_dwords) {
		const struct dbg_dump_cond_hdr *cond_hdr = (const struct dbg_dump_cond_hdr *)&input_regs_arr.ptr[input_offset++];
		u16 modes_buf_offset;
		bool eval_mode;

		/* Check mode/block. NOTE: when the header has no eval bit,
		 * mode_match keeps its value from the previous header
		 * (initially true).
		 */
		eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
		if (eval_mode) {
			modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
			mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
		}

		/* Skip this header's entire entry list when excluded */
		if (!mode_match || !block_enable[cond_hdr->block_id]) {
			input_offset += cond_hdr->data_size;
			continue;
		}

		/* Each entry is one input dword encoding address/length/
		 * wide-bus; dump it as a register sequence.
		 */
		for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
			const struct dbg_dump_reg *reg = (const struct dbg_dump_reg *)&input_regs_arr.ptr[input_offset];

			offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump,
				GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS),
				GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH),
				GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS));
			(*num_dumped_reg_entries)++;
		}
	}

	return offset;
}
3130
3131/* Dumps GRC registers entries. Returns the dumped size in dwords. */
3132static u32 ecore_grc_dump_split_data(struct ecore_hwfn *p_hwfn,
3133									 struct ecore_ptt *p_ptt,
3134									 struct dbg_array input_regs_arr,
3135									 u32 *dump_buf,
3136									 bool dump,
3137									 bool block_enable[MAX_BLOCK_ID],
3138									 const char *split_type_name,
3139									 u32 split_id,
3140									 const char *param_name,
3141									 const char *param_val)
3142{
3143	u32 num_dumped_reg_entries, offset;
3144
3145	/* Calculate register dump header size (and skip it for now) */
3146	offset = ecore_grc_dump_regs_hdr(dump_buf, false, 0, split_type_name, split_id, param_name, param_val);
3147
3148	/* Dump registers */
3149	offset += ecore_grc_dump_regs_entries(p_hwfn, p_ptt, input_regs_arr, dump_buf + offset, dump, block_enable, &num_dumped_reg_entries);
3150
3151	/* Write register dump header */
3152	if (dump && num_dumped_reg_entries > 0)
3153		ecore_grc_dump_regs_hdr(dump_buf, dump, num_dumped_reg_entries, split_type_name, split_id, param_name, param_val);
3154
3155	return num_dumped_reg_entries > 0 ? offset : 0;
3156}
3157
/* Dumps registers according to the input registers array. Returns the dumped
 * size in dwords. The input array is a sequence of split headers; each split
 * is dumped once per instance (engine/port/PF/VF), using HW pretend to
 * address the instance before reading.
 */
static u32 ecore_grc_dump_registers(struct ecore_hwfn *p_hwfn,
									struct ecore_ptt *p_ptt,
									u32 *dump_buf,
									bool dump,
									bool block_enable[MAX_BLOCK_ID],
									const char *param_name,
									const char *param_val)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct chip_platform_defs *chip_platform;
	u32 offset = 0, input_offset = 0;
	u8 port_id, pf_id, vf_id;

	/* Per-chip/platform instance counts (num_ports/num_pfs/num_vfs) */
	chip_platform = &s_chip_defs[dev_data->chip_id].per_platform[dev_data->platform_id];

	while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
		const struct dbg_dump_split_hdr *split_hdr;
		struct dbg_array curr_input_regs_arr;
		u32 split_data_size;
		u8 split_type_id;

		/* Parse the split header and view its payload as a
		 * sub-array of register entries.
		 */
		split_hdr = (const struct dbg_dump_split_hdr *)&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
		split_type_id = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
		split_data_size = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_DATA_SIZE);
		curr_input_regs_arr.ptr = &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
		curr_input_regs_arr.size_in_dwords = split_data_size;

		switch(split_type_id) {
		case SPLIT_TYPE_NONE:
			/* Engine-wide registers; split ID -1 means "no id" */
			offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "eng", (u32)(-1), param_name, param_val);
			break;

		case SPLIT_TYPE_PORT:
			/* Pretend to each port before dumping its registers */
			for (port_id = 0; port_id < chip_platform->num_ports; port_id++) {
				if (dump)
					ecore_port_pretend(p_hwfn, p_ptt, port_id);
				offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "port", port_id, param_name, param_val);
			}
			break;

		case SPLIT_TYPE_PF:
		case SPLIT_TYPE_PORT_PF:
			/* Pretend to each PF before dumping its registers */
			for (pf_id = 0; pf_id < chip_platform->num_pfs; pf_id++) {
				if (dump)
					ecore_fid_pretend(p_hwfn, p_ptt, (pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT));
				offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "pf", pf_id, param_name, param_val);
			}
			break;

		case SPLIT_TYPE_VF:
			/* Pretend to each VF (VFVALID set) before dumping */
			for (vf_id = 0; vf_id < chip_platform->num_vfs; vf_id++) {
				if (dump)
					ecore_fid_pretend(p_hwfn, p_ptt, (1 << PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) | (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT));
				offset += ecore_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, "vf", vf_id, param_name, param_val);
			}
			break;

		default:
			break;
		}

		input_offset += split_data_size;
	}

	/* Pretend to original PF, undoing any pretend left by the loops */
	if (dump)
		ecore_fid_pretend(p_hwfn, p_ptt, (p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT));

	return offset;
}
3231
3232/* Dump reset registers. Returns the dumped size in dwords. */
3233static u32 ecore_grc_dump_reset_regs(struct ecore_hwfn *p_hwfn,
3234	struct ecore_ptt *p_ptt,
3235	u32 *dump_buf,
3236	bool dump)
3237{
3238	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3239	u32 i, offset = 0, num_regs = 0;
3240
3241	/* Calculate header size */
3242	offset += ecore_grc_dump_regs_hdr(dump_buf, false, 0, "eng", -1, OSAL_NULL, OSAL_NULL);
3243
3244	/* Write reset registers */
3245	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
3246		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
3247			continue;
3248
3249		offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(s_reset_regs_defs[i].addr), 1, false);
3250		num_regs++;
3251	}
3252
3253	/* Write header */
3254	if (dump)
3255		ecore_grc_dump_regs_hdr(dump_buf, true, num_regs, "eng", -1, OSAL_NULL, OSAL_NULL);
3256
3257	return offset;
3258}
3259
/* Dump registers that are modified during GRC Dump and therefore must be
 * dumped first. Returns the dumped size in dwords.
 * Covers the per-block parity mask/status registers and the per-Storm
 * stall-status registers.
 */
static u32 ecore_grc_dump_modified_regs(struct ecore_hwfn *p_hwfn,
										struct ecore_ptt *p_ptt,
										u32 *dump_buf,
										bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 block_id, offset = 0, num_reg_entries = 0;
	const struct dbg_attn_reg *attn_reg_arr;
	u8 storm_id, reg_idx, num_attn_regs;

	/* Calculate header size (header is back-patched at the end, once the
	 * entry count is known).
	 */
	offset += ecore_grc_dump_regs_hdr(dump_buf, false, 0, "eng", -1, OSAL_NULL, OSAL_NULL);

	/* Write parity registers */
	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
		/* Blocks in reset are skipped only when actually reading
		 * (dump); a size-only pass includes them so the computed
		 * size is an upper bound.
		 */
		if (dev_data->block_in_reset[block_id] && dump)
			continue;

		attn_reg_arr = ecore_get_block_attn_regs((enum block_id)block_id, ATTN_TYPE_PARITY, &num_attn_regs);

		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
			const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
			u16 modes_buf_offset;
			bool eval_mode;

			/* Check mode */
			eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
			modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
			if (eval_mode && !ecore_is_mode_match(p_hwfn, &modes_buf_offset))
				continue;

			/* Mode match: read & dump the mask and status
			 * registers (one dword each).
			 */
			offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, reg_data->mask_address, 1, false);
			offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, GET_FIELD(reg_data->data, DBG_ATTN_REG_STS_ADDRESS), 1, false);
			num_reg_entries += 2;
		}
	}

	/* Write Storm stall status registers */
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		struct storm_defs *storm = &s_storm_defs[storm_id];

		/* Same reset-skip policy as above */
		if (dev_data->block_in_reset[storm->block_id] && dump)
			continue;

		offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump,
			BYTES_TO_DWORDS(storm->sem_fast_mem_addr + SEM_FAST_REG_STALLED), 1, false);
		num_reg_entries++;
	}

	/* Write header */
	if (dump)
		ecore_grc_dump_regs_hdr(dump_buf, true, num_reg_entries, "eng", -1, OSAL_NULL, OSAL_NULL);

	return offset;
}
3319
3320/* Dumps registers that can't be represented in the debug arrays */
3321static u32 ecore_grc_dump_special_regs(struct ecore_hwfn *p_hwfn,
3322									   struct ecore_ptt *p_ptt,
3323									   u32 *dump_buf,
3324									   bool dump)
3325{
3326	u32 offset = 0;
3327
3328	offset += ecore_grc_dump_regs_hdr(dump_buf, dump, 2, "eng", -1, OSAL_NULL, OSAL_NULL);
3329
3330	/* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be
3331	 * skipped).
3332	 */
3333	offset += ecore_grc_dump_reg_entry_skip(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO), RDIF_REG_DEBUG_ERROR_INFO_SIZE, 7, 1);
3334	offset += ecore_grc_dump_reg_entry_skip(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO), TDIF_REG_DEBUG_ERROR_INFO_SIZE, 7, 1);
3335
3336	return offset;
3337}
3338
3339/* Dumps a GRC memory header (section and params). Returns the dumped size in
3340 * dwords. The following parameters are dumped:
3341 * - name:	   dumped only if it's not OSAL_NULL.
3342 * - addr:	   in dwords, dumped only if name is OSAL_NULL.
3343 * - len:	   in dwords, always dumped.
3344 * - width:	   dumped if it's not zero.
3345 * - packed:	   dumped only if it's not false.
3346 * - mem_group:	   always dumped.
3347 * - is_storm:	   true only if the memory is related to a Storm.
3348 * - storm_letter: valid only if is_storm is true.
3349 *
3350 */
3351static u32 ecore_grc_dump_mem_hdr(struct ecore_hwfn *p_hwfn,
3352								  u32 *dump_buf,
3353								  bool dump,
3354								  const char *name,
3355								  u32 addr,
3356								  u32 len,
3357								  u32 bit_width,
3358								  bool packed,
3359								  const char *mem_group,
3360								  bool is_storm,
3361								  char storm_letter)
3362{
3363	u8 num_params = 3;
3364	u32 offset = 0;
3365	char buf[64];
3366
3367	if (!len)
3368		DP_NOTICE(p_hwfn, true, "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
3369
3370	if (bit_width)
3371		num_params++;
3372	if (packed)
3373		num_params++;
3374
3375	/* Dump section header */
3376	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "grc_mem", num_params);
3377
3378	if (name) {
3379		/* Dump name */
3380		if (is_storm) {
3381			OSAL_STRCPY(buf, "?STORM_");
3382			buf[0] = storm_letter;
3383			OSAL_STRCPY(buf + OSAL_STRLEN(buf), name);
3384		}
3385		else {
3386			OSAL_STRCPY(buf, name);
3387		}
3388
3389		offset += ecore_dump_str_param(dump_buf + offset, dump, "name", buf);
3390	}
3391	else {
3392		/* Dump address */
3393		u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
3394
3395		offset += ecore_dump_num_param(dump_buf + offset, dump, "addr", addr_in_bytes);
3396	}
3397
3398	/* Dump len */
3399	offset += ecore_dump_num_param(dump_buf + offset, dump, "len", len);
3400
3401	/* Dump bit width */
3402	if (bit_width)
3403		offset += ecore_dump_num_param(dump_buf + offset, dump, "width", bit_width);
3404
3405	/* Dump packed */
3406	if (packed)
3407		offset += ecore_dump_num_param(dump_buf + offset, dump, "packed", 1);
3408
3409	/* Dump reg type */
3410	if (is_storm) {
3411		OSAL_STRCPY(buf, "?STORM_");
3412		buf[0] = storm_letter;
3413		OSAL_STRCPY(buf + OSAL_STRLEN(buf), mem_group);
3414	}
3415	else {
3416		OSAL_STRCPY(buf, mem_group);
3417	}
3418
3419	offset += ecore_dump_str_param(dump_buf + offset, dump, "type", buf);
3420
3421	return offset;
3422}
3423
3424/* Dumps a single GRC memory. If name is OSAL_NULL, the memory is stored by address.
3425 * Returns the dumped size in dwords.
3426 * The addr and len arguments are specified in dwords.
3427 */
3428static u32 ecore_grc_dump_mem(struct ecore_hwfn *p_hwfn,
3429							  struct ecore_ptt *p_ptt,
3430							  u32 *dump_buf,
3431							  bool dump,
3432							  const char *name,
3433							  u32 addr,
3434							  u32 len,
3435							  bool wide_bus,
3436							  u32 bit_width,
3437							  bool packed,
3438							  const char *mem_group,
3439							  bool is_storm,
3440							  char storm_letter)
3441{
3442	u32 offset = 0;
3443
3444	offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, name, addr, len, bit_width, packed, mem_group, is_storm, storm_letter);
3445	offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, len, wide_bus);
3446
3447	return offset;
3448}
3449
3450/* Dumps GRC memories entries. Returns the dumped size in dwords. */
/* Dumps GRC memories entries. Returns the dumped size in dwords. */
static u32 ecore_grc_dump_mem_entries(struct ecore_hwfn *p_hwfn,
									  struct ecore_ptt *p_ptt,
									  struct dbg_array input_mems_arr,
									  u32 *dump_buf,
									  bool dump)
{
	u32 i, offset = 0, input_offset = 0;

	/* NOTE(review): mode_match is only re-evaluated when a section's
	 * condition header has eval_mode set; otherwise the result from a
	 * previous section is carried over - confirm this is intended.
	 */
	bool mode_match = true;

	/* Walk the input array one condition header + entries section at a
	 * time. input_offset tracks the current position in dwords.
	 */
	while (input_offset < input_mems_arr.size_in_dwords) {
		const struct dbg_dump_cond_hdr *cond_hdr;
		u16 modes_buf_offset;
		u32 num_entries;
		bool eval_mode;

		/* The condition header occupies one dword; entries follow it */
		cond_hdr = (const struct dbg_dump_cond_hdr *)&input_mems_arr.ptr[input_offset++];
		num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;

		/* Check required mode */
		eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
		if (eval_mode) {
			modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
			mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
		}

		/* Mode mismatch: skip the whole section's entries */
		if (!mode_match) {
			input_offset += cond_hdr->data_size;
			continue;
		}

		/* Dump each memory entry in the section */
		for (i = 0; i < num_entries; i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
			const struct dbg_dump_mem *mem = (const struct dbg_dump_mem *)&input_mems_arr.ptr[input_offset];
			u8 mem_group_id = GET_FIELD(mem->dword0, DBG_DUMP_MEM_MEM_GROUP_ID);
			bool is_storm = false, mem_wide_bus;
			char storm_letter = 'a';
			u32 mem_addr, mem_len;

			/* Sanity-check the group id before indexing group tables */
			if (mem_group_id >= MEM_GROUPS_NUM) {
				DP_NOTICE(p_hwfn, true, "Invalid mem_group_id\n");
				return 0;
			}

			/* Honor the GRC dump parameters for this block/group */
			if (!ecore_grc_is_mem_included(p_hwfn, (enum block_id)cond_hdr->block_id, mem_group_id))
				continue;

			mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
			mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
			mem_wide_bus = GET_FIELD(mem->dword1, DBG_DUMP_MEM_WIDE_BUS);

			/* Update memory length for CCFC/TCFC memories
			 * according to number of LCIDs/LTIDs.
			 */
			if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
				/* Length must divide evenly into per-LCID chunks */
				if (mem_len % MAX_LCIDS) {
					DP_NOTICE(p_hwfn, true, "Invalid CCFC connection memory size\n");
					return 0;
				}

				mem_len = ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS) * (mem_len / MAX_LCIDS);
			}
			else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
				/* Length must divide evenly into per-LTID chunks */
				if (mem_len % MAX_LTIDS) {
					DP_NOTICE(p_hwfn, true, "Invalid TCFC task memory size\n");
					return 0;
				}

				mem_len = ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS) * (mem_len / MAX_LTIDS);
			}

			/* If memory is associated with Storm, update Storm
			 * details.
			 */
			if (s_block_defs[cond_hdr->block_id]->associated_to_storm) {
				is_storm = true;
				storm_letter = s_storm_defs[s_block_defs[cond_hdr->block_id]->storm_id].letter;
			}

			/* Dump memory */
			offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, mem_addr, mem_len, mem_wide_bus,
				0, false, s_mem_group_names[mem_group_id], is_storm, storm_letter);
		}
	}

	return offset;
}
3536
3537/* Dumps GRC memories according to the input array dump_mem.
3538 * Returns the dumped size in dwords.
3539 */
3540static u32 ecore_grc_dump_memories(struct ecore_hwfn *p_hwfn,
3541								   struct ecore_ptt *p_ptt,
3542								   u32 *dump_buf,
3543								   bool dump)
3544{
3545	u32 offset = 0, input_offset = 0;
3546
3547	while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
3548		const struct dbg_dump_split_hdr *split_hdr;
3549		struct dbg_array curr_input_mems_arr;
3550		u32 split_data_size;
3551		u8 split_type_id;
3552
3553		split_hdr = (const struct dbg_dump_split_hdr *)&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
3554		split_type_id = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
3555		split_data_size = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_DATA_SIZE);
3556		curr_input_mems_arr.ptr = &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
3557		curr_input_mems_arr.size_in_dwords = split_data_size;
3558
3559		switch (split_type_id) {
3560		case SPLIT_TYPE_NONE:
3561			offset += ecore_grc_dump_mem_entries(p_hwfn, p_ptt, curr_input_mems_arr, dump_buf + offset, dump);
3562			break;
3563
3564		default:
3565			DP_NOTICE(p_hwfn, true, "Dumping split memories is currently not supported\n");
3566			break;
3567		}
3568
3569		input_offset += split_data_size;
3570	}
3571
3572	return offset;
3573}
3574
3575/* Dumps GRC context data for the specified Storm.
3576 * Returns the dumped size in dwords.
3577 * The lid_size argument is specified in quad-regs.
3578 */
/* Dumps GRC context data for the specified Storm.
 * Returns the dumped size in dwords.
 * The lid_size argument is specified in quad-regs.
 */
static u32 ecore_grc_dump_ctx_data(struct ecore_hwfn *p_hwfn,
								   struct ecore_ptt *p_ptt,
								   u32 *dump_buf,
								   bool dump,
								   const char *name,
								   u32 num_lids,
								   u32 lid_size,
								   u32 rd_reg_addr,
								   u8 storm_id)
{
	struct storm_defs *storm = &s_storm_defs[storm_id];
	u32 i, lid, total_size, offset = 0;

	/* A zero lid_size means this Storm has no such context - dump nothing */
	if (!lid_size)
		return 0;

	/* Convert lid_size from quad-regs to dwords (factor of 4;
	 * BYTES_IN_DWORD is reused here for its value).
	 */
	lid_size *= BYTES_IN_DWORD;
	total_size = num_lids * lid_size;

	/* bit_width = lid_size * 32 bits per dword */
	offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, name, 0, total_size, lid_size * 32, false, name, true, storm->letter);

	/* In size-calculation mode, only account for the data size */
	if (!dump)
		return offset + total_size;

	/* Dump context data: for each lid, select each dword of the context
	 * via the Storm's CM context write-address register and read it back
	 * through rd_reg_addr. NOTE(review): the (i << 9) | lid encoding of
	 * the select value is hardware-defined - confirm against HW spec.
	 */
	for (lid = 0; lid < num_lids; lid++) {
		for (i = 0; i < lid_size; i++, offset++) {
			ecore_wr(p_hwfn, p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
			*(dump_buf + offset) = ecore_rd(p_hwfn, p_ptt, rd_reg_addr);
		}
	}

	return offset;
}
3613
3614/* Dumps GRC contexts. Returns the dumped size in dwords. */
3615static u32 ecore_grc_dump_ctx(struct ecore_hwfn *p_hwfn,
3616							  struct ecore_ptt *p_ptt,
3617							  u32 *dump_buf,
3618							  bool dump)
3619{
3620	u32 offset = 0;
3621	u8 storm_id;
3622
3623	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3624		struct storm_defs *storm = &s_storm_defs[storm_id];
3625
3626		if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))
3627			continue;
3628
3629		/* Dump Conn AG context size */
3630		offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "CONN_AG_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS),
3631			storm->cm_conn_ag_ctx_lid_size, storm->cm_conn_ag_ctx_rd_addr, storm_id);
3632
3633		/* Dump Conn ST context size */
3634		offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "CONN_ST_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS),
3635			storm->cm_conn_st_ctx_lid_size, storm->cm_conn_st_ctx_rd_addr, storm_id);
3636
3637		/* Dump Task AG context size */
3638		offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "TASK_AG_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS),
3639			storm->cm_task_ag_ctx_lid_size, storm->cm_task_ag_ctx_rd_addr, storm_id);
3640
3641		/* Dump Task ST context size */
3642		offset += ecore_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "TASK_ST_CTX", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS),
3643			storm->cm_task_st_ctx_lid_size, storm->cm_task_st_ctx_rd_addr, storm_id);
3644	}
3645
3646	return offset;
3647}
3648
3649/* Dumps GRC IORs data. Returns the dumped size in dwords. */
3650static u32 ecore_grc_dump_iors(struct ecore_hwfn *p_hwfn,
3651							   struct ecore_ptt *p_ptt,
3652							   u32 *dump_buf,
3653							   bool dump)
3654{
3655	char buf[10] = "IOR_SET_?";
3656	u32 addr, offset = 0;
3657	u8 storm_id, set_id;
3658
3659	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3660		struct storm_defs *storm = &s_storm_defs[storm_id];
3661
3662		if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id))
3663			continue;
3664
3665		for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
3666			addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr + SEM_FAST_REG_STORM_REG_FILE) + IOR_SET_OFFSET(set_id);
3667			buf[OSAL_STRLEN(buf) - 1] = '0' + set_id;
3668			offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, buf, addr, IORS_PER_SET, false, 32, false, "ior", true, storm->letter);
3669		}
3670	}
3671
3672	return offset;
3673}
3674
3675/* Dump VFC CAM. Returns the dumped size in dwords. */
3676static u32 ecore_grc_dump_vfc_cam(struct ecore_hwfn *p_hwfn,
3677								  struct ecore_ptt *p_ptt,
3678								  u32 *dump_buf,
3679								  bool dump,
3680								  u8 storm_id)
3681{
3682	u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
3683	struct storm_defs *storm = &s_storm_defs[storm_id];
3684	u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
3685	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
3686	u32 row, i, offset = 0;
3687
3688	offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, "vfc_cam", 0, total_size, 256, false, "vfc_cam", true, storm->letter);
3689
3690	if (!dump)
3691		return offset + total_size;
3692
3693	/* Prepare CAM address */
3694	SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
3695
3696	for (row = 0; row < VFC_CAM_NUM_ROWS; row++, offset += VFC_CAM_RESP_DWORDS) {
3697		/* Write VFC CAM command */
3698		SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
3699		ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR, cam_cmd, VFC_CAM_CMD_DWORDS);
3700
3701		/* Write VFC CAM address */
3702		ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR, cam_addr, VFC_CAM_ADDR_DWORDS);
3703
3704		/* Read VFC CAM read response */
3705		ARR_REG_RD(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD, dump_buf + offset, VFC_CAM_RESP_DWORDS);
3706	}
3707
3708	return offset;
3709}
3710
3711/* Dump VFC RAM. Returns the dumped size in dwords. */
3712static u32 ecore_grc_dump_vfc_ram(struct ecore_hwfn *p_hwfn,
3713								  struct ecore_ptt *p_ptt,
3714								  u32 *dump_buf,
3715								  bool dump,
3716								  u8 storm_id,
3717								  struct vfc_ram_defs *ram_defs)
3718{
3719	u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
3720	struct storm_defs *storm = &s_storm_defs[storm_id];
3721	u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
3722	u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
3723	u32 row, i, offset = 0;
3724
3725	offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, ram_defs->mem_name, 0, total_size, 256, false, ram_defs->type_name, true, storm->letter);
3726
3727	/* Prepare RAM address */
3728	SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
3729
3730	if (!dump)
3731		return offset + total_size;
3732
3733	for (row = ram_defs->base_row; row < ram_defs->base_row + ram_defs->num_rows; row++, offset += VFC_RAM_RESP_DWORDS) {
3734		/* Write VFC RAM command */
3735		ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR, ram_cmd, VFC_RAM_CMD_DWORDS);
3736
3737		/* Write VFC RAM address */
3738		SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
3739		ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR, ram_addr, VFC_RAM_ADDR_DWORDS);
3740
3741		/* Read VFC RAM read response */
3742		ARR_REG_RD(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD, dump_buf + offset, VFC_RAM_RESP_DWORDS);
3743	}
3744
3745	return offset;
3746}
3747
3748/* Dumps GRC VFC data. Returns the dumped size in dwords. */
3749static u32 ecore_grc_dump_vfc(struct ecore_hwfn *p_hwfn,
3750							  struct ecore_ptt *p_ptt,
3751							  u32 *dump_buf,
3752							  bool dump)
3753{
3754	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3755	u8 storm_id, i;
3756	u32 offset = 0;
3757
3758	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3759		if (!ecore_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id) ||
3760			!s_storm_defs[storm_id].has_vfc ||
3761			(storm_id == DBG_PSTORM_ID && dev_data->platform_id != PLATFORM_ASIC))
3762			continue;
3763
3764		/* Read CAM */
3765		offset += ecore_grc_dump_vfc_cam(p_hwfn, p_ptt, dump_buf + offset, dump, storm_id);
3766
3767		/* Read RAM */
3768		for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3769			offset += ecore_grc_dump_vfc_ram(p_hwfn, p_ptt, dump_buf + offset, dump, storm_id, &s_vfc_ram_defs[i]);
3770	}
3771
3772	return offset;
3773}
3774
3775/* Dumps GRC RSS data. Returns the dumped size in dwords. */
/* Dumps GRC RSS data. Returns the dumped size in dwords. */
static u32 ecore_grc_dump_rss(struct ecore_hwfn *p_hwfn,
							  struct ecore_ptt *p_ptt,
							  u32 *dump_buf,
							  bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 offset = 0;
	u8 rss_mem_id;

	for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
		u32 rss_addr, num_entries, total_dwords;
		struct rss_mem_defs *rss_defs;
		bool packed;

		rss_defs = &s_rss_mem_defs[rss_mem_id];
		rss_addr = rss_defs->addr;
		num_entries = rss_defs->num_entries[dev_data->chip_id];
		/* Total size in dwords: entries * entry bit-width / 32 */
		total_dwords = (num_entries * rss_defs->entry_width) / 32;
		/* 16-bit entries are marked as packed (two per dword) */
		packed = (rss_defs->entry_width == 16);

		offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, rss_defs->mem_name, 0, total_dwords,
			rss_defs->entry_width, packed, rss_defs->type_name, false, 0);

		/* Dump RSS data */
		if (!dump) {
			/* Size-calculation mode: account for the data only */
			offset += total_dwords;
			continue;
		}

		/* Read the RAM through its windowed address/data registers,
		 * at most RSS_REG_RSS_RAM_DATA_SIZE dwords per window.
		 * NOTE(review): rss_addr advances by 1 per window, so one
		 * RAM address appears to cover a full data window - confirm.
		 */
		while (total_dwords) {
			u32 num_dwords_to_read = OSAL_MIN_T(u32, RSS_REG_RSS_RAM_DATA_SIZE, total_dwords);
			ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
			offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA), num_dwords_to_read, false);
			total_dwords -= num_dwords_to_read;
			rss_addr++;
		}
	}

	return offset;
}
3816
3817/* Dumps GRC Big RAM. Returns the dumped size in dwords. */
/* Dumps GRC Big RAM. Returns the dumped size in dwords. */
static u32 ecore_grc_dump_big_ram(struct ecore_hwfn *p_hwfn,
								  struct ecore_ptt *p_ptt,
								  u32 *dump_buf,
								  bool dump,
								  u8 big_ram_id)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 block_size, ram_size, offset = 0, reg_val, i;
	char mem_name[12] = "???_BIG_RAM";
	char type_name[8] = "???_RAM";
	struct big_ram_defs *big_ram;

	big_ram = &s_big_ram_defs[big_ram_id];
	ram_size = big_ram->ram_size[dev_data->chip_id];

	/* Block size (bytes) depends on the chip-specific 256b-mode bit */
	reg_val = ecore_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
	block_size = reg_val & (1 << big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256 : 128;

	/* Overwrite only the "???" prefix with the instance name, keeping
	 * the "_RAM"/"_BIG_RAM" suffix and terminator intact. Assumes
	 * instance_name is exactly 3 characters - TODO confirm; a longer
	 * name would clobber the suffix/terminator.
	 */
	OSAL_STRNCPY(type_name, big_ram->instance_name, OSAL_STRLEN(big_ram->instance_name));
	OSAL_STRNCPY(mem_name, big_ram->instance_name, OSAL_STRLEN(big_ram->instance_name));

	/* Dump memory header */
	offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, mem_name, 0, ram_size, block_size * 8, false, type_name, false, 0);

	/* Read and dump Big RAM data */
	if (!dump)
		return offset + ram_size;

	/* Dump Big RAM: select each window via the address register, then
	 * read BRB_REG_BIG_RAM_DATA_SIZE dwords from the data register.
	 */
	for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE); i++) {
		ecore_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
		offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(big_ram->data_reg_addr), BRB_REG_BIG_RAM_DATA_SIZE, false);
	}

	return offset;
}
3854
/* Dumps the MCP-related memories and registers.
 * Returns the dumped size in dwords.
 */
static u32 ecore_grc_dump_mcp(struct ecore_hwfn *p_hwfn,
							  struct ecore_ptt *p_ptt,
							  u32 *dump_buf,
							  bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	bool block_enable[MAX_BLOCK_ID] = { 0 };
	bool halted = false;
	u32 offset = 0;

	/* Halt MCP (only on ASIC and when MCP access is allowed), so its
	 * state is stable while being dumped. Dump proceeds even if the
	 * halt fails - only a notice is printed.
	 */
	if (dump && dev_data->platform_id == PLATFORM_ASIC && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
		halted = !ecore_mcp_halt(p_hwfn, p_ptt);
		if (!halted)
			DP_NOTICE(p_hwfn, false, "MCP halt failed!\n");
	}

	/* Dump MCP scratchpad (size depends on chip family) */
	offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, BYTES_TO_DWORDS(MCP_REG_SCRATCH),
		ECORE_IS_E5(p_hwfn->p_dev) ? MCP_REG_SCRATCH_SIZE_E5 : MCP_REG_SCRATCH_SIZE_BB_K2, false, 0, false, "MCP", false, 0);

	/* Dump MCP cpu_reg_file */
	offset += ecore_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, OSAL_NULL, BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
		MCP_REG_CPU_REG_FILE_SIZE, false, 0, false, "MCP", false, 0);

	/* Dump MCP registers (only the MCP block is enabled) */
	block_enable[BLOCK_MCP] = true;
	offset += ecore_grc_dump_registers(p_hwfn, p_ptt, dump_buf + offset, dump, block_enable, "block", "MCP");

	/* Dump required non-MCP registers */
	offset += ecore_grc_dump_regs_hdr(dump_buf + offset, dump, 1, "eng", -1, "block", "MCP");
	offset += ecore_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR), 1, false);

	/* Release MCP (only if the halt above succeeded) */
	if (halted && ecore_mcp_resume(p_hwfn, p_ptt))
		DP_NOTICE(p_hwfn, false, "Failed to resume MCP after halt!\n");

	return offset;
}
3894
3895/* Dumps the tbus indirect memory for all PHYs. */
/* Dumps the tbus indirect memory for all PHYs. */
static u32 ecore_grc_dump_phy(struct ecore_hwfn *p_hwfn,
							  struct ecore_ptt *p_ptt,
							  u32 *dump_buf,
							  bool dump)
{
	u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
	char mem_name[32];
	u8 phy_id;

	for (phy_id = 0; phy_id < OSAL_ARRAY_SIZE(s_phy_defs); phy_id++) {
		u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
		struct phy_defs *phy_defs;
		u8 *bytes_buf;

		/* Resolve the PHY's tbus address/data register addresses,
		 * all relative to the PHY's base address.
		 */
		phy_defs = &s_phy_defs[phy_id];
		addr_lo_addr = phy_defs->base_addr + phy_defs->tbus_addr_lo_addr;
		addr_hi_addr = phy_defs->base_addr + phy_defs->tbus_addr_hi_addr;
		data_lo_addr = phy_defs->base_addr + phy_defs->tbus_data_lo_addr;
		data_hi_addr = phy_defs->base_addr + phy_defs->tbus_data_hi_addr;

		/* Section name is "tbus_<phy name>" */
		if (OSAL_SNPRINTF(mem_name, sizeof(mem_name), "tbus_%s", phy_defs->phy_name) < 0)
			DP_NOTICE(p_hwfn, true, "Unexpected debug error: invalid PHY memory name\n");

		offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, mem_name, 0, PHY_DUMP_SIZE_DWORDS, 16, true, mem_name, false, 0);

		/* Size-calculation mode: account for the data only */
		if (!dump) {
			offset += PHY_DUMP_SIZE_DWORDS;
			continue;
		}

		/* Read the tbus indirectly: the address is split into hi/lo
		 * bytes, and each (hi, lo) address yields two data bytes
		 * (lo then hi), written byte-wise into the dword buffer.
		 */
		bytes_buf = (u8 *)(dump_buf + offset);
		for (tbus_hi_offset = 0; tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8); tbus_hi_offset++) {
			ecore_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
			for (tbus_lo_offset = 0; tbus_lo_offset < 256; tbus_lo_offset++) {
				ecore_wr(p_hwfn, p_ptt, addr_lo_addr, tbus_lo_offset);
				*(bytes_buf++) = (u8)ecore_rd(p_hwfn, p_ptt, data_lo_addr);
				*(bytes_buf++) = (u8)ecore_rd(p_hwfn, p_ptt, data_hi_addr);
			}
		}

		offset += PHY_DUMP_SIZE_DWORDS;
	}

	return offset;
}
3941
/* Configures the debug line of the specified block: selects the line,
 * then programs the client enable mask, right shift, and the force-valid
 * and force-frame masks in the block's debug registers.
 */
static void ecore_config_dbg_line(struct ecore_hwfn *p_hwfn,
								  struct ecore_ptt *p_ptt,
								  enum block_id block_id,
								  u8 line_id,
								  u8 enable_mask,
								  u8 right_shift,
								  u8 force_valid_mask,
								  u8 force_frame_mask)
{
	struct block_defs *block = s_block_defs[block_id];

	ecore_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
	ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
	ecore_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
	ecore_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
	ecore_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
}
3959
3960/* Dumps Static Debug data. Returns the dumped size in dwords. */
/* Dumps Static Debug data. Returns the dumped size in dwords. */
static u32 ecore_grc_dump_static_debug(struct ecore_hwfn *p_hwfn,
									   struct ecore_ptt *p_ptt,
									   u32 *dump_buf,
									   bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 block_id, line_id, offset = 0;

	/* don't dump static debug if a debug bus recording is in progress */
	if (dump && ecore_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
		return 0;

	if (dump) {
		/* Disable all blocks debug output */
		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
			struct block_defs *block = s_block_defs[block_id];

			/* Only blocks with a valid debug bus client */
			if (block->dbg_client_id[dev_data->chip_id] != MAX_DBG_BUS_CLIENTS)
				ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
		}

		/* Set up the debug block for static-debug recording */
		ecore_bus_reset_dbg_block(p_hwfn, p_ptt);
		ecore_bus_set_framing_mode(p_hwfn, p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
		ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
		ecore_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
		ecore_bus_enable_dbg_block(p_hwfn, p_ptt, true);
	}

	/* Dump all static debug lines for each relevant block */
	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
		struct block_defs *block = s_block_defs[block_id];
		struct dbg_bus_block *block_desc;
		u32 block_dwords;

		/* Skip blocks without a valid debug bus client */
		if (block->dbg_client_id[dev_data->chip_id] == MAX_DBG_BUS_CLIENTS)
			continue;

		block_desc = get_dbg_bus_block_desc(p_hwfn, (enum block_id)block_id);
		block_dwords = NUM_DBG_LINES(block_desc) * STATIC_DEBUG_LINE_DWORDS;

		/* Dump static section params */
		offset += ecore_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, block->name, 0, block_dwords, 32, false, "STATIC", false, 0);

		/* Size-calculation mode: account for the data only */
		if (!dump) {
			offset += block_dwords;
			continue;
		}

		/* If all lines are invalid - dump zeros */
		if (dev_data->block_in_reset[block_id]) {
			OSAL_MEMSET(dump_buf + offset, 0, DWORDS_TO_BYTES(block_dwords));
			offset += block_dwords;
			continue;
		}

		/* Enable block's client */
		ecore_bus_enable_clients(p_hwfn, p_ptt, 1 << block->dbg_client_id[dev_data->chip_id]);
		for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc); line_id++) {
			/* Configure debug line ID */
			ecore_config_dbg_line(p_hwfn, p_ptt, (enum block_id)block_id, (u8)line_id, 0xf, 0, 0, 0);

			/* Read debug line info */
			offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA), STATIC_DEBUG_LINE_DWORDS, true);
		}

		/* Disable block's client and debug output */
		ecore_bus_enable_clients(p_hwfn, p_ptt, 0);
		ecore_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
	}

	/* Restore: disable the debug block and all clients */
	if (dump) {
		ecore_bus_enable_dbg_block(p_hwfn, p_ptt, false);
		ecore_bus_enable_clients(p_hwfn, p_ptt, 0);
	}

	return offset;
}
4038
4039/* Performs GRC Dump to the specified buffer.
4040 * Returns the dumped size in dwords.
4041 */
4042static enum dbg_status ecore_grc_dump(struct ecore_hwfn *p_hwfn,
4043									  struct ecore_ptt *p_ptt,
4044									  u32 *dump_buf,
4045									  bool dump,
4046									  u32 *num_dumped_dwords)
4047{
4048	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4049	bool is_asic, parities_masked = false;
4050	u8 i, port_mode = 0;
4051	u32 offset = 0;
4052
4053	is_asic = dev_data->platform_id == PLATFORM_ASIC;
4054
4055	*num_dumped_dwords = 0;
4056
4057	if (dump) {
4058		/* Find port mode */
4059		switch (ecore_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
4060		case 0: port_mode = 1; break;
4061		case 1: port_mode = 2; break;
4062		case 2: port_mode = 4; break;
4063		}
4064
4065		/* Update reset state */
4066		ecore_update_blocks_reset_state(p_hwfn, p_ptt);
4067	}
4068
4069	/* Dump global params */
4070	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 4);
4071	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "grc-dump");
4072	offset += ecore_dump_num_param(dump_buf + offset, dump, "num-lcids", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LCIDS));
4073	offset += ecore_dump_num_param(dump_buf + offset, dump, "num-ltids", ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS));
4074	offset += ecore_dump_num_param(dump_buf + offset, dump, "num-ports", port_mode);
4075
4076	/* Dump reset registers (dumped before taking blocks out of reset ) */
4077	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4078		offset += ecore_grc_dump_reset_regs(p_hwfn, p_ptt, dump_buf + offset, dump);
4079
4080	/* Take all blocks out of reset (using reset registers) */
4081	if (dump) {
4082		ecore_grc_unreset_blocks(p_hwfn, p_ptt);
4083		ecore_update_blocks_reset_state(p_hwfn, p_ptt);
4084	}
4085
4086	/* Disable all parities using MFW command */
4087	if (dump && is_asic && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
4088			parities_masked = !ecore_mcp_mask_parities(p_hwfn, p_ptt, 1);
4089			if (!parities_masked) {
4090				DP_NOTICE(p_hwfn, false, "Failed to mask parities using MFW\n");
4091				if (ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
4092					return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
4093			}
4094		}
4095
4096	/* Dump modified registers (dumped before modifying them) */
4097	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4098		offset += ecore_grc_dump_modified_regs(p_hwfn, p_ptt, dump_buf + offset, dump);
4099
4100	/* Stall storms */
4101	if (dump && (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR) || ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
4102		ecore_grc_stall_storms(p_hwfn, p_ptt, true);
4103
4104	/* Dump all regs  */
4105	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
4106		bool block_enable[MAX_BLOCK_ID];
4107
4108		/* Dump all blocks except MCP */
4109		for (i = 0; i < MAX_BLOCK_ID; i++)
4110			block_enable[i] = true;
4111		block_enable[BLOCK_MCP] = false;
4112		offset += ecore_grc_dump_registers(p_hwfn, p_ptt, dump_buf + offset, dump, block_enable, OSAL_NULL, OSAL_NULL);
4113
4114		/* Dump special registers */
4115		offset += ecore_grc_dump_special_regs(p_hwfn, p_ptt, dump_buf + offset, dump);
4116	}
4117
4118	/* Dump memories */
4119	offset += ecore_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
4120
4121	/* Dump MCP */
4122	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
4123		offset += ecore_grc_dump_mcp(p_hwfn, p_ptt, dump_buf + offset, dump);
4124
4125	/* Dump context */
4126	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
4127		offset += ecore_grc_dump_ctx(p_hwfn, p_ptt, dump_buf + offset, dump);
4128
4129	/* Dump RSS memories */
4130	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
4131		offset += ecore_grc_dump_rss(p_hwfn, p_ptt, dump_buf + offset, dump);
4132
4133	/* Dump Big RAM */
4134	for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
4135		if (ecore_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
4136			offset += ecore_grc_dump_big_ram(p_hwfn, p_ptt, dump_buf + offset, dump, i);
4137
4138	/* Dump IORs */
4139	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
4140		offset += ecore_grc_dump_iors(p_hwfn, p_ptt, dump_buf + offset, dump);
4141
4142	/* Dump VFC */
4143	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
4144		offset += ecore_grc_dump_vfc(p_hwfn, p_ptt, dump_buf + offset, dump);
4145
4146	/* Dump PHY tbus */
4147	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id == CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
4148		offset += ecore_grc_dump_phy(p_hwfn, p_ptt, dump_buf + offset, dump);
4149
4150	/* Dump static debug data  */
4151	if (ecore_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_STATIC) && dev_data->bus.state == DBG_BUS_STATE_IDLE)
4152		offset += ecore_grc_dump_static_debug(p_hwfn, p_ptt, dump_buf + offset, dump);
4153
4154	/* Dump last section */
4155	offset += ecore_dump_last_section(dump_buf, offset, dump);
4156
4157	if (dump) {
4158		/* Unstall storms */
4159		if (ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
4160			ecore_grc_stall_storms(p_hwfn, p_ptt, false);
4161
4162		/* Clear parity status */
4163		if (is_asic)
4164			ecore_grc_clear_all_prty(p_hwfn, p_ptt);
4165
4166		/* Enable all parities using MFW command */
4167		if (parities_masked)
4168			ecore_mcp_mask_parities(p_hwfn, p_ptt, 0);
4169	}
4170
4171	*num_dumped_dwords = offset;
4172
4173	return DBG_STATUS_OK;
4174}
4175
4176/* Writes the specified failing Idle Check rule to the specified buffer.
4177 * Returns the dumped size in dwords.
4178 */
/* Writes the specified failing Idle Check rule to the specified buffer.
 * Returns the dumped size in dwords.
 */
static u32 ecore_idle_chk_dump_failure(struct ecore_hwfn *p_hwfn,
									   struct ecore_ptt *p_ptt,
									   u32 *dump_buf,
									   bool dump,
									   u16 rule_id,
									   const struct dbg_idle_chk_rule *rule,
									   u16 fail_entry_id,
									   u32 *cond_reg_values)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	const struct dbg_idle_chk_cond_reg *cond_regs;
	const struct dbg_idle_chk_info_reg *info_regs;
	u32 i, next_reg_offset = 0, offset = 0;
	struct dbg_idle_chk_result_hdr *hdr;
	const union dbg_idle_chk_reg *regs;
	u8 reg_id;

	/* The rule's registers are laid out in the binary array as
	 * num_cond_regs condition registers followed by the info registers.
	 */
	hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
	regs = &((const union dbg_idle_chk_reg *)s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
	cond_regs = &regs[0].cond_reg;
	info_regs = &regs[rule->num_cond_regs].info_reg;

	/* Dump rule data */
	if (dump) {
		OSAL_MEMSET(hdr, 0, sizeof(*hdr));
		hdr->rule_id = rule_id;
		hdr->mem_entry_id = fail_entry_id;
		hdr->severity = rule->severity;
		hdr->num_dumped_cond_regs = rule->num_cond_regs;
	}

	offset += IDLE_CHK_RESULT_HDR_DWORDS;

	/* Dump condition register values (already sampled by the caller and
	 * passed in via cond_reg_values).
	 */
	for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
		const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
		struct dbg_idle_chk_result_reg_hdr *reg_hdr;

		reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset);

		/* Write register header */
		if (!dump) {
			/* Size-calculation mode: header + values */
			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->entry_size;
			continue;
		}

		offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
		OSAL_MEMSET(reg_hdr, 0, sizeof(*reg_hdr));
		reg_hdr->start_entry = reg->start_entry;
		reg_hdr->size = reg->entry_size;
		/* A register is a memory if it has multiple entries or a
		 * non-zero starting entry.
		 */
		SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM, reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
		SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);

		/* Write register values */
		for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
			dump_buf[offset] = cond_reg_values[next_reg_offset];
	}

	/* Dump info register values (read from hardware here) */
	for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
		const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
		u32 block_id;

		/* Check if register's block is in reset */
		if (!dump) {
			/* Size-calculation mode: header + values */
			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
			continue;
		}

		block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
		if (block_id >= MAX_BLOCK_ID) {
			DP_NOTICE(p_hwfn, true, "Invalid block_id\n");
			return 0;
		}

		/* Registers in a block under reset cannot be read - skipped */
		if (!dev_data->block_in_reset[block_id]) {
			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
			bool wide_bus, eval_mode, mode_match = true;
			u16 modes_buf_offset;
			u32 addr;

			reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset);

			/* Check mode */
			eval_mode = GET_FIELD(reg->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
			if (eval_mode) {
				modes_buf_offset = GET_FIELD(reg->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
				mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
			}

			if (!mode_match)
				continue;

			addr = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_ADDRESS);
			wide_bus = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_WIDE_BUS);

			/* Write register header */
			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
			hdr->num_dumped_info_regs++;
			OSAL_MEMSET(reg_hdr, 0, sizeof(*reg_hdr));
			reg_hdr->size = reg->size;
			/* Info reg IDs continue after the condition reg IDs */
			SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, rule->num_cond_regs + reg_id);

			/* Write register values */
			offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, reg->size, wide_bus);
		}
	}

	return offset;
}
4289
4290/* Dumps idle check rule entries. Returns the dumped size in dwords. */
static u32 ecore_idle_chk_dump_rule_entries(struct ecore_hwfn *p_hwfn,
											struct ecore_ptt *p_ptt,
											u32 *dump_buf,
											bool dump,
											const struct dbg_idle_chk_rule *input_rules,
											u32 num_input_rules,
											u32 *num_failing_rules)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE]; /* scratch for one entry's condition values */
	u32 i, offset = 0;
	u16 entry_id;
	u8 reg_id;

	*num_failing_rules = 0;

	/* Evaluate each input rule independently */
	for (i = 0; i < num_input_rules; i++) {
		const struct dbg_idle_chk_cond_reg *cond_regs;
		const struct dbg_idle_chk_rule *rule;
		const union dbg_idle_chk_reg *regs;
		u16 num_reg_entries = 1;
		bool check_rule = true;
		const u32 *imm_values;

		rule = &input_rules[i];
		/* The rule's registers and immediate values are looked up in the
		 * binary debug arrays via the rule's offsets.
		 */
		regs = &((const union dbg_idle_chk_reg *)s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
		cond_regs = &regs[0].cond_reg;
		imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr[rule->imm_offset];

		/* Check if all condition register blocks are out of reset, and
		 * find maximal number of entries (all condition registers that
		 * are memories must have the same size, which is > 1).
		 */
		for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule; reg_id++) {
			u32 block_id = GET_FIELD(cond_regs[reg_id].data, DBG_IDLE_CHK_COND_REG_BLOCK_ID);

			if (block_id >= MAX_BLOCK_ID) {
				DP_NOTICE(p_hwfn, true, "Invalid block_id\n");
				return 0;
			}

			check_rule = !dev_data->block_in_reset[block_id];
			if (cond_regs[reg_id].num_entries > num_reg_entries)
				num_reg_entries = cond_regs[reg_id].num_entries;
		}

		/* When actually dumping, skip rules whose blocks are in reset */
		if (!check_rule && dump)
			continue;

		if (!dump) {
			/* Size-estimation pass: assume the worst case, i.e. that
			 * every entry of the rule fails and gets dumped.
			 */
			u32 entry_dump_size = ecore_idle_chk_dump_failure(p_hwfn, p_ptt, dump_buf + offset, false, rule->rule_id, rule, 0, OSAL_NULL);

			offset += num_reg_entries * entry_dump_size;
			(*num_failing_rules) += num_reg_entries;
			continue;
		}

		/* Go over all register entries (number of entries is the same for all
		 * condition registers).
		 */
		for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
			u32 next_reg_offset = 0;

			/* Read current entry of all condition registers */
			for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
				const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
				u32 padded_entry_size, addr;
				bool wide_bus;

				/* Find GRC address (if it's a memory, the address of the
				 * specific entry is calculated).
				 */
				addr = GET_FIELD(reg->data, DBG_IDLE_CHK_COND_REG_ADDRESS);
				wide_bus = GET_FIELD(reg->data, DBG_IDLE_CHK_COND_REG_WIDE_BUS);
				if (reg->num_entries > 1 || reg->start_entry > 0) {
					/* Memory entries are padded to a power-of-two size */
					padded_entry_size = reg->entry_size > 1 ? OSAL_ROUNDUP_POW_OF_TWO(reg->entry_size) : 1;
					addr += (reg->start_entry + entry_id) * padded_entry_size;
				}

				/* Read registers (guard against overflowing the scratch array) */
				if (next_reg_offset + reg->entry_size >= IDLE_CHK_MAX_ENTRIES_SIZE) {
					DP_NOTICE(p_hwfn, true, "idle check registers entry is too large\n");
					return 0;
				}

				next_reg_offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, cond_reg_values + next_reg_offset, dump, addr, reg->entry_size, wide_bus);
			}

			/* Call rule condition function. if returns true, it's a failure.*/
			if ((*cond_arr[rule->cond_id])(cond_reg_values, imm_values)) {
				offset += ecore_idle_chk_dump_failure(p_hwfn, p_ptt, dump_buf + offset, dump, rule->rule_id, rule, entry_id, cond_reg_values);
				(*num_failing_rules)++;
			}
		}
	}

	return offset;
}
4389
4390/* Performs Idle Check Dump to the specified buffer.
4391 * Returns the dumped size in dwords.
4392 */
static u32 ecore_idle_chk_dump(struct ecore_hwfn *p_hwfn,
							   struct ecore_ptt *p_ptt,
							   u32 *dump_buf,
							   bool dump)
{
	u32 num_failing_rules_offset, offset = 0, input_offset = 0, num_failing_rules = 0;

	/* Dump global params */
	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "idle-chk");

	/* Dump idle check section header with a single parameter. "num_rules"
	 * is written as 0 for now and overwritten at the end, once the real
	 * count is known.
	 */
	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
	num_failing_rules_offset = offset;
	offset += ecore_dump_num_param(dump_buf + offset, dump, "num_rules", 0);

	/* Walk the binary rules array: each chunk is a condition (mode)
	 * header followed by data_size dwords of rules.
	 */
	while (input_offset < s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
		const struct dbg_idle_chk_cond_hdr *cond_hdr = (const struct dbg_idle_chk_cond_hdr *)&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr[input_offset++];
		bool eval_mode, mode_match = true;
		u32 curr_failing_rules;
		u16 modes_buf_offset;

		/* Check mode */
		eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
		if (eval_mode) {
			modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
			mode_match = ecore_is_mode_match(p_hwfn, &modes_buf_offset);
		}

		if (mode_match) {
			offset += ecore_idle_chk_dump_rule_entries(p_hwfn, p_ptt, dump_buf + offset, dump, (const struct dbg_idle_chk_rule *)&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr[input_offset], cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS, &curr_failing_rules);
			num_failing_rules += curr_failing_rules;
		}

		input_offset += cond_hdr->data_size;
	}

	/* Overwrite num_rules parameter */
	if (dump)
		ecore_dump_num_param(dump_buf + num_failing_rules_offset, dump, "num_rules", num_failing_rules);

	/* Dump last section */
	offset += ecore_dump_last_section(dump_buf, offset, dump);

	return offset;
}
4439
4440/* Finds the meta data image in NVRAM */
4441static enum dbg_status ecore_find_nvram_image(struct ecore_hwfn *p_hwfn,
4442											  struct ecore_ptt *p_ptt,
4443											  u32 image_type,
4444											  u32 *nvram_offset_bytes,
4445											  u32 *nvram_size_bytes)
4446{
4447	u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
4448	struct mcp_file_att file_att;
4449	int nvm_result;
4450
4451	/* Call NVRAM get file command */
4452	nvm_result = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_GET_FILE_ATT, image_type, &ret_mcp_resp, &ret_mcp_param, &ret_txn_size, (u32 *)&file_att);
4453
4454	/* Check response */
4455	if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4456		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4457
4458	/* Update return values */
4459	*nvram_offset_bytes = file_att.nvm_start_addr;
4460	*nvram_size_bytes = file_att.len;
4461
4462	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n", image_type, *nvram_offset_bytes, *nvram_size_bytes);
4463
4464	/* Check alignment */
4465	if (*nvram_size_bytes & 0x3)
4466		return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
4467
4468	return DBG_STATUS_OK;
4469}
4470
4471/* Reads data from NVRAM */
4472static enum dbg_status ecore_nvram_read(struct ecore_hwfn *p_hwfn,
4473										struct ecore_ptt *p_ptt,
4474										u32 nvram_offset_bytes,
4475										u32 nvram_size_bytes,
4476										u32 *ret_buf)
4477{
4478	u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
4479	s32 bytes_left = nvram_size_bytes;
4480	u32 read_offset = 0;
4481
4482	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "nvram_read: reading image of size %d bytes from NVRAM\n", nvram_size_bytes);
4483
4484	do {
4485		bytes_to_copy = (bytes_left > MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
4486
4487		/* Call NVRAM read command */
4488		if (ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_READ_NVRAM, (nvram_offset_bytes + read_offset) | (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_OFFSET), &ret_mcp_resp, &ret_mcp_param, &ret_read_size, (u32 *)((u8 *)ret_buf + read_offset)))
4489			return DBG_STATUS_NVRAM_READ_FAILED;
4490
4491		/* Check response */
4492		if ((ret_mcp_resp  & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4493			return DBG_STATUS_NVRAM_READ_FAILED;
4494
4495		/* Update read offset */
4496		read_offset += ret_read_size;
4497		bytes_left -= ret_read_size;
4498	} while (bytes_left > 0);
4499
4500	return DBG_STATUS_OK;
4501}
4502
4503/* Get info on the MCP Trace data in the scratchpad:
4504 * - trace_data_grc_addr (OUT): trace data GRC address in bytes
4505 * - trace_data_size (OUT): trace data size in bytes (without the header)
4506 */
4507static enum dbg_status ecore_mcp_trace_get_data_info(struct ecore_hwfn *p_hwfn,
4508													 struct ecore_ptt *p_ptt,
4509													 u32 *trace_data_grc_addr,
4510													 u32 *trace_data_size)
4511{
4512	u32 spad_trace_offsize, signature;
4513
4514	/* Read trace section offsize structure from MCP scratchpad */
4515	spad_trace_offsize = ecore_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4516
4517	/* Extract trace section address from offsize (in scratchpad) */
4518	*trace_data_grc_addr = MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4519
4520	/* Read signature from MCP trace section */
4521	signature = ecore_rd(p_hwfn, p_ptt, *trace_data_grc_addr + OFFSETOF(struct mcp_trace, signature));
4522
4523	if (signature != MFW_TRACE_SIGNATURE)
4524		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4525
4526	/* Read trace size from MCP trace section */
4527	*trace_data_size = ecore_rd(p_hwfn, p_ptt, *trace_data_grc_addr + OFFSETOF(struct mcp_trace, size));
4528
4529	return DBG_STATUS_OK;
4530}
4531
4532/* Reads MCP trace meta data image from NVRAM
4533 * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4534 * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4535 *			      loaded from file).
4536 * - trace_meta_size (OUT):   size in bytes of the trace meta data.
4537 */
4538static enum dbg_status ecore_mcp_trace_get_meta_info(struct ecore_hwfn *p_hwfn,
4539													 struct ecore_ptt *p_ptt,
4540													 u32 trace_data_size_bytes,
4541													 u32 *running_bundle_id,
4542													 u32 *trace_meta_offset,
4543													 u32 *trace_meta_size)
4544{
4545	u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4546
4547	/* Read MCP trace section offsize structure from MCP scratchpad */
4548	spad_trace_offsize = ecore_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4549
4550	/* Find running bundle ID */
4551	running_mfw_addr = MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) + SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4552	*running_bundle_id = ecore_rd(p_hwfn, p_ptt, running_mfw_addr);
4553	if (*running_bundle_id > 1)
4554		return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4555
4556	/* Find image in NVRAM */
4557	nvram_image_type = (*running_bundle_id == DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4558	return ecore_find_nvram_image(p_hwfn, p_ptt, nvram_image_type, trace_meta_offset, trace_meta_size);
4559}
4560
4561/* Reads the MCP Trace meta data from NVRAM into the specified buffer */
4562static enum dbg_status ecore_mcp_trace_read_meta(struct ecore_hwfn *p_hwfn,
4563												 struct ecore_ptt *p_ptt,
4564												 u32 nvram_offset_in_bytes,
4565												 u32 size_in_bytes,
4566												 u32 *buf)
4567{
4568	u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
4569	enum dbg_status status;
4570	u32 signature;
4571
4572	/* Read meta data from NVRAM */
4573	status = ecore_nvram_read(p_hwfn, p_ptt, nvram_offset_in_bytes, size_in_bytes, buf);
4574	if (status != DBG_STATUS_OK)
4575		return status;
4576
4577	/* Extract and check first signature */
4578	signature = ecore_read_unaligned_dword(byte_buf);
4579	byte_buf += sizeof(signature);
4580	if (signature != NVM_MAGIC_VALUE)
4581		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4582
4583	/* Extract number of modules */
4584	modules_num = *(byte_buf++);
4585
4586	/* Skip all modules */
4587	for (i = 0; i < modules_num; i++) {
4588		module_len = *(byte_buf++);
4589		byte_buf += module_len;
4590	}
4591
4592	/* Extract and check second signature */
4593	signature = ecore_read_unaligned_dword(byte_buf);
4594	byte_buf += sizeof(signature);
4595	if (signature != NVM_MAGIC_VALUE)
4596		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4597
4598	return DBG_STATUS_OK;
4599}
4600
4601/* Dump MCP Trace */
static enum dbg_status ecore_mcp_trace_dump(struct ecore_hwfn *p_hwfn,
											struct ecore_ptt *p_ptt,
											u32 *dump_buf,
											bool dump,
											u32 *num_dumped_dwords)
{
	u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0, trace_meta_size_dwords = 0;
	u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 running_bundle_id, offset = 0;
	enum dbg_status status;
	bool mcp_access;
	int halted = 0;

	*num_dumped_dwords = 0;

	/* MCP halt/NVRAM access is only possible on ASIC and when the
	 * "no MCP" GRC parameter is not set.
	 */
	mcp_access = dev_data->platform_id == PLATFORM_ASIC && !ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);

	/* Get trace data info */
	status = ecore_mcp_trace_get_data_info(p_hwfn, p_ptt, &trace_data_grc_addr, &trace_data_size_bytes);
	if (status != DBG_STATUS_OK)
		return status;

	/* Dump global params */
	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "mcp-trace");

	/* Halt MCP while reading from scratchpad so the read data will be
	 * consistent. if halt fails, MCP trace is taken anyway, with a small
	 * risk that it may be corrupt.
	 */
	if (dump && mcp_access) {
		halted = !ecore_mcp_halt(p_hwfn, p_ptt);
		if (!halted)
			DP_NOTICE(p_hwfn, false, "MCP halt failed!\n");
	}

	/* Find trace data size (header included, rounded up to dwords) */
	trace_data_size_dwords = DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace), BYTES_IN_DWORD);

	/* Dump trace data section header and param */
	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "mcp_trace_data", 1);
	offset += ecore_dump_num_param(dump_buf + offset, dump, "size", trace_data_size_dwords);

	/* Read trace data from scratchpad into dump buffer */
	offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(trace_data_grc_addr), trace_data_size_dwords, false);

	/* Resume MCP (only if halt succeeded) */
	if (halted && ecore_mcp_resume(p_hwfn, p_ptt))
		DP_NOTICE(p_hwfn, false, "Failed to resume MCP after halt!\n");

	/* Dump trace meta section header */
	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "mcp_trace_meta", 1);

	/* Read trace meta only if NVRAM access is enabled
	 * (trace_meta_size_bytes is dword-aligned).
	 */
	if (OSAL_NVM_IS_ACCESS_ENABLED(p_hwfn) && mcp_access) {
		status = ecore_mcp_trace_get_meta_info(p_hwfn, p_ptt, trace_data_size_bytes, &running_bundle_id, &trace_meta_offset_bytes, &trace_meta_size_bytes);
		if (status == DBG_STATUS_OK)
			trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
	}

	/* Dump trace meta size param (0 if meta info was unavailable) */
	offset += ecore_dump_num_param(dump_buf + offset, dump, "size", trace_meta_size_dwords);

	/* Read trace meta image into dump buffer. NOTE(review): when dump is
	 * false, status keeps its earlier value (OK at this point), so the
	 * meta size is still reserved in the estimated buffer size.
	 */
	if (dump && trace_meta_size_dwords)
		status = ecore_mcp_trace_read_meta(p_hwfn, p_ptt, trace_meta_offset_bytes, trace_meta_size_bytes, dump_buf + offset);
	if (status == DBG_STATUS_OK)
		offset += trace_meta_size_dwords;

	/* Dump last section */
	offset += ecore_dump_last_section(dump_buf, offset, dump);

	*num_dumped_dwords = offset;

	/* If no mcp access, indicate that the dump doesn't contain the meta
	 * data from NVRAM.
	 */
	return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
}
4684
4685/* Dump GRC FIFO */
static enum dbg_status ecore_reg_fifo_dump(struct ecore_hwfn *p_hwfn,
										   struct ecore_ptt *p_ptt,
										   u32 *dump_buf,
										   bool dump,
										   u32 *num_dumped_dwords)
{
	u32 dwords_read, size_param_offset, offset = 0;
	bool fifo_has_data;

	*num_dumped_dwords = 0;

	/* Dump global params */
	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "reg-fifo");

	/* Dump fifo data section header and param. The size param is 0 for
	 * now, and is overwritten after reading the FIFO.
	 */
	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "reg_fifo_data", 1);
	size_param_offset = offset;
	offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);

	if (dump) {
		fifo_has_data = ecore_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA) > 0;

		/* Pull available data from fifo. Use DMAE since this is
		 * widebus memory and must be accessed atomically. Test for
		 * dwords_read not passing buffer size since more entries could
		 * be added to the buffer as we
		 * are emptying it.
		 */
		for (dwords_read = 0; fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS; dwords_read += REG_FIFO_ELEMENT_DWORDS) {
			offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, true, BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO), REG_FIFO_ELEMENT_DWORDS, true);
			fifo_has_data = ecore_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
		}

		/* Patch the size param with the number of dwords actually read */
		ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", dwords_read);
	}
	else {
		/* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
		 * test how much data is available, except for reading it.
		 */
		offset += REG_FIFO_DEPTH_DWORDS;
	}

	/* Dump last section */
	offset += ecore_dump_last_section(dump_buf, offset, dump);

	*num_dumped_dwords = offset;

	return DBG_STATUS_OK;
}
4738
4739/* Dump IGU FIFO */
4740static enum dbg_status ecore_igu_fifo_dump(struct ecore_hwfn *p_hwfn,
4741										   struct ecore_ptt *p_ptt,
4742										   u32 *dump_buf,
4743										   bool dump,
4744										   u32 *num_dumped_dwords)
4745{
4746	u32 dwords_read, size_param_offset, offset = 0;
4747	bool fifo_has_data;
4748
4749	*num_dumped_dwords = 0;
4750
4751	/* Dump global params */
4752	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4753	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "igu-fifo");
4754
4755	/* Dump fifo data section header and param. The size param is 0 for
4756	 * now, and is overwritten after reading the FIFO.
4757	 */
4758	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "igu_fifo_data", 1);
4759	size_param_offset = offset;
4760	offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);
4761
4762	if (dump) {
4763		fifo_has_data = ecore_rd(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4764
4765		/* Pull available data from fifo. Use DMAE since this is
4766		 * widebus memory and must be accessed atomically. Test for
4767		 * dwords_read not passing buffer size since more entries could
4768		 * be added to the buffer as we are emptying it.
4769		 */
4770		for (dwords_read = 0; fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS; dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
4771			offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, true, BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY), IGU_FIFO_ELEMENT_DWORDS, true);
4772			fifo_has_data = ecore_rd(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4773		}
4774
4775		ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", dwords_read);
4776	}
4777	else {
4778		/* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4779		 * test how much data is available, except for reading it.
4780		 */
4781		offset += IGU_FIFO_DEPTH_DWORDS;
4782	}
4783
4784	/* Dump last section */
4785	offset += ecore_dump_last_section(dump_buf, offset, dump);
4786
4787	*num_dumped_dwords = offset;
4788
4789	return DBG_STATUS_OK;
4790}
4791
4792/* Protection Override dump */
4793static enum dbg_status ecore_protection_override_dump(struct ecore_hwfn *p_hwfn,
4794													  struct ecore_ptt *p_ptt,
4795													  u32 *dump_buf,
4796													  bool dump,
4797													  u32 *num_dumped_dwords)
4798{
4799	u32 size_param_offset, override_window_dwords, offset = 0;
4800
4801	*num_dumped_dwords = 0;
4802
4803	/* Dump global params */
4804	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
4805	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "protection-override");
4806
4807	/* Dump data section header and param. The size param is 0 for now,
4808	 * and is overwritten after reading the data.
4809	 */
4810	offset += ecore_dump_section_hdr(dump_buf + offset, dump, "protection_override_data", 1);
4811	size_param_offset = offset;
4812	offset += ecore_dump_num_param(dump_buf + offset, dump, "size", 0);
4813
4814	if (dump) {
4815		/* Add override window info to buffer */
4816		override_window_dwords = ecore_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) * PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4817		offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, true, BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW), override_window_dwords, true);
4818		ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", override_window_dwords);
4819	}
4820	else {
4821		offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4822	}
4823
4824	/* Dump last section */
4825	offset += ecore_dump_last_section(dump_buf, offset, dump);
4826
4827	*num_dumped_dwords = offset;
4828
4829	return DBG_STATUS_OK;
4830}
4831
4832/* Performs FW Asserts Dump to the specified buffer.
4833 * Returns the dumped size in dwords.
4834 */
static u32 ecore_fw_asserts_dump(struct ecore_hwfn *p_hwfn,
								 struct ecore_ptt *p_ptt,
								 u32 *dump_buf,
								 bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct fw_asserts_ram_section *asserts;
	char storm_letter_str[2] = "?"; /* single-letter Storm name as a C string */
	struct fw_info fw_info;
	u32 offset = 0;
	u8 storm_id;

	/* Dump global params */
	offset += ecore_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1);
	offset += ecore_dump_str_param(dump_buf + offset, dump, "dump-type", "fw-asserts");

	/* Find Storm dump size */
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx, last_list_idx, addr;
		struct storm_defs *storm = &s_storm_defs[storm_id];

		/* Storms whose block is held in reset cannot be read */
		if (dev_data->block_in_reset[storm->block_id])
			continue;

		/* Read FW info for the current Storm  */
		ecore_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);

		asserts = &fw_info.fw_asserts_section;

		/* Dump FW Asserts section header and params */
		storm_letter_str[0] = storm->letter;
		offset += ecore_dump_section_hdr(dump_buf + offset, dump, "fw_asserts", 2);
		offset += ecore_dump_str_param(dump_buf + offset, dump, "storm", storm_letter_str);
		offset += ecore_dump_num_param(dump_buf + offset, dump, "size", asserts->list_element_dword_size);

		/* Read and dump FW Asserts data. In estimation mode just
		 * reserve room for one list element.
		 */
		if (!dump) {
			offset += asserts->list_element_dword_size;
			continue;
		}

		/* Locate the asserts list in the Storm's fast-memory RAM, then
		 * read the index of the next (free) element to find the last
		 * written one.
		 */
		fw_asserts_section_addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
			RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
		next_list_idx_addr = fw_asserts_section_addr + DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
		next_list_idx = ecore_rd(p_hwfn, p_ptt, next_list_idx_addr);
		/* If the list hasn't wrapped (next == 0), the last element is
		 * the final slot of the list.
		 */
		last_list_idx = (next_list_idx > 0 ? next_list_idx : asserts->list_num_elements) - 1;
		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) + asserts->list_dword_offset +
					last_list_idx * asserts->list_element_dword_size;
		offset += ecore_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, asserts->list_element_dword_size, false);
	}

	/* Dump last section */
	offset += ecore_dump_last_section(dump_buf, offset, dump);

	return offset;
}
4891
4892/***************************** Public Functions *******************************/
4893
4894enum dbg_status ecore_dbg_set_bin_ptr(const u8 * const bin_ptr)
4895{
4896	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
4897	u8 buf_id;
4898
4899	/* convert binary data to debug arrays */
4900	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
4901		s_dbg_arrays[buf_id].ptr = (u32 *)(bin_ptr + buf_array[buf_id].offset);
4902		s_dbg_arrays[buf_id].size_in_dwords = BYTES_TO_DWORDS(buf_array[buf_id].length);
4903	}
4904
4905	return DBG_STATUS_OK;
4906}
4907
4908enum dbg_status ecore_dbg_set_app_ver(u32 ver)
4909{
4910	if (ver < TOOLS_VERSION)
4911		return DBG_STATUS_UNSUPPORTED_APP_VERSION;
4912
4913	s_app_ver = ver;
4914
4915	return DBG_STATUS_OK;
4916}
4917
/* Returns the version of the debug FW-functions interface compiled into
 * this file.
 */
u32 ecore_dbg_get_fw_func_ver(void)
{
	return TOOLS_VERSION;
}
4922
4923enum chip_ids ecore_dbg_get_chip_id(struct ecore_hwfn *p_hwfn)
4924{
4925	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4926
4927	return (enum chip_ids)dev_data->chip_id;
4928}
4929
/* Resets the debug bus: disables all inputs, resets the DBG block and
 * re-initializes the SW bus state so a new recording can be configured.
 *
 * one_shot_en     - stop recording when the buffer fills (vs. wrap-around)
 * force_hw_dwords - force HW dword width (0 = auto, otherwise 4 or 8)
 * unify_inputs    - record all enabled inputs as a unified stream
 * grc_input_en    - also enable the special DBG (GRC) block as an input
 */
enum dbg_status ecore_dbg_bus_reset(struct ecore_hwfn *p_hwfn,
									struct ecore_ptt *p_ptt,
									bool one_shot_en,
									u8 force_hw_dwords,
									bool unify_inputs,
									bool grc_input_en)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	enum dbg_status status;

	status = ecore_dbg_dev_init(p_hwfn, p_ptt);
	if (status != DBG_STATUS_OK)
		return status;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_reset: one_shot_en = %d, force_hw_dwords = %d, unify_inputs = %d, grc_input_en = %d\n", one_shot_en, force_hw_dwords, unify_inputs, grc_input_en);

	/* Only 0 (auto), 4 or 8 dwords are valid HW widths */
	if (force_hw_dwords &&
		force_hw_dwords != 4 &&
		force_hw_dwords != 8)
		return DBG_STATUS_INVALID_ARGS;

	/* Refuse to reset while another client is using the debug bus */
	if (ecore_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
		return DBG_STATUS_DBG_BUS_IN_USE;

	/* Update reset state of all blocks */
	ecore_update_blocks_reset_state(p_hwfn, p_ptt);

	/* Disable all debug inputs */
	status = ecore_bus_disable_inputs(p_hwfn, p_ptt, false);
	if (status != DBG_STATUS_OK)
		return status;

	/* Reset DBG block */
	ecore_bus_reset_dbg_block(p_hwfn, p_ptt);

	/* Set one-shot / wrap-around */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, one_shot_en ? 0 : 1);

	/* Init state params (default target is the internal buffer) */
	OSAL_MEMSET(&dev_data->bus, 0, sizeof(dev_data->bus));
	dev_data->bus.target = DBG_BUS_TARGET_ID_INT_BUF;
	dev_data->bus.state = DBG_BUS_STATE_READY;
	dev_data->bus.one_shot_en = one_shot_en;
	dev_data->bus.hw_dwords = force_hw_dwords;
	dev_data->bus.grc_input_en = grc_input_en;
	dev_data->bus.unify_inputs = unify_inputs;
	dev_data->bus.num_enabled_blocks = grc_input_en ? 1 : 0;

	/* Init special DBG block */
	if (grc_input_en)
		SET_FIELD(dev_data->bus.blocks[BLOCK_DBG].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0x1);

	return DBG_STATUS_OK;
}
4984
4985enum dbg_status ecore_dbg_bus_set_pci_output(struct ecore_hwfn *p_hwfn,
4986											 struct ecore_ptt *p_ptt,
4987											 u16 buf_size_kb)
4988{
4989	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4990	dma_addr_t pci_buf_phys_addr;
4991	void *pci_buf;
4992
4993	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_set_pci_output: buf_size_kb = %d\n", buf_size_kb);
4994
4995	if (dev_data->bus.target != DBG_BUS_TARGET_ID_INT_BUF)
4996		return DBG_STATUS_OUTPUT_ALREADY_SET;
4997	if (dev_data->bus.state != DBG_BUS_STATE_READY || dev_data->bus.pci_buf.size > 0)
4998		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
4999
5000	dev_data->bus.target = DBG_BUS_TARGET_ID_PCI;
5001	dev_data->bus.pci_buf.size = buf_size_kb * 1024;
5002	if (dev_data->bus.pci_buf.size % PCI_PKT_SIZE_IN_BYTES)
5003		return DBG_STATUS_INVALID_ARGS;
5004
5005	pci_buf = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &pci_buf_phys_addr, dev_data->bus.pci_buf.size);
5006	if (!pci_buf)
5007		return DBG_STATUS_PCI_BUF_ALLOC_FAILED;
5008
5009	OSAL_MEMCPY(&dev_data->bus.pci_buf.phys_addr, &pci_buf_phys_addr, sizeof(pci_buf_phys_addr));
5010
5011	dev_data->bus.pci_buf.virt_addr.lo = (u32)((u64)(osal_uintptr_t)pci_buf);
5012	dev_data->bus.pci_buf.virt_addr.hi = (u32)((u64)(osal_uintptr_t)pci_buf >> 32);
5013
5014	ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_LSB, dev_data->bus.pci_buf.phys_addr.lo);
5015	ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_MSB, dev_data->bus.pci_buf.phys_addr.hi);
5016	ecore_wr(p_hwfn, p_ptt, DBG_REG_TARGET_PACKET_SIZE, PCI_PKT_SIZE_IN_CHUNKS);
5017	ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_EXT_BUFFER_SIZE, dev_data->bus.pci_buf.size / PCI_PKT_SIZE_IN_BYTES);
5018	ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_FUNC_NUM, OPAQUE_FID(p_hwfn->rel_pf_id));
5019	ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_LOGIC_ADDR, PCI_PHYS_ADDR_TYPE);
5020	ecore_wr(p_hwfn, p_ptt, DBG_REG_PCI_REQ_CREDIT, PCI_REQ_CREDIT);
5021	ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_PCI);
5022	ecore_wr(p_hwfn, p_ptt, DBG_REG_OUTPUT_ENABLE, TARGET_EN_MASK_PCI);
5023
5024	return DBG_STATUS_OK;
5025}
5026
/* Configures the debug bus to stream its output to the network (NIG) on
 * the given port, building an Ethernet header toward the given destination
 * MAC, or to cross-engine TX/RX when the engine flags are set.
 */
enum dbg_status ecore_dbg_bus_set_nw_output(struct ecore_hwfn *p_hwfn,
											struct ecore_ptt *p_ptt,
											u8 port_id,
											u32 dest_addr_lo32,
											u16 dest_addr_hi16,
											u16 data_limit_size_kb,
											bool send_to_other_engine,
											bool rcv_from_other_engine)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_set_nw_output: port_id = %d, dest_addr_lo32 = 0x%x, dest_addr_hi16 = 0x%x, data_limit_size_kb = %d, send_to_other_engine = %d, rcv_from_other_engine = %d\n", port_id, dest_addr_lo32, dest_addr_hi16, data_limit_size_kb, send_to_other_engine, rcv_from_other_engine);

	if (dev_data->bus.target != DBG_BUS_TARGET_ID_INT_BUF)
		return DBG_STATUS_OUTPUT_ALREADY_SET;
	if (dev_data->bus.state != DBG_BUS_STATE_READY)
		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
	/* Port must exist on this chip/platform; TX and RX cross-engine
	 * modes are mutually exclusive.
	 */
	if (port_id >= s_chip_defs[dev_data->chip_id].per_platform[dev_data->platform_id].num_ports || (send_to_other_engine && rcv_from_other_engine))
		return DBG_STATUS_INVALID_ARGS;

	dev_data->bus.target = DBG_BUS_TARGET_ID_NIG;
	dev_data->bus.rcv_from_other_engine = rcv_from_other_engine;

	/* Select NIG as the debug output target */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_OUTPUT_ENABLE, TARGET_EN_MASK_NIG);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_NIG);

	if (send_to_other_engine)
		ecore_wr(p_hwfn, p_ptt, DBG_REG_OTHER_ENGINE_MODE_BB_K2, DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX);
	else
		ecore_wr(p_hwfn, p_ptt, NIG_REG_DEBUG_PORT, port_id);

	if (rcv_from_other_engine) {
		ecore_wr(p_hwfn, p_ptt, DBG_REG_OTHER_ENGINE_MODE_BB_K2, DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX);
	}
	else {
		/* Configure ethernet header of 14 bytes */
		ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_WIDTH, 0);
		ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_7, dest_addr_lo32);
		ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_6, (u32)SRC_MAC_ADDR_LO16 | ((u32)dest_addr_hi16 << 16));
		ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_5, SRC_MAC_ADDR_HI32);
		ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_4, (u32)ETH_TYPE << 16);
		ecore_wr(p_hwfn, p_ptt, DBG_REG_TARGET_PACKET_SIZE, NIG_PKT_SIZE_IN_CHUNKS);
		/* Optional data limit, converted from KB to chunks */
		if (data_limit_size_kb)
			ecore_wr(p_hwfn, p_ptt, DBG_REG_NIG_DATA_LIMIT_SIZE, (data_limit_size_kb * 1024) / CHUNK_SIZE_IN_BYTES);
	}

	return DBG_STATUS_OK;
}
5075
5076static bool ecore_is_overlapping_enable_mask(struct ecore_hwfn *p_hwfn,
5077									  u8 enable_mask,
5078									  u8 right_shift)
5079{
5080	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5081	u8 curr_shifted_enable_mask, shifted_enable_mask;
5082	u32 block_id;
5083
5084	shifted_enable_mask = SHR(enable_mask, VALUES_PER_CYCLE, right_shift);
5085
5086	if (dev_data->bus.num_enabled_blocks) {
5087		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5088			struct dbg_bus_block_data *block_bus = &dev_data->bus.blocks[block_id];
5089
5090			if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5091				continue;
5092
5093			curr_shifted_enable_mask =
5094				SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5095					VALUES_PER_CYCLE,
5096					GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));
5097			if (shifted_enable_mask & curr_shifted_enable_mask)
5098				return true;
5099		}
5100	}
5101
5102	return false;
5103}
5104
5105enum dbg_status ecore_dbg_bus_enable_block(struct ecore_hwfn *p_hwfn,
5106										   enum block_id block_id,
5107										   u8 line_num,
5108										   u8 enable_mask,
5109										   u8 right_shift,
5110										   u8 force_valid_mask,
5111										   u8 force_frame_mask)
5112{
5113	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5114	struct block_defs *block = s_block_defs[block_id];
5115	struct dbg_bus_block_data *block_bus;
5116	struct dbg_bus_block *block_desc;
5117
5118	block_bus = &dev_data->bus.blocks[block_id];
5119	block_desc = get_dbg_bus_block_desc(p_hwfn, block_id);
5120
5121	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_block: block = %d, line_num = %d, enable_mask = 0x%x, right_shift = %d, force_valid_mask = 0x%x, force_frame_mask = 0x%x\n", block_id, line_num, enable_mask, right_shift, force_valid_mask, force_frame_mask);
5122
5123	if (dev_data->bus.state != DBG_BUS_STATE_READY)
5124		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5125	if (block_id >= MAX_BLOCK_ID)
5126		return DBG_STATUS_INVALID_ARGS;
5127	if (GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5128		return DBG_STATUS_BLOCK_ALREADY_ENABLED;
5129	if (block->dbg_client_id[dev_data->chip_id] == MAX_DBG_BUS_CLIENTS ||
5130		line_num >= NUM_DBG_LINES(block_desc) ||
5131		!enable_mask ||
5132		enable_mask > MAX_CYCLE_VALUES_MASK ||
5133		force_valid_mask > MAX_CYCLE_VALUES_MASK ||
5134		force_frame_mask > MAX_CYCLE_VALUES_MASK ||
5135		right_shift > VALUES_PER_CYCLE - 1)
5136		return DBG_STATUS_INVALID_ARGS;
5137	if (dev_data->block_in_reset[block_id])
5138		return DBG_STATUS_BLOCK_IN_RESET;
5139	if (!dev_data->bus.unify_inputs && ecore_is_overlapping_enable_mask(p_hwfn, enable_mask, right_shift))
5140		return DBG_STATUS_INPUT_OVERLAP;
5141
5142	dev_data->bus.blocks[block_id].line_num = line_num;
5143	SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, enable_mask);
5144	SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT, right_shift);
5145	SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK, force_valid_mask);
5146	SET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK, force_frame_mask);
5147
5148	dev_data->bus.num_enabled_blocks++;
5149
5150	return DBG_STATUS_OK;
5151}
5152
5153enum dbg_status ecore_dbg_bus_enable_storm(struct ecore_hwfn *p_hwfn,
5154										   enum dbg_storms storm_id,
5155										   enum dbg_bus_storm_modes storm_mode)
5156{
5157	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5158	struct dbg_bus_data *bus = &dev_data->bus;
5159	struct dbg_bus_storm_data *storm_bus;
5160	struct storm_defs *storm;
5161
5162	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_storm: storm = %d, storm_mode = %d\n", storm_id, storm_mode);
5163
5164	if (bus->state != DBG_BUS_STATE_READY)
5165		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
5166	if (bus->hw_dwords >= 4)
5167		return DBG_STATUS_HW_ONLY_RECORDING;
5168	if (storm_id >= MAX_DBG_STORMS)
5169		return DBG_STATUS_INVALID_ARGS;
5170	if (storm_mode >= MAX_DBG_BUS_STORM_MODES)
5171		return DBG_STATUS_INVALID_ARGS;
5172	if (bus->unify_inputs)
5173		return DBG_STATUS_INVALID_ARGS;
5174	if (bus->storms[storm_id].enabled)
5175		return DBG_STATUS_STORM_ALREADY_ENABLED;
5176
5177	storm = &s_storm_defs[storm_id];
5178	storm_bus = &bus->storms[storm_id];
5179
5180	if (dev_data->block_in_reset[storm->block_id])
5181		return DBG_STATUS_BLOCK_IN_RESET;
5182
5183	storm_bus->enabled = true;
5184	storm_bus->mode = (u8)storm_mode;
5185	storm_bus->hw_id = bus->num_enabled_storms;
5186
5187	bus->num_enabled_storms++;
5188
5189	return DBG_STATUS_OK;
5190}
5191
/* Enables the debug bus timestamp input and programs it in HW.
 * The timestamp is recorded as an extra bus input occupying dword 0 of the
 * DBG block (hence the 0x1 enable mask set below).
 * Returns DBG_STATUS_OK on success, or a specific failure status.
 */
enum dbg_status ecore_dbg_bus_enable_timestamp(struct ecore_hwfn *p_hwfn,
											   struct ecore_ptt *p_ptt,
											   u8 valid_mask,
											   u8 frame_mask,
											   u32 tick_len)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_timestamp: valid_mask = 0x%x, frame_mask = 0x%x, tick_len = %d\n", valid_mask, frame_mask, tick_len);

	if (dev_data->bus.state != DBG_BUS_STATE_READY)
		return DBG_STATUS_DBG_BLOCK_NOT_RESET;

	/* The valid/frame masks are limited to 3 bits */
	if (valid_mask > 0x7 || frame_mask > 0x7)
		return DBG_STATUS_INVALID_ARGS;

	/* The timestamp occupies dword 0 - unless inputs are unified, make sure
	 * no enabled block already records into that dword.
	 */
	if (!dev_data->bus.unify_inputs && ecore_is_overlapping_enable_mask(p_hwfn, 0x1, 0))
		return DBG_STATUS_INPUT_OVERLAP;

	dev_data->bus.timestamp_input_en = true;
	dev_data->bus.num_enabled_blocks++;

	SET_FIELD(dev_data->bus.blocks[BLOCK_DBG].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0x1);

	/* Program the timestamp HW registers */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_VALID_EN, valid_mask);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_FRAME_EN, frame_mask);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP_TICK, tick_len);

	return DBG_STATUS_OK;
}
5220
5221enum dbg_status ecore_dbg_bus_add_eid_range_sem_filter(struct ecore_hwfn *p_hwfn,
5222													   enum dbg_storms storm_id,
5223													   u8 min_eid,
5224													   u8 max_eid)
5225{
5226	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5227	struct dbg_bus_storm_data *storm_bus;
5228
5229	storm_bus = &dev_data->bus.storms[storm_id];
5230
5231	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_eid_range_sem_filter: storm = %d, min_eid = 0x%x, max_eid = 0x%x\n", storm_id, min_eid, max_eid);
5232
5233	if (storm_id >= MAX_DBG_STORMS)
5234		return DBG_STATUS_INVALID_ARGS;
5235	if (min_eid > max_eid)
5236		return DBG_STATUS_INVALID_ARGS;
5237	if (!storm_bus->enabled)
5238		return DBG_STATUS_STORM_NOT_ENABLED;
5239
5240	storm_bus->eid_filter_en = 1;
5241	storm_bus->eid_range_not_mask = 1;
5242	storm_bus->eid_filter_params.range.min = min_eid;
5243	storm_bus->eid_filter_params.range.max = max_eid;
5244
5245	return DBG_STATUS_OK;
5246}
5247
5248enum dbg_status ecore_dbg_bus_add_eid_mask_sem_filter(struct ecore_hwfn *p_hwfn,
5249													  enum dbg_storms storm_id,
5250													  u8 eid_val,
5251													  u8 eid_mask)
5252{
5253	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5254	struct dbg_bus_storm_data *storm_bus;
5255
5256	storm_bus = &dev_data->bus.storms[storm_id];
5257
5258	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_eid_mask_sem_filter: storm = %d, eid_val = 0x%x, eid_mask = 0x%x\n", storm_id, eid_val, eid_mask);
5259
5260	if (storm_id >= MAX_DBG_STORMS)
5261		return DBG_STATUS_INVALID_ARGS;
5262	if (!storm_bus->enabled)
5263		return DBG_STATUS_STORM_NOT_ENABLED;
5264
5265	storm_bus->eid_filter_en = 1;
5266	storm_bus->eid_range_not_mask = 0;
5267	storm_bus->eid_filter_params.mask.val = eid_val;
5268	storm_bus->eid_filter_params.mask.mask = eid_mask;
5269
5270	return DBG_STATUS_OK;
5271}
5272
5273enum dbg_status ecore_dbg_bus_add_cid_sem_filter(struct ecore_hwfn *p_hwfn,
5274												 enum dbg_storms storm_id,
5275												 u32 cid)
5276{
5277	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5278	struct dbg_bus_storm_data *storm_bus;
5279
5280	storm_bus = &dev_data->bus.storms[storm_id];
5281
5282	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_cid_sem_filter: storm = %d, cid = 0x%x\n", storm_id, cid);
5283
5284	if (storm_id >= MAX_DBG_STORMS)
5285		return DBG_STATUS_INVALID_ARGS;
5286	if (!storm_bus->enabled)
5287		return DBG_STATUS_STORM_NOT_ENABLED;
5288
5289	storm_bus->cid_filter_en = 1;
5290	storm_bus->cid = cid;
5291
5292	return DBG_STATUS_OK;
5293}
5294
/* Enables the debug bus filter on the specified (already enabled) block.
 * Filtering requires unified inputs (all dwords share HW ID 0), hence the
 * unify_inputs check. A non-zero const_msg_len enables constant message
 * length filtering. Returns DBG_STATUS_OK on success.
 */
enum dbg_status ecore_dbg_bus_enable_filter(struct ecore_hwfn *p_hwfn,
											struct ecore_ptt *p_ptt,
											enum block_id block_id,
											u8 const_msg_len)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_filter: block = %d, const_msg_len = %d\n", block_id, const_msg_len);

	if (dev_data->bus.state != DBG_BUS_STATE_READY)
		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
	if (dev_data->bus.filter_en)
		return DBG_STATUS_FILTER_ALREADY_ENABLED;
	if (block_id >= MAX_BLOCK_ID)
		return DBG_STATUS_INVALID_ARGS;
	if (!GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
		return DBG_STATUS_BLOCK_NOT_ENABLED;
	if (!dev_data->bus.unify_inputs)
		return DBG_STATUS_FILTER_BUG;

	/* Latch SW filter state; subsequent constraints go to the filter */
	dev_data->bus.filter_en = true;
	dev_data->bus.next_constraint_id = 0;
	dev_data->bus.adding_filter = true;

	/* HW ID is set to 0 due to required unifyInputs */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ID_NUM, 0);

	/* HW expects the constant message length minus one */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_MSG_LENGTH_ENABLE, const_msg_len > 0 ? 1 : 0);
	if (const_msg_len > 0)
		ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_MSG_LENGTH, const_msg_len - 1);

	return DBG_STATUS_OK;
}
5327
/* Enables the debug bus trigger, and configures the pre/post-trigger
 * recording windows in HW:
 * - rec_pre_trigger/pre_chunks: record data before the trigger fires
 *   (pre_chunks == 0 means "record from the start of the buffer").
 * - rec_post_trigger/post_cycles: record data after the trigger fires
 *   (post_cycles == 0 means "record forever", programmed as 0xffffffff).
 * - filter_pre/post_trigger: apply the filter before/after the trigger.
 * Returns DBG_STATUS_OK on success.
 */
enum dbg_status ecore_dbg_bus_enable_trigger(struct ecore_hwfn *p_hwfn,
											 struct ecore_ptt *p_ptt,
											 bool rec_pre_trigger,
											 u8 pre_chunks,
											 bool rec_post_trigger,
											 u32 post_cycles,
											 bool filter_pre_trigger,
											 bool filter_post_trigger)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	enum dbg_bus_post_trigger_types post_trigger_type;
	enum dbg_bus_pre_trigger_types pre_trigger_type;
	struct dbg_bus_data *bus = &dev_data->bus;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_enable_trigger: rec_pre_trigger = %d, pre_chunks = %d, rec_post_trigger = %d, post_cycles = %d, filter_pre_trigger = %d, filter_post_trigger = %d\n", rec_pre_trigger, pre_chunks, rec_post_trigger, post_cycles, filter_pre_trigger, filter_post_trigger);

	if (bus->state != DBG_BUS_STATE_READY)
		return DBG_STATUS_DBG_BLOCK_NOT_RESET;
	if (bus->trigger_en)
		return DBG_STATUS_TRIGGER_ALREADY_ENABLED;
	if (rec_pre_trigger && pre_chunks >= INT_BUF_SIZE_IN_CHUNKS)
		return DBG_STATUS_INVALID_ARGS;

	bus->trigger_en = true;
	bus->filter_pre_trigger = filter_pre_trigger;
	bus->filter_post_trigger = filter_post_trigger;

	/* Select the pre-trigger recording mode */
	if (rec_pre_trigger) {
		pre_trigger_type = pre_chunks ? DBG_BUS_PRE_TRIGGER_NUM_CHUNKS : DBG_BUS_PRE_TRIGGER_START_FROM_ZERO;
		ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_PRE_NUM_CHUNKS, pre_chunks);
	}
	else {
		pre_trigger_type = DBG_BUS_PRE_TRIGGER_DROP;
	}

	/* Select the post-trigger recording mode */
	if (rec_post_trigger) {
		post_trigger_type = DBG_BUS_POST_TRIGGER_RECORD;
		ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_POST_NUM_CYCLES, post_cycles ? post_cycles : 0xffffffff);
	}
	else {
		post_trigger_type = DBG_BUS_POST_TRIGGER_DROP;
	}

	/* Program the chosen modes and enable the trigger in HW */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_PRE_TRGR_EVNT_MODE, pre_trigger_type);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_RCRD_ON_WINDOW_POST_TRGR_EVNT_MODE, post_trigger_type);
	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_ENABLE, 1);

	return DBG_STATUS_OK;
}
5377
5378enum dbg_status ecore_dbg_bus_add_trigger_state(struct ecore_hwfn *p_hwfn,
5379												struct ecore_ptt *p_ptt,
5380												enum block_id block_id,
5381												u8 const_msg_len,
5382												u16 count_to_next)
5383{
5384	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5385	struct dbg_bus_data *bus = &dev_data->bus;
5386	struct dbg_bus_block_data *block_bus;
5387	u8 reg_offset;
5388
5389	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_trigger_state: block = %d, const_msg_len = %d, count_to_next = %d\n", block_id, const_msg_len, count_to_next);
5390
5391	block_bus = &bus->blocks[block_id];
5392
5393	if (!bus->trigger_en)
5394		return DBG_STATUS_TRIGGER_NOT_ENABLED;
5395	if (bus->next_trigger_state == MAX_TRIGGER_STATES)
5396		return DBG_STATUS_TOO_MANY_TRIGGER_STATES;
5397	if (block_id >= MAX_BLOCK_ID)
5398		return DBG_STATUS_INVALID_ARGS;
5399	if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
5400		return DBG_STATUS_BLOCK_NOT_ENABLED;
5401	if (!count_to_next)
5402		return DBG_STATUS_INVALID_ARGS;
5403
5404	bus->next_constraint_id = 0;
5405	bus->adding_filter = false;
5406
5407	/* Store block's shifted enable mask */
5408	SET_FIELD(bus->trigger_states[dev_data->bus.next_trigger_state].data, DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK, SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
5409					   VALUES_PER_CYCLE,
5410					   GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT)));
5411
5412	/* Set trigger state registers */
5413	reg_offset = bus->next_trigger_state * BYTES_IN_DWORD;
5414	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_MSG_LENGTH_ENABLE_0 + reg_offset, const_msg_len > 0 ? 1 : 0);
5415	if (const_msg_len > 0)
5416		ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_MSG_LENGTH_0 + reg_offset, const_msg_len - 1);
5417
5418	/* Set trigger set registers */
5419	reg_offset = bus->next_trigger_state * TRIGGER_SETS_PER_STATE * BYTES_IN_DWORD;
5420	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_COUNT_0 + reg_offset, count_to_next);
5421
5422	/* Set next state to final state, and overwrite previous next state
5423	 * (if any).
5424	 */
5425	ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_NXT_STATE_0 + reg_offset, MAX_TRIGGER_STATES);
5426	if (bus->next_trigger_state > 0) {
5427		reg_offset = (bus->next_trigger_state - 1) * TRIGGER_SETS_PER_STATE * BYTES_IN_DWORD;
5428		ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_SET_NXT_STATE_0 + reg_offset, bus->next_trigger_state);
5429	}
5430
5431	bus->next_trigger_state++;
5432
5433	return DBG_STATUS_OK;
5434}
5435
/* Adds a constraint to the current filter (if adding_filter) or to the last
 * added trigger state. The constraint compares the recorded data dword at
 * cycle_offset/dword_offset_in_cycle against data_val using constraint_op,
 * optionally also comparing the frame bit.
 * For EQ/NE ops, data_mask marks the bits to IGNORE; for range-style ops,
 * data_mask must be a single contiguous run of 1 bits that is encoded into
 * an lsb/width pair. Returns DBG_STATUS_OK on success.
 */
enum dbg_status ecore_dbg_bus_add_constraint(struct ecore_hwfn *p_hwfn,
											 struct ecore_ptt *p_ptt,
											 enum dbg_bus_constraint_ops constraint_op,
											 u32 data_val,
											 u32 data_mask,
											 bool compare_frame,
											 u8 frame_bit,
											 u8 cycle_offset,
											 u8 dword_offset_in_cycle,
											 bool is_mandatory)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_data *bus = &dev_data->bus;
	u16 dword_offset, range = 0;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_add_constraint: op = %d, data_val = 0x%x, data_mask = 0x%x, compare_frame = %d, frame_bit = %d, cycle_offset = %d, dword_offset_in_cycle = %d, is_mandatory = %d\n", constraint_op, data_val, data_mask, compare_frame, frame_bit, cycle_offset, dword_offset_in_cycle, is_mandatory);

	/* A constraint must belong to an active filter or trigger state */
	if (!bus->filter_en && !dev_data->bus.trigger_en)
		return DBG_STATUS_CANT_ADD_CONSTRAINT;
	if (bus->trigger_en && !bus->adding_filter && !bus->next_trigger_state)
		return DBG_STATUS_CANT_ADD_CONSTRAINT;
	if (bus->next_constraint_id >= MAX_CONSTRAINTS)
		return DBG_STATUS_TOO_MANY_CONSTRAINTS;
	if (constraint_op >= MAX_DBG_BUS_CONSTRAINT_OPS || frame_bit > 1 || dword_offset_in_cycle > 3 || (bus->adding_filter && cycle_offset > 3))
		return DBG_STATUS_INVALID_ARGS;
	/* Frame comparison is only supported for EQ/NE constraints */
	if (compare_frame &&
		constraint_op != DBG_BUS_CONSTRAINT_OP_EQ &&
		constraint_op != DBG_BUS_CONSTRAINT_OP_NE)
		return DBG_STATUS_INVALID_ARGS;

	dword_offset = cycle_offset * VALUES_PER_CYCLE + dword_offset_in_cycle;

	if (!bus->adding_filter) {
		u8 curr_trigger_state_id = bus->next_trigger_state - 1;
		struct dbg_bus_trigger_state_data *trigger_state;

		trigger_state = &bus->trigger_states[curr_trigger_state_id];

		/* Check if the selected dword is enabled in the block */
		if (!(GET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK) & (u8)(1 << dword_offset_in_cycle)))
			return DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET;

		/* Add selected dword to trigger state's dword mask */
		SET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK, GET_FIELD(trigger_state->data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) | (u8)(1 << dword_offset_in_cycle));
	}

	/* Prepare data mask and range */
	if (constraint_op == DBG_BUS_CONSTRAINT_OP_EQ ||
		constraint_op == DBG_BUS_CONSTRAINT_OP_NE) {
		/* Invert: caller passes "bits to ignore", HW wants "bits to use" */
		data_mask = ~data_mask;
	}
	else {
		u8 lsb, width;

		/* Extract lsb and width from mask */
		if (!data_mask)
			return DBG_STATUS_INVALID_ARGS;

		/* Skip trailing zeros to find lsb, then count the run of ones;
		 * any leftover bits mean the mask was not contiguous.
		 */
		for (lsb = 0; lsb < 32 && !(data_mask & 1); lsb++, data_mask >>= 1);
		for (width = 0; width < 32 - lsb && (data_mask & 1); width++, data_mask >>= 1);
		if (data_mask)
			return DBG_STATUS_INVALID_ARGS;
		/* Encode as (lsb << 5) | (width - 1) for the HW range field */
		range = (lsb << 5) | (width - 1);
	}

	/* Add constraint */
	ecore_bus_set_constraint(p_hwfn, p_ptt, dev_data->bus.adding_filter ? 1 : 0,
		dev_data->bus.next_constraint_id,
		s_constraint_op_defs[constraint_op].hw_op_val,
		data_val, data_mask, frame_bit,
		compare_frame ? 0 : 1, dword_offset, range,
		s_constraint_op_defs[constraint_op].is_cyclic ? 1 : 0,
		is_mandatory ? 1 : 0);

	/* If first constraint, fill other 3 constraints with dummy constraints
	 * that always match (using the same offset).
	 */
	if (!dev_data->bus.next_constraint_id) {
		u8 i;

		for (i = 1; i < MAX_CONSTRAINTS; i++)
			ecore_bus_set_constraint(p_hwfn, p_ptt, bus->adding_filter ? 1 : 0,
				i, DBG_BUS_CONSTRAINT_OP_EQ, 0, 0xffffffff,
				0, 1, dword_offset, 0, 0, 1);
	}

	bus->next_constraint_id++;

	return DBG_STATUS_OK;
}
5526
5527/* Configure the DBG block client mask */
5528static void ecore_config_dbg_block_client_mask(struct ecore_hwfn *p_hwfn,
5529										struct ecore_ptt *p_ptt)
5530{
5531	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5532	struct dbg_bus_data *bus = &dev_data->bus;
5533	u32 block_id, client_mask = 0;
5534	u8 storm_id;
5535
5536	/* Update client mask for Storm inputs */
5537	if (bus->num_enabled_storms)
5538		for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5539			struct storm_defs *storm = &s_storm_defs[storm_id];
5540
5541			if (bus->storms[storm_id].enabled)
5542				client_mask |= (1 << storm->dbg_client_id[dev_data->chip_id]);
5543		}
5544
5545	/* Update client mask for block inputs */
5546	if (bus->num_enabled_blocks) {
5547		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
5548			struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];
5549			struct block_defs *block = s_block_defs[block_id];
5550
5551			if (GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) && block_id != BLOCK_DBG)
5552				client_mask |= (1 << block->dbg_client_id[dev_data->chip_id]);
5553		}
5554	}
5555
5556	/* Update client mask for GRC input */
5557	if (bus->grc_input_en)
5558		client_mask |= (1 << DBG_BUS_CLIENT_CPU);
5559
5560	/* Update client mask for timestamp input */
5561	if (bus->timestamp_input_en)
5562		client_mask |= (1 << DBG_BUS_CLIENT_TIMESTAMP);
5563
5564	ecore_bus_enable_clients(p_hwfn, p_ptt, client_mask);
5565}
5566
/* Configure the DBG block framing mode.
 * If HW dwords were not decided yet and block inputs are enabled, scans the
 * enabled blocks and selects 4 HW dwords (128-bit lines) or 8 HW dwords
 * (256-bit lines); all enabled blocks must agree on the line width.
 * Then programs the matching framing mode in HW.
 * Returns DBG_STATUS_OK, DBG_STATUS_NON_MATCHING_LINES or
 * DBG_STATUS_NO_FILTER_TRIGGER_64B.
 */
static enum dbg_status ecore_config_dbg_block_framing_mode(struct ecore_hwfn *p_hwfn,
													struct ecore_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_data *bus = &dev_data->bus;
	enum dbg_bus_frame_modes dbg_framing_mode;
	u32 block_id;

	if (!bus->hw_dwords && bus->num_enabled_blocks) {
		struct dbg_bus_line *line_desc;
		u8 hw_dwords;

		/* Choose either 4 HW dwords (128-bit mode) or 8 HW dwords
		 * (256-bit mode).
		 */
		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
			struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];

			if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
				continue;

			line_desc = get_dbg_bus_line_desc(p_hwfn, (enum block_id)block_id);
			hw_dwords = line_desc && GET_FIELD(line_desc->data, DBG_BUS_LINE_IS_256B) ? 8 : 4;

			/* All enabled blocks must use the same line width */
			if (bus->hw_dwords > 0 && bus->hw_dwords != hw_dwords)
				return DBG_STATUS_NON_MATCHING_LINES;

			/* The DBG block doesn't support triggers and
			 * filters on 256b debug lines.
			 */
			if (hw_dwords == 8 && (bus->trigger_en || bus->filter_en))
				return DBG_STATUS_NO_FILTER_TRIGGER_64B;

			bus->hw_dwords = hw_dwords;
		}
	}

	/* Map the chosen HW dword count to a framing mode */
	switch (bus->hw_dwords) {
	case 0: dbg_framing_mode = DBG_BUS_FRAME_MODE_0HW_4ST; break;
	case 4: dbg_framing_mode = DBG_BUS_FRAME_MODE_4HW_0ST; break;
	case 8: dbg_framing_mode = DBG_BUS_FRAME_MODE_8HW_0ST; break;
	default: dbg_framing_mode = DBG_BUS_FRAME_MODE_0HW_4ST; break;
	}
	ecore_bus_set_framing_mode(p_hwfn, p_ptt, dbg_framing_mode);

	return DBG_STATUS_OK;
}
5615
/* Configure the DBG block Storm data: verifies the SEMI sync FIFOs are
 * empty, programs the Storm HW ID mask, the no-grant-on-full policy and the
 * calendar slots. Returns DBG_STATUS_OK or DBG_STATUS_SEMI_FIFO_NOT_EMPTY.
 * NOTE(review): the calendar loop below spins until it finds an enabled
 * Storm - callers must only invoke this when at least one Storm is enabled
 * (ecore_dbg_bus_start checks num_enabled_storms first) - confirm no other
 * callers exist.
 */
static enum dbg_status ecore_config_storm_inputs(struct ecore_hwfn *p_hwfn,
										  struct ecore_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_data *bus = &dev_data->bus;
	u8 storm_id, i, next_storm_id = 0;
	u32 storm_id_mask = 0;

	/* Check if SEMI sync FIFO is empty */
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];
		struct storm_defs *storm = &s_storm_defs[storm_id];

		if (storm_bus->enabled && !ecore_rd(p_hwfn, p_ptt, storm->sem_sync_dbg_empty_addr))
			return DBG_STATUS_SEMI_FIFO_NOT_EMPTY;
	}

	/* Build the Storm HW ID mask (HW_ID_BITS bits per Storm) */
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];

		if (storm_bus->enabled)
			storm_id_mask |= (storm_bus->hw_id << (storm_id * HW_ID_BITS));
	}

	ecore_wr(p_hwfn, p_ptt, DBG_REG_STORM_ID_NUM, storm_id_mask);

	/* Disable storm stall if recording to internal buffer in one-shot */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_NO_GRANT_ON_FULL, (dev_data->bus.target == DBG_BUS_TARGET_ID_INT_BUF && bus->one_shot_en) ? 0 : 1);

	/* Configure calendar: distribute the slots round-robin among the
	 * enabled Storms.
	 */
	for (i = 0; i < NUM_CALENDAR_SLOTS; i++, next_storm_id = (next_storm_id + 1) % MAX_DBG_STORMS) {
		/* Find next enabled Storm */
		for (; !dev_data->bus.storms[next_storm_id].enabled; next_storm_id = (next_storm_id + 1) % MAX_DBG_STORMS);

		/* Configure calendar slot */
		ecore_wr(p_hwfn, p_ptt, DBG_REG_CALENDAR_SLOT0 + DWORDS_TO_BYTES(i), next_storm_id);
	}

	return DBG_STATUS_OK;
}
5657
/* Assign HW ID to each dword/qword:
 * if the inputs are unified, HW ID 0 is assigned to all dwords/qwords.
 * Otherwise, we would like to assign a different HW ID to each dword, to avoid
 * data synchronization issues. however, we need to check if there is a trigger
 * state for which more than one dword has a constraint. if there is, we cannot
 * assign a different HW ID to each dword (since a trigger state has a single
 * HW ID), so we assign a different HW ID to each block.
 */
static void ecore_assign_hw_ids(struct ecore_hwfn *p_hwfn,
						 u8 hw_ids[VALUES_PER_CYCLE])
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_data *bus = &dev_data->bus;
	bool hw_id_per_dword = true;
	u8 val_id, state_id;
	u32 block_id;

	/* Default: all dwords get HW ID 0 (the unified-inputs case) */
	OSAL_MEMSET(hw_ids, 0, VALUES_PER_CYCLE);

	if (bus->unify_inputs)
		return;

	/* Per-dword HW IDs are only possible if no trigger state constrains
	 * more than one dword.
	 */
	if (bus->trigger_en) {
		for (state_id = 0; state_id < bus->next_trigger_state && hw_id_per_dword; state_id++) {
			u8 num_dwords = 0;

			for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
				if (GET_FIELD(bus->trigger_states[state_id].data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) & (1 << val_id))
					num_dwords++;

			if (num_dwords > 1)
				hw_id_per_dword = false;
		}
	}

	if (hw_id_per_dword) {
		/* Assign a different HW ID for each dword */
		for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
			hw_ids[val_id] = val_id;
	}
	else {
		u8 shifted_enable_mask, next_hw_id = 0;

		/* Assign a different HW ID to each enabled block, and tag every
		 * dword that block records with that HW ID.
		 */
		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
			struct dbg_bus_block_data *block_bus = &bus->blocks[block_id];

			if (!GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))
				continue;

			block_bus->hw_id = next_hw_id++;
			/* HW ID 0 dwords are already set by the memset above */
			if (!block_bus->hw_id)
				continue;

			shifted_enable_mask =
				SHR(GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
					VALUES_PER_CYCLE,
					GET_FIELD(block_bus->data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT));

			for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
				if (shifted_enable_mask & (1 << val_id))
					hw_ids[val_id] = block_bus->hw_id;
		}
	}
}
5723
/* Configure the DBG block HW blocks data: assigns HW IDs to the recorded
 * dwords (via ecore_assign_hw_ids), programs a HW ID per trigger state and
 * the overall HW ID mask, and applies K2 PCIE workaround registers when
 * the PCIE/PHY_PCIE blocks are recorded.
 */
static void ecore_config_block_inputs(struct ecore_hwfn *p_hwfn,
							   struct ecore_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_data *bus = &dev_data->bus;
	u8 hw_ids[VALUES_PER_CYCLE];
	u8 val_id, state_id;

	ecore_assign_hw_ids(p_hwfn, hw_ids);

	/* Assign a HW ID to each trigger state: the HW ID of the first dword
	 * the state constrains (a state has a single HW ID).
	 */
	if (dev_data->bus.trigger_en) {
		for (state_id = 0; state_id < bus->next_trigger_state; state_id++) {
			for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++) {
				u8 state_data = bus->trigger_states[state_id].data;

				if (GET_FIELD(state_data, DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK) & (1 << val_id)) {
					ecore_wr(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATE_ID_0 + state_id * BYTES_IN_DWORD, hw_ids[val_id]);
					break;
				}
			}
		}
	}

	/* Configure HW ID mask (HW_ID_BITS bits per dword) */
	dev_data->bus.hw_id_mask = 0;
	for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
		bus->hw_id_mask |= (hw_ids[val_id] << (val_id * HW_ID_BITS));
	ecore_wr(p_hwfn, p_ptt, DBG_REG_HW_ID_NUM, bus->hw_id_mask);

	/* Configure additional K2 PCIE registers */
	if (dev_data->chip_id == CHIP_K2 &&
		(GET_FIELD(bus->blocks[BLOCK_PCIE].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) ||
			GET_FIELD(bus->blocks[BLOCK_PHY_PCIE].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK))) {
		ecore_wr(p_hwfn, p_ptt, PCIE_REG_DBG_REPEAT_THRESHOLD_COUNT_K2_E5, 1);
		ecore_wr(p_hwfn, p_ptt, PCIE_REG_DBG_FW_TRIGGER_ENABLE_K2_E5, 1);
	}
}
5763
/* Starts debug bus recording: validates the configured inputs, programs the
 * framing mode, Storm/block inputs, filter type and client mask, enables
 * the DBG block, and moves the bus to RECORDING state.
 * Returns DBG_STATUS_OK on success, or a specific failure status.
 */
enum dbg_status ecore_dbg_bus_start(struct ecore_hwfn *p_hwfn,
									struct ecore_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_data *bus = &dev_data->bus;
	enum dbg_bus_filter_types filter_type;
	enum dbg_status status;
	u32 block_id;
	u8 storm_id;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_start\n");

	if (bus->state != DBG_BUS_STATE_READY)
		return DBG_STATUS_DBG_BLOCK_NOT_RESET;

	/* Check if any input was enabled */
	if (!bus->num_enabled_storms &&
		!bus->num_enabled_blocks &&
		!bus->rcv_from_other_engine)
		return DBG_STATUS_NO_INPUT_ENABLED;

	/* Check if too many input types were enabled (storm+dbgmux) */
	if (bus->num_enabled_storms && bus->num_enabled_blocks)
		return DBG_STATUS_TOO_MANY_INPUTS;

	/* Configure framing mode */
	if ((status = ecore_config_dbg_block_framing_mode(p_hwfn, p_ptt)) != DBG_STATUS_OK)
		return status;

	/* Configure DBG block for Storm inputs */
	if (bus->num_enabled_storms)
		if ((status = ecore_config_storm_inputs(p_hwfn, p_ptt)) != DBG_STATUS_OK)
			return status;

	/* Configure DBG block for block inputs */
	if (bus->num_enabled_blocks)
		ecore_config_block_inputs(p_hwfn, p_ptt);

	/* Configure filter type: derived from whether the filter applies
	 * before/after the trigger (or always, when no trigger is set).
	 */
	if (bus->filter_en) {
		if (bus->trigger_en) {
			if (bus->filter_pre_trigger)
				filter_type = bus->filter_post_trigger ? DBG_BUS_FILTER_TYPE_ON : DBG_BUS_FILTER_TYPE_PRE;
			else
				filter_type = bus->filter_post_trigger ? DBG_BUS_FILTER_TYPE_POST : DBG_BUS_FILTER_TYPE_OFF;
		}
		else {
			filter_type = DBG_BUS_FILTER_TYPE_ON;
		}
	}
	else {
		filter_type = DBG_BUS_FILTER_TYPE_OFF;
	}
	ecore_wr(p_hwfn, p_ptt, DBG_REG_FILTER_ENABLE, filter_type);

	/* Restart timestamp */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_TIMESTAMP, 0);

	/* Enable debug block */
	ecore_bus_enable_dbg_block(p_hwfn, p_ptt, 1);

	/* Configure enabled blocks - must be done before the DBG block is
	 * enabled.
	 * NOTE(review): the DBG block is actually enabled just above
	 * (ecore_bus_enable_dbg_block) before this loop runs, which appears to
	 * contradict this comment - confirm the intended ordering before
	 * changing anything.
	 */
	if (dev_data->bus.num_enabled_blocks) {
		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
			if (!GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK) || block_id == BLOCK_DBG)
				continue;

			ecore_config_dbg_line(p_hwfn, p_ptt, (enum block_id)block_id,
				dev_data->bus.blocks[block_id].line_num,
				GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK),
				GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT),
				GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK),
				GET_FIELD(dev_data->bus.blocks[block_id].data, DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK));
		}
	}

	/* Configure client mask */
	ecore_config_dbg_block_client_mask(p_hwfn, p_ptt);

	/* Configure enabled Storms - must be done after the DBG block is
	 * enabled.
	 */
	if (dev_data->bus.num_enabled_storms)
		for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++)
			if (dev_data->bus.storms[storm_id].enabled)
				ecore_bus_enable_storm(p_hwfn, p_ptt, (enum dbg_storms)storm_id);

	dev_data->bus.state = DBG_BUS_STATE_RECORDING;

	return DBG_STATUS_OK;
}
5857
/* Stops debug bus recording: disables the inputs, flushes the recorded data
 * and disables the DBG block, then verifies the trigger (if enabled) has
 * reached its final state. On success the bus moves to STOPPED state.
 * NOTE(review): on DBG_STATUS_DATA_DIDNT_TRIGGER the function returns
 * before setting bus->state, leaving it in RECORDING - confirm this is the
 * intended behavior.
 */
enum dbg_status ecore_dbg_bus_stop(struct ecore_hwfn *p_hwfn,
								   struct ecore_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct dbg_bus_data *bus = &dev_data->bus;
	enum dbg_status status = DBG_STATUS_OK;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_stop\n");

	if (bus->state != DBG_BUS_STATE_RECORDING)
		return DBG_STATUS_RECORDING_NOT_STARTED;

	status = ecore_bus_disable_inputs(p_hwfn, p_ptt, true);
	if (status != DBG_STATUS_OK)
		return status;

	/* Flush any data still held by the DBG block */
	ecore_wr(p_hwfn, p_ptt, DBG_REG_CPU_TIMEOUT, 1);

	OSAL_MSLEEP(FLUSH_DELAY_MS);

	ecore_bus_enable_dbg_block(p_hwfn, p_ptt, false);

	/* Check if trigger worked */
	if (bus->trigger_en) {
		u32 trigger_state = ecore_rd(p_hwfn, p_ptt, DBG_REG_TRIGGER_STATUS_CUR_STATE);

		/* MAX_TRIGGER_STATES is the final state - anything else means
		 * the trigger chain did not complete.
		 */
		if (trigger_state != MAX_TRIGGER_STATES)
			return DBG_STATUS_DATA_DIDNT_TRIGGER;
	}

	bus->state = DBG_BUS_STATE_STOPPED;

	return status;
}
5892
5893enum dbg_status ecore_dbg_bus_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
5894												struct ecore_ptt *p_ptt,
5895												u32 *buf_size)
5896{
5897	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5898	struct dbg_bus_data *bus = &dev_data->bus;
5899	enum dbg_status status;
5900
5901	status = ecore_dbg_dev_init(p_hwfn, p_ptt);
5902
5903	*buf_size = 0;
5904
5905	if (status != DBG_STATUS_OK)
5906		return status;
5907
5908	/* Add dump header */
5909	*buf_size = (u32)ecore_bus_dump_hdr(p_hwfn, p_ptt, OSAL_NULL, false);
5910
5911	switch (bus->target) {
5912	case DBG_BUS_TARGET_ID_INT_BUF:
5913		*buf_size += INT_BUF_SIZE_IN_DWORDS; break;
5914	case DBG_BUS_TARGET_ID_PCI:
5915		*buf_size += BYTES_TO_DWORDS(bus->pci_buf.size); break;
5916	default:
5917		break;
5918	}
5919
5920	/* Dump last section */
5921	*buf_size += ecore_dump_last_section(OSAL_NULL, 0, false);
5922
5923	return DBG_STATUS_OK;
5924}
5925
5926enum dbg_status ecore_dbg_bus_dump(struct ecore_hwfn *p_hwfn,
5927								   struct ecore_ptt *p_ptt,
5928								   u32 *dump_buf,
5929								   u32 buf_size_in_dwords,
5930								   u32 *num_dumped_dwords)
5931{
5932	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5933	u32 min_buf_size_in_dwords, block_id, offset = 0;
5934	struct dbg_bus_data *bus = &dev_data->bus;
5935	enum dbg_status status;
5936	u8 storm_id;
5937
5938	*num_dumped_dwords = 0;
5939
5940	status = ecore_dbg_bus_get_dump_buf_size(p_hwfn, p_ptt, &min_buf_size_in_dwords);
5941	if (status != DBG_STATUS_OK)
5942		return status;
5943
5944	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_bus_dump: dump_buf = 0x%p, buf_size_in_dwords = %d\n", dump_buf, buf_size_in_dwords);
5945
5946	if (bus->state != DBG_BUS_STATE_RECORDING && bus->state != DBG_BUS_STATE_STOPPED)
5947		return DBG_STATUS_RECORDING_NOT_STARTED;
5948
5949	if (bus->state == DBG_BUS_STATE_RECORDING) {
5950		enum dbg_status stop_state = ecore_dbg_bus_stop(p_hwfn, p_ptt);
5951		if (stop_state != DBG_STATUS_OK)
5952			return stop_state;
5953	}
5954
5955	if (buf_size_in_dwords < min_buf_size_in_dwords)
5956		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5957
5958	if (bus->target == DBG_BUS_TARGET_ID_PCI && !bus->pci_buf.size)
5959		return DBG_STATUS_PCI_BUF_NOT_ALLOCATED;
5960
5961	/* Dump header */
5962	offset += ecore_bus_dump_hdr(p_hwfn, p_ptt, dump_buf + offset, true);
5963
5964	/* Dump recorded data */
5965	if (bus->target != DBG_BUS_TARGET_ID_NIG) {
5966		u32 recorded_dwords = ecore_bus_dump_data(p_hwfn, p_ptt, dump_buf + offset, true);
5967
5968		if (!recorded_dwords)
5969			return DBG_STATUS_NO_DATA_RECORDED;
5970		if (recorded_dwords % CHUNK_SIZE_IN_DWORDS)
5971			return DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED;
5972		offset += recorded_dwords;
5973	}
5974
5975	/* Dump last section */
5976	offset += ecore_dump_last_section(dump_buf, offset, true);
5977
5978	/* If recorded to PCI buffer - free the buffer */
5979	ecore_bus_free_pci_buf(p_hwfn);
5980
5981	/* Clear debug bus parameters */
5982	bus->state = DBG_BUS_STATE_IDLE;
5983	bus->num_enabled_blocks = 0;
5984	bus->num_enabled_storms = 0;
5985	bus->filter_en = bus->trigger_en = 0;
5986
5987	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++)
5988		SET_FIELD(bus->blocks[BLOCK_PCIE].data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0);
5989
5990	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5991		struct dbg_bus_storm_data *storm_bus = &bus->storms[storm_id];
5992
5993		storm_bus->enabled = false;
5994		storm_bus->eid_filter_en = storm_bus->cid_filter_en = 0;
5995	}
5996
5997	*num_dumped_dwords = offset;
5998
5999	return DBG_STATUS_OK;
6000}
6001
6002enum dbg_status ecore_dbg_grc_config(struct ecore_hwfn *p_hwfn,
6003									 enum dbg_grc_params grc_param,
6004									 u32 val)
6005{
6006	int i;
6007
6008	DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);
6009
6010	/* Initializes the GRC parameters (if not initialized). Needed in order
6011	 * to set the default parameter values for the first time.
6012	 */
6013	ecore_dbg_grc_init_params(p_hwfn);
6014
6015	if (grc_param >= MAX_DBG_GRC_PARAMS)
6016		return DBG_STATUS_INVALID_ARGS;
6017	if (val < s_grc_param_defs[grc_param].min ||
6018		val > s_grc_param_defs[grc_param].max)
6019		return DBG_STATUS_INVALID_ARGS;
6020
6021	if (s_grc_param_defs[grc_param].is_preset) {
6022		/* Preset param */
6023
6024		/* Disabling a preset is not allowed. Call
6025		 * dbg_grc_set_params_default instead.
6026		 */
6027		if (!val)
6028			return DBG_STATUS_INVALID_ARGS;
6029
6030		/* Update all params with the preset values */
6031		for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
6032			u32 preset_val;
6033
6034			if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
6035				preset_val = s_grc_param_defs[i].exclude_all_preset_val;
6036			else if (grc_param == DBG_GRC_PARAM_CRASH)
6037				preset_val = s_grc_param_defs[i].crash_preset_val;
6038			else
6039				return DBG_STATUS_INVALID_ARGS;
6040
6041			ecore_grc_set_param(p_hwfn, (enum dbg_grc_params)i, preset_val);
6042		}
6043	}
6044	else {
6045		/* Regular param - set its value */
6046		ecore_grc_set_param(p_hwfn, grc_param, val);
6047	}
6048
6049	return DBG_STATUS_OK;
6050}
6051
6052/* Assign default GRC param values */
6053void ecore_dbg_grc_set_params_default(struct ecore_hwfn *p_hwfn)
6054{
6055	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
6056	u32 i;
6057
6058	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
6059		dev_data->grc.param_val[i] = s_grc_param_defs[i].default_val[dev_data->chip_id];
6060}
6061
6062enum dbg_status ecore_dbg_grc_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6063												struct ecore_ptt *p_ptt,
6064												u32 *buf_size)
6065{
6066	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6067
6068	*buf_size = 0;
6069
6070	if (status != DBG_STATUS_OK)
6071		return status;
6072
6073	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr || !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
6074		!s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
6075		return DBG_STATUS_DBG_ARRAY_NOT_SET;
6076
6077	return ecore_grc_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6078}
6079
6080enum dbg_status ecore_dbg_grc_dump(struct ecore_hwfn *p_hwfn,
6081								   struct ecore_ptt *p_ptt,
6082								   u32 *dump_buf,
6083								   u32 buf_size_in_dwords,
6084								   u32 *num_dumped_dwords)
6085{
6086	u32 needed_buf_size_in_dwords;
6087	enum dbg_status status;
6088
6089	*num_dumped_dwords = 0;
6090
6091	status = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6092	if (status != DBG_STATUS_OK)
6093		return status;
6094
6095	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6096		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6097
6098	/* Doesn't do anything, needed for compile time asserts */
6099	ecore_static_asserts();
6100
6101	/* GRC Dump */
6102	status = ecore_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6103
6104	/* Reveret GRC params to their default */
6105	ecore_dbg_grc_set_params_default(p_hwfn);
6106
6107	return status;
6108}
6109
6110enum dbg_status ecore_dbg_idle_chk_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6111													 struct ecore_ptt *p_ptt,
6112													 u32 *buf_size)
6113{
6114	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
6115	struct idle_chk_data *idle_chk = &dev_data->idle_chk;
6116	enum dbg_status status;
6117
6118	*buf_size = 0;
6119
6120	status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6121	if (status != DBG_STATUS_OK)
6122		return status;
6123
6124	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
6125		!s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr || !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
6126		return DBG_STATUS_DBG_ARRAY_NOT_SET;
6127
6128	if (!idle_chk->buf_size_set) {
6129		idle_chk->buf_size = ecore_idle_chk_dump(p_hwfn, p_ptt, OSAL_NULL, false);
6130		idle_chk->buf_size_set = true;
6131	}
6132
6133	*buf_size = idle_chk->buf_size;
6134
6135	return DBG_STATUS_OK;
6136}
6137
6138enum dbg_status ecore_dbg_idle_chk_dump(struct ecore_hwfn *p_hwfn,
6139										struct ecore_ptt *p_ptt,
6140										u32 *dump_buf,
6141										u32 buf_size_in_dwords,
6142										u32 *num_dumped_dwords)
6143{
6144	u32 needed_buf_size_in_dwords;
6145	enum dbg_status status;
6146
6147	*num_dumped_dwords = 0;
6148
6149	status = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6150	if (status != DBG_STATUS_OK)
6151		return status;
6152
6153	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6154		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6155
6156	/* Update reset state */
6157	ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6158
6159	/* Idle Check Dump */
6160	*num_dumped_dwords = ecore_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
6161
6162	/* Reveret GRC params to their default */
6163	ecore_dbg_grc_set_params_default(p_hwfn);
6164
6165	return DBG_STATUS_OK;
6166}
6167
6168enum dbg_status ecore_dbg_mcp_trace_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6169													  struct ecore_ptt *p_ptt,
6170													  u32 *buf_size)
6171{
6172	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6173
6174	*buf_size = 0;
6175
6176	if (status != DBG_STATUS_OK)
6177		return status;
6178
6179	return ecore_mcp_trace_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6180}
6181
6182enum dbg_status ecore_dbg_mcp_trace_dump(struct ecore_hwfn *p_hwfn,
6183										 struct ecore_ptt *p_ptt,
6184										 u32 *dump_buf,
6185										 u32 buf_size_in_dwords,
6186										 u32 *num_dumped_dwords)
6187{
6188	u32 needed_buf_size_in_dwords;
6189	enum dbg_status status;
6190
6191	status = ecore_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6192	if (status != DBG_STATUS_OK && status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
6193		return status;
6194
6195	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6196		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6197
6198	/* Update reset state */
6199	ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6200
6201	/* Perform dump */
6202	status = ecore_mcp_trace_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6203
6204	/* Reveret GRC params to their default */
6205	ecore_dbg_grc_set_params_default(p_hwfn);
6206
6207	return status;
6208}
6209
6210enum dbg_status ecore_dbg_reg_fifo_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6211													 struct ecore_ptt *p_ptt,
6212													 u32 *buf_size)
6213{
6214	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6215
6216	*buf_size = 0;
6217
6218	if (status != DBG_STATUS_OK)
6219		return status;
6220
6221	return ecore_reg_fifo_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6222}
6223
6224enum dbg_status ecore_dbg_reg_fifo_dump(struct ecore_hwfn *p_hwfn,
6225										struct ecore_ptt *p_ptt,
6226										u32 *dump_buf,
6227										u32 buf_size_in_dwords,
6228										u32 *num_dumped_dwords)
6229{
6230	u32 needed_buf_size_in_dwords;
6231	enum dbg_status status;
6232
6233	*num_dumped_dwords = 0;
6234
6235	status = ecore_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6236	if (status != DBG_STATUS_OK)
6237		return status;
6238
6239	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6240		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6241
6242	/* Update reset state */
6243	ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6244
6245	status = ecore_reg_fifo_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6246
6247	/* Reveret GRC params to their default */
6248	ecore_dbg_grc_set_params_default(p_hwfn);
6249
6250	return status;
6251}
6252
6253enum dbg_status ecore_dbg_igu_fifo_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6254													 struct ecore_ptt *p_ptt,
6255													 u32 *buf_size)
6256{
6257	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6258
6259	*buf_size = 0;
6260
6261	if (status != DBG_STATUS_OK)
6262		return status;
6263
6264	return ecore_igu_fifo_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6265}
6266
6267enum dbg_status ecore_dbg_igu_fifo_dump(struct ecore_hwfn *p_hwfn,
6268										struct ecore_ptt *p_ptt,
6269										u32 *dump_buf,
6270										u32 buf_size_in_dwords,
6271										u32 *num_dumped_dwords)
6272{
6273	u32 needed_buf_size_in_dwords;
6274	enum dbg_status status;
6275
6276	*num_dumped_dwords = 0;
6277
6278	status = ecore_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6279	if (status != DBG_STATUS_OK)
6280		return status;
6281
6282	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6283		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6284
6285	/* Update reset state */
6286	ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6287
6288	status = ecore_igu_fifo_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6289
6290	/* Reveret GRC params to their default */
6291	ecore_dbg_grc_set_params_default(p_hwfn);
6292
6293	return status;
6294}
6295
6296enum dbg_status ecore_dbg_protection_override_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6297																struct ecore_ptt *p_ptt,
6298																u32 *buf_size)
6299{
6300	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6301
6302	*buf_size = 0;
6303
6304	if (status != DBG_STATUS_OK)
6305		return status;
6306
6307	return ecore_protection_override_dump(p_hwfn, p_ptt, OSAL_NULL, false, buf_size);
6308}
6309
6310enum dbg_status ecore_dbg_protection_override_dump(struct ecore_hwfn *p_hwfn,
6311												   struct ecore_ptt *p_ptt,
6312												   u32 *dump_buf,
6313												   u32 buf_size_in_dwords,
6314												   u32 *num_dumped_dwords)
6315{
6316	u32 needed_buf_size_in_dwords;
6317	enum dbg_status status;
6318
6319	*num_dumped_dwords = 0;
6320
6321	status = ecore_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6322	if (status != DBG_STATUS_OK)
6323		return status;
6324
6325	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6326		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6327
6328	/* Update reset state */
6329	ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6330
6331	status = ecore_protection_override_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
6332
6333	/* Reveret GRC params to their default */
6334	ecore_dbg_grc_set_params_default(p_hwfn);
6335
6336	return status;
6337}
6338
6339enum dbg_status ecore_dbg_fw_asserts_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
6340													   struct ecore_ptt *p_ptt,
6341													   u32 *buf_size)
6342{
6343	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
6344
6345	*buf_size = 0;
6346
6347	if (status != DBG_STATUS_OK)
6348		return status;
6349
6350	/* Update reset state */
6351	ecore_update_blocks_reset_state(p_hwfn, p_ptt);
6352
6353	*buf_size = ecore_fw_asserts_dump(p_hwfn, p_ptt, OSAL_NULL, false);
6354
6355	return DBG_STATUS_OK;
6356}
6357
6358enum dbg_status ecore_dbg_fw_asserts_dump(struct ecore_hwfn *p_hwfn,
6359										  struct ecore_ptt *p_ptt,
6360										  u32 *dump_buf,
6361										  u32 buf_size_in_dwords,
6362										  u32 *num_dumped_dwords)
6363{
6364	u32 needed_buf_size_in_dwords;
6365	enum dbg_status status;
6366
6367	*num_dumped_dwords = 0;
6368
6369	status = ecore_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords);
6370	if (status != DBG_STATUS_OK)
6371		return status;
6372
6373	if (buf_size_in_dwords < needed_buf_size_in_dwords)
6374		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
6375
6376	*num_dumped_dwords = ecore_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
6377
6378	/* Reveret GRC params to their default */
6379	ecore_dbg_grc_set_params_default(p_hwfn);
6380
6381	return DBG_STATUS_OK;
6382}
6383
/* Reads the attention registers of the given block (interrupt or parity,
 * per attn_type) and fills 'results' with every register whose status
 * value is non-zero. When clear_status is set, the read is done through
 * the status-clear address, so reading also clears the attention bits.
 */
enum dbg_status ecore_dbg_read_attn(struct ecore_hwfn *p_hwfn,
									struct ecore_ptt *p_ptt,
									enum block_id block_id,
									enum dbg_attn_type attn_type,
									bool clear_status,
									struct dbg_attn_block_result *results)
{
	enum dbg_status status = ecore_dbg_dev_init(p_hwfn, p_ptt);
	u8 reg_idx, num_attn_regs, num_result_regs = 0;
	const struct dbg_attn_reg *attn_reg_arr;

	if (status != DBG_STATUS_OK)
		return status;

	/* The attention data comes from externally-loaded debug arrays */
	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	attn_reg_arr = ecore_get_block_attn_regs(block_id, attn_type, &num_attn_regs);

	for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
		const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
		struct dbg_attn_reg_result *reg_result;
		u32 sts_addr, sts_val;
		u16 modes_buf_offset;
		bool eval_mode;

		/* Check mode: skip registers whose mode expression does not
		 * match the current device configuration.
		 */
		eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0;
		modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET);
		if (eval_mode && !ecore_is_mode_match(p_hwfn, &modes_buf_offset))
			continue;

		/* Mode match - read attention status register. Using the
		 * clear address makes the read also clear the attentions.
		 */
		sts_addr = DWORDS_TO_BYTES(clear_status ? reg_data->sts_clr_address : GET_FIELD(reg_data->data, DBG_ATTN_REG_STS_ADDRESS));
		sts_val = ecore_rd(p_hwfn, p_ptt, sts_addr);
		if (!sts_val)
			continue;

		/* Non-zero attention status - add to results, including the
		 * current mask register value for this attention register.
		 */
		reg_result = &results->reg_results[num_result_regs];
		SET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
		SET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_NUM_REG_ATTN, GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
		reg_result->block_attn_offset = reg_data->block_attn_offset;
		reg_result->sts_val = sts_val;
		reg_result->mask_val = ecore_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(reg_data->mask_address));
		num_result_regs++;
	}

	/* Fill in the per-block summary fields of the result */
	results->block_id = (u8)block_id;
	results->names_offset = ecore_get_block_attn_data(block_id, attn_type)->names_offset;
	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);

	return DBG_STATUS_OK;
}
6439
6440enum dbg_status ecore_dbg_print_attn(struct ecore_hwfn *p_hwfn,
6441									 struct dbg_attn_block_result *results)
6442{
6443	enum dbg_attn_type attn_type;
6444	u8 num_regs, i;
6445
6446	num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
6447	attn_type = (enum dbg_attn_type)GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
6448
6449	for (i = 0; i < num_regs; i++) {
6450		struct dbg_attn_reg_result *reg_result;
6451		const char *attn_type_str;
6452		u32 sts_addr;
6453
6454		reg_result = &results->reg_results[i];
6455		attn_type_str = (attn_type == ATTN_TYPE_INTERRUPT ? "interrupt" : "parity");
6456		sts_addr = GET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_STS_ADDRESS);
6457		DP_NOTICE(p_hwfn, false, "%s: address 0x%08x, status 0x%08x, mask 0x%08x\n", attn_type_str, sts_addr, reg_result->sts_val, reg_result->mask_val);
6458	}
6459
6460	return DBG_STATUS_OK;
6461}
6462
6463bool ecore_is_block_in_reset(struct ecore_hwfn *p_hwfn,
6464							 struct ecore_ptt *p_ptt,
6465							 enum block_id block_id)
6466{
6467	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
6468	struct block_defs *block = s_block_defs[block_id];
6469	u32 reset_reg;
6470
6471	if (!block->has_reset_bit)
6472		return false;
6473
6474	reset_reg = block->reset_reg;
6475
6476	return s_reset_regs_defs[reset_reg].exists[dev_data->chip_id] ?
6477		!(ecore_rd(p_hwfn, p_ptt, s_reset_regs_defs[reset_reg].addr) & (1 << block->reset_bit_offset)) :	true;
6478}
6479