/*-
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/cxgbe/cudbg/cudbg_wtp.c 322014 2017-08-03 14:43:30Z np $");

#include <sys/types.h>
#include <sys/param.h>

#include "common/common.h"
#include "common/t4_regs.h"
#include "cudbg.h"
#include "cudbg_lib_common.h"
#include "cudbg_entity.h"

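/*
 * Collector for the cudbg "wtp" entity: it samples start-of-packet (SOP)
 * and end-of-packet (EOP) event counters at each hop a packet takes through
 * the chip (PCIe, SGE, ULP, TP, MPS, MAC) so a stalled or dropped packet
 * can be localized to a specific stage.  (The meaning of the "wtp" name
 * itself is not spelled out in this file.)
 */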
int collect_wtp_data(struct cudbg_init *pdbg_init,
		     struct cudbg_buffer *dbg_buff,
		     struct cudbg_error *cudbg_err);

#define TP_MIB_SIZE	    0x5e
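/*
 * TP_MIB_SIZE (0x5e == 94) matches the number of entries in the tp_mib[]
 * table below; read_tp_mib_data() reads exactly that many MIB registers.
 */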

/* SGE_DEBUG registers. */
struct sge_debug_reg_data {
	/*indx0*/
	u32 reserved1:4;
	u32 reserved2:4;
	u32 debug_uP_SOP_cnt:4;
	u32 debug_uP_EOP_cnt:4;
	u32 debug_CIM_SOP1_cnt:4;
	u32 debug_CIM_EOP1_cnt:4;
	u32 debug_CIM_SOP0_cnt:4;
	u32 debug_CIM_EOP0_cnt:4;

	/*indx1*/
	u32 reserved3:32;

	/*indx2*/
	u32 debug_T_Rx_SOP1_cnt:4;
	u32 debug_T_Rx_EOP1_cnt:4;
	u32 debug_T_Rx_SOP0_cnt:4;
	u32 debug_T_Rx_EOP0_cnt:4;
	u32 debug_U_Rx_SOP1_cnt:4;
	u32 debug_U_Rx_EOP1_cnt:4;
	u32 debug_U_Rx_SOP0_cnt:4;
	u32 debug_U_Rx_EOP0_cnt:4;

	/*indx3*/
	u32 reserved4:32;

	/*indx4*/
	u32 debug_UD_Rx_SOP3_cnt:4;
	u32 debug_UD_Rx_EOP3_cnt:4;
	u32 debug_UD_Rx_SOP2_cnt:4;
	u32 debug_UD_Rx_EOP2_cnt:4;
	u32 debug_UD_Rx_SOP1_cnt:4;
	u32 debug_UD_Rx_EOP1_cnt:4;
	u32 debug_UD_Rx_SOP0_cnt:4;
	u32 debug_UD_Rx_EOP0_cnt:4;

	/*indx5*/
	u32 reserved5:32;

	/*indx6*/
	u32 debug_U_Tx_SOP3_cnt:4;
	u32 debug_U_Tx_EOP3_cnt:4;
	u32 debug_U_Tx_SOP2_cnt:4;
	u32 debug_U_Tx_EOP2_cnt:4;
	u32 debug_U_Tx_SOP1_cnt:4;
	u32 debug_U_Tx_EOP1_cnt:4;
	u32 debug_U_Tx_SOP0_cnt:4;
	u32 debug_U_Tx_EOP0_cnt:4;

	/*indx7*/
	u32 reserved6:32;

	/*indx8*/
	u32  debug_PC_Rsp_SOP1_cnt:4;
	u32  debug_PC_Rsp_EOP1_cnt:4;
	u32  debug_PC_Rsp_SOP0_cnt:4;
	u32  debug_PC_Rsp_EOP0_cnt:4;
	u32  debug_PC_Req_SOP1_cnt:4;
	u32  debug_PC_Req_EOP1_cnt:4;
	u32  debug_PC_Req_SOP0_cnt:4;
	u32  debug_PC_Req_EOP0_cnt:4;

	/*indx9*/
	u32 reserved7:32;

	/*indx10*/
	u32  debug_PD_Req_SOP3_cnt:4;
	u32  debug_PD_Req_EOP3_cnt:4;
	u32  debug_PD_Req_SOP2_cnt:4;
	u32  debug_PD_Req_EOP2_cnt:4;
	u32  debug_PD_Req_SOP1_cnt:4;
	u32  debug_PD_Req_EOP1_cnt:4;
	u32  debug_PD_Req_SOP0_cnt:4;
	u32  debug_PD_Req_EOP0_cnt:4;

	/*indx11*/
	u32 reserved8:32;

	/*indx12*/
	u32  debug_PD_Rsp_SOP3_cnt:4;
	u32  debug_PD_Rsp_EOP3_cnt:4;
	u32  debug_PD_Rsp_SOP2_cnt:4;
	u32  debug_PD_Rsp_EOP2_cnt:4;
	u32  debug_PD_Rsp_SOP1_cnt:4;
	u32  debug_PD_Rsp_EOP1_cnt:4;
	u32  debug_PD_Rsp_SOP0_cnt:4;
	u32  debug_PD_Rsp_EOP0_cnt:4;

	/*indx13*/
	u32 reserved9:32;

	/*indx14*/
	u32  debug_CPLSW_TP_Rx_SOP1_cnt:4;
	u32  debug_CPLSW_TP_Rx_EOP1_cnt:4;
	u32  debug_CPLSW_TP_Rx_SOP0_cnt:4;
	u32  debug_CPLSW_TP_Rx_EOP0_cnt:4;
	u32  debug_CPLSW_CIM_SOP1_cnt:4;
	u32  debug_CPLSW_CIM_EOP1_cnt:4;
	u32  debug_CPLSW_CIM_SOP0_cnt:4;
	u32  debug_CPLSW_CIM_EOP0_cnt:4;

	/*indx15*/
	u32 reserved10:32;

	/*indx16*/
	u32  debug_PD_Req_Rd3_cnt:4;
	u32  debug_PD_Req_Rd2_cnt:4;
	u32  debug_PD_Req_Rd1_cnt:4;
	u32  debug_PD_Req_Rd0_cnt:4;
	u32  debug_PD_Req_Int3_cnt:4;
	u32  debug_PD_Req_Int2_cnt:4;
	u32  debug_PD_Req_Int1_cnt:4;
	u32  debug_PD_Req_Int0_cnt:4;

};
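
/*
 * The sge_debug_reg_data structure above overlays the array filled in by
 * read_sge_debug_data(): each "indxN" marker is the word offset into that
 * array (even offsets hold SGE_DEBUG_DATA_HIGH and odd offsets
 * SGE_DEBUG_DATA_LOW for A_SGE_DEBUG_INDEX N/2), and every named field is a
 * 4-bit SOP or EOP event counter for one channel.  The field order relies
 * on the nibble swap done by HTONL_NIBBLE during capture.
 */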

struct tp_mib_type tp_mib[] = {
	{"tp_mib_mac_in_err_0", 0x0},
	{"tp_mib_mac_in_err_1", 0x1},
	{"tp_mib_mac_in_err_2", 0x2},
	{"tp_mib_mac_in_err_3", 0x3},
	{"tp_mib_hdr_in_err_0", 0x4},
	{"tp_mib_hdr_in_err_1", 0x5},
	{"tp_mib_hdr_in_err_2", 0x6},
	{"tp_mib_hdr_in_err_3", 0x7},
	{"tp_mib_tcp_in_err_0", 0x8},
	{"tp_mib_tcp_in_err_1", 0x9},
	{"tp_mib_tcp_in_err_2", 0xa},
	{"tp_mib_tcp_in_err_3", 0xb},
	{"tp_mib_tcp_out_rst", 0xc},
	{"tp_mib_tcp_in_seg_hi", 0x10},
	{"tp_mib_tcp_in_seg_lo", 0x11},
	{"tp_mib_tcp_out_seg_hi", 0x12},
	{"tp_mib_tcp_out_seg_lo", 0x13},
	{"tp_mib_tcp_rxt_seg_hi", 0x14},
	{"tp_mib_tcp_rxt_seg_lo", 0x15},
	{"tp_mib_tnl_cng_drop_0", 0x18},
	{"tp_mib_tnl_cng_drop_1", 0x19},
	{"tp_mib_tnl_cng_drop_2", 0x1a},
	{"tp_mib_tnl_cng_drop_3", 0x1b},
	{"tp_mib_ofd_chn_drop_0", 0x1c},
	{"tp_mib_ofd_chn_drop_1", 0x1d},
	{"tp_mib_ofd_chn_drop_2", 0x1e},
	{"tp_mib_ofd_chn_drop_3", 0x1f},
	{"tp_mib_tnl_out_pkt_0", 0x20},
	{"tp_mib_tnl_out_pkt_1", 0x21},
	{"tp_mib_tnl_out_pkt_2", 0x22},
	{"tp_mib_tnl_out_pkt_3", 0x23},
	{"tp_mib_tnl_in_pkt_0", 0x24},
	{"tp_mib_tnl_in_pkt_1", 0x25},
	{"tp_mib_tnl_in_pkt_2", 0x26},
	{"tp_mib_tnl_in_pkt_3", 0x27},
	{"tp_mib_tcp_v6in_err_0", 0x28},
	{"tp_mib_tcp_v6in_err_1", 0x29},
	{"tp_mib_tcp_v6in_err_2", 0x2a},
	{"tp_mib_tcp_v6in_err_3", 0x2b},
	{"tp_mib_tcp_v6out_rst", 0x2c},
	{"tp_mib_tcp_v6in_seg_hi", 0x30},
	{"tp_mib_tcp_v6in_seg_lo", 0x31},
	{"tp_mib_tcp_v6out_seg_hi", 0x32},
	{"tp_mib_tcp_v6out_seg_lo", 0x33},
	{"tp_mib_tcp_v6rxt_seg_hi", 0x34},
	{"tp_mib_tcp_v6rxt_seg_lo", 0x35},
	{"tp_mib_ofd_arp_drop", 0x36},
	{"tp_mib_ofd_dfr_drop", 0x37},
	{"tp_mib_cpl_in_req_0", 0x38},
	{"tp_mib_cpl_in_req_1", 0x39},
	{"tp_mib_cpl_in_req_2", 0x3a},
	{"tp_mib_cpl_in_req_3", 0x3b},
	{"tp_mib_cpl_out_rsp_0", 0x3c},
	{"tp_mib_cpl_out_rsp_1", 0x3d},
	{"tp_mib_cpl_out_rsp_2", 0x3e},
	{"tp_mib_cpl_out_rsp_3", 0x3f},
	{"tp_mib_tnl_lpbk_0", 0x40},
	{"tp_mib_tnl_lpbk_1", 0x41},
	{"tp_mib_tnl_lpbk_2", 0x42},
	{"tp_mib_tnl_lpbk_3", 0x43},
	{"tp_mib_tnl_drop_0", 0x44},
	{"tp_mib_tnl_drop_1", 0x45},
	{"tp_mib_tnl_drop_2", 0x46},
	{"tp_mib_tnl_drop_3", 0x47},
	{"tp_mib_fcoe_ddp_0", 0x48},
	{"tp_mib_fcoe_ddp_1", 0x49},
	{"tp_mib_fcoe_ddp_2", 0x4a},
	{"tp_mib_fcoe_ddp_3", 0x4b},
	{"tp_mib_fcoe_drop_0", 0x4c},
	{"tp_mib_fcoe_drop_1", 0x4d},
	{"tp_mib_fcoe_drop_2", 0x4e},
	{"tp_mib_fcoe_drop_3", 0x4f},
	{"tp_mib_fcoe_byte_0_hi", 0x50},
	{"tp_mib_fcoe_byte_0_lo", 0x51},
	{"tp_mib_fcoe_byte_1_hi", 0x52},
	{"tp_mib_fcoe_byte_1_lo", 0x53},
	{"tp_mib_fcoe_byte_2_hi", 0x54},
	{"tp_mib_fcoe_byte_2_lo", 0x55},
	{"tp_mib_fcoe_byte_3_hi", 0x56},
	{"tp_mib_fcoe_byte_3_lo", 0x57},
	{"tp_mib_ofd_vln_drop_0", 0x58},
	{"tp_mib_ofd_vln_drop_1", 0x59},
	{"tp_mib_ofd_vln_drop_2", 0x5a},
	{"tp_mib_ofd_vln_drop_3", 0x5b},
	{"tp_mib_usm_pkts", 0x5c},
	{"tp_mib_usm_drop", 0x5d},
	{"tp_mib_usm_bytes_hi", 0x5e},
	{"tp_mib_usm_bytes_lo", 0x5f},
	{"tp_mib_tid_del", 0x60},
	{"tp_mib_tid_inv", 0x61},
	{"tp_mib_tid_act", 0x62},
	{"tp_mib_tid_pas", 0x63},
	{"tp_mib_rqe_dfr_mod", 0x64},
	{"tp_mib_rqe_dfr_pkt", 0x65}
};

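/*
 * Capture the raw SGE debug state: for each A_SGE_DEBUG_INDEX value 0-15 the
 * high word is stored in the even slot and the low word in the odd slot of
 * sge_dbg_reg[], giving the array that sge_debug_reg_data overlays.
 * HTONL_NIBBLE (defined in one of the cudbg headers included above) reorders
 * the nibbles of each word so the bitfield layout lines up.
 */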
static u32 read_sge_debug_data(struct cudbg_init *pdbg_init, u32 *sge_dbg_reg)
{
	struct adapter *padap = pdbg_init->adap;
	u32 value;
	int i = 0;

	for (i = 0; i <= 15; i++) {
		t4_write_reg(padap, A_SGE_DEBUG_INDEX, (u32)i);
		value = t4_read_reg(padap, A_SGE_DEBUG_DATA_LOW);
		/*printf("LOW	 0x%08x\n", value);*/
		sge_dbg_reg[(i << 1) | 1] = HTONL_NIBBLE(value);
		value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH);
		/*printf("HIGH	 0x%08x\n", value);*/
		sge_dbg_reg[(i << 1)] = HTONL_NIBBLE(value);
	}
	return 0;
}

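/*
 * Snapshot all TP_MIB_SIZE MIB counters into the static tp_mib[] table and
 * hand back a pointer to it cast as a struct tp_mib_data, which the callers
 * use to pull out individual drop counters by name.
 */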
static u32 read_tp_mib_data(struct cudbg_init *pdbg_init,
			    struct tp_mib_data **ppTp_Mib)
{
	struct adapter *padap = pdbg_init->adap;
	u32 i = 0;

	for (i = 0; i < TP_MIB_SIZE; i++) {
		t4_tp_mib_read(padap, &tp_mib[i].value, 1,
				  (u32)tp_mib[i].addr, true);
	}
	*ppTp_Mib = (struct tp_mib_data *)&tp_mib[0];

	return 0;
}

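/*
 * T5 WTP collection: fill a wtp_data structure in a scratch buffer with the
 * SOP/EOP counters sampled at each stage of the TX and RX paths, then
 * compress it into the output buffer.  Most counters are only 4 or 8 bits
 * wide, so they wrap quickly and are mainly useful for comparing adjacent
 * stages rather than as absolute packet counts.
 */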
static int t5_wtp_data(struct cudbg_init *pdbg_init,
		       struct cudbg_buffer *dbg_buff,
		       struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct sge_debug_reg_data *sge_dbg_reg = NULL;
	struct cudbg_buffer scratch_buff;
	struct tp_mib_data *ptp_mib = NULL;
	struct wtp_data *wtp;
	u32 Sge_Dbg[32] = {0};
	u32 value = 0;
	u32 i = 0;
	u32 drop = 0;
	u32 err = 0;
	u32 offset;
	int rc = 0;

	rc = get_scratch_buff(dbg_buff, sizeof(struct wtp_data), &scratch_buff);

	if (rc)
		goto err;

	offset = scratch_buff.offset;
	wtp = (struct wtp_data *)((char *)scratch_buff.data + offset);

	read_sge_debug_data(pdbg_init, Sge_Dbg);
	read_tp_mib_data(pdbg_init, &ptp_mib);

	sge_dbg_reg = (struct sge_debug_reg_data *) &Sge_Dbg[0];

	/*#######################################################################*/
	/*# TX PATH, starting from pcie*/
	/*#######################################################################*/
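
	/*
	 * Each wtp_data member below holds the SOP and EOP counts seen at one
	 * hop; a hop whose counts stop advancing relative to the previous hop
	 * is a likely place where traffic is getting stuck or dropped.
	 */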

	/* Get requests of commands from SGE to PCIE*/

	wtp->sge_pcie_cmd_req.sop[0] = sge_dbg_reg->debug_PC_Req_SOP0_cnt;
	wtp->sge_pcie_cmd_req.sop[1] = sge_dbg_reg->debug_PC_Req_SOP1_cnt;

	wtp->sge_pcie_cmd_req.eop[0] = sge_dbg_reg->debug_PC_Req_EOP0_cnt;
	wtp->sge_pcie_cmd_req.eop[1] = sge_dbg_reg->debug_PC_Req_EOP1_cnt;

	/* Get requests of commands from PCIE to core*/
	value = t4_read_reg(padap, A_PCIE_CMDR_REQ_CNT);

	wtp->pcie_core_cmd_req.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->pcie_core_cmd_req.sop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	/* there is no EOP for this, so we fake it.*/
	wtp->pcie_core_cmd_req.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->pcie_core_cmd_req.eop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/

	/* Get DMA stats*/
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, A_PCIE_T5_DMA_STAT3 + (i * 0x10));
		wtp->pcie_t5_dma_stat3.sop[i] = value & 0xFF;
		wtp->pcie_t5_dma_stat3.eop[i] = ((value >> 16) & 0xFF);
	}

	/* Get SGE debug data high index 6*/
	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_6);
	wtp->sge_debug_data_high_index_6.sop[0] = ((value >> 4) & 0x0F);
	wtp->sge_debug_data_high_index_6.eop[0] = ((value >> 0) & 0x0F);
	wtp->sge_debug_data_high_index_6.sop[1] = ((value >> 12) & 0x0F);
	wtp->sge_debug_data_high_index_6.eop[1] = ((value >> 8) & 0x0F);
	wtp->sge_debug_data_high_index_6.sop[2] = ((value >> 20) & 0x0F);
	wtp->sge_debug_data_high_index_6.eop[2] = ((value >> 16) & 0x0F);
	wtp->sge_debug_data_high_index_6.sop[3] = ((value >> 28) & 0x0F);
	wtp->sge_debug_data_high_index_6.eop[3] = ((value >> 24) & 0x0F);
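
	/*
	 * In the A_SGE_DEBUG_DATA_HIGH_INDEX_N registers each channel owns one
	 * byte: the high nibble carries the SOP count and the low nibble the
	 * EOP count, which is why the same shift pattern repeats below.
	 */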

	/* Get SGE debug data high index 3*/
	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_3);
	wtp->sge_debug_data_high_index_3.sop[0] = ((value >> 4) & 0x0F);
	wtp->sge_debug_data_high_index_3.eop[0] = ((value >> 0) & 0x0F);
	wtp->sge_debug_data_high_index_3.sop[1] = ((value >> 12) & 0x0F);
	wtp->sge_debug_data_high_index_3.eop[1] = ((value >> 8) & 0x0F);
	wtp->sge_debug_data_high_index_3.sop[2] = ((value >> 20) & 0x0F);
	wtp->sge_debug_data_high_index_3.eop[2] = ((value >> 16) & 0x0F);
	wtp->sge_debug_data_high_index_3.sop[3] = ((value >> 28) & 0x0F);
	wtp->sge_debug_data_high_index_3.eop[3] = ((value >> 24) & 0x0F);

	/* Get ULP SE CNT CHx*/
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, A_ULP_TX_SE_CNT_CH0 + (i * 4));
		wtp->ulp_se_cnt_chx.sop[i] = ((value >> 28) & 0x0F);
		wtp->ulp_se_cnt_chx.eop[i] = ((value >> 24) & 0x0F);
	}

	/* Get MAC PORTx PKT COUNT*/
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, 0x3081c + ((i * 4) << 12));
		wtp->mac_portx_pkt_count.sop[i] = ((value >> 24) & 0xFF);
		wtp->mac_portx_pkt_count.eop[i] = ((value >> 16) & 0xFF);
		wtp->mac_porrx_pkt_count.sop[i] = ((value >> 8) & 0xFF);
		wtp->mac_porrx_pkt_count.eop[i] = ((value >> 0) & 0xFF);
	}

	/* Get mac portx aFramesTransmittedOK*/
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, 0x30a80 + ((i * 4) << 12));
		wtp->mac_portx_aframestra_ok.sop[i] = (value & 0xFF);
		wtp->mac_portx_aframestra_ok.eop[i] = (value & 0xFF);
	}

	/* Get command responses from core to PCIE*/
	value = t4_read_reg(padap, A_PCIE_CMDR_RSP_CNT);

	wtp->core_pcie_cmd_rsp.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->core_pcie_cmd_rsp.sop[1] = ((value >> 16) & 0xFF); /*bit 16:23*/

	wtp->core_pcie_cmd_rsp.eop[0] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->core_pcie_cmd_rsp.eop[1] = ((value >> 24) & 0xFF); /*bit 24:31*/

	/*Get command responses from PCIE to SGE*/
	wtp->pcie_sge_cmd_rsp.sop[0] = sge_dbg_reg->debug_PC_Rsp_SOP0_cnt;
	wtp->pcie_sge_cmd_rsp.sop[1] = sge_dbg_reg->debug_PC_Rsp_SOP1_cnt;

	wtp->pcie_sge_cmd_rsp.eop[0] = sge_dbg_reg->debug_PC_Rsp_EOP0_cnt;
	wtp->pcie_sge_cmd_rsp.eop[1] = sge_dbg_reg->debug_PC_Rsp_EOP1_cnt;

	/* Get commands sent from SGE to CIM/uP*/
	wtp->sge_cim.sop[0] = sge_dbg_reg->debug_CIM_SOP0_cnt;
	wtp->sge_cim.sop[1] = sge_dbg_reg->debug_CIM_SOP1_cnt;

	wtp->sge_cim.eop[0] = sge_dbg_reg->debug_CIM_EOP0_cnt;
	wtp->sge_cim.eop[1] = sge_dbg_reg->debug_CIM_EOP1_cnt;

	/* Get requests of data from PCIE by SGE*/
	wtp->utx_sge_dma_req.sop[0] = sge_dbg_reg->debug_UD_Rx_SOP0_cnt;
	wtp->utx_sge_dma_req.sop[1] = sge_dbg_reg->debug_UD_Rx_SOP1_cnt;
	wtp->utx_sge_dma_req.sop[2] = sge_dbg_reg->debug_UD_Rx_SOP2_cnt;
	wtp->utx_sge_dma_req.sop[3] = sge_dbg_reg->debug_UD_Rx_SOP3_cnt;

	wtp->utx_sge_dma_req.eop[0] = sge_dbg_reg->debug_UD_Rx_EOP0_cnt;
	wtp->utx_sge_dma_req.eop[1] = sge_dbg_reg->debug_UD_Rx_EOP1_cnt;
	wtp->utx_sge_dma_req.eop[2] = sge_dbg_reg->debug_UD_Rx_EOP2_cnt;
	wtp->utx_sge_dma_req.eop[3] = sge_dbg_reg->debug_UD_Rx_EOP3_cnt;

	/* Get DMA requests of data from SGE to PCIE*/
	wtp->sge_pcie_dma_req.sop[0] = sge_dbg_reg->debug_PD_Req_Rd0_cnt;
	wtp->sge_pcie_dma_req.sop[1] = sge_dbg_reg->debug_PD_Req_Rd1_cnt;
	wtp->sge_pcie_dma_req.sop[2] = sge_dbg_reg->debug_PD_Req_Rd2_cnt;
	wtp->sge_pcie_dma_req.sop[3] = sge_dbg_reg->debug_PD_Req_Rd3_cnt;
	/*no EOP's, so fake it.*/
	wtp->sge_pcie_dma_req.eop[0] = sge_dbg_reg->debug_PD_Req_Rd0_cnt;
	wtp->sge_pcie_dma_req.eop[1] = sge_dbg_reg->debug_PD_Req_Rd1_cnt;
	wtp->sge_pcie_dma_req.eop[2] = sge_dbg_reg->debug_PD_Req_Rd2_cnt;
	wtp->sge_pcie_dma_req.eop[3] = sge_dbg_reg->debug_PD_Req_Rd3_cnt;

	/* Get requests of data from PCIE to core*/
	value = t4_read_reg(padap, A_PCIE_DMAR_REQ_CNT);

	wtp->pcie_core_dma_req.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->pcie_core_dma_req.sop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->pcie_core_dma_req.sop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
	wtp->pcie_core_dma_req.sop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/
	/* There is no eop so fake it.*/
	wtp->pcie_core_dma_req.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->pcie_core_dma_req.eop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->pcie_core_dma_req.eop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
	wtp->pcie_core_dma_req.eop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/

	/* Get data responses from core to PCIE*/
	value = t4_read_reg(padap, A_PCIE_DMAR_RSP_SOP_CNT);

	wtp->core_pcie_dma_rsp.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->core_pcie_dma_rsp.sop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->core_pcie_dma_rsp.sop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
	wtp->core_pcie_dma_rsp.sop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/

	value = t4_read_reg(padap, A_PCIE_DMAR_RSP_EOP_CNT);

	wtp->core_pcie_dma_rsp.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->core_pcie_dma_rsp.eop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->core_pcie_dma_rsp.eop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
	wtp->core_pcie_dma_rsp.eop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/

	/* Get PCIE_DATA to SGE*/
	wtp->pcie_sge_dma_rsp.sop[0] = sge_dbg_reg->debug_PD_Rsp_SOP0_cnt;
	wtp->pcie_sge_dma_rsp.sop[1] = sge_dbg_reg->debug_PD_Rsp_SOP1_cnt;
	wtp->pcie_sge_dma_rsp.sop[2] = sge_dbg_reg->debug_PD_Rsp_SOP2_cnt;
	wtp->pcie_sge_dma_rsp.sop[3] = sge_dbg_reg->debug_PD_Rsp_SOP3_cnt;

	wtp->pcie_sge_dma_rsp.eop[0] = sge_dbg_reg->debug_PD_Rsp_EOP0_cnt;
	wtp->pcie_sge_dma_rsp.eop[1] = sge_dbg_reg->debug_PD_Rsp_EOP1_cnt;
	wtp->pcie_sge_dma_rsp.eop[2] = sge_dbg_reg->debug_PD_Rsp_EOP2_cnt;
	wtp->pcie_sge_dma_rsp.eop[3] = sge_dbg_reg->debug_PD_Rsp_EOP3_cnt;

	/*Get SGE to ULP_TX*/
	wtp->sge_utx.sop[0] = sge_dbg_reg->debug_U_Tx_SOP0_cnt;
	wtp->sge_utx.sop[1] = sge_dbg_reg->debug_U_Tx_SOP1_cnt;
	wtp->sge_utx.sop[2] = sge_dbg_reg->debug_U_Tx_SOP2_cnt;
	wtp->sge_utx.sop[3] = sge_dbg_reg->debug_U_Tx_SOP3_cnt;

	wtp->sge_utx.eop[0] = sge_dbg_reg->debug_U_Tx_EOP0_cnt;
	wtp->sge_utx.eop[1] = sge_dbg_reg->debug_U_Tx_EOP1_cnt;
	wtp->sge_utx.eop[2] = sge_dbg_reg->debug_U_Tx_EOP2_cnt;
	wtp->sge_utx.eop[3] = sge_dbg_reg->debug_U_Tx_EOP3_cnt;

	/* Get ULP_TX to TP*/
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, (A_ULP_TX_SE_CNT_CH0 + (i*4)));

		wtp->utx_tp.sop[i] = ((value >> 28) & 0xF); /*bits 28:31*/
		wtp->utx_tp.eop[i] = ((value >> 24) & 0xF); /*bits 24:27*/
	}

	/* Get TP_DBG_CSIDE registers*/
	for (i = 0; i < 4; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_RX0 + i),
			       true);

		wtp->utx_tpcside.sop[i]   = ((value >> 28) & 0xF);/*bits 28:31*/
		wtp->utx_tpcside.eop[i]   = ((value >> 24) & 0xF);/*bits 24:27*/
		wtp->tpcside_rxpld.sop[i] = ((value >> 20) & 0xF);/*bits 20:23*/
		wtp->tpcside_rxpld.eop[i] = ((value >> 16) & 0xF);/*bits 16:19*/
		wtp->tpcside_rxarb.sop[i] = ((value >> 12) & 0xF);/*bits 12:15*/
		wtp->tpcside_rxarb.eop[i] = ((value >> 8) & 0xF); /*bits 8:11*/
		wtp->tpcside_rxcpl.sop[i] = ((value >> 4) & 0xF); /*bits 4:7*/
		wtp->tpcside_rxcpl.eop[i] = ((value >> 0) & 0xF); /*bits 0:3*/
	}

	/* TP_DBG_ESIDE*/
	for (i = 0; i < 4; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_ESIDE_PKT0 + i),
			       true);

		wtp->tpeside_mps.sop[i] = ((value >> 28) & 0xF); /*bits 28:31*/
		wtp->tpeside_mps.eop[i] = ((value >> 24) & 0xF); /*bits 24:27*/
		wtp->tpeside_pm.sop[i]	= ((value >> 20) & 0xF); /*bits 20:23*/
		wtp->tpeside_pm.eop[i]	= ((value >> 16) & 0xF); /*bits 16:19*/
		wtp->mps_tpeside.sop[i] = ((value >> 12) & 0xF); /*bits 12:15*/
		wtp->mps_tpeside.eop[i] = ((value >> 8) & 0xF); /*bits 8:11*/
		wtp->tpeside_pld.sop[i] = ((value >> 4) & 0xF); /*bits 4:7*/
		wtp->tpeside_pld.eop[i] = ((value >> 0) & 0xF); /*bits 0:3*/
	}

	/*PCIE CMD STAT2*/
	for (i = 0; i < 3; i++) {
		value = t4_read_reg(padap, 0x5988 + (i * 0x10));
		wtp->pcie_cmd_stat2.sop[i] = value & 0xFF;
		wtp->pcie_cmd_stat2.eop[i] = value & 0xFF;
	}

	/*PCIE cmd stat3*/
	for (i = 0; i < 3; i++) {
		value = t4_read_reg(padap, 0x598c + (i * 0x10));
		wtp->pcie_cmd_stat3.sop[i] = value & 0xFF;
		wtp->pcie_cmd_stat3.eop[i] = value & 0xFF;
	}

	/* ULP_RX input/output*/
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, (A_ULP_RX_SE_CNT_CH0 + (i*4)));

		wtp->pmrx_ulprx.sop[i]	  = ((value >> 4) & 0xF); /*bits 4:7*/
		wtp->pmrx_ulprx.eop[i]	  = ((value >> 0) & 0xF); /*bits 0:3*/
		wtp->ulprx_tpcside.sop[i] = ((value >> 28) & 0xF);/*bits 28:31*/
		wtp->ulprx_tpcside.eop[i] = ((value >> 24) & 0xF);/*bits 24:27*/
	}

	/* Get the MPS input from TP*/
	drop = 0;
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, (A_MPS_TX_SE_CNT_TP01 + (i << 2)));
		wtp->tp_mps.sop[(i*2)]	   = ((value >> 8) & 0xFF); /*bit 8:15*/
		wtp->tp_mps.eop[(i*2)]	   = ((value >> 0) & 0xFF); /*bit 0:7*/
		wtp->tp_mps.sop[(i*2) + 1] = ((value >> 24) & 0xFF);/*bit 24:31*/
		wtp->tp_mps.eop[(i*2) + 1] = ((value >> 16) & 0xFF);/*bit 16:23*/
	}
	drop  = ptp_mib->TP_MIB_OFD_ARP_DROP.value;
	drop += ptp_mib->TP_MIB_OFD_DFR_DROP.value;

	drop += ptp_mib->TP_MIB_TNL_DROP_0.value;
	drop += ptp_mib->TP_MIB_TNL_DROP_1.value;
	drop += ptp_mib->TP_MIB_TNL_DROP_2.value;
	drop += ptp_mib->TP_MIB_TNL_DROP_3.value;

	wtp->tp_mps.drops = drop;

	/* Get the MPS output to the MACs*/
	drop = 0;
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, (A_MPS_TX_SE_CNT_MAC01 + (i << 2)));
		wtp->mps_xgm.sop[(i*2)]     = ((value >> 8) & 0xFF);/*bit 8:15*/
		wtp->mps_xgm.eop[(i*2)]     = ((value >> 0) & 0xFF);/*bit 0:7*/
		wtp->mps_xgm.sop[(i*2) + 1] = ((value >> 24) & 0xFF);/*bit 24:31*/
		wtp->mps_xgm.eop[(i*2) + 1] = ((value >> 16) & 0xFF);/*bit 16:23*/
	}
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap,
				(T5_PORT0_REG(A_MPS_PORT_STAT_TX_PORT_DROP_L) +
				(i * T5_PORT_STRIDE)));
		drop += value;
	}
	wtp->mps_xgm.drops = (drop & 0xFF);

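	/*
	 * Per-port MAC/MPS statistics registers are laid out as one block per
	 * port: T5_PORT0_REG() gives the port-0 address and each subsequent
	 * port is T5_PORT_STRIDE bytes further on, which is why the loops
	 * below index the register with (i * T5_PORT_STRIDE).
	 */
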
	/*
	 * Get the SOP/EOP counters into and out of the MAC.  [JHANEL] I think
	 * this is clear on read, so you have to read both the TX and RX paths
	 * at the same time.
	 */
	drop = 0;
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap,
				(T5_PORT0_REG(A_MAC_PORT_PKT_COUNT) +
				(i * T5_PORT_STRIDE)));

		wtp->tx_xgm_xgm.sop[i] = ((value >> 24) & 0xFF); /*bit 24:31*/
		wtp->tx_xgm_xgm.eop[i] = ((value >> 16) & 0xFF); /*bit 16:23*/
		wtp->rx_xgm_xgm.sop[i] = ((value >> 8) & 0xFF); /*bit 8:15*/
		wtp->rx_xgm_xgm.eop[i] = ((value >> 0) & 0xFF); /*bit 0:7*/
	}

	/* Get the MACs' output to the wire*/
	drop = 0;
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap,
				(T5_PORT0_REG(A_MAC_PORT_AFRAMESTRANSMITTEDOK) +
				(i * T5_PORT_STRIDE)));
		wtp->xgm_wire.sop[i] = (value);
		wtp->xgm_wire.eop[i] = (value); /* No EOP for XGMAC, so fake it.*/
	}

	/*########################################################################*/
	/*# RX PATH, starting from wire*/
	/*########################################################################*/

	/* Add up the wire input to the MAC*/
	drop = 0;
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap,
				(T5_PORT0_REG(A_MAC_PORT_AFRAMESRECEIVEDOK) +
				(i * T5_PORT_STRIDE)));

		wtp->wire_xgm.sop[i] = (value);
		wtp->wire_xgm.eop[i] = (value); /* No EOP for XGMAC, so fake it.*/
	}

	/* Already read the rx_xgm_xgm when reading TX path.*/

	/* Add up SOP/EOP's on all 8 MPS buffer channels*/
	drop = 0;
	for (i = 0; i < 8; i++) {
		value = t4_read_reg(padap, (A_MPS_RX_SE_CNT_IN0 + (i << 2)));

		wtp->xgm_mps.sop[i] = ((value >> 8) & 0xFF); /*bits 8:15*/
		wtp->xgm_mps.eop[i] = ((value >> 0) & 0xFF); /*bits 0:7*/
	}
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, (A_MPS_RX_CLS_DROP_CNT0 + (i << 2)));
		/* typo in JHANEL's code.*/
		drop += (value & 0xFFFF) + ((value >> 16) & 0xFFFF);
	}
	wtp->xgm_mps.cls_drop = drop & 0xFF;

	/* Add up the overflow drops on all 4 ports.*/
	drop = 0;
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
				     (i << 3)));
		drop += value;
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
				     (i << 2)));
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_LB_DROP_FRAME_L +
				     (i << 3)));
		drop += value;
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
				     (i << 2)));

		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
				     (i << 3)));
		drop += value;
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
				     (i << 3)));
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L +
				     (i << 3)));
		drop += value;
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
				     (i << 3)));

		value = t4_read_reg(padap,
			T5_PORT0_REG(A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES) +
			(i * T5_PORT_STRIDE));
		drop += value;
	}
	wtp->xgm_mps.drop = (drop & 0xFF);

	/* Add up the MPS errors that should result in dropped packets*/
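	/*
	 * Each of these statistics is a register pair; only the low word is
	 * accumulated into err, while the extra reads at offset +4 (presumably
	 * the matching _H half of the 64-bit counter) are fetched and
	 * discarded.
	 */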
	err = 0;
	for (i = 0; i < 4; i++) {

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LESS_64B_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG((A_MPS_PORT_STAT_RX_PORT_LESS_64B_L) +
			(i * T5_PORT_STRIDE) + 4)));
	}
	wtp->xgm_mps.err = (err & 0xFF);

	drop = 0;
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, (A_MPS_RX_SE_CNT_OUT01 + (i << 2)));

		wtp->mps_tp.sop[(i*2)]	   = ((value >> 8) & 0xFF); /*bit 8:15*/
		wtp->mps_tp.eop[(i*2)]	   = ((value >> 0) & 0xFF); /*bit 0:7*/
		wtp->mps_tp.sop[(i*2) + 1] = ((value >> 24) & 0xFF);/*bit 24:31*/
		wtp->mps_tp.eop[(i*2) + 1] = ((value >> 16) & 0xFF);/*bit 16:23*/
	}
	drop = ptp_mib->TP_MIB_TNL_CNG_DROP_0.value;
	drop += ptp_mib->TP_MIB_TNL_CNG_DROP_1.value;
	drop += ptp_mib->TP_MIB_TNL_CNG_DROP_2.value;
	drop += ptp_mib->TP_MIB_TNL_CNG_DROP_3.value;
	drop += ptp_mib->TP_MIB_OFD_CHN_DROP_0.value;
	drop += ptp_mib->TP_MIB_OFD_CHN_DROP_1.value;
	drop += ptp_mib->TP_MIB_OFD_CHN_DROP_2.value;
	drop += ptp_mib->TP_MIB_OFD_CHN_DROP_3.value;
	drop += ptp_mib->TP_MIB_FCOE_DROP_0.value;
	drop += ptp_mib->TP_MIB_FCOE_DROP_1.value;
	drop += ptp_mib->TP_MIB_FCOE_DROP_2.value;
	drop += ptp_mib->TP_MIB_FCOE_DROP_3.value;
	drop += ptp_mib->TP_MIB_OFD_VLN_DROP_0.value;
	drop += ptp_mib->TP_MIB_OFD_VLN_DROP_1.value;
	drop += ptp_mib->TP_MIB_OFD_VLN_DROP_2.value;
	drop += ptp_mib->TP_MIB_OFD_VLN_DROP_3.value;
	drop += ptp_mib->TP_MIB_USM_DROP.value;

	wtp->mps_tp.drops = drop;

	/* Get TP_DBG_CSIDE_TX registers*/
	for (i = 0; i < 4; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_RX0 + i),
			       true);

		wtp->tpcside_csw.sop[i]   = ((value >> 28) & 0xF);/*bits 28:31*/
		wtp->tpcside_csw.eop[i]   = ((value >> 24) & 0xF);/*bits 24:27*/
		wtp->tpcside_pm.sop[i]	  = ((value >> 20) & 0xF);/*bits 20:23*/
		wtp->tpcside_pm.eop[i]	  = ((value >> 16) & 0xF);/*bits 16:19*/
		wtp->tpcside_uturn.sop[i] = ((value >> 12) & 0xF);/*bits 12:15*/
		wtp->tpcside_uturn.eop[i] = ((value >> 8) & 0xF); /*bits 8:11*/
		wtp->tpcside_txcpl.sop[i] = ((value >> 4) & 0xF); /*bits 4:7*/
		wtp->tpcside_txcpl.eop[i] = ((value >> 0) & 0xF); /*bits 0:3*/
	}

	/* TP to CPL_SWITCH*/
	wtp->tp_csw.sop[0] = sge_dbg_reg->debug_CPLSW_TP_Rx_SOP0_cnt;
	wtp->tp_csw.sop[1] = sge_dbg_reg->debug_CPLSW_TP_Rx_SOP1_cnt;

	wtp->tp_csw.eop[0] = sge_dbg_reg->debug_CPLSW_TP_Rx_EOP0_cnt;
	wtp->tp_csw.eop[1] = sge_dbg_reg->debug_CPLSW_TP_Rx_EOP1_cnt;

	/* TP/CPL_SWITCH to SGE*/
	wtp->csw_sge.sop[0] = sge_dbg_reg->debug_T_Rx_SOP0_cnt;
	wtp->csw_sge.sop[1] = sge_dbg_reg->debug_T_Rx_SOP1_cnt;

	wtp->csw_sge.eop[0] = sge_dbg_reg->debug_T_Rx_EOP0_cnt;
	wtp->csw_sge.eop[1] = sge_dbg_reg->debug_T_Rx_EOP1_cnt;

	wtp->sge_pcie.sop[0] = sge_dbg_reg->debug_PD_Req_SOP0_cnt;
	wtp->sge_pcie.sop[1] = sge_dbg_reg->debug_PD_Req_SOP1_cnt;
	wtp->sge_pcie.sop[2] = sge_dbg_reg->debug_PD_Req_SOP2_cnt;
	wtp->sge_pcie.sop[3] = sge_dbg_reg->debug_PD_Req_SOP3_cnt;

	wtp->sge_pcie.eop[0] = sge_dbg_reg->debug_PD_Req_EOP0_cnt;
	wtp->sge_pcie.eop[1] = sge_dbg_reg->debug_PD_Req_EOP1_cnt;
	wtp->sge_pcie.eop[2] = sge_dbg_reg->debug_PD_Req_EOP2_cnt;
	wtp->sge_pcie.eop[3] = sge_dbg_reg->debug_PD_Req_EOP3_cnt;

	wtp->sge_pcie_ints.sop[0] = sge_dbg_reg->debug_PD_Req_Int0_cnt;
	wtp->sge_pcie_ints.sop[1] = sge_dbg_reg->debug_PD_Req_Int1_cnt;
	wtp->sge_pcie_ints.sop[2] = sge_dbg_reg->debug_PD_Req_Int2_cnt;
	wtp->sge_pcie_ints.sop[3] = sge_dbg_reg->debug_PD_Req_Int3_cnt;
	/* NO EOP, so fake it.*/
	wtp->sge_pcie_ints.eop[0] = sge_dbg_reg->debug_PD_Req_Int0_cnt;
	wtp->sge_pcie_ints.eop[1] = sge_dbg_reg->debug_PD_Req_Int1_cnt;
	wtp->sge_pcie_ints.eop[2] = sge_dbg_reg->debug_PD_Req_Int2_cnt;
	wtp->sge_pcie_ints.eop[3] = sge_dbg_reg->debug_PD_Req_Int3_cnt;

	/*Get PCIE DMA1 STAT2*/
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, A_PCIE_T5_DMA_STAT2 + (i * 0x10));
		wtp->pcie_dma1_stat2.sop[i] = ((value >> 8) & 0x0F);
		wtp->pcie_dma1_stat2.eop[i] = ((value >> 8) & 0x0F);
		wtp->pcie_dma1_stat2_core.sop[i] += value & 0x0F;
		wtp->pcie_dma1_stat2_core.eop[i] += value & 0x0F;
	}

	/* Get mac porrx aFramesTransmittedok*/
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, 0x30a88 + ((i * 4) << 12));
		wtp->mac_porrx_aframestra_ok.sop[i] = (value & 0xFF);
		wtp->mac_porrx_aframestra_ok.eop[i] = (value & 0xFF);
	}

	/*Get SGE debug data high index 7*/
	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_7);
	wtp->sge_debug_data_high_indx7.sop[0] = ((value >> 4) & 0x0F);
	wtp->sge_debug_data_high_indx7.eop[0] = ((value >> 0) & 0x0F);
	wtp->sge_debug_data_high_indx7.sop[1] = ((value >> 12) & 0x0F);
	wtp->sge_debug_data_high_indx7.eop[1] = ((value >> 8) & 0x0F);
	wtp->sge_debug_data_high_indx7.sop[2] = ((value >> 20) & 0x0F);
	wtp->sge_debug_data_high_indx7.eop[2] = ((value >> 16) & 0x0F);
	wtp->sge_debug_data_high_indx7.sop[3] = ((value >> 28) & 0x0F);
	wtp->sge_debug_data_high_indx7.eop[3] = ((value >> 24) & 0x0F);

	/*Get SGE debug data high index 1*/
	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_1);
	wtp->sge_debug_data_high_indx1.sop[0] = ((value >> 20) & 0x0F);
	wtp->sge_debug_data_high_indx1.eop[0] = ((value >> 16) & 0x0F);
	wtp->sge_debug_data_high_indx1.sop[1] = ((value >> 28) & 0x0F);
	wtp->sge_debug_data_high_indx1.eop[1] = ((value >> 24) & 0x0F);

	/*Get TP debug CSIDE Tx registers*/
	for (i = 0; i < 2; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_TX0 + i),
			       true);

		wtp->utx_tpcside_tx.sop[i] = ((value >> 28) & 0xF);/*bits 28:31*/
		wtp->utx_tpcside_tx.eop[i] = ((value >> 24) & 0xF);
	}

	/*Get SGE debug data high index 9*/
	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_9);
	wtp->sge_debug_data_high_indx9.sop[0] = ((value >> 20) & 0x0F);
	wtp->sge_debug_data_high_indx9.sop[1] = ((value >> 28) & 0x0F);
	wtp->sge_debug_data_high_indx9.eop[0] = ((value >> 16) & 0x0F);
	wtp->sge_debug_data_high_indx9.eop[1] = ((value >> 24) & 0x0F);
	wtp->sge_work_req_pkt.sop[0] = ((value >> 4) & 0x0F);
	wtp->sge_work_req_pkt.sop[1] = ((value >> 12) & 0x0F);

	/*Get LE DB response count*/
	value = t4_read_reg(padap, A_LE_DB_REQ_RSP_CNT);
	wtp->le_db_rsp_cnt.sop = value & 0xF;
	wtp->le_db_rsp_cnt.eop = (value >> 16) & 0xF;

	/*Get TP debug Eside PKTx*/
	for (i = 0; i < 4; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_ESIDE_PKT0 + i),
			       true);

		wtp->tp_dbg_eside_pktx.sop[i] = ((value >> 12) & 0xF);
		wtp->tp_dbg_eside_pktx.eop[i] = ((value >> 8) & 0xF);
	}

	/* Get data responses from core to PCIE*/
	value = t4_read_reg(padap, A_PCIE_DMAW_SOP_CNT);

	wtp->pcie_core_dmaw.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->pcie_core_dmaw.sop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->pcie_core_dmaw.sop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
	wtp->pcie_core_dmaw.sop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/

	value = t4_read_reg(padap, A_PCIE_DMAW_EOP_CNT);

	wtp->pcie_core_dmaw.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->pcie_core_dmaw.eop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->pcie_core_dmaw.eop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
	wtp->pcie_core_dmaw.eop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/

	value = t4_read_reg(padap, A_PCIE_DMAI_CNT);

	wtp->pcie_core_dmai.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->pcie_core_dmai.sop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->pcie_core_dmai.sop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
	wtp->pcie_core_dmai.sop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/
	/* no eop for interrupts, just fake it.*/
	wtp->pcie_core_dmai.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->pcie_core_dmai.eop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->pcie_core_dmai.eop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
	wtp->pcie_core_dmai.eop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/

	rc = write_compression_hdr(&scratch_buff, dbg_buff);

	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}

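/*
 * T6 variant of the WTP collection.  The flow mirrors t5_wtp_data(), but
 * several stages are sampled for only two channels/ports and a few of the
 * MAC statistics live at different offsets on T6.
 */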
static int t6_wtp_data(struct cudbg_init *pdbg_init,
		       struct cudbg_buffer *dbg_buff,
		       struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct sge_debug_reg_data *sge_dbg_reg = NULL;
	struct cudbg_buffer scratch_buff;
	struct tp_mib_data *ptp_mib = NULL;
	struct wtp_data *wtp;
	u32 Sge_Dbg[32] = {0};
	u32 value = 0;
	u32 i = 0;
	u32 drop = 0;
	u32 err = 0;
	u32 offset;
	int rc = 0;

	rc = get_scratch_buff(dbg_buff, sizeof(struct wtp_data), &scratch_buff);

	if (rc)
		goto err;

	offset = scratch_buff.offset;
	wtp = (struct wtp_data *)((char *)scratch_buff.data + offset);

	read_sge_debug_data(pdbg_init, Sge_Dbg);
	read_tp_mib_data(pdbg_init, &ptp_mib);

	sge_dbg_reg = (struct sge_debug_reg_data *) &Sge_Dbg[0];

	/*# TX PATH*/

	/*PCIE CMD STAT2*/
	value = t4_read_reg(padap, A_PCIE_T5_CMD_STAT2);
	wtp->pcie_cmd_stat2.sop[0] = value & 0xFF;
	wtp->pcie_cmd_stat2.eop[0] = value & 0xFF;

	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_7);
	wtp->sge_pcie_cmd_req.sop[0] = ((value >> 20) & 0x0F);
	wtp->sge_pcie_cmd_req.eop[0] = ((value >> 16) & 0x0F);
	wtp->sge_pcie_cmd_req.sop[1] = ((value >> 28) & 0x0F);
	wtp->sge_pcie_cmd_req.eop[1] = ((value >> 24) & 0x0F);

	value = t4_read_reg(padap, A_PCIE_T5_CMD_STAT3);
	wtp->pcie_cmd_stat3.sop[0] = value & 0xFF;
	wtp->pcie_cmd_stat3.eop[0] = value & 0xFF;

	/*Get command responses from PCIE to SGE*/
	wtp->pcie_sge_cmd_rsp.sop[0] = sge_dbg_reg->debug_PC_Rsp_SOP0_cnt;
	wtp->pcie_sge_cmd_rsp.eop[0] = sge_dbg_reg->debug_PC_Rsp_EOP0_cnt;
	wtp->pcie_sge_cmd_rsp.sop[1] = sge_dbg_reg->debug_PC_Rsp_SOP1_cnt;
	wtp->pcie_sge_cmd_rsp.eop[1] = sge_dbg_reg->debug_PC_Rsp_EOP1_cnt;

	/* Get commands sent from SGE to CIM/uP*/
	wtp->sge_cim.sop[0] = sge_dbg_reg->debug_CIM_SOP0_cnt;
	wtp->sge_cim.sop[1] = sge_dbg_reg->debug_CIM_SOP1_cnt;

	wtp->sge_cim.eop[0] = sge_dbg_reg->debug_CIM_EOP0_cnt;
	wtp->sge_cim.eop[1] = sge_dbg_reg->debug_CIM_EOP1_cnt;

	/*Get SGE debug data high index 9*/
	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_9);
	wtp->sge_work_req_pkt.sop[0] = ((value >> 4) & 0x0F);
	wtp->sge_work_req_pkt.eop[0] = ((value >> 0) & 0x0F);

	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, A_PCIE_T5_DMA_STAT2 + (i * 0x10));
		wtp->pcie_dma1_stat2.sop[i] = ((value >> 8) & 0x0F);
		wtp->pcie_dma1_stat2.eop[i] = ((value >> 8) & 0x0F);
		wtp->pcie_dma1_stat2_core.sop[i] = value & 0x0F;
		wtp->pcie_dma1_stat2_core.eop[i] = value & 0x0F;
	}

	/* Get DMA0 stats3*/
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, A_PCIE_T5_DMA_STAT3 + (i * 0x10));
		wtp->pcie_t5_dma_stat3.sop[i] = value & 0xFF;
		wtp->pcie_t5_dma_stat3.eop[i] = ((value >> 16) & 0xFF);
	}

	/* Get ULP SE CNT CHx*/
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, A_ULP_TX_SE_CNT_CH0 + (i * 4));
		wtp->ulp_se_cnt_chx.sop[i] = ((value >> 28) & 0x0F);
		wtp->ulp_se_cnt_chx.eop[i] = ((value >> 24) & 0x0F);
	}

	/* Get TP_DBG_CSIDE registers*/
	for (i = 0; i < 4; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_RX0 + i),
			       true);

		wtp->utx_tpcside.sop[i]   = ((value >> 28) & 0xF);/*bits 28:31*/
		wtp->utx_tpcside.eop[i]   = ((value >> 24) & 0xF);/*bits 24:27*/
		wtp->tpcside_rxarb.sop[i] = ((value >> 12) & 0xF);/*bits 12:15*/
		wtp->tpcside_rxarb.eop[i] = ((value >> 8) & 0xF); /*bits 8:11*/
	}

	for (i = 0; i < 4; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_ESIDE_PKT0 + i),
			       true);

		wtp->tpeside_mps.sop[i] = ((value >> 28) & 0xF); /*bits 28:31*/
		wtp->tpeside_mps.eop[i] = ((value >> 24) & 0xF); /*bits 24:27*/
	}

	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, (A_MPS_TX_SE_CNT_TP01 + (i << 2)));
		wtp->tp_mps.sop[(i*2)]	   = ((value >> 8) & 0xFF); /*bit 8:15*/
		wtp->tp_mps.eop[(i*2)]	   = ((value >> 0) & 0xFF); /*bit 0:7*/
		wtp->tp_mps.sop[(i*2) + 1] = ((value >> 24) & 0xFF);/*bit 24:31*/
		wtp->tp_mps.eop[(i*2) + 1] = ((value >> 16) & 0xFF);/*bit 16:23*/
	}

	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, (A_MPS_TX_SE_CNT_MAC01 + (i << 2)));
		wtp->mps_xgm.sop[(i*2)]     = ((value >> 8) & 0xFF);/*bit 8:15*/
		wtp->mps_xgm.eop[(i*2)]     = ((value >> 0) & 0xFF); /*bit 0:7*/
		wtp->mps_xgm.sop[(i*2) + 1] = ((value >> 24) & 0xFF);/*bit 24:31*/
		wtp->mps_xgm.eop[(i*2) + 1] = ((value >> 16) & 0xFF);/*bit 16:23*/
	}

	/* Get MAC PORTx PKT COUNT*/
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, 0x3081c + ((i * 4) << 12));
		wtp->mac_portx_pkt_count.sop[i] = ((value >> 24) & 0xFF);
		wtp->mac_portx_pkt_count.eop[i] = ((value >> 16) & 0xFF);
		wtp->mac_porrx_pkt_count.sop[i] = ((value >> 8) & 0xFF);
		wtp->mac_porrx_pkt_count.eop[i] = ((value >> 0) & 0xFF);
	}

	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, 0x30f20 + ((i * 4) << 12));
		wtp->mac_portx_aframestra_ok.sop[i] = value & 0xff;
		wtp->mac_portx_aframestra_ok.eop[i] = value & 0xff;
	}

	/*MAC_PORT_MTIP_1G10G_TX_etherStatsPkts*/
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, 0x30f60 + ((i * 4) << 12));
		wtp->mac_portx_etherstatspkts.sop[i] = value & 0xff;
		wtp->mac_portx_etherstatspkts.eop[i] = value & 0xff;
	}

	/*RX path*/

	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_7);
	wtp->sge_debug_data_high_indx7.sop[0] = ((value >> 4) & 0x0F);
	wtp->sge_debug_data_high_indx7.eop[0] = ((value >> 0) & 0x0F);
	wtp->sge_debug_data_high_indx7.sop[1] = ((value >> 12) & 0x0F);
	wtp->sge_debug_data_high_indx7.eop[1] = ((value >> 8) & 0x0F);

	/*Get SGE debug data high index 1*/
	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_1);
	wtp->sge_debug_data_high_indx1.sop[0] = ((value >> 20) & 0x0F);
	wtp->sge_debug_data_high_indx1.eop[0] = ((value >> 16) & 0x0F);
	wtp->sge_debug_data_high_indx1.sop[1] = ((value >> 28) & 0x0F);
	wtp->sge_debug_data_high_indx1.eop[1] = ((value >> 24) & 0x0F);

	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_9);
	wtp->sge_debug_data_high_indx9.sop[0] = ((value >> 20) & 0x0F);
	wtp->sge_debug_data_high_indx9.sop[1] = ((value >> 28) & 0x0F);

	wtp->sge_debug_data_high_indx9.eop[0] = ((value >> 16) & 0x0F);
	wtp->sge_debug_data_high_indx9.eop[1] = ((value >> 24) & 0x0F);

	for (i = 0; i < 2; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_TX0 + i),
			       true);

		wtp->utx_tpcside_tx.sop[i] = ((value >> 28) & 0xF);/*bits 28:31*/
		wtp->utx_tpcside_tx.eop[i] = ((value >> 24) & 0xF);
	}

	/*ULP_RX input/output*/
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, (A_ULP_RX_SE_CNT_CH0 + (i*4)));

		wtp->pmrx_ulprx.sop[i]	  = ((value >> 4) & 0xF); /*bits 4:7*/
		wtp->pmrx_ulprx.eop[i]	  = ((value >> 0) & 0xF); /*bits 0:3*/
		wtp->ulprx_tpcside.sop[i] = ((value >> 28) & 0xF);/*bits 28:31*/
		wtp->ulprx_tpcside.eop[i] = ((value >> 24) & 0xF);/*bits 24:27*/
	}

	/*Get LE DB response count*/
	value = t4_read_reg(padap, A_LE_DB_REQ_RSP_CNT);
	wtp->le_db_rsp_cnt.sop = value & 0xF;
	wtp->le_db_rsp_cnt.eop = (value >> 16) & 0xF;

	/*Get TP debug Eside PKTx*/
	for (i = 0; i < 4; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_ESIDE_PKT0 + i),
			       true);

		wtp->tp_dbg_eside_pktx.sop[i] = ((value >> 12) & 0xF);
		wtp->tp_dbg_eside_pktx.eop[i] = ((value >> 8) & 0xF);
	}

	drop = 0;
	/*MPS_RX_SE_CNT_OUT01*/
	value = t4_read_reg(padap, A_MPS_RX_SE_CNT_OUT01);
	wtp->mps_tp.sop[0] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->mps_tp.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->mps_tp.sop[1] = ((value >> 24) & 0xFF); /*bit 24:31*/
	wtp->mps_tp.eop[1] = ((value >> 16) & 0xFF); /*bit 16:23*/

	drop = ptp_mib->TP_MIB_TNL_CNG_DROP_0.value;
	drop += ptp_mib->TP_MIB_TNL_CNG_DROP_1.value;
	drop += ptp_mib->TP_MIB_OFD_CHN_DROP_0.value;
	drop += ptp_mib->TP_MIB_OFD_CHN_DROP_1.value;
	drop += ptp_mib->TP_MIB_FCOE_DROP_0.value;
	drop += ptp_mib->TP_MIB_FCOE_DROP_1.value;
	drop += ptp_mib->TP_MIB_OFD_VLN_DROP_0.value;
	drop += ptp_mib->TP_MIB_OFD_VLN_DROP_1.value;
	drop += ptp_mib->TP_MIB_USM_DROP.value;

	wtp->mps_tp.drops = drop;

	drop = 0;
	for (i = 0; i < 8; i++) {
		value = t4_read_reg(padap, (A_MPS_RX_SE_CNT_IN0 + (i << 2)));

		wtp->xgm_mps.sop[i] = ((value >> 8) & 0xFF); /*bits 8:15*/
		wtp->xgm_mps.eop[i] = ((value >> 0) & 0xFF); /*bits 0:7*/
	}
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, (A_MPS_RX_CLS_DROP_CNT0 + (i << 2)));
		drop += (value & 0xFFFF) + ((value >> 16) & 0xFFFF);
	}
	wtp->xgm_mps.cls_drop = drop & 0xFF;

	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, 0x30e20 + ((i * 4) << 12));
		wtp->mac_porrx_aframestra_ok.sop[i] = value & 0xff;
		wtp->mac_porrx_aframestra_ok.eop[i] = value & 0xff;
	}

	/*MAC_PORT_MTIP_1G10G_RX_etherStatsPkts*/
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, 0x30e60 + ((i * 4) << 12));
		wtp->mac_porrx_etherstatspkts.sop[i] = value & 0xff;
		wtp->mac_porrx_etherstatspkts.eop[i] = value & 0xff;
	}

	wtp->sge_pcie_ints.sop[0] = sge_dbg_reg->debug_PD_Req_Int0_cnt;
	wtp->sge_pcie_ints.sop[1] = sge_dbg_reg->debug_PD_Req_Int1_cnt;
	wtp->sge_pcie_ints.sop[2] = sge_dbg_reg->debug_PD_Req_Int2_cnt;
	wtp->sge_pcie_ints.sop[3] = sge_dbg_reg->debug_PD_Req_Int3_cnt;

	/* Add up the overflow drops on both ports.*/
	drop = 0;
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
				     (i << 3)));
		drop += value;
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
				     (i << 2)));
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_LB_DROP_FRAME_L +
				     (i << 3)));
		drop += value;
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
				     (i << 2)));

		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
				     (i << 3)));
		drop += value;
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
				     (i << 3)));
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L +
				     (i << 3)));
		drop += value;
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
				     (i << 3)));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES) +
			(i * T5_PORT_STRIDE)));
		drop += value;
	}
	wtp->xgm_mps.drop = (drop & 0xFF);

	/* Add up the MPS errors that should result in dropped packets*/
	err = 0;
	for (i = 0; i < 2; i++) {

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LESS_64B_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LESS_64B_L) +
			(i * T5_PORT_STRIDE) + 4));
	}
	wtp->xgm_mps.err = (err & 0xFF);

	rc = write_compression_hdr(&scratch_buff, dbg_buff);

	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}

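/*
 * Entity entry point: dispatch to the chip-specific collector.  Chips other
 * than T5 and T6 are not supported and return the initial error code.
 */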
int collect_wtp_data(struct cudbg_init *pdbg_init,
		     struct cudbg_buffer *dbg_buff,
		     struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	int rc = -1;

	if (is_t5(padap))
		rc = t5_wtp_data(pdbg_init, dbg_buff, cudbg_err);
	else if (is_t6(padap))
		rc = t6_wtp_data(pdbg_init, dbg_buff, cudbg_err);

	return rc;
}