#include "bnx2x_cmn.h"
#include "bnx2x_stats.h"

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)
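/*
 * Illustrative example (editor's note, not part of the driver): with
 * s = 0x00000001FFFFFFFF and a = 2, i.e. s_hi = 1, s_lo = 0xFFFFFFFF,
 * a_hi = 0, a_lo = 2, ADD_64 leaves s_lo = 0x00000001 (wrapped, and
 * s_lo < a_lo flags the carry) and s_hi = 2, so the hi/lo pair now
 * holds 0x0000000200000001 as expected.
 */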

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
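/*
 * Illustrative example (editor's note, not part of the driver): DIFF_64
 * computes m - s with a borrow from the high word when m_lo < s_lo.
 * For m = 0x0000000200000001 and s = 0x0000000100000002: m_lo < s_lo,
 * d_hi = 2 - 1 = 1 > 0, so one unit is "loaned": d_hi drops to 0 and
 * d_lo = 1 + (0xFFFFFFFF - 2) + 1 = 0xFFFFFFFF, i.e. the correct
 * difference 0x00000000FFFFFFFF.  If the subtrahend is larger than the
 * minuend, the result is clamped to 0 instead of being allowed to wrap.
 */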

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
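/*
 * Editor's note: the UPDATE_EXTEND_*STAT macros above all follow the same
 * pattern - the per-client firmware counters are only 32 bits wide, so the
 * driver keeps the previous snapshot (old_*client), computes the unsigned
 * 32-bit delta (still correct if the counter wrapped once between samples),
 * and folds that delta into the 64-bit hi/lo accumulator via ADD_EXTEND_64.
 */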

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
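/*
 * Editor's note: on 32-bit kernels the long return value cannot hold the
 * full 64-bit counter, so only the low 32 bits are reported; on 64-bit
 * kernels HILO_U64() recombines both halves.
 */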

/*
 * Init service functions
 */


static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		spin_lock_bh(&bp->stats_lock);

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}

		spin_unlock_bh(&bp->stats_lock);
	}
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
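/*
 * Editor's note (summary, as far as can be read from this file): when DMAE
 * commands have been queued in the slow-path buffer (bp->executer_idx != 0),
 * the command built above acts as a "loader" - it DMAs the first queued
 * command (bnx2x_sp dmae[0]) from host memory into DMAE command memory at
 * slot loader_idx + 1, and its completion write to
 * dmae_reg_go_c[loader_idx + 1] kicks that slot off.  Queued commands whose
 * comp_addr points at a dmae_reg_go_c register chain into the next transfer,
 * while the final command of the sequence writes DMAE_COMP_VAL to the
 * stats_comp word polled by bnx2x_stats_comp().
 */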

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
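/*
 * Editor's note: taken together, bnx2x_port_stats_init() queues one DMAE
 * chain per statistics pass: host port/function stats are pushed to the
 * management firmware addresses (port_stx/func_stx), the active MAC block
 * (BMAC or EMAC) statistics registers are pulled into mac_stats, and the
 * NIG discard/truncate and egress packet counters are pulled into
 * nig_stats.  Only the last command completes to stats_comp; everything
 * before it chains through the loader GO registers.
 */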

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	if (!BP_NOMCP(bp)) {
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}

static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;
	u16 cur_stats_counter;

	/* Make sure we use the value of the counter
	 * used for sending the last stats ramrod.
	 */
	spin_lock_bh(&bp->stats_lock);
	cur_stats_counter = bp->stats_counter - 1;
	spin_unlock_bh(&bp->stats_lock);

	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if (le16_to_cpu(xclient->stats_counter) != cur_stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   "  xstorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, xclient->stats_counter, cur_stats_counter + 1);
			return -1;
		}
		if (le16_to_cpu(tclient->stats_counter) != cur_stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   "  tstorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, tclient->stats_counter, cur_stats_counter + 1);
			return -2;
		}
		if (le16_to_cpu(uclient->stats_counter) != cur_stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   "  ustorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, uclient->stats_counter, cur_stats_counter + 1);
			return -4;
		}

		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		SUB_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(uclient->bcast_no_buff_bytes.lo));

		SUB_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(uclient->mcast_no_buff_bytes.lo));

		SUB_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(uclient->ucast_no_buff_bytes.lo));

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		int i;

		printk(KERN_DEBUG "%s: brb drops %u  brb truncate %u\n",
		       bp->dev->name,
		       estats->brb_drop_lo, estats->brb_truncate_lo);

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;

			printk(KERN_DEBUG "%s: rx usage(%4u)  *rx_cons_sb(%u)"
					  "  rx pkt(%lu)  rx calls(%lu %lu)\n",
			       fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
			       fp->rx_comp_cons),
			       le16_to_cpu(*fp->rx_cons_sb),
			       bnx2x_hilo(&qstats->
					  total_unicast_packets_received_hi),
			       fp->rx_calls, fp->rx_pkt);
		}

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
			struct netdev_queue *txq =
				netdev_get_tx_queue(bp->dev, i);

			printk(KERN_DEBUG "%s: tx avail(%4u)  *tx_cons_sb(%u)"
					  "  tx pkt(%lu) tx calls (%lu)"
					  "  %s (Xoff events %u)\n",
			       fp->name, bnx2x_tx_avail(fp),
			       le16_to_cpu(*fp->tx_cons_sb),
			       bnx2x_hilo(&qstats->
					  total_unicast_packets_transmitted_hi),
			       fp->tx_pkt,
			       (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
			       qstats->driver_xoff);
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
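/*
 * Editor's note (illustrative walk-through): starting in
 * STATS_STATE_DISABLED, a LINK_UP event runs bnx2x_stats_start() and moves
 * the machine to STATS_STATE_ENABLED; subsequent UPDATE events (typically
 * raised from a periodic timer elsewhere in the driver) run
 * bnx2x_stats_update() and stay in ENABLED; a STOP event runs
 * bnx2x_stats_stop() and returns to DISABLED, where further UPDATE events
 * are ignored via bnx2x_stats_do_nothing().
 */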

void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state;

	if (unlikely(bp->panic))
		return;

	/* Protect a state change flow */
	spin_lock_bh(&bp->stats_lock);
	state = bp->stats_state;
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
	spin_unlock_bh(&bp->stats_lock);

	bnx2x_stats_stm[state][event].action(bp);

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
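/*
 * Editor's note: a caller simply reports an event and lets the table above
 * pick the action and the next state, e.g.:
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
 *
 * Note that the state transition itself is done under bp->stats_lock while
 * the chosen action runs outside the lock.
 */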

static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		func = 2*vn + port;

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}

static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
}