/*
 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "common.h"
#include "regs.h"

/*
 * # of exact address filters.  The first one is used for the station address,
 * the rest are available for multicast addresses.
 */
#define EXACT_ADDR_FILTERS 8

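/*
 * Map a MAC's register block offset back to its port index (0 or 1) by
 * dividing by the per-MAC register stride.
 */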
static inline int macidx(const struct cmac *mac)
{
	return mac->offset / (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR);
}

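/*
 * Reset the XAUI SERDES of a MAC: assert all power-down, PLL-reset and lane
 * reset bits at once, then release them in stages (power up, release the
 * PLLs, release the lanes) with a short delay between steps.
 */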
static void xaui_serdes_reset(struct cmac *mac)
{
	static const unsigned int clear[] = {
		F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1,
		F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3
	};

	int i;
	struct adapter *adap = mac->adapter;
	u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset;

	t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] |
		     F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 |
		     F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 |
		     F_RESETPLL23 | F_RESETPLL01);
	t3_read_reg(adap, ctrl);
	udelay(15);

	for (i = 0; i < ARRAY_SIZE(clear); i++) {
		t3_set_reg_field(adap, ctrl, clear[i], 0);
		udelay(15);
	}
}

void t3b_pcs_reset(struct cmac *mac)
{
	t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
			 F_PCS_RESET_, 0);
	udelay(20);
	t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0,
			 F_PCS_RESET_);
}

int t3_mac_reset(struct cmac *mac)
{
	static const struct addr_val_pair mac_reset_avp[] = {
		{A_XGM_TX_CTRL, 0},
		{A_XGM_RX_CTRL, 0},
		{A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
		 F_RMFCS | F_ENJUMBO | F_ENHASHMCAST},
		{A_XGM_RX_HASH_LOW, 0},
		{A_XGM_RX_HASH_HIGH, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_1, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_2, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_3, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_4, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_5, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_6, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_7, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_8, 0},
		{A_XGM_STAT_CTRL, F_CLRSTATS}
	};
	u32 val;
	struct adapter *adap = mac->adapter;
	unsigned int oft = mac->offset;

	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */

	t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft);
	t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
			 F_RXSTRFRWRD | F_DISERRFRAMES,
			 uses_xaui(adap) ? 0 : F_RXSTRFRWRD);
	t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + oft, 0, F_UNDERUNFIX);

	if (uses_xaui(adap)) {
		if (adap->params.rev == 0) {
			t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
					 F_RXENABLE | F_TXENABLE);
			if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft,
					    F_CMULOCK, 1, 5, 2)) {
				CH_ERR(adap,
				       "MAC %d XAUI SERDES CMU lock failed\n",
				       macidx(mac));
				return -1;
			}
			t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
					 F_SERDESRESET_);
		} else
			xaui_serdes_reset(mac);
	}

	t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + oft,
			 V_RXMAXFRAMERSIZE(M_RXMAXFRAMERSIZE),
			 V_RXMAXFRAMERSIZE(MAX_FRAME_SIZE) | F_RXENFRAMER);
	val = F_MAC_RESET_ | F_XGMAC_STOP_EN;

	if (is_10G(adap))
		val |= F_PCS_RESET_;
	else if (uses_xaui(adap))
		val |= F_PCS_RESET_ | F_XG2G_RESET_;
	else
		val |= F_RGMII_RESET_ | F_XG2G_RESET_;
	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */
	if ((val & F_PCS_RESET_) && adap->params.rev) {
		msleep(1);
		t3b_pcs_reset(mac);
	}

	memset(&mac->stats, 0, sizeof(mac->stats));
	return 0;
}

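/*
 * T3B2-specific MAC reset: the port is first quiesced in the MPS and the TP
 * drop configuration for its channel is temporarily changed while the XGMAC
 * RX FIFO drains; the MAC is then reset and the original settings restored.
 */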
static int t3b2_mac_reset(struct cmac *mac)
{
	struct adapter *adap = mac->adapter;
	unsigned int oft = mac->offset, store;
	int idx = macidx(mac);
	u32 val;

	if (!macidx(mac))
		t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
	else
		t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);

	/* Stop NIC traffic to reduce the number of TXTOGGLES */
	t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 0);
	/* Ensure TX drains */
	t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN, 0);

	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);    /* flush */

	/* Store A_TP_TX_DROP_CFG_CH0 */
	t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
	store = t3_read_reg(adap, A_TP_PIO_DATA);

	msleep(10);

	/* Change DROP_CFG to 0xc0000011 */
	t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
	t3_write_reg(adap, A_TP_PIO_DATA, 0xc0000011);

	/* Check for xgm Rx fifo empty */
	/* Increased loop count to 1000 from 5 to cover the 1G and 100 Mbps cases */
	if (t3_wait_op_done(adap, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + oft,
			    0x80000000, 1, 1000, 2)) {
		CH_ERR(adap, "MAC %d Rx fifo drain failed\n",
		       macidx(mac));
		return -1;
	}

	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0);
	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);    /* flush */

	val = F_MAC_RESET_;
	if (is_10G(adap))
		val |= F_PCS_RESET_;
	else if (uses_xaui(adap))
		val |= F_PCS_RESET_ | F_XG2G_RESET_;
	else
		val |= F_RGMII_RESET_ | F_XG2G_RESET_;
	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);  /* flush */
	if ((val & F_PCS_RESET_) && adap->params.rev) {
		msleep(1);
		t3b_pcs_reset(mac);
	}
	t3_write_reg(adap, A_XGM_RX_CFG + oft,
		     F_DISPAUSEFRAMES | F_EN1536BFRAMES |
		     F_RMFCS | F_ENJUMBO | F_ENHASHMCAST);

	/* Restore the DROP_CFG */
	t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
	t3_write_reg(adap, A_TP_PIO_DATA, store);

	if (!idx)
		t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE);
	else
		t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE);

	/* Re-enable NIC traffic */
	t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 1);

	return 0;
}

/*
 * Set the exact match register 'idx' to recognize the given Ethernet address.
 */
static void set_addr_filter(struct cmac *mac, int idx, const u8 *addr)
{
	u32 addr_lo, addr_hi;
	unsigned int oft = mac->offset + idx * 8;

	addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	addr_hi = (addr[5] << 8) | addr[4];

	t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1 + oft, addr_lo);
	t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi);
}
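
/*
 * Illustration of the packing above: for the (made-up) address
 * 00:07:43:12:34:56, addr[] = {0x00, 0x07, 0x43, 0x12, 0x34, 0x56}, so
 * set_addr_filter() writes addr_lo = 0x12430700 and addr_hi = 0x00005634,
 * i.e. the address bytes are packed little-endian across the LOW/HIGH pair.
 */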

/* Set one of the station's unicast MAC addresses. */
int t3_mac_set_address(struct cmac *mac, unsigned int idx, const u8 addr[6])
{
	if (idx >= mac->nucast)
		return -EINVAL;
	set_addr_filter(mac, idx, addr);
	return 0;
}

/*
 * Specify the number of exact address filters that should be reserved for
 * unicast addresses.  Caller should reload the unicast and multicast addresses
 * after calling this.
 */
int t3_mac_set_num_ucast(struct cmac *mac, int n)
{
	if (n > EXACT_ADDR_FILTERS)
		return -EINVAL;
	mac->nucast = n;
	return 0;
}

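/*
 * The two helpers below disable and re-enable the exact match filters by
 * rewriting each entry with its current value: rewriting the LOW word of an
 * entry appears to deactivate it until its HIGH word is written again, so
 * disabling walks the LOW registers and enabling walks the HIGH ones.
 */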
void t3_mac_disable_exact_filters(struct cmac *mac)
{
	unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_LOW_1;

	for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
		u32 v = t3_read_reg(mac->adapter, reg);
		t3_write_reg(mac->adapter, reg, v);
	}
	t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1);	/* flush */
}

void t3_mac_enable_exact_filters(struct cmac *mac)
{
	unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_HIGH_1;

	for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
		u32 v = t3_read_reg(mac->adapter, reg);
		t3_write_reg(mac->adapter, reg, v);
	}
	t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1);	/* flush */
}

/* Calculate the RX hash filter index of an Ethernet address */
static int hash_hw_addr(const u8 *addr)
{
	int hash = 0, octet, bit, i = 0, c;

	for (octet = 0; octet < 6; ++octet)
		for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) {
			hash ^= (c & 1) << i;
			if (++i == 6)
				i = 0;
		}
	return hash;
}
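
/*
 * The 6-bit hash computed above selects one bit of the 64-bit multicast hash
 * filter: t3_mac_set_rx_mode() below sets bits 0-31 in A_XGM_RX_HASH_LOW and
 * bits 32-63 in A_XGM_RX_HASH_HIGH.
 */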

int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev)
{
	u32 val, hash_lo, hash_hi;
	struct adapter *adap = mac->adapter;
	unsigned int oft = mac->offset;

	val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES;
	if (dev->flags & IFF_PROMISC)
		val |= F_COPYALLFRAMES;
	t3_write_reg(adap, A_XGM_RX_CFG + oft, val);

	if (dev->flags & IFF_ALLMULTI)
		hash_lo = hash_hi = 0xffffffff;
	else {
		struct netdev_hw_addr *ha;
		int exact_addr_idx = mac->nucast;

		hash_lo = hash_hi = 0;
		netdev_for_each_mc_addr(ha, dev)
			if (exact_addr_idx < EXACT_ADDR_FILTERS)
				set_addr_filter(mac, exact_addr_idx++,
						ha->addr);
			else {
				int hash = hash_hw_addr(ha->addr);

				if (hash < 32)
					hash_lo |= (1 << hash);
				else
					hash_hi |= (1 << (hash - 32));
			}
	}

	t3_write_reg(adap, A_XGM_RX_HASH_LOW + oft, hash_lo);
	t3_write_reg(adap, A_XGM_RX_HASH_HIGH + oft, hash_hi);
	return 0;
}

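/*
 * Compute the RX FIFO high watermark used for PAUSE generation: leave room
 * for roughly three maximum-size frames, but never go below 38% of the FIFO
 * or above the FIFO size minus 8 KB.  As a rough illustration, assuming a
 * 32 KB MAC_RXFIFO_SIZE, a 1518-byte frame gives
 * max(32768 - 4554, 12451) = 28214, which the final clamp caps at 24576.
 */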
static int rx_fifo_hwm(int mtu)
{
	int hwm;

	hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, (MAC_RXFIFO_SIZE * 38) / 100);
	return min(hwm, MAC_RXFIFO_SIZE - 8192);
}

int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
{
	int hwm, lwm, divisor;
	int ipg;
	unsigned int thres, v, reg;
	struct adapter *adap = mac->adapter;

	/*
	 * MAX_FRAME_SIZE includes header + FCS, mtu doesn't.  The HW max
	 * packet size register includes header, but not FCS.
	 */
	mtu += 14;
	if (mtu > 1536)
		mtu += 4;

	if (mtu > MAX_FRAME_SIZE - 4)
		return -EINVAL;
	t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);

	if (adap->params.rev >= T3_REV_B2 &&
	    (t3_read_reg(adap, A_XGM_RX_CTRL + mac->offset) & F_RXEN)) {
		t3_mac_disable_exact_filters(mac);
		v = t3_read_reg(adap, A_XGM_RX_CFG + mac->offset);
		t3_set_reg_field(adap, A_XGM_RX_CFG + mac->offset,
				 F_ENHASHMCAST | F_COPYALLFRAMES, F_DISBCAST);

		reg = adap->params.rev == T3_REV_B2 ?
			A_XGM_RX_MAX_PKT_SIZE_ERR_CNT : A_XGM_RXFIFO_CFG;

		/* drain RX FIFO */
		if (t3_wait_op_done(adap, reg + mac->offset,
				    F_RXFIFO_EMPTY, 1, 20, 5)) {
			t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
			t3_mac_enable_exact_filters(mac);
			return -EIO;
		}
		t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
				 V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
				 V_RXMAXPKTSIZE(mtu));
		t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
		t3_mac_enable_exact_filters(mac);
	} else
		t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
				 V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
				 V_RXMAXPKTSIZE(mtu));

	/*
	 * Adjust the PAUSE frame watermarks.  We always set the LWM, and the
	 * HWM only if flow-control is enabled.
	 */
	hwm = rx_fifo_hwm(mtu);
	lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4);
	v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
	v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
	v |= V_RXFIFOPAUSELWM(lwm / 8);
	if (G_RXFIFOPAUSEHWM(v))
		v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) |
		    V_RXFIFOPAUSEHWM(hwm / 8);

	t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);

	/* Adjust the TX FIFO threshold based on the MTU */
	thres = (adap->params.vpd.cclk * 1000) / 15625;
	thres = (thres * mtu) / 1000;
	if (is_10G(adap))
		thres /= 10;
	thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
	thres = max(thres, 8U);	/* need at least 8 */
	ipg = (adap->params.rev == T3_REV_C) ? 0 : 1;
	t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
			 V_TXFIFOTHRESH(M_TXFIFOTHRESH) | V_TXIPG(M_TXIPG),
			 V_TXFIFOTHRESH(thres) | V_TXIPG(ipg));

	if (adap->params.rev > 0) {
		divisor = (adap->params.rev == T3_REV_C) ? 64 : 8;
		t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset,
			     (hwm - lwm) * 4 / divisor);
	}
	t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset,
		     MAC_RXFIFO_SIZE * 4 * 8 / 512);
	return 0;
}

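/*
 * Set the port speed and flow control.  Only full duplex is accepted.  With
 * PAUSE_TX the RX FIFO high watermark is programmed, which the MAC uses to
 * decide when to emit PAUSE frames as its FIFO fills; with PAUSE_RX the
 * transmitter is allowed to honour incoming PAUSE frames (F_TXPAUSEEN).
 */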
int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
{
	u32 val;
	struct adapter *adap = mac->adapter;
	unsigned int oft = mac->offset;

	if (duplex >= 0 && duplex != DUPLEX_FULL)
		return -EINVAL;
	if (speed >= 0) {
		if (speed == SPEED_10)
			val = V_PORTSPEED(0);
		else if (speed == SPEED_100)
			val = V_PORTSPEED(1);
		else if (speed == SPEED_1000)
			val = V_PORTSPEED(2);
		else if (speed == SPEED_10000)
			val = V_PORTSPEED(3);
		else
			return -EINVAL;

		t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
				 V_PORTSPEED(M_PORTSPEED), val);
	}

	val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
	val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
	if (fc & PAUSE_TX) {
		u32 rx_max_pkt_size =
		    G_RXMAXPKTSIZE(t3_read_reg(adap,
					       A_XGM_RX_MAX_PKT_SIZE + oft));
		val |= V_RXFIFOPAUSEHWM(rx_fifo_hwm(rx_max_pkt_size) / 8);
	}
	t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);

	t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
			 (fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
	return 0;
}

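/*
 * Enable the TX and/or RX path of a MAC.  Enabling TX also programs the TP
 * drop configuration for the port and snapshots the frame, drop and SOP/EOP
 * counters that t3b2_mac_watchdog_task() later compares against.
 */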
int t3_mac_enable(struct cmac *mac, int which)
{
	int idx = macidx(mac);
	struct adapter *adap = mac->adapter;
	unsigned int oft = mac->offset;
	struct mac_stats *s = &mac->stats;

	if (which & MAC_DIRECTION_TX) {
		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
		t3_write_reg(adap, A_TP_PIO_DATA,
			     adap->params.rev == T3_REV_C ?
			     0xc4ffff01 : 0xc0ede401);
		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
		t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx,
				 adap->params.rev == T3_REV_C ? 0 : 1 << idx);

		t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);

		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CNT_CH0 + idx);
		mac->tx_mcnt = s->tx_frames;
		mac->tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
							A_TP_PIO_DATA)));
		mac->tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
						A_XGM_TX_SPI4_SOP_EOP_CNT +
						oft)));
		mac->rx_mcnt = s->rx_frames;
		mac->rx_pause = s->rx_pause;
		mac->rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
						A_XGM_RX_SPI4_SOP_EOP_CNT +
						oft)));
		mac->rx_ocnt = s->rx_fifo_ovfl;
		mac->txen = F_TXEN;
		mac->toggle_cnt = 0;
	}
	if (which & MAC_DIRECTION_RX)
		t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
	return 0;
}

int t3_mac_disable(struct cmac *mac, int which)
{
	struct adapter *adap = mac->adapter;

	if (which & MAC_DIRECTION_TX) {
		t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
		mac->txen = 0;
	}
	if (which & MAC_DIRECTION_RX) {
		int val = F_MAC_RESET_;

		t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
				 F_PCS_RESET_, 0);
		msleep(100);
		t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
		if (is_10G(adap))
			val |= F_PCS_RESET_;
		else if (uses_xaui(adap))
			val |= F_PCS_RESET_ | F_XG2G_RESET_;
		else
			val |= F_RGMII_RESET_ | F_XG2G_RESET_;
		t3_write_reg(mac->adapter, A_XGM_RESET_CTRL + mac->offset, val);
	}
	return 0;
}

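/*
 * Periodic watchdog for T3B2 MACs.  Returns 0 if the TX path is making
 * progress, 1 if a hang is suspected and the TX enable was toggled, and 2 if
 * repeated hangs forced a full MAC reset.
 */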
int t3b2_mac_watchdog_task(struct cmac *mac)
{
	struct adapter *adap = mac->adapter;
	struct mac_stats *s = &mac->stats;
	unsigned int tx_tcnt, tx_xcnt;
	u64 tx_mcnt = s->tx_frames;
	int status;

	status = 0;
	tx_xcnt = 1;		/* By default tx_xcnt is making progress */
	tx_tcnt = mac->tx_tcnt;	/* If tx_mcnt is progressing ignore tx_tcnt */
	if (tx_mcnt == mac->tx_mcnt && mac->rx_pause == s->rx_pause) {
		tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
						A_XGM_TX_SPI4_SOP_EOP_CNT +
						mac->offset)));
		if (tx_xcnt == 0) {
			t3_write_reg(adap, A_TP_PIO_ADDR,
				     A_TP_TX_DROP_CNT_CH0 + macidx(mac));
			tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
						      A_TP_PIO_DATA)));
		} else {
			goto out;
		}
	} else {
		mac->toggle_cnt = 0;
		goto out;
	}

	if ((tx_tcnt != mac->tx_tcnt) && (mac->tx_xcnt == 0)) {
		if (mac->toggle_cnt > 4) {
			status = 2;
			goto out;
		} else {
			status = 1;
			goto out;
		}
	} else {
		mac->toggle_cnt = 0;
		goto out;
	}

out:
	mac->tx_tcnt = tx_tcnt;
	mac->tx_xcnt = tx_xcnt;
	mac->tx_mcnt = s->tx_frames;
	mac->rx_pause = s->rx_pause;
	if (status == 1) {
		t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
		t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset);  /* flush */
		t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, mac->txen);
		t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset);  /* flush */
		mac->toggle_cnt++;
	} else if (status == 2) {
		t3b2_mac_reset(mac);
		mac->toggle_cnt = 0;
	}
	return status;
}

/*
 * This function is called periodically to accumulate the current values of the
 * RMON counters into the port statistics.  Since the packet counters are only
 * 32 bits they can overflow in ~286 secs at 10G, so the function should be
 * called more frequently than that.  The byte counters are 45-bit wide, they
 * would overflow in ~7.8 hours.
 */
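/*
 * Rough arithmetic behind those figures (illustrative): minimum-size frames
 * at 10 Gb/s arrive at about 14.9 Mframes/s (84 bytes on the wire including
 * preamble and inter-frame gap), so a 32-bit frame counter wraps after about
 * 2^32 / 14.9e6 ~= 288 s.  A 45-bit byte counter at 1.25 GB/s wraps after
 * about 2^45 / 1.25e9 ~= 28000 s, a little under 7.8 hours.
 */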
const struct mac_stats *t3_mac_update_stats(struct cmac *mac)
{
#define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset)
#define RMON_UPDATE(mac, name, reg) \
	(mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg)
#define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \
	(mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \
			     ((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32)

	u32 v, lo;

	RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH);
	RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH);
	RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES);
	RMON_UPDATE(mac, rx_bcast_frames, RX_BCAST_FRAMES);
	RMON_UPDATE(mac, rx_fcs_errs, RX_CRC_ERR_FRAMES);
	RMON_UPDATE(mac, rx_pause, RX_PAUSE_FRAMES);
	RMON_UPDATE(mac, rx_jabber, RX_JABBER_FRAMES);
	RMON_UPDATE(mac, rx_short, RX_SHORT_FRAMES);
	RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES);

	RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES);

	v = RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
	if (mac->adapter->params.rev == T3_REV_B2)
		v &= 0x7fffffff;
	mac->stats.rx_too_long += v;

	RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
	RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);
	RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES);
	RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES);
	RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES);
	RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES);
	RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES);

	RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH);
	RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH);
	RMON_UPDATE(mac, tx_mcast_frames, TX_MCAST);
	RMON_UPDATE(mac, tx_bcast_frames, TX_BCAST);
	RMON_UPDATE(mac, tx_pause, TX_PAUSE);
	/* This counts error frames in general (bad FCS, underrun, etc). */
	RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES);

	RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES);
	RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES);
	RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES);
	RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES);
	RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES);
	RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES);
	RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES);

	/* The next stat isn't clear-on-read. */
	t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 51 : 50);
	v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA);
	lo = (u32) mac->stats.rx_cong_drops;
	mac->stats.rx_cong_drops += (u64) (v - lo);

	return &mac->stats;
}