• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/net/cxgb3/
1/*
2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses.  You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 *     Redistribution and use in source and binary forms, with or
11 *     without modification, are permitted provided that the following
12 *     conditions are met:
13 *
14 *      - Redistributions of source code must retain the above
15 *        copyright notice, this list of conditions and the following
16 *        disclaimer.
17 *
18 *      - Redistributions in binary form must reproduce the above
19 *        copyright notice, this list of conditions and the following
20 *        disclaimer in the documentation and/or other materials
21 *        provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33#include "regs.h"
34#include "sge_defs.h"
35#include "firmware_exports.h"
36
37/**
38 *	t3_wait_op_done_val - wait until an operation is completed
39 *	@adapter: the adapter performing the operation
40 *	@reg: the register to check for completion
41 *	@mask: a single-bit field within @reg that indicates completion
42 *	@polarity: the value of the field when the operation is completed
43 *	@attempts: number of check iterations
44 *	@delay: delay in usecs between iterations
45 *	@valp: where to store the value of the register at completion time
46 *
47 *	Wait until an operation is completed by checking a bit in a register
48 *	up to @attempts times.  If @valp is not NULL the value of the register
49 *	at the time it indicated completion is stored there.  Returns 0 if the
50 *	operation completes and -EAGAIN otherwise.
51 */
52
53int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
54			int polarity, int attempts, int delay, u32 *valp)
55{
56	while (1) {
57		u32 val = t3_read_reg(adapter, reg);
58
59		if (!!(val & mask) == polarity) {
60			if (valp)
61				*valp = val;
62			return 0;
63		}
64		if (--attempts == 0)
65			return -EAGAIN;
66		if (delay)
67			udelay(delay);
68	}
69}
70
71/**
72 *	t3_write_regs - write a bunch of registers
73 *	@adapter: the adapter to program
74 *	@p: an array of register address/register value pairs
75 *	@n: the number of address/value pairs
76 *	@offset: register address offset
77 *
78 *	Takes an array of register address/register value pairs and writes each
79 *	value to the corresponding register.  Register addresses are adjusted
80 *	by the supplied offset.
81 */
82void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
83		   int n, unsigned int offset)
84{
85	while (n--) {
86		t3_write_reg(adapter, p->reg_addr + offset, p->val);
87		p++;
88	}
89}
90
91/**
92 *	t3_set_reg_field - set a register field to a value
93 *	@adapter: the adapter to program
94 *	@addr: the register address
95 *	@mask: specifies the portion of the register to modify
96 *	@val: the new value for the register field
97 *
98 *	Sets a register field specified by the supplied mask to the
99 *	given value.
100 */
101void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
102		      u32 val)
103{
104	u32 v = t3_read_reg(adapter, addr) & ~mask;
105
106	t3_write_reg(adapter, addr, v | val);
107	t3_read_reg(adapter, addr);	/* flush */
108}
109
110/**
111 *	t3_read_indirect - read indirectly addressed registers
112 *	@adap: the adapter
113 *	@addr_reg: register holding the indirect address
114 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
118 *
119 *	Reads registers that are accessed indirectly through an address/data
120 *	register pair.
121 */
122static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
123			     unsigned int data_reg, u32 *vals,
124			     unsigned int nregs, unsigned int start_idx)
125{
126	while (nregs--) {
127		t3_write_reg(adap, addr_reg, start_idx);
128		*vals++ = t3_read_reg(adap, data_reg);
129		start_idx++;
130	}
131}
132
133/**
134 *	t3_mc7_bd_read - read from MC7 through backdoor accesses
135 *	@mc7: identifies MC7 to read from
136 *	@start: index of first 64-bit word to read
137 *	@n: number of 64-bit words to read
138 *	@buf: where to store the read result
139 *
140 *	Read n 64-bit words from MC7 starting at word start, using backdoor
141 *	accesses.
142 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	/* Per-width extraction parameters, indexed by mc7->width.
	 * NOTE(review): widths appear to mean 0 = 64-bit, 1 = 32-bit,
	 * 2 = 16-bit, 3 = 8-bit data paths — confirm against mc7 docs. */
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	/* Reject reads that start or end beyond the memory */
	if (start >= size64 || start + n > size64)
		return -EINVAL;

	/* Convert the 64-bit word index into a backdoor byte-ish address;
	 * each inner-loop access below advances it by 8. */
	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		/* One backdoor access per (1 << width) slice of the 64-bit
		 * word, assembled from the highest slice downward. */
		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			/* Issue the backdoor read and poll until not busy */
			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;	/* hardware never completed */

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				/* Full 64-bit path: DATA0 holds the low
				 * word, DATA1 the high word. */
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				/* Narrow paths: align the slice within
				 * DATA1, then place it by slice index. */
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}
190
191/*
192 * Initialize MI1.
193 */
194static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
195{
196	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
197	u32 val = F_PREEN | V_CLKDIV(clkdiv);
198
199	t3_write_reg(adap, A_MI1_CFG, val);
200}
201
202#define MDIO_ATTEMPTS 20
203
204/*
205 * MI1 read/write operations for clause 22 PHYs.
206 */
207static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr,
208		       u16 reg_addr)
209{
210	struct port_info *pi = netdev_priv(dev);
211	struct adapter *adapter = pi->adapter;
212	int ret;
213	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
214
215	mutex_lock(&adapter->mdio_lock);
216	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
217	t3_write_reg(adapter, A_MI1_ADDR, addr);
218	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
219	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
220	if (!ret)
221		ret = t3_read_reg(adapter, A_MI1_DATA);
222	mutex_unlock(&adapter->mdio_lock);
223	return ret;
224}
225
226static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr,
227			u16 reg_addr, u16 val)
228{
229	struct port_info *pi = netdev_priv(dev);
230	struct adapter *adapter = pi->adapter;
231	int ret;
232	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
233
234	mutex_lock(&adapter->mdio_lock);
235	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
236	t3_write_reg(adapter, A_MI1_ADDR, addr);
237	t3_write_reg(adapter, A_MI1_DATA, val);
238	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
239	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
240	mutex_unlock(&adapter->mdio_lock);
241	return ret;
242}
243
/* MDIO operations for directly addressed (clause-22) PHYs */
static const struct mdio_ops mi1_mdio_ops = {
	.read = t3_mi1_read,
	.write = t3_mi1_write,
	.mode_support = MDIO_SUPPORTS_C22
};
249
250/*
251 * Performs the address cycle for clause 45 PHYs.
252 * Must be called with the MDIO_LOCK held.
253 */
254static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
255		       int reg_addr)
256{
257	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
258
259	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
260	t3_write_reg(adapter, A_MI1_ADDR, addr);
261	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
262	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
263	return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
264			       MDIO_ATTEMPTS, 10);
265}
266
267/*
268 * MI1 read/write operations for indirect-addressed PHYs.
269 */
270static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
271			u16 reg_addr)
272{
273	struct port_info *pi = netdev_priv(dev);
274	struct adapter *adapter = pi->adapter;
275	int ret;
276
277	mutex_lock(&adapter->mdio_lock);
278	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
279	if (!ret) {
280		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
281		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
282				      MDIO_ATTEMPTS, 10);
283		if (!ret)
284			ret = t3_read_reg(adapter, A_MI1_DATA);
285	}
286	mutex_unlock(&adapter->mdio_lock);
287	return ret;
288}
289
290static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr,
291			 u16 reg_addr, u16 val)
292{
293	struct port_info *pi = netdev_priv(dev);
294	struct adapter *adapter = pi->adapter;
295	int ret;
296
297	mutex_lock(&adapter->mdio_lock);
298	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
299	if (!ret) {
300		t3_write_reg(adapter, A_MI1_DATA, val);
301		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
302		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
303				      MDIO_ATTEMPTS, 10);
304	}
305	mutex_unlock(&adapter->mdio_lock);
306	return ret;
307}
308
/* MDIO operations for indirect-addressed (clause-45) PHYs */
static const struct mdio_ops mi1_mdio_ext_ops = {
	.read = mi1_ext_read,
	.write = mi1_ext_write,
	.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
};
314
315/**
316 *	t3_mdio_change_bits - modify the value of a PHY register
317 *	@phy: the PHY to operate on
318 *	@mmd: the device address
319 *	@reg: the register address
320 *	@clear: what part of the register value to mask off
321 *	@set: what part of the register value to set
322 *
323 *	Changes the value of a PHY register by applying a mask to its current
324 *	value and ORing the result with a new value.
325 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	unsigned int cur;
	int ret = t3_mdio_read(phy, mmd, reg, &cur);

	if (ret)
		return ret;
	/* Read-modify-write: drop the @clear bits, then OR in @set */
	return t3_mdio_write(phy, mmd, reg, (cur & ~clear) | set);
}
339
340/**
341 *	t3_phy_reset - reset a PHY block
342 *	@phy: the PHY to operate on
343 *	@mmd: the device address of the PHY block to reset
344 *	@wait: how long to wait for the reset to complete in 1ms increments
345 *
346 *	Resets a PHY block and optionally waits for the reset to complete.
347 *	@mmd should be 0 for 10/100/1000 PHYs and the device address to reset
348 *	for 10G PHYs.
349 */
350int t3_phy_reset(struct cphy *phy, int mmd, int wait)
351{
352	int err;
353	unsigned int ctl;
354
355	err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
356				  MDIO_CTRL1_RESET);
357	if (err || !wait)
358		return err;
359
360	do {
361		err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl);
362		if (err)
363			return err;
364		ctl &= MDIO_CTRL1_RESET;
365		if (ctl)
366			msleep(1);
367	} while (ctl && --wait);
368
369	return ctl ? -1 : 0;
370}
371
372/**
373 *	t3_phy_advertise - set the PHY advertisement registers for autoneg
374 *	@phy: the PHY to operate on
375 *	@advert: bitmap of capabilities the PHY should advertise
376 *
377 *	Sets a 10/100/1000 PHY's advertisement registers to advertise the
378 *	requested capabilities.
379 */
380int t3_phy_advertise(struct cphy *phy, unsigned int advert)
381{
382	int err;
383	unsigned int val = 0;
384
385	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val);
386	if (err)
387		return err;
388
389	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
390	if (advert & ADVERTISED_1000baseT_Half)
391		val |= ADVERTISE_1000HALF;
392	if (advert & ADVERTISED_1000baseT_Full)
393		val |= ADVERTISE_1000FULL;
394
395	err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val);
396	if (err)
397		return err;
398
399	val = 1;
400	if (advert & ADVERTISED_10baseT_Half)
401		val |= ADVERTISE_10HALF;
402	if (advert & ADVERTISED_10baseT_Full)
403		val |= ADVERTISE_10FULL;
404	if (advert & ADVERTISED_100baseT_Half)
405		val |= ADVERTISE_100HALF;
406	if (advert & ADVERTISED_100baseT_Full)
407		val |= ADVERTISE_100FULL;
408	if (advert & ADVERTISED_Pause)
409		val |= ADVERTISE_PAUSE_CAP;
410	if (advert & ADVERTISED_Asym_Pause)
411		val |= ADVERTISE_PAUSE_ASYM;
412	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
413}
414
415/**
416 *	t3_phy_advertise_fiber - set fiber PHY advertisement register
417 *	@phy: the PHY to operate on
418 *	@advert: bitmap of capabilities the PHY should advertise
419 *
420 *	Sets a fiber PHY's advertisement register to advertise the
421 *	requested capabilities.
422 */
423int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
424{
425	unsigned int val = 0;
426
427	if (advert & ADVERTISED_1000baseT_Half)
428		val |= ADVERTISE_1000XHALF;
429	if (advert & ADVERTISED_1000baseT_Full)
430		val |= ADVERTISE_1000XFULL;
431	if (advert & ADVERTISED_Pause)
432		val |= ADVERTISE_1000XPAUSE;
433	if (advert & ADVERTISED_Asym_Pause)
434		val |= ADVERTISE_1000XPSE_ASYM;
435	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
436}
437
438/**
439 *	t3_set_phy_speed_duplex - force PHY speed and duplex
440 *	@phy: the PHY to operate on
441 *	@speed: requested PHY speed
442 *	@duplex: requested PHY duplex
443 *
444 *	Force a 10/100/1000 PHY's speed and duplex.  This also disables
445 *	auto-negotiation except for GigE, where auto-negotiation is mandatory.
446 */
447int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
448{
449	int err;
450	unsigned int ctl;
451
452	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl);
453	if (err)
454		return err;
455
456	if (speed >= 0) {
457		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
458		if (speed == SPEED_100)
459			ctl |= BMCR_SPEED100;
460		else if (speed == SPEED_1000)
461			ctl |= BMCR_SPEED1000;
462	}
463	if (duplex >= 0) {
464		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
465		if (duplex == DUPLEX_FULL)
466			ctl |= BMCR_FULLDPLX;
467	}
468	if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
469		ctl |= BMCR_ANENABLE;
470	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl);
471}
472
473int t3_phy_lasi_intr_enable(struct cphy *phy)
474{
475	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
476			     MDIO_PMA_LASI_LSALARM);
477}
478
479int t3_phy_lasi_intr_disable(struct cphy *phy)
480{
481	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
482}
483
484int t3_phy_lasi_intr_clear(struct cphy *phy)
485{
486	u32 val;
487
488	return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
489}
490
491int t3_phy_lasi_intr_handler(struct cphy *phy)
492{
493	unsigned int status;
494	int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT,
495			       &status);
496
497	if (err)
498		return err;
499	return (status & MDIO_PMA_LASI_LSALARM) ? cphy_cause_link_change : 0;
500}
501
/*
 * Per-board configuration table, indexed by adapter id (see
 * t3_get_adapter_info()).  Each entry carries GPIO output-enable/value
 * masks, supported-modes flags, the MDIO ops to use (clause 22 vs 45),
 * and the product name.  NOTE(review): the meaning of the three leading
 * integer fields is defined by struct adapter_info elsewhere — confirm
 * before relying on them.  The two empty entries keep later boards at
 * their expected indices.
 */
static const struct adapter_info t3_adap_info[] = {
	{1, 1, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{1, 1, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{1, 1, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
	{},
	{},
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310" },
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
};
535
536/*
537 * Return the adapter_info structure with a given index.  Out-of-range indices
538 * return NULL.
539 */
540const struct adapter_info *t3_get_adapter_info(unsigned int id)
541{
542	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
543}
544
/* Per-port-type hook: prepares the PHY driver for a given port type. */
struct port_type_info {
	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *ops);
};

/*
 * PHY prep routines indexed by port type (the values stored in
 * vpd_params.port_type by get_vpd_params()).  NULL entries are
 * unsupported/unused port types.
 */
static const struct port_type_info port_types[] = {
	{ NULL },
	{ t3_ael1002_phy_prep },
	{ t3_vsc8211_phy_prep },
	{ NULL},
	{ t3_xaui_direct_phy_prep },
	{ t3_ael2005_phy_prep },
	{ t3_qt2045_phy_prep },
	{ t3_ael1006_phy_prep },
	{ NULL },
	{ t3_aq100x_phy_prep },
	{ t3_ael2020_phy_prep },
};
563
/* Expands to the three fields of one VPD-R keyword entry:
 * 2-byte keyword, 1-byte length, then @len bytes of data. */
#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8 id_tag;		/* 0x82 on cards with VPD at VPD_BASE */
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* mem clock */
	VPD_ENTRY(uclk, 6);	/* uP clk */
	VPD_ENTRY(mdc, 6);	/* MDIO clk */
	VPD_ENTRY(mt, 2);	/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY0 complex */
	VPD_ENTRY(port1, 2);	/* PHY1 complex */
	VPD_ENTRY(port2, 2);	/* PHY2 complex */
	VPD_ENTRY(port3, 2);	/* PHY3 complex */
	VPD_ENTRY(rv, 1);	/* csum */
	u32 pad;		/* for multiple-of-4 sizing and alignment */
};
595
596#define EEPROM_MAX_POLL   40
597#define EEPROM_STAT_ADDR  0x4000
598#define VPD_BASE          0xc00
599
600/**
601 *	t3_seeprom_read - read a VPD EEPROM location
602 *	@adapter: adapter to read
603 *	@addr: EEPROM address
604 *	@data: where to store the read data
605 *
606 *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
607 *	VPD ROM capability.  A zero is written to the flag bit when the
 *	address is written to the control register.  The hardware device will
609 *	set the flag to 1 when 4 bytes have been read into the data register.
610 */
611int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
612{
613	u16 val;
614	int attempts = EEPROM_MAX_POLL;
615	u32 v;
616	unsigned int base = adapter->params.pci.vpd_cap_addr;
617
618	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
619		return -EINVAL;
620
621	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
622	do {
623		udelay(10);
624		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
625	} while (!(val & PCI_VPD_ADDR_F) && --attempts);
626
627	if (!(val & PCI_VPD_ADDR_F)) {
628		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
629		return -EIO;
630	}
631	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
632	*data = cpu_to_le32(v);
633	return 0;
634}
635
636/**
637 *	t3_seeprom_write - write a VPD EEPROM location
638 *	@adapter: adapter to write
639 *	@addr: EEPROM address
640 *	@data: value to write
641 *
642 *	Write a 32-bit word to a location in VPD EEPROM using the card's PCI
643 *	VPD ROM capability.
644 */
645int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
646{
647	u16 val;
648	int attempts = EEPROM_MAX_POLL;
649	unsigned int base = adapter->params.pci.vpd_cap_addr;
650
651	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
652		return -EINVAL;
653
654	pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
655			       le32_to_cpu(data));
656	pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
657			      addr | PCI_VPD_ADDR_F);
658	do {
659		msleep(1);
660		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
661	} while ((val & PCI_VPD_ADDR_F) && --attempts);
662
663	if (val & PCI_VPD_ADDR_F) {
664		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
665		return -EIO;
666	}
667	return 0;
668}
669
670/**
671 *	t3_seeprom_wp - enable/disable EEPROM write protection
672 *	@adapter: the adapter
673 *	@enable: 1 to enable write protection, 0 to disable it
674 *
675 *	Enables or disables write protection on the serial EEPROM.
676 */
677int t3_seeprom_wp(struct adapter *adapter, int enable)
678{
679	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
680}
681
682/**
683 *	get_vpd_params - read VPD parameters from VPD EEPROM
684 *	@adapter: adapter to read
685 *	@p: where to store the parameters
686 *
687 *	Reads card parameters stored in VPD EEPROM.
688 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
	if (ret)
		return ret;
	/* 0x82 is the tag expected in the first byte when the VPD really
	 * does live at VPD_BASE; otherwise fall back to offset 0. */
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	/* Pull in the whole structure, 32 bits per EEPROM read */
	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (__le32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	/* Clock values are stored as decimal ASCII strings */
	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		/* Port types and XAUI configs are stored in hex ASCII */
		p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
		p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	/* MAC base address: 12 hex digits -> 6 bytes */
	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex_to_bin(vpd.na_data[2 * i]) * 16 +
				 hex_to_bin(vpd.na_data[2 * i + 1]);
	return 0;
}
733
734/* serial flash and firmware constants */
735enum {
736	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
737	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
738	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */
739
740	/* flash command opcodes */
741	SF_PROG_PAGE = 2,	/* program page */
742	SF_WR_DISABLE = 4,	/* disable writes */
743	SF_RD_STATUS = 5,	/* read status register */
744	SF_WR_ENABLE = 6,	/* enable writes */
745	SF_RD_DATA_FAST = 0xb,	/* read flash */
746	SF_ERASE_SECTOR = 0xd8,	/* erase sector */
747
748	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
749	FW_VERS_ADDR = 0x7fffc,    /* flash address holding FW version */
750	FW_MIN_SIZE = 8            /* at least version and csum */
751};
752
753/**
754 *	sf1_read - read data from the serial flash
755 *	@adapter: the adapter
756 *	@byte_cnt: number of bytes to read
757 *	@cont: whether another operation will be chained
758 *	@valp: where to store the read data
759 *
760 *	Reads up to 4 bytes of data from the serial flash.  The location of
761 *	the read needs to be specified prior to calling this by issuing the
762 *	appropriate commands to the serial flash.
763 */
764static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
765		    u32 *valp)
766{
767	int ret;
768
769	if (!byte_cnt || byte_cnt > 4)
770		return -EINVAL;
771	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
772		return -EBUSY;
773	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
774	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
775	if (!ret)
776		*valp = t3_read_reg(adapter, A_SF_DATA);
777	return ret;
778}
779
780/**
781 *	sf1_write - write data to the serial flash
782 *	@adapter: the adapter
783 *	@byte_cnt: number of bytes to write
784 *	@cont: whether another operation will be chained
785 *	@val: value to write
786 *
787 *	Writes up to 4 bytes of data to the serial flash.  The location of
788 *	the write needs to be specified prior to calling this by issuing the
789 *	appropriate commands to the serial flash.
790 */
791static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
792		     u32 val)
793{
794	if (!byte_cnt || byte_cnt > 4)
795		return -EINVAL;
796	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
797		return -EBUSY;
798	t3_write_reg(adapter, A_SF_DATA, val);
799	t3_write_reg(adapter, A_SF_OP,
800		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
801	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
802}
803
804/**
805 *	flash_wait_op - wait for a flash operation to complete
806 *	@adapter: the adapter
807 *	@attempts: max number of polls of the status register
808 *	@delay: delay between polls in ms
809 *
810 *	Wait for a flash operation to complete by polling the status register.
811 */
812static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
813{
814	int ret;
815	u32 status;
816
817	while (1) {
818		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
819		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
820			return ret;
821		if (!(status & 1))
822			return 0;
823		if (--attempts == 0)
824			return -EAGAIN;
825		if (delay)
826			msleep(delay);
827	}
828}
829
830/**
831 *	t3_read_flash - read words from serial flash
832 *	@adapter: the adapter
833 *	@addr: the start address for the read
834 *	@nwords: how many 32-bit words to read
835 *	@data: where to store the read data
836 *	@byte_oriented: whether to store data as bytes or as words
837 *
838 *	Read the specified number of 32-bit words from the serial flash.
839 *	If @byte_oriented is set the read data is stored as a byte array
840 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
842 */
843int t3_read_flash(struct adapter *adapter, unsigned int addr,
844		  unsigned int nwords, u32 *data, int byte_oriented)
845{
846	int ret;
847
848	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
849		return -EINVAL;
850
851	addr = swab32(addr) | SF_RD_DATA_FAST;
852
853	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
854	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
855		return ret;
856
857	for (; nwords; nwords--, data++) {
858		ret = sf1_read(adapter, 4, nwords > 1, data);
859		if (ret)
860			return ret;
861		if (byte_oriented)
862			*data = htonl(*data);
863	}
864	return 0;
865}
866
867/**
868 *	t3_write_flash - write up to a page of data to the serial flash
869 *	@adapter: the adapter
870 *	@addr: the start address to write
871 *	@n: length of data to write
872 *	@data: the data to write
873 *
874 *	Writes up to a page of data (256 bytes) to the serial flash starting
875 *	at the given address.
876 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];		/* one full 256-byte flash page */
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* The write must fit in flash and must not cross a page boundary */
	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	/* Command word: SF_PROG_PAGE opcode plus byte-swapped address */
	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	/* Stream the payload out in big-endian chunks of up to 4 bytes,
	 * keeping the transfer chained (cont) until the last chunk. */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	/* data has advanced past the bytes written, so back up by n to
	 * compare the original payload against the page at @offset */
	if (memcmp(data - n, (u8 *) buf + offset, n))
		return -EIO;
	return 0;
}
914
915/**
916 *	t3_get_tp_version - read the tp sram version
917 *	@adapter: the adapter
918 *	@vers: where to place the version
919 *
920 *	Reads the protocol sram version from sram.
921 */
922int t3_get_tp_version(struct adapter *adapter, u32 *vers)
923{
924	int ret;
925
926	/* Get version loaded in SRAM */
927	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
928	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
929			      1, 1, 5, 1);
930	if (ret)
931		return ret;
932
933	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
934
935	return 0;
936}
937
938/**
939 *	t3_check_tpsram_version - read the tp sram version
940 *	@adapter: the adapter
941 *
942 *	Reads the protocol sram version from flash.
943 */
944int t3_check_tpsram_version(struct adapter *adapter)
945{
946	int ret;
947	u32 vers;
948	unsigned int major, minor;
949
950	if (adapter->params.rev == T3_REV_A)
951		return 0;
952
953
954	ret = t3_get_tp_version(adapter, &vers);
955	if (ret)
956		return ret;
957
958	major = G_TP_VERSION_MAJOR(vers);
959	minor = G_TP_VERSION_MINOR(vers);
960
961	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
962		return 0;
963	else {
964		CH_ERR(adapter, "found wrong TP version (%u.%u), "
965		       "driver compiled for version %d.%d\n", major, minor,
966		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
967	}
968	return -EINVAL;
969}
970
971/**
972 *	t3_check_tpsram - check if provided protocol SRAM
973 *			  is compatible with this driver
974 *	@adapter: the adapter
975 *	@tp_sram: the firmware image to write
976 *	@size: image size
977 *
978 *	Checks if an adapter's tp sram is compatible with the driver.
979 *	Returns 0 if the versions are compatible, a negative error otherwise.
980 */
981int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
982		    unsigned int size)
983{
984	u32 csum;
985	unsigned int i;
986	const __be32 *p = (const __be32 *)tp_sram;
987
988	/* Verify checksum */
989	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
990		csum += ntohl(p[i]);
991	if (csum != 0xffffffff) {
992		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
993		       csum);
994		return -EINVAL;
995	}
996
997	return 0;
998}
999
1000enum fw_version_type {
1001	FW_VERSION_N3,
1002	FW_VERSION_T3
1003};
1004
1005/**
1006 *	t3_get_fw_version - read the firmware version
1007 *	@adapter: the adapter
1008 *	@vers: where to place the version
1009 *
1010 *	Reads the FW version from flash.
1011 */
1012int t3_get_fw_version(struct adapter *adapter, u32 *vers)
1013{
1014	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
1015}
1016
1017/**
1018 *	t3_check_fw_version - check if the FW is compatible with this driver
1019 *	@adapter: the adapter
1020 *
1021 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
1022 *	if the versions are compatible, a negative error otherwise.
1023 */
1024int t3_check_fw_version(struct adapter *adapter)
1025{
1026	int ret;
1027	u32 vers;
1028	unsigned int type, major, minor;
1029
1030	ret = t3_get_fw_version(adapter, &vers);
1031	if (ret)
1032		return ret;
1033
1034	type = G_FW_VERSION_TYPE(vers);
1035	major = G_FW_VERSION_MAJOR(vers);
1036	minor = G_FW_VERSION_MINOR(vers);
1037
1038	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1039	    minor == FW_VERSION_MINOR)
1040		return 0;
1041	else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
1042		CH_WARN(adapter, "found old FW minor version(%u.%u), "
1043		        "driver compiled for version %u.%u\n", major, minor,
1044			FW_VERSION_MAJOR, FW_VERSION_MINOR);
1045	else {
1046		CH_WARN(adapter, "found newer FW version(%u.%u), "
1047		        "driver compiled for version %u.%u\n", major, minor,
1048			FW_VERSION_MAJOR, FW_VERSION_MINOR);
1049			return 0;
1050	}
1051	return -EINVAL;
1052}
1053
1054/**
1055 *	t3_flash_erase_sectors - erase a range of flash sectors
1056 *	@adapter: the adapter
1057 *	@start: the first sector to erase
1058 *	@end: the last sector to erase
1059 *
1060 *	Erases the sectors in the given range.
1061 */
1062static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1063{
1064	while (start <= end) {
1065		int ret;
1066
1067		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1068		    (ret = sf1_write(adapter, 4, 0,
1069				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
1070		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
1071			return ret;
1072		start++;
1073	}
1074	return 0;
1075}
1076
1077/*
1078 *	t3_load_fw - download firmware
1079 *	@adapter: the adapter
1080 *	@fw_data: the firmware image to write
1081 *	@size: image size
1082 *
1083 *	Write the supplied firmware image to the card's serial flash.
1084 *	The FW image has the following sections: @size - 8 bytes of code and
1085 *	data, followed by 4 bytes of FW version, followed by the 32-bit
1086 *	1's complement checksum of the whole image.
1087 */
1088int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
1089{
1090	u32 csum;
1091	unsigned int i;
1092	const __be32 *p = (const __be32 *)fw_data;
1093	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
1094
1095	if ((size & 3) || size < FW_MIN_SIZE)
1096		return -EINVAL;
1097	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
1098		return -EFBIG;
1099
1100	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1101		csum += ntohl(p[i]);
1102	if (csum != 0xffffffff) {
1103		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1104		       csum);
1105		return -EINVAL;
1106	}
1107
1108	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1109	if (ret)
1110		goto out;
1111
1112	size -= 8;		/* trim off version and checksum */
1113	for (addr = FW_FLASH_BOOT_ADDR; size;) {
1114		unsigned int chunk_size = min(size, 256U);
1115
1116		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
1117		if (ret)
1118			goto out;
1119
1120		addr += chunk_size;
1121		fw_data += chunk_size;
1122		size -= chunk_size;
1123	}
1124
1125	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
1126out:
1127	if (ret)
1128		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
1129	return ret;
1130}
1131
/* Offset of the CIM control region within the CIM host access space. */
#define CIM_CTL_BASE 0x2000
1133
1134/**
1135 *      t3_cim_ctl_blk_read - read a block from CIM control region
1136 *
1137 *      @adap: the adapter
1138 *      @addr: the start address within the CIM control region
1139 *      @n: number of words to read
1140 *      @valp: where to store the result
1141 *
1142 *      Reads a block of 4-byte words from the CIM control region.
1143 */
1144int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
1145			unsigned int n, unsigned int *valp)
1146{
1147	int ret = 0;
1148
1149	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1150		return -EBUSY;
1151
1152	for ( ; !ret && n--; addr += 4) {
1153		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1154		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1155				      0, 5, 2);
1156		if (!ret)
1157			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1158	}
1159	return ret;
1160}
1161
/*
 * Temporarily block all MAC Rx traffic (exact-match filters, broadcast,
 * multicast hash, promiscuous mode), saving the previous register values
 * through @rx_cfg/@rx_hash_high/@rx_hash_low so that t3_open_rx_traffic()
 * can restore them later.
 */
static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
			       u32 *rx_hash_high, u32 *rx_hash_low)
{
	/* stop Rx unicast traffic */
	t3_mac_disable_exact_filters(mac);

	/* stop broadcast, multicast, promiscuous mode traffic */
	*rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 F_DISBCAST);

	*rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);

	*rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);

	/* Leave time to drain max RX fifo */
	msleep(1);
}
1183
/*
 * Restore the Rx traffic settings previously saved by t3_gate_rx_traffic():
 * re-enable the exact-match filters and put back the Rx config and
 * multicast hash registers.
 */
static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
			       u32 rx_hash_high, u32 rx_hash_low)
{
	t3_mac_enable_exact_filters(mac);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 rx_cfg);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
}
1194
1195/**
1196 *	t3_link_changed - handle interface link changes
1197 *	@adapter: the adapter
1198 *	@port_id: the port index that changed link state
1199 *
1200 *	Called when a port's link settings change to propagate the new values
1201 *	to the associated PHY and MAC.  After performing the common tasks it
1202 *	invokes an OS-specific handler.
1203 */
1204void t3_link_changed(struct adapter *adapter, int port_id)
1205{
1206	int link_ok, speed, duplex, fc;
1207	struct port_info *pi = adap2pinfo(adapter, port_id);
1208	struct cphy *phy = &pi->phy;
1209	struct cmac *mac = &pi->mac;
1210	struct link_config *lc = &pi->link_config;
1211
1212	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1213
1214	if (!lc->link_ok && link_ok) {
1215		u32 rx_cfg, rx_hash_high, rx_hash_low;
1216		u32 status;
1217
1218		t3_xgm_intr_enable(adapter, port_id);
1219		t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
1220		t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
1221		t3_mac_enable(mac, MAC_DIRECTION_RX);
1222
1223		status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
1224		if (status & F_LINKFAULTCHANGE) {
1225			mac->stats.link_faults++;
1226			pi->link_fault = 1;
1227		}
1228		t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
1229	}
1230
1231	if (lc->requested_fc & PAUSE_AUTONEG)
1232		fc &= lc->requested_fc;
1233	else
1234		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1235
1236	if (link_ok == lc->link_ok && speed == lc->speed &&
1237	    duplex == lc->duplex && fc == lc->fc)
1238		return;                            /* nothing changed */
1239
1240	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1241	    uses_xaui(adapter)) {
1242		if (link_ok)
1243			t3b_pcs_reset(mac);
1244		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1245			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
1246	}
1247	lc->link_ok = link_ok;
1248	lc->speed = speed < 0 ? SPEED_INVALID : speed;
1249	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1250
1251	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1252		/* Set MAC speed, duplex, and flow control to match PHY. */
1253		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1254		lc->fc = fc;
1255	}
1256
1257	t3_os_link_changed(adapter, port_id, link_ok && !pi->link_fault,
1258			   speed, duplex, fc);
1259}
1260
/**
 *	t3_link_fault - handle a link fault event on a port
 *	@adapter: the adapter
 *	@port_id: the port index that reported the fault
 *
 *	Gates Rx traffic while the Rx path is re-enabled, then re-reads the
 *	XGMAC interrupt status to see whether a link fault is still latched.
 *	If so the link is marked down; otherwise the link state is refreshed
 *	from the PHY.  The OS layer is notified in both cases.
 */
void t3_link_fault(struct adapter *adapter, int port_id)
{
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cmac *mac = &pi->mac;
	struct cphy *phy = &pi->phy;
	struct link_config *lc = &pi->link_config;
	int link_ok, speed, duplex, fc, link_fault;
	u32 rx_cfg, rx_hash_high, rx_hash_low;

	t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);

	/* On post-rev-A XAUI parts, disable the XAUI lanes while probing. */
	if (adapter->params.rev > 0 && uses_xaui(adapter))
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);

	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
	t3_mac_enable(mac, MAC_DIRECTION_RX);

	t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);

	link_fault = t3_read_reg(adapter,
				 A_XGM_INT_STATUS + mac->offset);
	link_fault &= F_LINKFAULTCHANGE;

	/* Seed the outputs with the current state in case the PHY
	 * handler leaves some of them untouched.
	 */
	link_ok = lc->link_ok;
	speed = lc->speed;
	duplex = lc->duplex;
	fc = lc->fc;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (link_fault) {
		lc->link_ok = 0;
		lc->speed = SPEED_INVALID;
		lc->duplex = DUPLEX_INVALID;

		t3_os_link_fault(adapter, port_id, 0);

		/* Account link faults only when the phy reports a link up */
		if (link_ok)
			mac->stats.link_faults++;
	} else {
		/* Fault has cleared: re-enable the XAUI lanes if the link
		 * is up and propagate the refreshed PHY state.
		 */
		if (link_ok)
			t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
				     F_TXACTENABLE | F_RXEN);

		pi->link_fault = 0;
		lc->link_ok = (unsigned char)link_ok;
		lc->speed = speed < 0 ? SPEED_INVALID : speed;
		lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
		t3_os_link_fault(adapter, port_id, link_ok);
	}
}
1313
1314/**
1315 *	t3_link_start - apply link configuration to MAC/PHY
1316 *	@phy: the PHY to setup
1317 *	@mac: the MAC to setup
1318 *	@lc: the requested link configuration
1319 *
1320 *	Set up a port's MAC and PHY according to a desired link configuration.
1321 *	- If the PHY can auto-negotiate first decide what to advertise, then
1322 *	  enable/disable auto-negotiation as desired, and reset.
1323 *	- If the PHY does not auto-negotiate just reset it.
1324 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1325 *	  otherwise do it later based on the outcome of auto-negotiation.
1326 */
1327int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1328{
1329	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1330
1331	lc->link_ok = 0;
1332	if (lc->supported & SUPPORTED_Autoneg) {
1333		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1334		if (fc) {
1335			lc->advertising |= ADVERTISED_Asym_Pause;
1336			if (fc & PAUSE_RX)
1337				lc->advertising |= ADVERTISED_Pause;
1338		}
1339		phy->ops->advertise(phy, lc->advertising);
1340
1341		if (lc->autoneg == AUTONEG_DISABLE) {
1342			lc->speed = lc->requested_speed;
1343			lc->duplex = lc->requested_duplex;
1344			lc->fc = (unsigned char)fc;
1345			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1346						   fc);
1347			/* Also disables autoneg */
1348			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1349		} else
1350			phy->ops->autoneg_enable(phy);
1351	} else {
1352		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1353		lc->fc = (unsigned char)fc;
1354		phy->ops->reset(phy, 0);
1355	}
1356	return 0;
1357}
1358
1359/**
1360 *	t3_set_vlan_accel - control HW VLAN extraction
1361 *	@adapter: the adapter
1362 *	@ports: bitmap of adapter ports to operate on
1363 *	@on: enable (1) or disable (0) HW VLAN extraction
1364 *
1365 *	Enables or disables HW extraction of VLAN tags for the given port.
1366 */
1367void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1368{
1369	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1370			 ports << S_VLANEXTRACTIONENABLE,
1371			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1372}
1373
/* One entry of a table-driven interrupt handler; tables are terminated
 * by an entry with mask 0 (see t3_handle_intr_status).
 */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
};
1380
1381/**
1382 *	t3_handle_intr_status - table driven interrupt handler
1383 *	@adapter: the adapter that generated the interrupt
1384 *	@reg: the interrupt status register to process
1385 *	@mask: a mask to apply to the interrupt status
1386 *	@acts: table of interrupt actions
1387 *	@stats: statistics counters tracking interrupt occurences
1388 *
1389 *	A table driven interrupt handler that applies a set of masks to an
1390 *	interrupt status word and performs the corresponding actions if the
1391 *	interrupts described by the mask have occured.  The actions include
1392 *	optionally printing a warning or alert message, and optionally
1393 *	incrementing a stat counter.  The table is terminated by an entry
1394 *	specifying mask 0.  Returns the number of fatal interrupt conditions.
1395 */
1396static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1397				 unsigned int mask,
1398				 const struct intr_info *acts,
1399				 unsigned long *stats)
1400{
1401	int fatal = 0;
1402	unsigned int status = t3_read_reg(adapter, reg) & mask;
1403
1404	for (; acts->mask; ++acts) {
1405		if (!(status & acts->mask))
1406			continue;
1407		if (acts->fatal) {
1408			fatal++;
1409			CH_ALERT(adapter, "%s (0x%x)\n",
1410				 acts->msg, status & acts->mask);
1411		} else if (acts->msg)
1412			CH_WARN(adapter, "%s (0x%x)\n",
1413				acts->msg, status & acts->mask);
1414		if (acts->stat_idx >= 0)
1415			stats[acts->stat_idx]++;
1416	}
1417	if (status)		/* clear processed interrupts */
1418		t3_write_reg(adapter, reg, status);
1419	return fatal;
1420}
1421
/* Per-module summary interrupt masks: the set of cause bits each module's
 * handler below cares about.  PL_INTR_MASK selects the top-level (PL)
 * cause bits dispatched by t3_slow_intr_handler().
 */
#define SGE_INTR_MASK (F_RSPQDISABLED | \
		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		       F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
		       F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
		       F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
		       F_LOPIODRBDROPERR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
			F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
	 	       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)
1480/*
1481 * Interrupt handler for the PCIX1 module.
1482 */
1483static void pci_intr_handler(struct adapter *adapter)
1484{
1485	static const struct intr_info pcix1_intr_info[] = {
1486		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
1487		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
1488		{F_RCVTARABT, "PCI received target abort", -1, 1},
1489		{F_RCVMSTABT, "PCI received master abort", -1, 1},
1490		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
1491		{F_DETPARERR, "PCI detected parity error", -1, 1},
1492		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
1493		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
1494		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
1495		 1},
1496		{F_DETCORECCERR, "PCI correctable ECC error",
1497		 STAT_PCI_CORR_ECC, 0},
1498		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
1499		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1500		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1501		 1},
1502		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1503		 1},
1504		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1505		 1},
1506		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1507		 "error", -1, 1},
1508		{0}
1509	};
1510
1511	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1512				  pcix1_intr_info, adapter->irq_stats))
1513		t3_fatal_err(adapter);
1514}
1515
1516/*
1517 * Interrupt handler for the PCIE module.
1518 */
1519static void pcie_intr_handler(struct adapter *adapter)
1520{
1521	static const struct intr_info pcie_intr_info[] = {
1522		{F_PEXERR, "PCI PEX error", -1, 1},
1523		{F_UNXSPLCPLERRR,
1524		 "PCI unexpected split completion DMA read error", -1, 1},
1525		{F_UNXSPLCPLERRC,
1526		 "PCI unexpected split completion DMA command error", -1, 1},
1527		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1528		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
1529		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
1530		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
1531		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1532		 "PCI MSI-X table/PBA parity error", -1, 1},
1533		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
1534		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
1535		{F_RXPARERR, "PCI Rx parity error", -1, 1},
1536		{F_TXPARERR, "PCI Tx parity error", -1, 1},
1537		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
1538		{0}
1539	};
1540
1541	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1542		CH_ALERT(adapter, "PEX error code 0x%x\n",
1543			 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1544
1545	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1546				  pcie_intr_info, adapter->irq_stats))
1547		t3_fatal_err(adapter);
1548}
1549
1550/*
1551 * TP interrupt handler.
1552 */
1553static void tp_intr_handler(struct adapter *adapter)
1554{
1555	static const struct intr_info tp_intr_info[] = {
1556		{0xffffff, "TP parity error", -1, 1},
1557		{0x1000000, "TP out of Rx pages", -1, 1},
1558		{0x2000000, "TP out of Tx pages", -1, 1},
1559		{0}
1560	};
1561
1562	static struct intr_info tp_intr_info_t3c[] = {
1563		{0x1fffffff, "TP parity error", -1, 1},
1564		{F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
1565		{F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
1566		{0}
1567	};
1568
1569	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1570				  adapter->params.rev < T3_REV_C ?
1571				  tp_intr_info : tp_intr_info_t3c, NULL))
1572		t3_fatal_err(adapter);
1573}
1574
1575/*
1576 * CIM interrupt handler.
1577 */
1578static void cim_intr_handler(struct adapter *adapter)
1579{
1580	static const struct intr_info cim_intr_info[] = {
1581		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
1582		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
1583		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
1584		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
1585		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
1586		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
1587		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
1588		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
1589		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
1590		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
1591		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
1592		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
1593		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
1594		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
1595		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
1596		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
1597		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
1598		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
1599		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
1600		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
1601		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
1602		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
1603		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
1604		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
1605		{0}
1606	};
1607
1608	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1609				  cim_intr_info, NULL))
1610		t3_fatal_err(adapter);
1611}
1612
1613/*
1614 * ULP RX interrupt handler.
1615 */
1616static void ulprx_intr_handler(struct adapter *adapter)
1617{
1618	static const struct intr_info ulprx_intr_info[] = {
1619		{F_PARERRDATA, "ULP RX data parity error", -1, 1},
1620		{F_PARERRPCMD, "ULP RX command parity error", -1, 1},
1621		{F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
1622		{F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
1623		{F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
1624		{F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
1625		{F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
1626		{F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
1627		{0}
1628	};
1629
1630	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1631				  ulprx_intr_info, NULL))
1632		t3_fatal_err(adapter);
1633}
1634
1635/*
1636 * ULP TX interrupt handler.
1637 */
1638static void ulptx_intr_handler(struct adapter *adapter)
1639{
1640	static const struct intr_info ulptx_intr_info[] = {
1641		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1642		 STAT_ULP_CH0_PBL_OOB, 0},
1643		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1644		 STAT_ULP_CH1_PBL_OOB, 0},
1645		{0xfc, "ULP TX parity error", -1, 1},
1646		{0}
1647	};
1648
1649	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1650				  ulptx_intr_info, adapter->irq_stats))
1651		t3_fatal_err(adapter);
1652}
1653
/* Aggregate ICSPI/OESPI framing-error bits, used by pmtx_intr_handler()
 * and PMTX_INTR_MASK.
 */
#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1662
1663/*
1664 * PM TX interrupt handler.
1665 */
1666static void pmtx_intr_handler(struct adapter *adapter)
1667{
1668	static const struct intr_info pmtx_intr_info[] = {
1669		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1670		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1671		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1672		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1673		 "PMTX ispi parity error", -1, 1},
1674		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1675		 "PMTX ospi parity error", -1, 1},
1676		{0}
1677	};
1678
1679	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1680				  pmtx_intr_info, NULL))
1681		t3_fatal_err(adapter);
1682}
1683
/* Aggregate IESPI/OCSPI framing-error bits, used by pmrx_intr_handler()
 * and PMRX_INTR_MASK.
 */
#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1692
1693/*
1694 * PM RX interrupt handler.
1695 */
1696static void pmrx_intr_handler(struct adapter *adapter)
1697{
1698	static const struct intr_info pmrx_intr_info[] = {
1699		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1700		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1701		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1702		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1703		 "PMRX ispi parity error", -1, 1},
1704		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1705		 "PMRX ospi parity error", -1, 1},
1706		{0}
1707	};
1708
1709	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1710				  pmrx_intr_info, NULL))
1711		t3_fatal_err(adapter);
1712}
1713
1714/*
1715 * CPL switch interrupt handler.
1716 */
1717static void cplsw_intr_handler(struct adapter *adapter)
1718{
1719	static const struct intr_info cplsw_intr_info[] = {
1720		{F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
1721		{F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
1722		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1723		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1724		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1725		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1726		{0}
1727	};
1728
1729	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1730				  cplsw_intr_info, NULL))
1731		t3_fatal_err(adapter);
1732}
1733
1734/*
1735 * MPS interrupt handler.
1736 */
1737static void mps_intr_handler(struct adapter *adapter)
1738{
1739	static const struct intr_info mps_intr_info[] = {
1740		{0x1ff, "MPS parity error", -1, 1},
1741		{0}
1742	};
1743
1744	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1745				  mps_intr_info, NULL))
1746		t3_fatal_err(adapter);
1747}
1748
/* MC7 causes considered fatal: uncorrectable, parity, and address errors. */
#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 interrupt handler.  Counts and logs correctable, uncorrectable,
 * parity, and address errors; calls t3_fatal_err() for the fatal subset
 * and finally clears the processed cause bits.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
	struct adapter *adapter = mc7->adapter;
	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

	if (cause & F_CE) {
		/* Correctable ECC error: warn with address and data. */
		mc7->stats.corr_err++;
		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
			"data 0x%x 0x%x 0x%x\n", mc7->name,
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	}

	if (cause & F_UE) {
		/* Uncorrectable ECC error: alert with address and data. */
		mc7->stats.uncorr_err++;
		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
			 "data 0x%x 0x%x 0x%x\n", mc7->name,
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	}

	if (G_PE(cause)) {
		mc7->stats.parity_err++;
		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
			 mc7->name, G_PE(cause));
	}

	if (cause & F_AE) {
		u32 addr = 0;

		/* The error address register is only present on rev > 0. */
		if (adapter->params.rev > 0)
			addr = t3_read_reg(adapter,
					   mc7->offset + A_MC7_ERR_ADDR);
		mc7->stats.addr_err++;
		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
			 mc7->name, addr);
	}

	if (cause & MC7_INTR_FATAL)
		t3_fatal_err(adapter);

	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}
1801
/* XGMAC causes considered fatal: Tx/Rx FIFO parity errors. */
#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
/*
 * XGMAC interrupt handler.  Updates the per-MAC statistics for each cause,
 * hands link faults to the OS layer, clears the processed causes, and
 * calls t3_fatal_err() on FIFO parity errors.  Returns nonzero if any
 * (unmasked) cause was pending.
 */
static int mac_intr_handler(struct adapter *adap, unsigned int idx)
{
	struct cmac *mac = &adap2pinfo(adap, idx)->mac;
	/*
	 * We mask out interrupt causes for which we're not taking interrupts.
	 * This allows us to use polling logic to monitor some of the other
	 * conditions when taking interrupts would impose too much load on the
	 * system.
	 */
	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
		    ~F_RXFIFO_OVERFLOW;

	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
		mac->stats.tx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
	}
	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
		mac->stats.rx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
	}
	if (cause & F_TXFIFO_UNDERRUN)
		mac->stats.tx_fifo_urun++;
	/* Note: F_RXFIFO_OVERFLOW is masked out of @cause above, so this
	 * counter is not incremented from here.
	 */
	if (cause & F_RXFIFO_OVERFLOW)
		mac->stats.rx_fifo_ovfl++;
	if (cause & V_SERDES_LOS(M_SERDES_LOS))
		mac->stats.serdes_signal_loss++;
	if (cause & F_XAUIPCSCTCERR)
		mac->stats.xaui_pcs_ctc_err++;
	if (cause & F_XAUIPCSALIGNCHANGE)
		mac->stats.xaui_pcs_align_change++;
	if (cause & F_XGM_INT) {
		/* Disable further XGM summary interrupts until the link
		 * fault has been handled by the OS layer.
		 */
		t3_set_reg_field(adap,
				 A_XGM_INT_ENABLE + mac->offset,
				 F_XGM_INT, 0);
		mac->stats.link_faults++;

		t3_os_link_fault_handler(adap, idx);
	}

	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);

	if (cause & XGM_INTR_FATAL)
		t3_fatal_err(adap);

	return cause != 0;
}
1853
1854/*
1855 * Interrupt handler for PHY events.
1856 */
1857int t3_phy_intr_handler(struct adapter *adapter)
1858{
1859	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1860
1861	for_each_port(adapter, i) {
1862		struct port_info *p = adap2pinfo(adapter, i);
1863
1864		if (!(p->phy.caps & SUPPORTED_IRQ))
1865			continue;
1866
1867		if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
1868			int phy_cause = p->phy.ops->intr_handler(&p->phy);
1869
1870			if (phy_cause & cphy_cause_link_change)
1871				t3_link_changed(adapter, i);
1872			if (phy_cause & cphy_cause_fifo_error)
1873				p->phy.fifo_errors++;
1874			if (phy_cause & cphy_cause_module_change)
1875				t3_os_phymod_changed(adapter, i);
1876		}
1877	}
1878
1879	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1880	return 0;
1881}
1882
1883/*
1884 * T3 slow path (non-data) interrupt handler.
1885 */
1886int t3_slow_intr_handler(struct adapter *adapter)
1887{
1888	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1889
1890	cause &= adapter->slow_intr_mask;
1891	if (!cause)
1892		return 0;
1893	if (cause & F_PCIM0) {
1894		if (is_pcie(adapter))
1895			pcie_intr_handler(adapter);
1896		else
1897			pci_intr_handler(adapter);
1898	}
1899	if (cause & F_SGE3)
1900		t3_sge_err_intr_handler(adapter);
1901	if (cause & F_MC7_PMRX)
1902		mc7_intr_handler(&adapter->pmrx);
1903	if (cause & F_MC7_PMTX)
1904		mc7_intr_handler(&adapter->pmtx);
1905	if (cause & F_MC7_CM)
1906		mc7_intr_handler(&adapter->cm);
1907	if (cause & F_CIM)
1908		cim_intr_handler(adapter);
1909	if (cause & F_TP1)
1910		tp_intr_handler(adapter);
1911	if (cause & F_ULP2_RX)
1912		ulprx_intr_handler(adapter);
1913	if (cause & F_ULP2_TX)
1914		ulptx_intr_handler(adapter);
1915	if (cause & F_PM1_RX)
1916		pmrx_intr_handler(adapter);
1917	if (cause & F_PM1_TX)
1918		pmtx_intr_handler(adapter);
1919	if (cause & F_CPL_SWITCH)
1920		cplsw_intr_handler(adapter);
1921	if (cause & F_MPS0)
1922		mps_intr_handler(adapter);
1923	if (cause & F_MC5A)
1924		t3_mc5_intr_handler(&adapter->mc5);
1925	if (cause & F_XGMAC0_0)
1926		mac_intr_handler(adapter, 0);
1927	if (cause & F_XGMAC0_1)
1928		mac_intr_handler(adapter, 1);
1929	if (cause & F_T3DBG)
1930		t3_os_ext_intr_handler(adapter);
1931
1932	/* Clear the interrupts just processed. */
1933	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1934	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
1935	return 1;
1936}
1937
1938static unsigned int calc_gpio_intr(struct adapter *adap)
1939{
1940	unsigned int i, gpi_intr = 0;
1941
1942	for_each_port(adap, i)
1943		if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1944		    adapter_info(adap)->gpio_intr[i])
1945			gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1946	return gpi_intr;
1947}
1948
1949/**
1950 *	t3_intr_enable - enable interrupts
1951 *	@adapter: the adapter whose interrupts should be enabled
1952 *
1953 *	Enable interrupts by setting the interrupt enable registers of the
1954 *	various HW modules and then enabling the top-level interrupt
1955 *	concentrator.
1956 */
1957void t3_intr_enable(struct adapter *adapter)
1958{
1959	static const struct addr_val_pair intr_en_avp[] = {
1960		{A_SG_INT_ENABLE, SGE_INTR_MASK},
1961		{A_MC7_INT_ENABLE, MC7_INTR_MASK},
1962		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1963		 MC7_INTR_MASK},
1964		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1965		 MC7_INTR_MASK},
1966		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1967		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1968		{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1969		{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1970		{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1971		{A_MPS_INT_ENABLE, MPS_INTR_MASK},
1972	};
1973
1974	adapter->slow_intr_mask = PL_INTR_MASK;
1975
1976	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1977	t3_write_reg(adapter, A_TP_INT_ENABLE,
1978		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
1979
1980	if (adapter->params.rev > 0) {
1981		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1982			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1983		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1984			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1985			     F_PBL_BOUND_ERR_CH1);
1986	} else {
1987		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1988		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1989	}
1990
1991	t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
1992
1993	if (is_pcie(adapter))
1994		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1995	else
1996		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1997	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1998	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
1999}
2000
2001/**
2002 *	t3_intr_disable - disable a card's interrupts
2003 *	@adapter: the adapter whose interrupts should be disabled
2004 *
2005 *	Disable interrupts.  We only disable the top-level interrupt
2006 *	concentrator and the SGE data interrupts.
2007 */
2008void t3_intr_disable(struct adapter *adapter)
2009{
2010	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
2011	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
2012	adapter->slow_intr_mask = 0;
2013}
2014
2015/**
2016 *	t3_intr_clear - clear all interrupts
2017 *	@adapter: the adapter whose interrupts should be cleared
2018 *
2019 *	Clears all interrupts.
2020 */
2021void t3_intr_clear(struct adapter *adapter)
2022{
2023	static const unsigned int cause_reg_addr[] = {
2024		A_SG_INT_CAUSE,
2025		A_SG_RSPQ_FL_STATUS,
2026		A_PCIX_INT_CAUSE,
2027		A_MC7_INT_CAUSE,
2028		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2029		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2030		A_CIM_HOST_INT_CAUSE,
2031		A_TP_INT_CAUSE,
2032		A_MC5_DB_INT_CAUSE,
2033		A_ULPRX_INT_CAUSE,
2034		A_ULPTX_INT_CAUSE,
2035		A_CPL_INTR_CAUSE,
2036		A_PM1_TX_INT_CAUSE,
2037		A_PM1_RX_INT_CAUSE,
2038		A_MPS_INT_CAUSE,
2039		A_T3DBG_INT_CAUSE,
2040	};
2041	unsigned int i;
2042
2043	/* Clear PHY and MAC interrupts for each port. */
2044	for_each_port(adapter, i)
2045	    t3_port_intr_clear(adapter, i);
2046
2047	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
2048		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
2049
2050	if (is_pcie(adapter))
2051		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
2052	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
2053	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
2054}
2055
2056void t3_xgm_intr_enable(struct adapter *adapter, int idx)
2057{
2058	struct port_info *pi = adap2pinfo(adapter, idx);
2059
2060	t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
2061		     XGM_EXTRA_INTR_MASK);
2062}
2063
2064void t3_xgm_intr_disable(struct adapter *adapter, int idx)
2065{
2066	struct port_info *pi = adap2pinfo(adapter, idx);
2067
2068	t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
2069		     0x7ff);
2070}
2071
2072/**
2073 *	t3_port_intr_enable - enable port-specific interrupts
2074 *	@adapter: associated adapter
2075 *	@idx: index of port whose interrupts should be enabled
2076 *
2077 *	Enable port-specific (i.e., MAC and PHY) interrupts for the given
2078 *	adapter port.
2079 */
2080void t3_port_intr_enable(struct adapter *adapter, int idx)
2081{
2082	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2083
2084	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
2085	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2086	phy->ops->intr_enable(phy);
2087}
2088
2089/**
2090 *	t3_port_intr_disable - disable port-specific interrupts
2091 *	@adapter: associated adapter
2092 *	@idx: index of port whose interrupts should be disabled
2093 *
2094 *	Disable port-specific (i.e., MAC and PHY) interrupts for the given
2095 *	adapter port.
2096 */
2097void t3_port_intr_disable(struct adapter *adapter, int idx)
2098{
2099	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2100
2101	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
2102	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2103	phy->ops->intr_disable(phy);
2104}
2105
2106/**
2107 *	t3_port_intr_clear - clear port-specific interrupts
2108 *	@adapter: associated adapter
2109 *	@idx: index of port whose interrupts to clear
2110 *
2111 *	Clear port-specific (i.e., MAC and PHY) interrupts for the given
2112 *	adapter port.
2113 */
2114void t3_port_intr_clear(struct adapter *adapter, int idx)
2115{
2116	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2117
2118	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
2119	t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
2120	phy->ops->intr_clear(phy);
2121}
2122
2123#define SG_CONTEXT_CMD_ATTEMPTS 100
2124
2125/**
2126 * 	t3_sge_write_context - write an SGE context
2127 * 	@adapter: the adapter
2128 * 	@id: the context id
2129 * 	@type: the context type
2130 *
2131 * 	Program an SGE context with the values already loaded in the
2132 * 	CONTEXT_DATA? registers.
2133 */
2134static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
2135				unsigned int type)
2136{
2137	if (type == F_RESPONSEQ) {
2138		/*
2139		 * Can't write the Response Queue Context bits for
2140		 * Interrupt Armed or the Reserve bits after the chip
2141		 * has been initialized out of reset.  Writing to these
2142		 * bits can confuse the hardware.
2143		 */
2144		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2145		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2146		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
2147		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2148	} else {
2149		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2150		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2151		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2152		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2153	}
2154	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2155		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2156	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2157			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2158}
2159
2160/**
2161 *	clear_sge_ctxt - completely clear an SGE context
2162 *	@adapter: the adapter
2163 *	@id: the context id
2164 *	@type: the context type
2165 *
2166 *	Completely clear an SGE context.  Used predominantly at post-reset
2167 *	initialization.  Note in particular that we don't skip writing to any
2168 *	"sensitive bits" in the contexts the way that t3_sge_write_context()
2169 *	does ...
2170 */
2171static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
2172			  unsigned int type)
2173{
2174	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2175	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2176	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2177	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2178	t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
2179	t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
2180	t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
2181	t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
2182	t3_write_reg(adap, A_SG_CONTEXT_CMD,
2183		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2184	return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2185			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2186}
2187
2188/**
2189 *	t3_sge_init_ecntxt - initialize an SGE egress context
2190 *	@adapter: the adapter to configure
2191 *	@id: the context id
2192 *	@gts_enable: whether to enable GTS for the context
2193 *	@type: the egress context type
2194 *	@respq: associated response queue
2195 *	@base_addr: base address of queue
2196 *	@size: number of queue entries
2197 *	@token: uP token
2198 *	@gen: initial generation value for the context
2199 *	@cidx: consumer pointer
2200 *
2201 *	Initialize an SGE egress context and make it ready for use.  If the
2202 *	platform allows concurrent context operations, the caller is
2203 *	responsible for appropriate locking.
2204 */
2205int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
2206		       enum sge_context_type type, int respq, u64 base_addr,
2207		       unsigned int size, unsigned int token, int gen,
2208		       unsigned int cidx)
2209{
2210	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2211
2212	if (base_addr & 0xfff)	/* must be 4K aligned */
2213		return -EINVAL;
2214	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2215		return -EBUSY;
2216
2217	base_addr >>= 12;
2218	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2219		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2220	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2221		     V_EC_BASE_LO(base_addr & 0xffff));
2222	base_addr >>= 16;
2223	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
2224	base_addr >>= 32;
2225	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2226		     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
2227		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2228		     F_EC_VALID);
2229	return t3_sge_write_context(adapter, id, F_EGRESS);
2230}
2231
2232/**
2233 *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2234 *	@adapter: the adapter to configure
2235 *	@id: the context id
2236 *	@gts_enable: whether to enable GTS for the context
2237 *	@base_addr: base address of queue
2238 *	@size: number of queue entries
2239 *	@bsize: size of each buffer for this queue
2240 *	@cong_thres: threshold to signal congestion to upstream producers
2241 *	@gen: initial generation value for the context
2242 *	@cidx: consumer pointer
2243 *
2244 *	Initialize an SGE free list context and make it ready for use.  The
2245 *	caller is responsible for ensuring only one context operation occurs
2246 *	at a time.
2247 */
2248int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
2249			int gts_enable, u64 base_addr, unsigned int size,
2250			unsigned int bsize, unsigned int cong_thres, int gen,
2251			unsigned int cidx)
2252{
2253	if (base_addr & 0xfff)	/* must be 4K aligned */
2254		return -EINVAL;
2255	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2256		return -EBUSY;
2257
2258	base_addr >>= 12;
2259	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
2260	base_addr >>= 32;
2261	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2262		     V_FL_BASE_HI((u32) base_addr) |
2263		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2264	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2265		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2266		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2267	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2268		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2269		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2270	return t3_sge_write_context(adapter, id, F_FREELIST);
2271}
2272
2273/**
2274 *	t3_sge_init_rspcntxt - initialize an SGE response queue context
2275 *	@adapter: the adapter to configure
2276 *	@id: the context id
2277 *	@irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2278 *	@base_addr: base address of queue
2279 *	@size: number of queue entries
2280 *	@fl_thres: threshold for selecting the normal or jumbo free list
2281 *	@gen: initial generation value for the context
2282 *	@cidx: consumer pointer
2283 *
2284 *	Initialize an SGE response queue context and make it ready for use.
2285 *	The caller is responsible for ensuring only one context operation
2286 *	occurs at a time.
2287 */
2288int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
2289			 int irq_vec_idx, u64 base_addr, unsigned int size,
2290			 unsigned int fl_thres, int gen, unsigned int cidx)
2291{
2292	unsigned int intr = 0;
2293
2294	if (base_addr & 0xfff)	/* must be 4K aligned */
2295		return -EINVAL;
2296	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2297		return -EBUSY;
2298
2299	base_addr >>= 12;
2300	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2301		     V_CQ_INDEX(cidx));
2302	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2303	base_addr >>= 32;
2304	if (irq_vec_idx >= 0)
2305		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2306	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2307		     V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2308	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2309	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2310}
2311
2312/**
2313 *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
2314 *	@adapter: the adapter to configure
2315 *	@id: the context id
2316 *	@base_addr: base address of queue
2317 *	@size: number of queue entries
2318 *	@rspq: response queue for async notifications
2319 *	@ovfl_mode: CQ overflow mode
2320 *	@credits: completion queue credits
2321 *	@credit_thres: the credit threshold
2322 *
2323 *	Initialize an SGE completion queue context and make it ready for use.
2324 *	The caller is responsible for ensuring only one context operation
2325 *	occurs at a time.
2326 */
2327int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2328			unsigned int size, int rspq, int ovfl_mode,
2329			unsigned int credits, unsigned int credit_thres)
2330{
2331	if (base_addr & 0xfff)	/* must be 4K aligned */
2332		return -EINVAL;
2333	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2334		return -EBUSY;
2335
2336	base_addr >>= 12;
2337	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2338	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2339	base_addr >>= 32;
2340	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2341		     V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2342		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2343		     V_CQ_ERR(ovfl_mode));
2344	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2345		     V_CQ_CREDIT_THRES(credit_thres));
2346	return t3_sge_write_context(adapter, id, F_CQ);
2347}
2348
2349/**
2350 *	t3_sge_enable_ecntxt - enable/disable an SGE egress context
2351 *	@adapter: the adapter
2352 *	@id: the egress context id
2353 *	@enable: enable (1) or disable (0) the context
2354 *
2355 *	Enable or disable an SGE egress context.  The caller is responsible for
2356 *	ensuring only one context operation occurs at a time.
2357 */
2358int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2359{
2360	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2361		return -EBUSY;
2362
2363	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2364	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2365	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2366	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2367	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2368	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2369		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2370	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2371			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2372}
2373
2374/**
2375 *	t3_sge_disable_fl - disable an SGE free-buffer list
2376 *	@adapter: the adapter
2377 *	@id: the free list context id
2378 *
2379 *	Disable an SGE free-buffer list.  The caller is responsible for
2380 *	ensuring only one context operation occurs at a time.
2381 */
2382int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
2383{
2384	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2385		return -EBUSY;
2386
2387	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2388	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2389	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2390	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2391	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2392	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2393		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2394	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2395			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2396}
2397
2398/**
2399 *	t3_sge_disable_rspcntxt - disable an SGE response queue
2400 *	@adapter: the adapter
2401 *	@id: the response queue context id
2402 *
2403 *	Disable an SGE response queue.  The caller is responsible for
2404 *	ensuring only one context operation occurs at a time.
2405 */
2406int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2407{
2408	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2409		return -EBUSY;
2410
2411	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2412	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2413	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2414	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2415	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2416	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2417		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2418	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2419			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2420}
2421
2422/**
2423 *	t3_sge_disable_cqcntxt - disable an SGE completion queue
2424 *	@adapter: the adapter
2425 *	@id: the completion queue context id
2426 *
2427 *	Disable an SGE completion queue.  The caller is responsible for
2428 *	ensuring only one context operation occurs at a time.
2429 */
2430int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2431{
2432	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2433		return -EBUSY;
2434
2435	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2436	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2437	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2438	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2439	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2440	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2441		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2442	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2443			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2444}
2445
2446/**
2447 *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
2448 *	@adapter: the adapter
2449 *	@id: the context id
2450 *	@op: the operation to perform
2451 *
2452 *	Perform the selected operation on an SGE completion queue context.
2453 *	The caller is responsible for ensuring only one context operation
2454 *	occurs at a time.
2455 */
2456int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2457		      unsigned int credits)
2458{
2459	u32 val;
2460
2461	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2462		return -EBUSY;
2463
2464	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2465	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2466		     V_CONTEXT(id) | F_CQ);
2467	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2468				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2469		return -EIO;
2470
2471	if (op >= 2 && op < 7) {
2472		if (adapter->params.rev > 0)
2473			return G_CQ_INDEX(val);
2474
2475		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2476			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2477		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2478				    F_CONTEXT_CMD_BUSY, 0,
2479				    SG_CONTEXT_CMD_ATTEMPTS, 1))
2480			return -EIO;
2481		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2482	}
2483	return 0;
2484}
2485
2486/**
2487 * 	t3_sge_read_context - read an SGE context
2488 * 	@type: the context type
2489 * 	@adapter: the adapter
2490 * 	@id: the context id
2491 * 	@data: holds the retrieved context
2492 *
2493 * 	Read an SGE egress context.  The caller is responsible for ensuring
2494 * 	only one context operation occurs at a time.
2495 */
2496static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2497			       unsigned int id, u32 data[4])
2498{
2499	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2500		return -EBUSY;
2501
2502	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2503		     V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2504	if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2505			    SG_CONTEXT_CMD_ATTEMPTS, 1))
2506		return -EIO;
2507	data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2508	data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2509	data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2510	data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2511	return 0;
2512}
2513
2514/**
2515 * 	t3_sge_read_ecntxt - read an SGE egress context
2516 * 	@adapter: the adapter
2517 * 	@id: the context id
2518 * 	@data: holds the retrieved context
2519 *
2520 * 	Read an SGE egress context.  The caller is responsible for ensuring
2521 * 	only one context operation occurs at a time.
2522 */
2523int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2524{
2525	if (id >= 65536)
2526		return -EINVAL;
2527	return t3_sge_read_context(F_EGRESS, adapter, id, data);
2528}
2529
2530/**
2531 * 	t3_sge_read_cq - read an SGE CQ context
2532 * 	@adapter: the adapter
2533 * 	@id: the context id
2534 * 	@data: holds the retrieved context
2535 *
2536 * 	Read an SGE CQ context.  The caller is responsible for ensuring
2537 * 	only one context operation occurs at a time.
2538 */
2539int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2540{
2541	if (id >= 65536)
2542		return -EINVAL;
2543	return t3_sge_read_context(F_CQ, adapter, id, data);
2544}
2545
2546/**
2547 * 	t3_sge_read_fl - read an SGE free-list context
2548 * 	@adapter: the adapter
2549 * 	@id: the context id
2550 * 	@data: holds the retrieved context
2551 *
2552 * 	Read an SGE free-list context.  The caller is responsible for ensuring
2553 * 	only one context operation occurs at a time.
2554 */
2555int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2556{
2557	if (id >= SGE_QSETS * 2)
2558		return -EINVAL;
2559	return t3_sge_read_context(F_FREELIST, adapter, id, data);
2560}
2561
2562/**
2563 * 	t3_sge_read_rspq - read an SGE response queue context
2564 * 	@adapter: the adapter
2565 * 	@id: the context id
2566 * 	@data: holds the retrieved context
2567 *
2568 * 	Read an SGE response queue context.  The caller is responsible for
2569 * 	ensuring only one context operation occurs at a time.
2570 */
2571int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2572{
2573	if (id >= SGE_QSETS)
2574		return -EINVAL;
2575	return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2576}
2577
2578/**
2579 *	t3_config_rss - configure Rx packet steering
2580 *	@adapter: the adapter
2581 *	@rss_config: RSS settings (written to TP_RSS_CONFIG)
2582 *	@cpus: values for the CPU lookup table (0xff terminated)
2583 *	@rspq: values for the response queue lookup table (0xffff terminated)
2584 *
2585 *	Programs the receive packet steering logic.  @cpus and @rspq provide
2586 *	the values for the CPU and response queue lookup tables.  If they
2587 *	provide fewer values than the size of the tables the supplied values
2588 *	are used repeatedly until the tables are fully populated.
2589 */
2590void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2591		   const u8 * cpus, const u16 *rspq)
2592{
2593	int i, j, cpu_idx = 0, q_idx = 0;
2594
2595	if (cpus)
2596		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2597			u32 val = i << 16;
2598
2599			for (j = 0; j < 2; ++j) {
2600				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2601				if (cpus[cpu_idx] == 0xff)
2602					cpu_idx = 0;
2603			}
2604			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2605		}
2606
2607	if (rspq)
2608		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2609			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2610				     (i << 16) | rspq[q_idx++]);
2611			if (rspq[q_idx] == 0xffff)
2612				q_idx = 0;
2613		}
2614
2615	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2616}
2617
2618/**
2619 *	t3_read_rss - read the contents of the RSS tables
2620 *	@adapter: the adapter
2621 *	@lkup: holds the contents of the RSS lookup table
2622 *	@map: holds the contents of the RSS map table
2623 *
2624 *	Reads the contents of the receive packet steering tables.
2625 */
2626int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
2627{
2628	int i;
2629	u32 val;
2630
2631	if (lkup)
2632		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2633			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2634				     0xffff0000 | i);
2635			val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2636			if (!(val & 0x80000000))
2637				return -EAGAIN;
2638			*lkup++ = val;
2639			*lkup++ = (val >> 8);
2640		}
2641
2642	if (map)
2643		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2644			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2645				     0xffff0000 | i);
2646			val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2647			if (!(val & 0x80000000))
2648				return -EAGAIN;
2649			*map++ = val;
2650		}
2651	return 0;
2652}
2653
2654/**
2655 *	t3_tp_set_offload_mode - put TP in NIC/offload mode
2656 *	@adap: the adapter
2657 *	@enable: 1 to select offload mode, 0 for regular NIC
2658 *
2659 *	Switches TP to NIC/offload mode.
2660 */
2661void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2662{
2663	if (is_offload(adap) || !enable)
2664		t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2665				 V_NICMODE(!enable));
2666}
2667
2668/**
2669 *	pm_num_pages - calculate the number of pages of the payload memory
2670 *	@mem_size: the size of the payload memory
2671 *	@pg_size: the size of each payload memory page
2672 *
2673 *	Calculate the number of pages, each of the given size, that fit in a
2674 *	memory of the specified size, respecting the HW requirement that the
2675 *	number of pages must be a multiple of 24.
2676 */
2677static inline unsigned int pm_num_pages(unsigned int mem_size,
2678					unsigned int pg_size)
2679{
2680	unsigned int n = mem_size / pg_size;
2681
2682	return n - n % 24;
2683}
2684
/*
 * Assign a @size byte region of the memory map to register A_<reg>
 * starting at @start, then advance @start past the region.
 *
 * Wrapped in do/while(0) so the macro expands to a single statement:
 * the original two-statement form would leave the "start += size" part
 * outside an unbraced if/else body (CERT PRE10-C).  Arguments used in
 * the += expression are parenthesized to preserve precedence.
 */
#define mem_region(adap, start, size, reg) \
	do { \
		t3_write_reg((adap), A_ ## reg, (start)); \
		(start) += (size); \
	} while (0)
2688
2689/**
2690 *	partition_mem - partition memory and configure TP memory settings
2691 *	@adap: the adapter
2692 *	@p: the TP parameters
2693 *
2694 *	Partitions context and payload memory and configures TP's memory
2695 *	registers.
2696 */
2697static void partition_mem(struct adapter *adap, const struct tp_params *p)
2698{
2699	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2700	unsigned int timers = 0, timers_shift = 22;
2701
2702	if (adap->params.rev > 0) {
2703		if (tids <= 16 * 1024) {
2704			timers = 1;
2705			timers_shift = 16;
2706		} else if (tids <= 64 * 1024) {
2707			timers = 2;
2708			timers_shift = 18;
2709		} else if (tids <= 256 * 1024) {
2710			timers = 3;
2711			timers_shift = 20;
2712		}
2713	}
2714
2715	t3_write_reg(adap, A_TP_PMM_SIZE,
2716		     p->chan_rx_size | (p->chan_tx_size >> 16));
2717
2718	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2719	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2720	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2721	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2722			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2723
2724	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2725	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2726	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2727
2728	pstructs = p->rx_num_pgs + p->tx_num_pgs;
2729	/* Add a bit of headroom and make multiple of 24 */
2730	pstructs += 48;
2731	pstructs -= pstructs % 24;
2732	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2733
2734	m = tids * TCB_SIZE;
2735	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2736	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2737	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2738	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2739	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2740	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2741	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2742	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2743
2744	m = (m + 4095) & ~0xfff;
2745	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2746	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2747
2748	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2749	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2750	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2751	if (tids < m)
2752		adap->params.mc5.nservers += m - tids;
2753}
2754
/* Write a TP register through the indirect PIO interface: the address
 * must be latched into A_TP_PIO_ADDR before the data write. */
static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
				  u32 val)
{
	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
	t3_write_reg(adap, A_TP_PIO_DATA, val);
}
2761
/* Program TP's global configuration: checksum offload, TCP options,
 * delayed-ACK behavior, congestion settings, and revision-specific
 * tweaks.  Called once during HW initialization. */
static void tp_config(struct adapter *adap, const struct tp_params *p)
{
	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
		     V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
	/* Start in NIC mode with IPv6 enabled; offload mode is selected
	 * later via t3_tp_set_offload_mode(). */
	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
			 F_IPV6ENABLE | F_NICMODE);
	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
	/* Rev A silicon uses a different bit for the same feature. */
	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
			 adap->params.rev > 0 ? F_ENABLEESND :
			 F_T3A_ENABLEESND);

	t3_set_reg_field(adap, A_TP_PC_CONFIG,
			 F_ENABLEEPCMDAFULL,
			 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
			 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
			 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);

	if (adap->params.rev > 0) {
		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
				 F_TXPACEAUTO);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
	} else
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);

	if (adap->params.rev == T3_REV_C)
		t3_set_reg_field(adap, A_TP_PC_CONFIG,
				 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
				 V_TABLELATENCYDELTA(4));

	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
}
2811
2812/* Desired TP timer resolution in usec */
2813#define TP_TMR_RES 50
2814
2815/* TCP timer values in ms */
2816#define TP_DACK_TIMER 50
2817#define TP_RTO_MIN    250
2818
2819/**
2820 *	tp_set_timers - set TP timing parameters
2821 *	@adap: the adapter to set
2822 *	@core_clk: the core clock frequency in Hz
2823 *
2824 *	Set TP's timing parameters, such as the various timer resolutions and
2825 *	the TCP timer values.
2826 */
static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
{
	/* log2 of core-clock ticks per TP timer tick (~TP_TMR_RES us) */
	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
	unsigned int dack_re = fls(core_clk / 5000) - 1;	/* 200us */
	unsigned int tstamp_re = fls(core_clk / 1000);	/* 1ms, at least */
	unsigned int tps = core_clk >> tre;	/* TP timer ticks per second */

	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
		     V_DELAYEDACKRESOLUTION(dack_re) |
		     V_TIMESTAMPRESOLUTION(tstamp_re));
	t3_write_reg(adap, A_TP_DACK_TIMER,
		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
	/* retransmit backoff multipliers 0..15, packed four per register */
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
		     V_KEEPALIVEMAX(9));

/* convert a value in seconds to TP timer ticks */
#define SECONDS * tps

	t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);

#undef SECONDS
}
2862
2863/**
2864 *	t3_tp_set_coalescing_size - set receive coalescing size
2865 *	@adap: the adapter
2866 *	@size: the receive coalescing size
2867 *	@psh: whether a set PSH bit should deliver coalesced data
2868 *
2869 *	Set the receive coalescing size and PSH bit handling.
2870 */
2871int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2872{
2873	u32 val;
2874
2875	if (size > MAX_RX_COALESCING_LEN)
2876		return -EINVAL;
2877
2878	val = t3_read_reg(adap, A_TP_PARA_REG3);
2879	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2880
2881	if (size) {
2882		val |= F_RXCOALESCEENABLE;
2883		if (psh)
2884			val |= F_RXCOALESCEPSHEN;
2885		size = min(MAX_RX_COALESCING_LEN, size);
2886		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2887			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2888	}
2889	t3_write_reg(adap, A_TP_PARA_REG3, val);
2890	return 0;
2891}
2892
2893/**
2894 *	t3_tp_set_max_rxsize - set the max receive size
2895 *	@adap: the adapter
2896 *	@size: the max receive size
2897 *
2898 *	Set TP's max receive size.  This is the limit that applies when
2899 *	receive coalescing is disabled.
2900 */
void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
{
	/* apply the same transfer-length limit to both PM channels */
	t3_write_reg(adap, A_TP_PARA_REG7,
		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
}
2906
/* Fill @mtus with the default 16-entry MTU table. */
static void init_mtus(unsigned short mtus[])
{
	/*
	 * Values per draft-mathis-plpmtud-00.txt.  The minimum of 88 can
	 * accommodate max-size TCP/IP headers when SACK and timestamps are
	 * enabled and still leaves at least 8 bytes of payload.
	 */
	static const unsigned short default_mtus[] = {
		88, 88, 256, 512, 576, 1024, 1280, 1492,
		1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
	};
	unsigned int i;

	for (i = 0; i < sizeof(default_mtus) / sizeof(default_mtus[0]); i++)
		mtus[i] = default_mtus[i];
}
2931
2932/*
2933 * Initial congestion control parameters.
2934 */
/* Fill the 32-entry alpha (@a) and beta (@b) congestion control tables. */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	static const unsigned short alpha_tab[] = {
		1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8,
		9, 10, 14, 17, 21, 25, 30, 35, 45, 60, 80, 100,
		200, 300, 400, 500
	};
	static const unsigned short beta_tab[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3,
		3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 7, 7
	};
	unsigned int i;

	for (i = 0; i < sizeof(alpha_tab) / sizeof(alpha_tab[0]); i++) {
		a[i] = alpha_tab[i];
		b[i] = beta_tab[i];
	}
}
2971
2972/* The minimum additive increment value for the congestion control table */
2973#define CC_MIN_INCR 2U
2974
2975/**
2976 *	t3_load_mtus - write the MTU and congestion control HW tables
2977 *	@adap: the adapter
2978 *	@mtus: the unrestricted values for the MTU table
2979 *	@alphs: the values for the congestion control alpha parameter
2980 *	@beta: the values for the congestion control beta parameter
2981 *	@mtu_cap: the maximum permitted effective MTU
2982 *
2983 *	Write the MTU table with the supplied MTUs capping each at &mtu_cap.
2984 *	Update the high-speed congestion control table with the supplied alpha,
2985 * 	beta, and MTUs.
2986 */
void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
		  unsigned short alpha[NCCTRL_WIN],
		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
{
	/* expected packet sizes per congestion window, used to scale alpha */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = min(mtus[i], mtu_cap);
		unsigned int log2 = fls(mtu);

		/* decrement unless bit (log2 - 2) of mtu is set */
		if (!(mtu & ((1 << log2) >> 2)))	/* round */
			log2--;
		/* entry format: index[31:24], log2[23:16], mtu[15:0] */
		t3_write_reg(adap, A_TP_MTU_TABLE,
			     (i << 24) | (log2 << 16) | mtu);

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* additive increment, clamped to CC_MIN_INCR;
			 * mtu - 40 excludes TCP/IP header bytes */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
3019
3020/**
3021 *	t3_read_hw_mtus - returns the values in the HW MTU table
3022 *	@adap: the adapter
3023 *	@mtus: where to store the HW MTU values
3024 *
3025 *	Reads the HW MTU table.
3026 */
3027void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
3028{
3029	int i;
3030
3031	for (i = 0; i < NMTUS; ++i) {
3032		unsigned int val;
3033
3034		t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
3035		val = t3_read_reg(adap, A_TP_MTU_TABLE);
3036		mtus[i] = val & 0x3fff;
3037	}
3038}
3039
3040/**
3041 *	t3_get_cong_cntl_tab - reads the congestion control table
3042 *	@adap: the adapter
3043 *	@incr: where to store the alpha values
3044 *
3045 *	Reads the additive increments programmed into the HW congestion
3046 *	control table.
3047 */
void t3_get_cong_cntl_tab(struct adapter *adap,
			  unsigned short incr[NMTUS][NCCTRL_WIN])
{
	unsigned int mtu, w;

	for (mtu = 0; mtu < NMTUS; ++mtu)
		for (w = 0; w < NCCTRL_WIN; ++w) {
			/* 0xffff0000 selects read-back of entry (mtu, w) */
			t3_write_reg(adap, A_TP_CCTRL_TABLE,
				     0xffff0000 | (mtu << 5) | w);
			/* the increment is the low 13 bits of the entry */
			incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
				       0x1fff;
		}
}
3061
3062/**
3063 *	t3_tp_get_mib_stats - read TP's MIB counters
3064 *	@adap: the adapter
3065 *	@tps: holds the returned counter values
3066 *
3067 *	Returns the values of TP's MIB counters.
3068 */
void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
{
	/* block-read sizeof(*tps)/4 consecutive 32-bit MIB words starting
	 * at index 0 via the indirect index/data register pair */
	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
			 sizeof(*tps) / sizeof(u32), 0);
}
3074
/*
 * Program the [start, start + len) limits of a ULP RX memory region.
 * NB: advances @start past the region as a side effect, and is not
 * wrapped in do { } while (0) — use only as a full statement.
 */
#define ulp_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1); \
	start += len

/* Same for a ULP TX region; does NOT advance @start. */
#define ulptx_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1)
3085
/*
 * Carve the per-channel RX memory into ULP regions.  ulp_region()
 * advances m past each region; ulptx_region() does not, so the ULPTX
 * TPT window coincides with the ULPRX STAG window and the ULPTX PBL
 * window with the ULPRX PBL window (same memory seen by both engines —
 * NOTE(review): intended overlap, confirm against hardware docs).
 */
static void ulp_config(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m = p->chan_rx_size;

	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
}
3099
3100/**
3101 *	t3_set_proto_sram - set the contents of the protocol sram
3102 *	@adapter: the adapter
3103 *	@data: the protocol image
3104 *
3105 *	Write the contents of the protocol SRAM.
3106 */
3107int t3_set_proto_sram(struct adapter *adap, const u8 *data)
3108{
3109	int i;
3110	const __be32 *buf = (const __be32 *)data;
3111
3112	for (i = 0; i < PROTO_SRAM_LINES; i++) {
3113		t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
3114		t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
3115		t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
3116		t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
3117		t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
3118
3119		t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
3120		if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
3121			return -EIO;
3122	}
3123	t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
3124
3125	return 0;
3126}
3127
/*
 * Program one of the two packet trace filters (0 = TX, nonzero = RX)
 * with the given key/mask tuple.  @invert inverts the match sense
 * (bit 29); @enable turns the filter on (bit 28).
 */
void t3_config_trace_filter(struct adapter *adapter,
			    const struct trace_params *tp, int filter_index,
			    int invert, int enable)
{
	u32 addr, key[4], mask[4];

	/* pack sport/sip/dport/dip/proto/vlan/intf into four 32-bit words */
	key[0] = tp->sport | (tp->sip << 16);
	key[1] = (tp->sip >> 16) | (tp->dport << 16);
	key[2] = tp->dip;
	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);

	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
	mask[2] = tp->dip_mask;
	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);

	if (invert)
		key[3] |= (1 << 29);
	if (enable)
		key[3] |= (1 << 28);

	/* key/mask pairs occupy consecutive indirect TP registers */
	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
	tp_wr_indirect(adapter, addr++, key[0]);
	tp_wr_indirect(adapter, addr++, mask[0]);
	tp_wr_indirect(adapter, addr++, key[1]);
	tp_wr_indirect(adapter, addr++, mask[1]);
	tp_wr_indirect(adapter, addr++, key[2]);
	tp_wr_indirect(adapter, addr++, mask[2]);
	tp_wr_indirect(adapter, addr++, key[3]);
	tp_wr_indirect(adapter, addr, mask[3]);
	t3_read_reg(adapter, A_TP_PIO_DATA);	/* flush posted writes */
}
3160
3161/**
3162 *	t3_config_sched - configure a HW traffic scheduler
3163 *	@adap: the adapter
3164 *	@kbps: target rate in Kbps
3165 *	@sched: the scheduler index
3166 *
3167 *	Configure a HW scheduler for the target rate
3168 */
int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
{
	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
	unsigned int clk = adap->params.vpd.cclk * 1000;	/* Hz */
	unsigned int selected_cpt = 0, selected_bpt = 0;

	if (kbps > 0) {
		kbps *= 125;	/* -> bytes */
		/*
		 * Exhaustively search clock-cycles-per-tick (cpt) and
		 * bytes-per-tick (bpt), each limited to 8 bits by the
		 * hardware, for the pair whose rate bpt * (clk / cpt)
		 * is closest to the target.
		 */
		for (cpt = 1; cpt <= 255; cpt++) {
			tps = clk / cpt;	/* ticks per second */
			bpt = (kbps + tps / 2) / tps;	/* rounded */
			if (bpt > 0 && bpt <= 255) {
				v = bpt * tps;
				delta = v >= kbps ? v - kbps : kbps - v;
				if (delta <= mindelta) {
					mindelta = delta;
					selected_cpt = cpt;
					selected_bpt = bpt;
				}
			} else if (selected_cpt)
				/* bpt overflowed and we had a solution:
				 * further cpt only raise the error */
				break;
		}
		if (!selected_cpt)
			return -EINVAL;
	}
	/* each TM PIO register holds the settings for two schedulers */
	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
	else
		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
	return 0;
}
3204
/*
 * TP bring-up: static configuration, VLAN acceleration, and (for
 * offload-capable adapters) timer setup plus free-list initialization.
 * Returns 0 on success, nonzero if the free-list init times out.
 */
static int tp_init(struct adapter *adap, const struct tp_params *p)
{
	int busy = 0;

	tp_config(adap, p);
	t3_set_vlan_accel(adap, 3, 0);

	if (is_offload(adap)) {
		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
		/* kick off free-list initialization and poll for completion */
		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
				       0, 1000, 5);
		if (busy)
			CH_ERR(adap, "TP initialization timed out\n");
	}

	/* only release TP from reset if init succeeded */
	if (!busy)
		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
	return busy;
}
3225
3226int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
3227{
3228	if (port_mask & ~((1 << adap->params.nports) - 1))
3229		return -EINVAL;
3230	t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3231			 port_mask << S_PORT0ACTIVE);
3232	return 0;
3233}
3234
3235/*
3236 * Perform the bits of HW initialization that are dependent on the Tx
3237 * channels being used.
3238 */
static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
{
	int i;

	if (chan_map != 3) {                                 /* one channel */
		/* disable round-robin arbitration between the channels */
		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
			     (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
					      F_TPTXPORT1EN | F_PORT1ACTIVE));
		t3_write_reg(adap, A_PM1_TX_CFG,
			     chan_map == 1 ? 0xffffffff : 0);
	} else {                                             /* two channels */
		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
		/* equal DMA weights for both channels */
		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
			     F_ENFORCEPKT);
		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
		/* fill all 16 entries of the TX modulation queue table */
		for (i = 0; i < 16; i++)
			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
				     (i << 16) | 0x1010);
	}
}
3268
/*
 * Calibrate the XGMAC impedance.  XAUI parts poll an automatic
 * calibration (up to 5 attempts); RGMII parts are programmed with
 * fixed pull-up/pull-down values.  Returns 0 on success, -1 if the
 * XAUI calibration fails.
 */
static int calibrate_xgm(struct adapter *adapter)
{
	if (uses_xaui(adapter)) {
		unsigned int v, i;

		for (i = 0; i < 5; ++i) {
			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
			t3_read_reg(adapter, A_XGM_XAUI_IMP);	/* flush */
			msleep(1);
			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
				/* feed the measured impedance back in */
				t3_write_reg(adapter, A_XGM_XAUI_IMP,
					     V_XAUIIMP(G_CALIMP(v) >> 2));
				return 0;
			}
		}
		CH_ERR(adapter, "MAC calibration failed\n");
		return -1;
	} else {
		t3_write_reg(adapter, A_XGM_RGMII_IMP,
			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 F_XGM_IMPSETUPDATE);
	}
	return 0;
}
3295
/*
 * T3B+ RGMII impedance calibration: pulse CALRESET, IMPSETUPDATE and
 * CALUPDATE in sequence.  XAUI parts need no action here.
 */
static void calibrate_xgm_t3b(struct adapter *adapter)
{
	if (!uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
				 F_XGM_IMPSETUPDATE);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
	}
}
3310
/* MC7 memory controller timing parameters (see mc7_init()) */
struct mc7_timing_params {
	unsigned char ActToPreDly;	/* active-to-precharge delay */
	unsigned char ActToRdWrDly;	/* active-to-read/write delay */
	unsigned char PreCyc;		/* precharge cycles */
	unsigned char RefCyc[5];	/* refresh cycles, indexed by density */
	unsigned char BkCyc;		/* bank cycles */
	unsigned char WrToRdDly;	/* write-to-read delay */
	unsigned char RdToWrDly;	/* read-to-write delay */
};
3320
3321/*
3322 * Write a value to a register and check that the write completed.  These
3323 * writes normally complete in a cycle or two, so one read should suffice.
3324 * The very first read exists to flush the posted write to the device.
3325 */
3326static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3327{
3328	t3_write_reg(adapter, addr, val);
3329	t3_read_reg(adapter, addr);	/* flush */
3330	if (!(t3_read_reg(adapter, addr) & F_BUSY))
3331		return 0;
3332	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3333	return -EIO;
3334}
3335
/*
 * Initialize an MC7 memory controller: calibrate, program timing,
 * run the DRAM precharge/mode-register bring-up sequence, enable
 * periodic refresh and ECC, then BIST the entire part.  A zero-size
 * (absent) memory is a no-op.  Returns 0 on success, -1 on failure.
 */
static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
{
	/* mode-register values, indexed by mem_type */
	static const unsigned int mc7_mode[] = {
		0x632, 0x642, 0x652, 0x432, 0x442
	};
	/* timing parameter sets, indexed by mem_type */
	static const struct mc7_timing_params mc7_timings[] = {
		{12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
		{12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
		{12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
		{9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
		{9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
	};

	u32 val;
	unsigned int width, density, slow, attempts;
	struct adapter *adapter = mc7->adapter;
	const struct mc7_timing_params *p = &mc7_timings[mem_type];

	if (!mc7->size)
		return 0;

	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	slow = val & F_SLOW;
	width = G_WIDTH(val);
	density = G_DEN(val);

	/* enable the memory interface */
	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
	msleep(1);

	if (!slow) {
		/* single-shot calibration; all status bits must clear */
		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
		t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
		msleep(1);
		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
			CH_ERR(adapter, "%s MC7 calibration timed out\n",
			       mc7->name);
			goto out_fail;
		}
	}

	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
		     V_ACTTOPREDLY(p->ActToPreDly) |
		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
		     val | F_CLKEN | F_TERM150);
	t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */

	if (!slow)
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
				 F_DLLENB);
	udelay(1);

	/* DRAM bring-up: precharge, extended mode registers, mode register */
	val = slow ? 3 : 6;
	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
		udelay(5);
	}

	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
		       mc7_mode[mem_type]) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	/* clock value is in KHz */
	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;	/* ns */
	mc7_clock /= 1000000;	/* KHz->MHz, ns->us */

	/* enable periodic refresh */
	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
		     F_PERREFEN | V_PREREFDIV(mc7_clock));
	t3_read_reg(adapter, mc7->offset + A_MC7_REF);	/* flush */

	/* ECC on, then BIST over the whole address range */
	t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
		     (mc7->size << width) - 1);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
	t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);	/* flush */

	/* poll for BIST completion, up to 50 * 250 ms */
	attempts = 50;
	do {
		msleep(250);
		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
	} while ((val & F_BUSY) && --attempts);
	if (val & F_BUSY) {
		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
		goto out_fail;
	}

	/* Enable normal memory accesses. */
	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
	return 0;

out_fail:
	return -1;
}
3448
/*
 * Tune the PCIe ack latency and replay timer limits based on link
 * width and negotiated max payload size, then clear pending PEX
 * errors and enable link-down reset handling.
 */
static void config_pcie(struct adapter *adap)
{
	/* ack latency, indexed by [log2(link width)][payload size] */
	static const u16 ack_lat[4][6] = {
		{237, 416, 559, 1071, 2095, 4143},
		{128, 217, 289, 545, 1057, 2081},
		{73, 118, 154, 282, 538, 1050},
		{67, 107, 86, 150, 278, 534}
	};
	/* replay timer limit, same indexing */
	static const u16 rpl_tmr[4][6] = {
		{711, 1248, 1677, 3213, 6285, 12429},
		{384, 651, 867, 1635, 3171, 6243},
		{219, 354, 462, 846, 1614, 3150},
		{201, 321, 258, 450, 834, 1602}
	};

	u16 val, devid;
	unsigned int log2_width, pldsize;
	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;

	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
			     &val);
	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;

	/* for device ID 0x37, force minimum read-request and payload size */
	pci_read_config_word(adap->pdev, 0x2, &devid);
	if (devid == 0x37) {
		pci_write_config_word(adap->pdev,
				      adap->params.pci.pcie_cap_addr +
				      PCI_EXP_DEVCTL,
				      val & ~PCI_EXP_DEVCTL_READRQ &
				      ~PCI_EXP_DEVCTL_PAYLOAD);
		pldsize = 0;
	}

	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
			     &val);

	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
	/* rev 0 parts have no separate RX fast-training count */
	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
	    G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
	log2_width = fls(adap->params.pci.width) - 1;
	acklat = ack_lat[log2_width][pldsize];
	if (val & 1)		/* check LOsEnable */
		acklat += fst_trn_tx * 4;
	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;

	/* the ACKLAT field layout differs on rev 0 (T3A) parts */
	if (adap->params.rev == 0)
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
				 V_T3A_ACKLAT(M_T3A_ACKLAT),
				 V_T3A_ACKLAT(acklat));
	else
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
				 V_ACKLAT(acklat));

	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
			 V_REPLAYLMT(rpllmt));

	/* clear all pending PEX errors before enabling reset handling */
	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
	t3_set_reg_field(adap, A_PCIE_CFG, 0,
			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
}
3512
3513/*
3514 * Initialize and configure T3 HW modules.  This performs the
3515 * initialization steps that need to be done once after a card is reset.
3516 * MAC and PHY initialization is handled separarely whenever a port is enabled.
3517 *
3518 * fw_params are passed to FW and their value is platform dependent.  Only the
3519 * top 8 bits are available for use, the rest must be 0.
3520 */
int t3_init_hw(struct adapter *adapter, u32 fw_params)
{
	int err = -EIO, attempts, i;
	const struct vpd_params *vpd = &adapter->params.vpd;

	/* MAC impedance calibration differs between rev 0 and later parts */
	if (adapter->params.rev > 0)
		calibrate_xgm_t3b(adapter);
	else if (calibrate_xgm(adapter))
		goto out_err;

	/* a nonzero memory clock means external memories are present */
	if (vpd->mclk) {
		partition_mem(adapter, &adapter->params.tp);

		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
				adapter->params.mc5.nfilters,
				adapter->params.mc5.nroutes))
			goto out_err;

		/* clear the first 32 CQ contexts */
		for (i = 0; i < 32; i++)
			if (clear_sge_ctxt(adapter, i, F_CQ))
				goto out_err;
	}

	if (tp_init(adapter, &adapter->params.tp))
		goto out_err;

	t3_tp_set_coalescing_size(adapter,
				  min(adapter->params.sge.max_pkt_size,
				      MAX_RX_COALESCING_LEN), 1);
	t3_tp_set_max_rxsize(adapter,
			     min(adapter->params.sge.max_pkt_size, 16384U));
	ulp_config(adapter, &adapter->params.tp);

	if (is_pcie(adapter))
		config_pcie(adapter);
	else
		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
				 F_DMASTOPEN | F_CLIDECEN);

	if (adapter->params.rev == T3_REV_C)
		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
				 F_CFG_CQE_SOP_MASK);

	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
	chan_init_hw(adapter, adapter->params.chan_map);
	t3_sge_init(adapter, &adapter->params.sge);

	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));

	/* hand fw_params to the firmware and start the uP boot */
	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
	t3_write_reg(adapter, A_CIM_BOOT_CFG,
		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */

	attempts = 100;
	do {			/* wait for uP to initialize */
		msleep(20);
	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
	if (!attempts) {
		CH_ERR(adapter, "uP initialization timed out\n");
		goto out_err;
	}

	err = 0;
out_err:
	return err;
}
3593
3594/**
3595 *	get_pci_mode - determine a card's PCI mode
3596 *	@adapter: the adapter
3597 *	@p: where to store the PCI settings
3598 *
3599 *	Determines a card's PCI mode and associated parameters, such as speed
3600 *	and width.
3601 */
3602static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
3603{
3604	static unsigned short speed_map[] = { 33, 66, 100, 133 };
3605	u32 pci_mode, pcie_cap;
3606
3607	pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3608	if (pcie_cap) {
3609		u16 val;
3610
3611		p->variant = PCI_VARIANT_PCIE;
3612		p->pcie_cap_addr = pcie_cap;
3613		pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3614					&val);
3615		p->width = (val >> 4) & 0x3f;
3616		return;
3617	}
3618
3619	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3620	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3621	p->width = (pci_mode & F_64BIT) ? 64 : 32;
3622	pci_mode = G_PCIXINITPAT(pci_mode);
3623	if (pci_mode == 0)
3624		p->variant = PCI_VARIANT_PCI;
3625	else if (pci_mode < 4)
3626		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3627	else if (pci_mode < 8)
3628		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3629	else
3630		p->variant = PCI_VARIANT_PCIX_266_MODE2;
3631}
3632
3633/**
3634 *	init_link_config - initialize a link's SW state
3635 *	@lc: structure holding the link state
3636 *	@ai: information about the current card
3637 *
3638 *	Initializes the SW state maintained for each link, including the link's
3639 *	capabilities and default speed/duplex/flow-control/autonegotiation
3640 *	settings.
3641 */
3642static void init_link_config(struct link_config *lc, unsigned int caps)
3643{
3644	lc->supported = caps;
3645	lc->requested_speed = lc->speed = SPEED_INVALID;
3646	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3647	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3648	if (lc->supported & SUPPORTED_Autoneg) {
3649		lc->advertising = lc->supported;
3650		lc->autoneg = AUTONEG_ENABLE;
3651		lc->requested_fc |= PAUSE_AUTONEG;
3652	} else {
3653		lc->advertising = 0;
3654		lc->autoneg = AUTONEG_DISABLE;
3655	}
3656}
3657
3658/**
3659 *	mc7_calc_size - calculate MC7 memory size
3660 *	@cfg: the MC7 configuration
3661 *
3662 *	Calculates the size of an MC7 memory in bytes from the value of its
3663 *	configuration register.
3664 */
3665static unsigned int mc7_calc_size(u32 cfg)
3666{
3667	unsigned int width = G_WIDTH(cfg);
3668	unsigned int banks = !!(cfg & F_BKS) + 1;
3669	unsigned int org = !!(cfg & F_ORG) + 1;
3670	unsigned int density = G_DEN(cfg);
3671	unsigned int MBs = ((256 << density) * banks) / (org << width);
3672
3673	return MBs << 20;
3674}
3675
3676static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3677		     unsigned int base_addr, const char *name)
3678{
3679	u32 cfg;
3680
3681	mc7->adapter = adapter;
3682	mc7->name = name;
3683	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3684	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3685	mc7->size = mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3686	mc7->width = G_WIDTH(cfg);
3687}
3688
/* Initialize the SW state of a port's MAC and apply rev-0 XAUI fixups. */
void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
{
	u16 devid;

	mac->adapter = adapter;
	pci_read_config_word(adapter->pdev, 0x2, &devid);

	/* device 0x37 without a second XAUI config always uses MAC 0 */
	if (devid == 0x37 && !adapter->params.vpd.xauicfg[1])
		index = 0;
	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
	mac->nucast = 1;

	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
				 F_ENRGMII, 0);
	}
}
3708
/*
 * HW initialization that must happen before ports are brought up:
 * MDIO, I2C clock, GPIOs, and enabling the MAC clocks on both ports
 * so their registers become accessible.
 */
void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
{
	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);

	mi1_init(adapter, ai);
	t3_write_reg(adapter, A_I2C_CFG,	/* set for 80KHz */
		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));

	if (adapter->params.rev == 0 || !uses_xaui(adapter))
		val |= F_ENRGMII;

	/* Enable MAC clocks so we can access the registers */
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);	/* flush */

	/* release the clock divider from reset, on both MACs */
	val |= F_CLKDIVRESET_;
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
}
3734
3735/*
3736 * Reset the adapter.
3737 * Older PCIe cards lose their config space during reset, PCI-X
3738 * ones don't.
3739 */
3740int t3_reset_adapter(struct adapter *adapter)
3741{
3742	int i, save_and_restore_pcie =
3743	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3744	uint16_t devid = 0;
3745
3746	if (save_and_restore_pcie)
3747		pci_save_state(adapter->pdev);
3748	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3749
3750	for (i = 0; i < 10; i++) {
3751		msleep(50);
3752		pci_read_config_word(adapter->pdev, 0x00, &devid);
3753		if (devid == 0x1425)
3754			break;
3755	}
3756
3757	if (devid != 0x1425)
3758		return -1;
3759
3760	if (save_and_restore_pcie)
3761		pci_restore_state(adapter->pdev);
3762	return 0;
3763}
3764
3765static int init_parity(struct adapter *adap)
3766{
3767		int i, err, addr;
3768
3769	if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3770		return -EBUSY;
3771
3772	for (err = i = 0; !err && i < 16; i++)
3773		err = clear_sge_ctxt(adap, i, F_EGRESS);
3774	for (i = 0xfff0; !err && i <= 0xffff; i++)
3775		err = clear_sge_ctxt(adap, i, F_EGRESS);
3776	for (i = 0; !err && i < SGE_QSETS; i++)
3777		err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3778	if (err)
3779		return err;
3780
3781	t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3782	for (i = 0; i < 4; i++)
3783		for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3784			t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3785				     F_IBQDBGWR | V_IBQDBGQID(i) |
3786				     V_IBQDBGADDR(addr));
3787			err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3788					      F_IBQDBGBUSY, 0, 2, 1);
3789			if (err)
3790				return err;
3791		}
3792	return 0;
3793}
3794
3795/*
3796 * Initialize adapter SW state for the various HW modules, set initial values
3797 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3798 * interface.
3799 */
int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
		    int reset)
{
	int ret;
	/* j is unsigned and starts at -1 so the first ++j below yields 0 */
	unsigned int i, j = -1;

	get_pci_mode(adapter, &adapter->params.pci);

	adapter->params.info = ai;
	adapter->params.nports = ai->nports0 + ai->nports1;
	/* bit 0 set if channel 0 has ports, bit 1 set if channel 1 does */
	adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
	/*
	 * We used to only run the "adapter check task" once a second if
	 * we had PHYs which didn't support interrupts (we would check
	 * their link status once a second).  Now we check other conditions
	 * in that routine which could potentially impose a very high
	 * interrupt load on the system.  As such, we now always scan the
	 * adapter state once a second ...
	 */
	adapter->params.linkpoll_period = 10;
	/* accumulate MAC stats 10x less frequently on non-10G adapters */
	adapter->params.stats_update_period = is_10G(adapter) ?
	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
	adapter->params.pci.vpd_cap_addr =
	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;

	if (reset && t3_reset_adapter(adapter))
		return -1;

	t3_sge_prep(adapter, &adapter->params.sge);

	/* presumably mclk == 0 means no external memory; skip MC7/TP sizing */
	if (adapter->params.vpd.mclk) {
		struct tp_params *p = &adapter->params.tp;

		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");

		/* chan_map == 3 means both channels are populated */
		p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
		p->cm_size = t3_mc7_size(&adapter->cm);
		p->chan_rx_size = p->pmrx_size / 2;	/* only 1 Rx channel */
		p->chan_tx_size = p->pmtx_size / p->nchan;
		p->rx_pg_size = 64 * 1024;
		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
		/* more timer queues on >= 128MB CM memory or rev > 0 parts */
		p->ntimer_qs = p->cm_size >= (128 << 20) ||
		    adapter->params.rev > 0 ? 12 : 6;
	}

	/* offload requires all three MC7 memories to be present (nonzero) */
	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
				  t3_mc7_size(&adapter->pmtx) &&
				  t3_mc7_size(&adapter->cm);

	if (is_offload(adapter)) {
		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
		/* rev-0 parts get no filters */
		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
		    DEFAULT_NFILTERS : 0;
		adapter->params.mc5.nroutes = 0;
		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);

		init_mtus(adapter->params.mtus);
		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
	}

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;

	for_each_port(adapter, i) {
		u8 hw_addr[6];
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adapter, i);

		/* advance j past VPD entries with no port type (unused) */
		while (!adapter->params.vpd.port_type[++j])
			;

		pti = &port_types[adapter->params.vpd.port_type[j]];
		if (!pti->phy_prep) {
			CH_ALERT(adapter, "Invalid port type index %d\n",
				 adapter->params.vpd.port_type[j]);
			return -EINVAL;
		}

		p->phy.mdio.dev = adapter->port[i];
		ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
				    ai->mdio_ops);
		if (ret)
			return ret;
		mac_prep(&p->mac, adapter, j);

		/*
		 * The VPD EEPROM stores the base Ethernet address for the
		 * card.  A port's address is derived from the base by adding
		 * the port's index to the base's low octet.
		 */
		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;

		memcpy(adapter->port[i]->dev_addr, hw_addr,
		       ETH_ALEN);
		memcpy(adapter->port[i]->perm_addr, hw_addr,
		       ETH_ALEN);
		init_link_config(&p->link_config, p->phy.caps);
		/* leave PHYs powered down until the interface is brought up */
		p->phy.ops->power_down(&p->phy, 1);

		/*
		 * If the PHY doesn't support interrupts for link status
		 * changes, schedule a scan of the adapter links at least
		 * once a second.
		 */
		if (!(p->phy.caps & SUPPORTED_IRQ) &&
		    adapter->params.linkpoll_period > 10)
			adapter->params.linkpoll_period = 10;
	}

	return 0;
}
3924
/*
 * t3_led_ready - signal adapter readiness on GPIO0
 * @adapter: the adapter
 *
 * Sets the GPIO0 output value bit (per the function name this presumably
 * drives the adapter's "ready" LED) without disturbing other GPIO bits.
 */
void t3_led_ready(struct adapter *adapter)
{
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
}
3930
3931int t3_replay_prep_adapter(struct adapter *adapter)
3932{
3933	const struct adapter_info *ai = adapter->params.info;
3934	unsigned int i, j = -1;
3935	int ret;
3936
3937	early_hw_init(adapter, ai);
3938	ret = init_parity(adapter);
3939	if (ret)
3940		return ret;
3941
3942	for_each_port(adapter, i) {
3943		const struct port_type_info *pti;
3944		struct port_info *p = adap2pinfo(adapter, i);
3945
3946		while (!adapter->params.vpd.port_type[++j])
3947			;
3948
3949		pti = &port_types[adapter->params.vpd.port_type[j]];
3950		ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL);
3951		if (ret)
3952			return ret;
3953		p->phy.ops->power_down(&p->phy, 1);
3954	}
3955
3956return 0;
3957}
3958