1/*
2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses.  You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 *     Redistribution and use in source and binary forms, with or
11 *     without modification, are permitted provided that the following
12 *     conditions are met:
13 *
14 *      - Redistributions of source code must retain the above
15 *        copyright notice, this list of conditions and the following
16 *        disclaimer.
17 *
18 *      - Redistributions in binary form must reproduce the above
19 *        copyright notice, this list of conditions and the following
20 *        disclaimer in the documentation and/or other materials
21 *        provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/etherdevice.h>
33#include "common.h"
34#include "regs.h"
35#include "sge_defs.h"
36#include "firmware_exports.h"
37
38static void t3_port_intr_clear(struct adapter *adapter, int idx);
39
40/**
41 *	t3_wait_op_done_val - wait until an operation is completed
42 *	@adapter: the adapter performing the operation
43 *	@reg: the register to check for completion
44 *	@mask: a single-bit field within @reg that indicates completion
45 *	@polarity: the value of the field when the operation is completed
46 *	@attempts: number of check iterations
47 *	@delay: delay in usecs between iterations
48 *	@valp: where to store the value of the register at completion time
49 *
50 *	Wait until an operation is completed by checking a bit in a register
51 *	up to @attempts times.  If @valp is not NULL the value of the register
52 *	at the time it indicated completion is stored there.  Returns 0 if the
53 *	operation completes and -EAGAIN otherwise.
54 */
55
56int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
57			int polarity, int attempts, int delay, u32 *valp)
58{
59	while (1) {
60		u32 val = t3_read_reg(adapter, reg);
61
62		if (!!(val & mask) == polarity) {
63			if (valp)
64				*valp = val;
65			return 0;
66		}
67		if (--attempts == 0)
68			return -EAGAIN;
69		if (delay)
70			udelay(delay);
71	}
72}
73
74/**
75 *	t3_write_regs - write a bunch of registers
76 *	@adapter: the adapter to program
77 *	@p: an array of register address/register value pairs
78 *	@n: the number of address/value pairs
79 *	@offset: register address offset
80 *
81 *	Takes an array of register address/register value pairs and writes each
82 *	value to the corresponding register.  Register addresses are adjusted
83 *	by the supplied offset.
84 */
85void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
86		   int n, unsigned int offset)
87{
88	while (n--) {
89		t3_write_reg(adapter, p->reg_addr + offset, p->val);
90		p++;
91	}
92}
93
94/**
95 *	t3_set_reg_field - set a register field to a value
96 *	@adapter: the adapter to program
97 *	@addr: the register address
98 *	@mask: specifies the portion of the register to modify
99 *	@val: the new value for the register field
100 *
101 *	Sets a register field specified by the supplied mask to the
102 *	given value.
103 */
104void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
105		      u32 val)
106{
107	u32 v = t3_read_reg(adapter, addr) & ~mask;
108
109	t3_write_reg(adapter, addr, v | val);
110	t3_read_reg(adapter, addr);	/* flush */
111}
112
113/**
114 *	t3_read_indirect - read indirectly addressed registers
115 *	@adap: the adapter
116 *	@addr_reg: register holding the indirect address
117 *	@data_reg: register holding the value of the indirect register
118 *	@vals: where the read register values are stored
119 *	@start_idx: index of first indirect register to read
120 *	@nregs: how many indirect registers to read
121 *
122 *	Reads registers that are accessed indirectly through an address/data
123 *	register pair.
124 */
125static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
126			     unsigned int data_reg, u32 *vals,
127			     unsigned int nregs, unsigned int start_idx)
128{
129	while (nregs--) {
130		t3_write_reg(adap, addr_reg, start_idx);
131		*vals++ = t3_read_reg(adap, data_reg);
132		start_idx++;
133	}
134}
135
136/**
137 *	t3_mc7_bd_read - read from MC7 through backdoor accesses
138 *	@mc7: identifies MC7 to read from
139 *	@start: index of first 64-bit word to read
140 *	@n: number of 64-bit words to read
141 *	@buf: where to store the read result
142 *
143 *	Read n 64-bit words from MC7 starting at word start, using backdoor
144 *	accesses.
145 */
146int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
147		   u64 *buf)
148{
149	static const int shift[] = { 0, 0, 16, 24 };
150	static const int step[] = { 0, 32, 16, 8 };
151
152	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
153	struct adapter *adap = mc7->adapter;
154
155	if (start >= size64 || start + n > size64)
156		return -EINVAL;
157
158	start *= (8 << mc7->width);
159	while (n--) {
160		int i;
161		u64 val64 = 0;
162
163		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
164			int attempts = 10;
165			u32 val;
166
167			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
168			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
169			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
170			while ((val & F_BUSY) && attempts--)
171				val = t3_read_reg(adap,
172						  mc7->offset + A_MC7_BD_OP);
173			if (val & F_BUSY)
174				return -EIO;
175
176			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
177			if (mc7->width == 0) {
178				val64 = t3_read_reg(adap,
179						    mc7->offset +
180						    A_MC7_BD_DATA0);
181				val64 |= (u64) val << 32;
182			} else {
183				if (mc7->width > 1)
184					val >>= shift[mc7->width];
185				val64 |= (u64) val << (step[mc7->width] * i);
186			}
187			start += 8;
188		}
189		*buf++ = val64;
190	}
191	return 0;
192}
193
194/*
195 * Initialize MI1.
196 */
197static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
198{
199	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
200	u32 val = F_PREEN | V_CLKDIV(clkdiv);
201
202	t3_write_reg(adap, A_MI1_CFG, val);
203}
204
205#define MDIO_ATTEMPTS 20
206
207/*
208 * MI1 read/write operations for clause 22 PHYs.
209 */
210static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr,
211		       u16 reg_addr)
212{
213	struct port_info *pi = netdev_priv(dev);
214	struct adapter *adapter = pi->adapter;
215	int ret;
216	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
217
218	mutex_lock(&adapter->mdio_lock);
219	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
220	t3_write_reg(adapter, A_MI1_ADDR, addr);
221	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
222	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
223	if (!ret)
224		ret = t3_read_reg(adapter, A_MI1_DATA);
225	mutex_unlock(&adapter->mdio_lock);
226	return ret;
227}
228
229static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr,
230			u16 reg_addr, u16 val)
231{
232	struct port_info *pi = netdev_priv(dev);
233	struct adapter *adapter = pi->adapter;
234	int ret;
235	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
236
237	mutex_lock(&adapter->mdio_lock);
238	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
239	t3_write_reg(adapter, A_MI1_ADDR, addr);
240	t3_write_reg(adapter, A_MI1_DATA, val);
241	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
242	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
243	mutex_unlock(&adapter->mdio_lock);
244	return ret;
245}
246
247static const struct mdio_ops mi1_mdio_ops = {
248	.read = t3_mi1_read,
249	.write = t3_mi1_write,
250	.mode_support = MDIO_SUPPORTS_C22
251};
252
253/*
254 * Performs the address cycle for clause 45 PHYs.
255 * Must be called with the MDIO_LOCK held.
256 */
257static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
258		       int reg_addr)
259{
260	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
261
262	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
263	t3_write_reg(adapter, A_MI1_ADDR, addr);
264	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
265	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
266	return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
267			       MDIO_ATTEMPTS, 10);
268}
269
270/*
271 * MI1 read/write operations for indirect-addressed PHYs.
272 */
273static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
274			u16 reg_addr)
275{
276	struct port_info *pi = netdev_priv(dev);
277	struct adapter *adapter = pi->adapter;
278	int ret;
279
280	mutex_lock(&adapter->mdio_lock);
281	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
282	if (!ret) {
283		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
284		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
285				      MDIO_ATTEMPTS, 10);
286		if (!ret)
287			ret = t3_read_reg(adapter, A_MI1_DATA);
288	}
289	mutex_unlock(&adapter->mdio_lock);
290	return ret;
291}
292
293static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr,
294			 u16 reg_addr, u16 val)
295{
296	struct port_info *pi = netdev_priv(dev);
297	struct adapter *adapter = pi->adapter;
298	int ret;
299
300	mutex_lock(&adapter->mdio_lock);
301	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
302	if (!ret) {
303		t3_write_reg(adapter, A_MI1_DATA, val);
304		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
305		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
306				      MDIO_ATTEMPTS, 10);
307	}
308	mutex_unlock(&adapter->mdio_lock);
309	return ret;
310}
311
312static const struct mdio_ops mi1_mdio_ext_ops = {
313	.read = mi1_ext_read,
314	.write = mi1_ext_write,
315	.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
316};
317
318/**
319 *	t3_mdio_change_bits - modify the value of a PHY register
320 *	@phy: the PHY to operate on
321 *	@mmd: the device address
322 *	@reg: the register address
323 *	@clear: what part of the register value to mask off
324 *	@set: what part of the register value to set
325 *
326 *	Changes the value of a PHY register by applying a mask to its current
327 *	value and ORing the result with a new value.
328 */
329int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
330			unsigned int set)
331{
332	int ret;
333	unsigned int val;
334
335	ret = t3_mdio_read(phy, mmd, reg, &val);
336	if (!ret) {
337		val &= ~clear;
338		ret = t3_mdio_write(phy, mmd, reg, val | set);
339	}
340	return ret;
341}
342
343/**
344 *	t3_phy_reset - reset a PHY block
345 *	@phy: the PHY to operate on
346 *	@mmd: the device address of the PHY block to reset
347 *	@wait: how long to wait for the reset to complete in 1ms increments
348 *
349 *	Resets a PHY block and optionally waits for the reset to complete.
350 *	@mmd should be 0 for 10/100/1000 PHYs and the device address to reset
351 *	for 10G PHYs.
352 */
353int t3_phy_reset(struct cphy *phy, int mmd, int wait)
354{
355	int err;
356	unsigned int ctl;
357
358	err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
359				  MDIO_CTRL1_RESET);
360	if (err || !wait)
361		return err;
362
363	do {
364		err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl);
365		if (err)
366			return err;
367		ctl &= MDIO_CTRL1_RESET;
368		if (ctl)
369			msleep(1);
370	} while (ctl && --wait);
371
372	return ctl ? -1 : 0;
373}
374
375/**
376 *	t3_phy_advertise - set the PHY advertisement registers for autoneg
377 *	@phy: the PHY to operate on
378 *	@advert: bitmap of capabilities the PHY should advertise
379 *
380 *	Sets a 10/100/1000 PHY's advertisement registers to advertise the
381 *	requested capabilities.
382 */
383int t3_phy_advertise(struct cphy *phy, unsigned int advert)
384{
385	int err;
386	unsigned int val = 0;
387
388	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val);
389	if (err)
390		return err;
391
392	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
393	if (advert & ADVERTISED_1000baseT_Half)
394		val |= ADVERTISE_1000HALF;
395	if (advert & ADVERTISED_1000baseT_Full)
396		val |= ADVERTISE_1000FULL;
397
398	err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val);
399	if (err)
400		return err;
401
402	val = 1;
403	if (advert & ADVERTISED_10baseT_Half)
404		val |= ADVERTISE_10HALF;
405	if (advert & ADVERTISED_10baseT_Full)
406		val |= ADVERTISE_10FULL;
407	if (advert & ADVERTISED_100baseT_Half)
408		val |= ADVERTISE_100HALF;
409	if (advert & ADVERTISED_100baseT_Full)
410		val |= ADVERTISE_100FULL;
411	if (advert & ADVERTISED_Pause)
412		val |= ADVERTISE_PAUSE_CAP;
413	if (advert & ADVERTISED_Asym_Pause)
414		val |= ADVERTISE_PAUSE_ASYM;
415	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
416}
417
418/**
419 *	t3_phy_advertise_fiber - set fiber PHY advertisement register
420 *	@phy: the PHY to operate on
421 *	@advert: bitmap of capabilities the PHY should advertise
422 *
423 *	Sets a fiber PHY's advertisement register to advertise the
424 *	requested capabilities.
425 */
426int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
427{
428	unsigned int val = 0;
429
430	if (advert & ADVERTISED_1000baseT_Half)
431		val |= ADVERTISE_1000XHALF;
432	if (advert & ADVERTISED_1000baseT_Full)
433		val |= ADVERTISE_1000XFULL;
434	if (advert & ADVERTISED_Pause)
435		val |= ADVERTISE_1000XPAUSE;
436	if (advert & ADVERTISED_Asym_Pause)
437		val |= ADVERTISE_1000XPSE_ASYM;
438	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
439}
440
441/**
442 *	t3_set_phy_speed_duplex - force PHY speed and duplex
443 *	@phy: the PHY to operate on
444 *	@speed: requested PHY speed
445 *	@duplex: requested PHY duplex
446 *
447 *	Force a 10/100/1000 PHY's speed and duplex.  This also disables
448 *	auto-negotiation except for GigE, where auto-negotiation is mandatory.
449 */
450int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
451{
452	int err;
453	unsigned int ctl;
454
455	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl);
456	if (err)
457		return err;
458
459	if (speed >= 0) {
460		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
461		if (speed == SPEED_100)
462			ctl |= BMCR_SPEED100;
463		else if (speed == SPEED_1000)
464			ctl |= BMCR_SPEED1000;
465	}
466	if (duplex >= 0) {
467		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
468		if (duplex == DUPLEX_FULL)
469			ctl |= BMCR_FULLDPLX;
470	}
471	if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
472		ctl |= BMCR_ANENABLE;
473	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl);
474}
475
476int t3_phy_lasi_intr_enable(struct cphy *phy)
477{
478	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
479			     MDIO_PMA_LASI_LSALARM);
480}
481
482int t3_phy_lasi_intr_disable(struct cphy *phy)
483{
484	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
485}
486
487int t3_phy_lasi_intr_clear(struct cphy *phy)
488{
489	u32 val;
490
491	return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
492}
493
494int t3_phy_lasi_intr_handler(struct cphy *phy)
495{
496	unsigned int status;
497	int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT,
498			       &status);
499
500	if (err)
501		return err;
502	return (status & MDIO_PMA_LASI_LSALARM) ? cphy_cause_link_change : 0;
503}
504
505static const struct adapter_info t3_adap_info[] = {
506	{1, 1, 0,
507	 F_GPIO2_OEN | F_GPIO4_OEN |
508	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
509	 &mi1_mdio_ops, "Chelsio PE9000"},
510	{1, 1, 0,
511	 F_GPIO2_OEN | F_GPIO4_OEN |
512	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
513	 &mi1_mdio_ops, "Chelsio T302"},
514	{1, 0, 0,
515	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
516	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
517	 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
518	 &mi1_mdio_ext_ops, "Chelsio T310"},
519	{1, 1, 0,
520	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
521	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
522	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
523	 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
524	 &mi1_mdio_ext_ops, "Chelsio T320"},
525	{},
526	{},
527	{1, 0, 0,
528	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
529	 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
530	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
531	 &mi1_mdio_ext_ops, "Chelsio T310" },
532	{1, 0, 0,
533	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
534	 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
535	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
536	 &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
537};
538
539/*
540 * Return the adapter_info structure with a given index.  Out-of-range indices
541 * return NULL.
542 */
543const struct adapter_info *t3_get_adapter_info(unsigned int id)
544{
545	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
546}
547
548struct port_type_info {
549	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
550			int phy_addr, const struct mdio_ops *ops);
551};
552
553static const struct port_type_info port_types[] = {
554	{ NULL },
555	{ t3_ael1002_phy_prep },
556	{ t3_vsc8211_phy_prep },
557	{ NULL},
558	{ t3_xaui_direct_phy_prep },
559	{ t3_ael2005_phy_prep },
560	{ t3_qt2045_phy_prep },
561	{ t3_ael1006_phy_prep },
562	{ NULL },
563	{ t3_aq100x_phy_prep },
564	{ t3_ael2020_phy_prep },
565};
566
567#define VPD_ENTRY(name, len) \
568	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
569
570/*
571 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
572 * VPD-R sections.
573 */
574struct t3_vpd {
575	u8 id_tag;
576	u8 id_len[2];
577	u8 id_data[16];
578	u8 vpdr_tag;
579	u8 vpdr_len[2];
580	VPD_ENTRY(pn, 16);	/* part number */
581	VPD_ENTRY(ec, 16);	/* EC level */
582	VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
583	VPD_ENTRY(na, 12);	/* MAC address base */
584	VPD_ENTRY(cclk, 6);	/* core clock */
585	VPD_ENTRY(mclk, 6);	/* mem clock */
586	VPD_ENTRY(uclk, 6);	/* uP clk */
587	VPD_ENTRY(mdc, 6);	/* MDIO clk */
588	VPD_ENTRY(mt, 2);	/* mem timing */
589	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
590	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
591	VPD_ENTRY(port0, 2);	/* PHY0 complex */
592	VPD_ENTRY(port1, 2);	/* PHY1 complex */
593	VPD_ENTRY(port2, 2);	/* PHY2 complex */
594	VPD_ENTRY(port3, 2);	/* PHY3 complex */
595	VPD_ENTRY(rv, 1);	/* csum */
596	u32 pad;		/* for multiple-of-4 sizing and alignment */
597};
598
599#define EEPROM_STAT_ADDR  0x4000
600#define VPD_BASE          0xc00
601
602/**
603 *	t3_seeprom_wp - enable/disable EEPROM write protection
604 *	@adapter: the adapter
605 *	@enable: 1 to enable write protection, 0 to disable it
606 *
607 *	Enables or disables write protection on the serial EEPROM.
608 */
609int t3_seeprom_wp(struct adapter *adapter, int enable)
610{
611	u32 data = enable ? 0xc : 0;
612	int ret;
613
614	/* EEPROM_STAT_ADDR is outside VPD area, use pci_write_vpd_any() */
615	ret = pci_write_vpd_any(adapter->pdev, EEPROM_STAT_ADDR, sizeof(u32),
616				&data);
617
618	return ret < 0 ? ret : 0;
619}
620
621static int vpdstrtouint(char *s, u8 len, unsigned int base, unsigned int *val)
622{
623	char tok[256];
624
625	memcpy(tok, s, len);
626	tok[len] = 0;
627	return kstrtouint(strim(tok), base, val);
628}
629
630static int vpdstrtou16(char *s, u8 len, unsigned int base, u16 *val)
631{
632	char tok[256];
633
634	memcpy(tok, s, len);
635	tok[len] = 0;
636	return kstrtou16(strim(tok), base, val);
637}
638
639/**
640 *	get_vpd_params - read VPD parameters from VPD EEPROM
641 *	@adapter: adapter to read
642 *	@p: where to store the parameters
643 *
644 *	Reads card parameters stored in VPD EEPROM.
645 */
646static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
647{
648	struct t3_vpd vpd;
649	u8 base_val = 0;
650	int addr, ret;
651
652	/*
653	 * Card information is normally at VPD_BASE but some early cards had
654	 * it at 0.
655	 */
656	ret = pci_read_vpd(adapter->pdev, VPD_BASE, 1, &base_val);
657	if (ret < 0)
658		return ret;
659	addr = base_val == PCI_VPD_LRDT_ID_STRING ? VPD_BASE : 0;
660
661	ret = pci_read_vpd(adapter->pdev, addr, sizeof(vpd), &vpd);
662	if (ret < 0)
663		return ret;
664
665	ret = vpdstrtouint(vpd.cclk_data, vpd.cclk_len, 10, &p->cclk);
666	if (ret)
667		return ret;
668	ret = vpdstrtouint(vpd.mclk_data, vpd.mclk_len, 10, &p->mclk);
669	if (ret)
670		return ret;
671	ret = vpdstrtouint(vpd.uclk_data, vpd.uclk_len, 10, &p->uclk);
672	if (ret)
673		return ret;
674	ret = vpdstrtouint(vpd.mdc_data, vpd.mdc_len, 10, &p->mdc);
675	if (ret)
676		return ret;
677	ret = vpdstrtouint(vpd.mt_data, vpd.mt_len, 10, &p->mem_timing);
678	if (ret)
679		return ret;
680	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
681
682	/* Old eeproms didn't have port information */
683	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
684		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
685		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
686	} else {
687		p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
688		p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
689		ret = vpdstrtou16(vpd.xaui0cfg_data, vpd.xaui0cfg_len, 16,
690				  &p->xauicfg[0]);
691		if (ret)
692			return ret;
693		ret = vpdstrtou16(vpd.xaui1cfg_data, vpd.xaui1cfg_len, 16,
694				  &p->xauicfg[1]);
695		if (ret)
696			return ret;
697	}
698
699	ret = hex2bin(p->eth_base, vpd.na_data, 6);
700	if (ret < 0)
701		return -EINVAL;
702	return 0;
703}
704
705/* serial flash and firmware constants */
706enum {
707	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
708	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
709	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */
710
711	/* flash command opcodes */
712	SF_PROG_PAGE = 2,	/* program page */
713	SF_WR_DISABLE = 4,	/* disable writes */
714	SF_RD_STATUS = 5,	/* read status register */
715	SF_WR_ENABLE = 6,	/* enable writes */
716	SF_RD_DATA_FAST = 0xb,	/* read flash */
717	SF_ERASE_SECTOR = 0xd8,	/* erase sector */
718
719	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
720	FW_VERS_ADDR = 0x7fffc,    /* flash address holding FW version */
721	FW_MIN_SIZE = 8            /* at least version and csum */
722};
723
724/**
725 *	sf1_read - read data from the serial flash
726 *	@adapter: the adapter
727 *	@byte_cnt: number of bytes to read
728 *	@cont: whether another operation will be chained
729 *	@valp: where to store the read data
730 *
731 *	Reads up to 4 bytes of data from the serial flash.  The location of
732 *	the read needs to be specified prior to calling this by issuing the
733 *	appropriate commands to the serial flash.
734 */
735static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
736		    u32 *valp)
737{
738	int ret;
739
740	if (!byte_cnt || byte_cnt > 4)
741		return -EINVAL;
742	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
743		return -EBUSY;
744	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
745	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
746	if (!ret)
747		*valp = t3_read_reg(adapter, A_SF_DATA);
748	return ret;
749}
750
751/**
752 *	sf1_write - write data to the serial flash
753 *	@adapter: the adapter
754 *	@byte_cnt: number of bytes to write
755 *	@cont: whether another operation will be chained
756 *	@val: value to write
757 *
758 *	Writes up to 4 bytes of data to the serial flash.  The location of
759 *	the write needs to be specified prior to calling this by issuing the
760 *	appropriate commands to the serial flash.
761 */
762static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
763		     u32 val)
764{
765	if (!byte_cnt || byte_cnt > 4)
766		return -EINVAL;
767	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
768		return -EBUSY;
769	t3_write_reg(adapter, A_SF_DATA, val);
770	t3_write_reg(adapter, A_SF_OP,
771		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
772	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
773}
774
775/**
776 *	flash_wait_op - wait for a flash operation to complete
777 *	@adapter: the adapter
778 *	@attempts: max number of polls of the status register
779 *	@delay: delay between polls in ms
780 *
781 *	Wait for a flash operation to complete by polling the status register.
782 */
783static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
784{
785	int ret;
786	u32 status;
787
788	while (1) {
789		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
790		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
791			return ret;
792		if (!(status & 1))
793			return 0;
794		if (--attempts == 0)
795			return -EAGAIN;
796		if (delay)
797			msleep(delay);
798	}
799}
800
801/**
802 *	t3_read_flash - read words from serial flash
803 *	@adapter: the adapter
804 *	@addr: the start address for the read
805 *	@nwords: how many 32-bit words to read
806 *	@data: where to store the read data
807 *	@byte_oriented: whether to store data as bytes or as words
808 *
809 *	Read the specified number of 32-bit words from the serial flash.
810 *	If @byte_oriented is set the read data is stored as a byte array
811 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
812 *	natural endianness.
813 */
814static int t3_read_flash(struct adapter *adapter, unsigned int addr,
815			 unsigned int nwords, u32 *data, int byte_oriented)
816{
817	int ret;
818
819	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
820		return -EINVAL;
821
822	addr = swab32(addr) | SF_RD_DATA_FAST;
823
824	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
825	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
826		return ret;
827
828	for (; nwords; nwords--, data++) {
829		ret = sf1_read(adapter, 4, nwords > 1, data);
830		if (ret)
831			return ret;
832		if (byte_oriented)
833			*data = htonl(*data);
834	}
835	return 0;
836}
837
838/**
839 *	t3_write_flash - write up to a page of data to the serial flash
840 *	@adapter: the adapter
841 *	@addr: the start address to write
842 *	@n: length of data to write
843 *	@data: the data to write
844 *
845 *	Writes up to a page of data (256 bytes) to the serial flash starting
846 *	at the given address.
847 */
848static int t3_write_flash(struct adapter *adapter, unsigned int addr,
849			  unsigned int n, const u8 *data)
850{
851	int ret;
852	u32 buf[64];
853	unsigned int i, c, left, val, offset = addr & 0xff;
854
855	if (addr + n > SF_SIZE || offset + n > 256)
856		return -EINVAL;
857
858	val = swab32(addr) | SF_PROG_PAGE;
859
860	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
861	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
862		return ret;
863
864	for (left = n; left; left -= c) {
865		c = min(left, 4U);
866		for (val = 0, i = 0; i < c; ++i)
867			val = (val << 8) + *data++;
868
869		ret = sf1_write(adapter, c, c != left, val);
870		if (ret)
871			return ret;
872	}
873	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
874		return ret;
875
876	/* Read the page to verify the write succeeded */
877	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
878	if (ret)
879		return ret;
880
881	if (memcmp(data - n, (u8 *) buf + offset, n))
882		return -EIO;
883	return 0;
884}
885
886/**
887 *	t3_get_tp_version - read the tp sram version
888 *	@adapter: the adapter
889 *	@vers: where to place the version
890 *
891 *	Reads the protocol sram version from sram.
892 */
893int t3_get_tp_version(struct adapter *adapter, u32 *vers)
894{
895	int ret;
896
897	/* Get version loaded in SRAM */
898	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
899	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
900			      1, 1, 5, 1);
901	if (ret)
902		return ret;
903
904	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
905
906	return 0;
907}
908
909/**
910 *	t3_check_tpsram_version - read the tp sram version
911 *	@adapter: the adapter
912 *
913 *	Reads the protocol sram version from flash.
914 */
915int t3_check_tpsram_version(struct adapter *adapter)
916{
917	int ret;
918	u32 vers;
919	unsigned int major, minor;
920
921	if (adapter->params.rev == T3_REV_A)
922		return 0;
923
924
925	ret = t3_get_tp_version(adapter, &vers);
926	if (ret)
927		return ret;
928
929	major = G_TP_VERSION_MAJOR(vers);
930	minor = G_TP_VERSION_MINOR(vers);
931
932	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
933		return 0;
934	else {
935		CH_ERR(adapter, "found wrong TP version (%u.%u), "
936		       "driver compiled for version %d.%d\n", major, minor,
937		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
938	}
939	return -EINVAL;
940}
941
942/**
943 *	t3_check_tpsram - check if provided protocol SRAM
944 *			  is compatible with this driver
945 *	@adapter: the adapter
946 *	@tp_sram: the firmware image to write
947 *	@size: image size
948 *
949 *	Checks if an adapter's tp sram is compatible with the driver.
950 *	Returns 0 if the versions are compatible, a negative error otherwise.
951 */
952int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
953		    unsigned int size)
954{
955	u32 csum;
956	unsigned int i;
957	const __be32 *p = (const __be32 *)tp_sram;
958
959	/* Verify checksum */
960	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
961		csum += ntohl(p[i]);
962	if (csum != 0xffffffff) {
963		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
964		       csum);
965		return -EINVAL;
966	}
967
968	return 0;
969}
970
971enum fw_version_type {
972	FW_VERSION_N3,
973	FW_VERSION_T3
974};
975
976/**
977 *	t3_get_fw_version - read the firmware version
978 *	@adapter: the adapter
979 *	@vers: where to place the version
980 *
981 *	Reads the FW version from flash.
982 */
983int t3_get_fw_version(struct adapter *adapter, u32 *vers)
984{
985	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
986}
987
988/**
989 *	t3_check_fw_version - check if the FW is compatible with this driver
990 *	@adapter: the adapter
991 *
992 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
993 *	if the versions are compatible, a negative error otherwise.
994 */
995int t3_check_fw_version(struct adapter *adapter)
996{
997	int ret;
998	u32 vers;
999	unsigned int type, major, minor;
1000
1001	ret = t3_get_fw_version(adapter, &vers);
1002	if (ret)
1003		return ret;
1004
1005	type = G_FW_VERSION_TYPE(vers);
1006	major = G_FW_VERSION_MAJOR(vers);
1007	minor = G_FW_VERSION_MINOR(vers);
1008
1009	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1010	    minor == FW_VERSION_MINOR)
1011		return 0;
1012	else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
1013		CH_WARN(adapter, "found old FW minor version(%u.%u), "
1014		        "driver compiled for version %u.%u\n", major, minor,
1015			FW_VERSION_MAJOR, FW_VERSION_MINOR);
1016	else {
1017		CH_WARN(adapter, "found newer FW version(%u.%u), "
1018		        "driver compiled for version %u.%u\n", major, minor,
1019			FW_VERSION_MAJOR, FW_VERSION_MINOR);
1020		return 0;
1021	}
1022	return -EINVAL;
1023}
1024
1025/**
1026 *	t3_flash_erase_sectors - erase a range of flash sectors
1027 *	@adapter: the adapter
1028 *	@start: the first sector to erase
1029 *	@end: the last sector to erase
1030 *
1031 *	Erases the sectors in the given range.
1032 */
1033static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1034{
1035	while (start <= end) {
1036		int ret;
1037
1038		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1039		    (ret = sf1_write(adapter, 4, 0,
1040				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
1041		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
1042			return ret;
1043		start++;
1044	}
1045	return 0;
1046}
1047
1048/**
1049 *	t3_load_fw - download firmware
1050 *	@adapter: the adapter
1051 *	@fw_data: the firmware image to write
1052 *	@size: image size
1053 *
1054 *	Write the supplied firmware image to the card's serial flash.
1055 *	The FW image has the following sections: @size - 8 bytes of code and
1056 *	data, followed by 4 bytes of FW version, followed by the 32-bit
1057 *	1's complement checksum of the whole image.
1058 */
1059int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
1060{
1061	u32 csum;
1062	unsigned int i;
1063	const __be32 *p = (const __be32 *)fw_data;
1064	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
1065
1066	if ((size & 3) || size < FW_MIN_SIZE)
1067		return -EINVAL;
1068	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
1069		return -EFBIG;
1070
1071	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1072		csum += ntohl(p[i]);
1073	if (csum != 0xffffffff) {
1074		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1075		       csum);
1076		return -EINVAL;
1077	}
1078
1079	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1080	if (ret)
1081		goto out;
1082
1083	size -= 8;		/* trim off version and checksum */
1084	for (addr = FW_FLASH_BOOT_ADDR; size;) {
1085		unsigned int chunk_size = min(size, 256U);
1086
1087		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
1088		if (ret)
1089			goto out;
1090
1091		addr += chunk_size;
1092		fw_data += chunk_size;
1093		size -= chunk_size;
1094	}
1095
1096	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
1097out:
1098	if (ret)
1099		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
1100	return ret;
1101}
1102
1103#define CIM_CTL_BASE 0x2000
1104
1105/**
1106 *      t3_cim_ctl_blk_read - read a block from CIM control region
1107 *
1108 *      @adap: the adapter
1109 *      @addr: the start address within the CIM control region
1110 *      @n: number of words to read
1111 *      @valp: where to store the result
1112 *
1113 *      Reads a block of 4-byte words from the CIM control region.
1114 */
1115int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
1116			unsigned int n, unsigned int *valp)
1117{
1118	int ret = 0;
1119
1120	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1121		return -EBUSY;
1122
1123	for ( ; !ret && n--; addr += 4) {
1124		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1125		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1126				      0, 5, 2);
1127		if (!ret)
1128			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1129	}
1130	return ret;
1131}
1132
1133static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
1134			       u32 *rx_hash_high, u32 *rx_hash_low)
1135{
1136	/* stop Rx unicast traffic */
1137	t3_mac_disable_exact_filters(mac);
1138
1139	/* stop broadcast, multicast, promiscuous mode traffic */
1140	*rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
1141	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
1142			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
1143			 F_DISBCAST);
1144
1145	*rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
1146	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);
1147
1148	*rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
1149	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);
1150
1151	/* Leave time to drain max RX fifo */
1152	msleep(1);
1153}
1154
1155static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
1156			       u32 rx_hash_high, u32 rx_hash_low)
1157{
1158	t3_mac_enable_exact_filters(mac);
1159	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
1160			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
1161			 rx_cfg);
1162	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
1163	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
1164}
1165
1166/**
1167 *	t3_link_changed - handle interface link changes
1168 *	@adapter: the adapter
1169 *	@port_id: the port index that changed link state
1170 *
1171 *	Called when a port's link settings change to propagate the new values
1172 *	to the associated PHY and MAC.  After performing the common tasks it
1173 *	invokes an OS-specific handler.
1174 */
1175void t3_link_changed(struct adapter *adapter, int port_id)
1176{
1177	int link_ok, speed, duplex, fc;
1178	struct port_info *pi = adap2pinfo(adapter, port_id);
1179	struct cphy *phy = &pi->phy;
1180	struct cmac *mac = &pi->mac;
1181	struct link_config *lc = &pi->link_config;
1182
1183	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1184
1185	if (!lc->link_ok && link_ok) {
1186		u32 rx_cfg, rx_hash_high, rx_hash_low;
1187		u32 status;
1188
1189		t3_xgm_intr_enable(adapter, port_id);
1190		t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
1191		t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
1192		t3_mac_enable(mac, MAC_DIRECTION_RX);
1193
1194		status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
1195		if (status & F_LINKFAULTCHANGE) {
1196			mac->stats.link_faults++;
1197			pi->link_fault = 1;
1198		}
1199		t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
1200	}
1201
1202	if (lc->requested_fc & PAUSE_AUTONEG)
1203		fc &= lc->requested_fc;
1204	else
1205		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1206
1207	if (link_ok == lc->link_ok && speed == lc->speed &&
1208	    duplex == lc->duplex && fc == lc->fc)
1209		return;                            /* nothing changed */
1210
1211	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1212	    uses_xaui(adapter)) {
1213		if (link_ok)
1214			t3b_pcs_reset(mac);
1215		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1216			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
1217	}
1218	lc->link_ok = link_ok;
1219	lc->speed = speed < 0 ? SPEED_INVALID : speed;
1220	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1221
1222	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1223		/* Set MAC speed, duplex, and flow control to match PHY. */
1224		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1225		lc->fc = fc;
1226	}
1227
1228	t3_os_link_changed(adapter, port_id, link_ok && !pi->link_fault,
1229			   speed, duplex, fc);
1230}
1231
1232void t3_link_fault(struct adapter *adapter, int port_id)
1233{
1234	struct port_info *pi = adap2pinfo(adapter, port_id);
1235	struct cmac *mac = &pi->mac;
1236	struct cphy *phy = &pi->phy;
1237	struct link_config *lc = &pi->link_config;
1238	int link_ok, speed, duplex, fc, link_fault;
1239	u32 rx_cfg, rx_hash_high, rx_hash_low;
1240
1241	t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
1242
1243	if (adapter->params.rev > 0 && uses_xaui(adapter))
1244		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);
1245
1246	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
1247	t3_mac_enable(mac, MAC_DIRECTION_RX);
1248
1249	t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
1250
1251	link_fault = t3_read_reg(adapter,
1252				 A_XGM_INT_STATUS + mac->offset);
1253	link_fault &= F_LINKFAULTCHANGE;
1254
1255	link_ok = lc->link_ok;
1256	speed = lc->speed;
1257	duplex = lc->duplex;
1258	fc = lc->fc;
1259
1260	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1261
1262	if (link_fault) {
1263		lc->link_ok = 0;
1264		lc->speed = SPEED_INVALID;
1265		lc->duplex = DUPLEX_INVALID;
1266
1267		t3_os_link_fault(adapter, port_id, 0);
1268
1269		/* Account link faults only when the phy reports a link up */
1270		if (link_ok)
1271			mac->stats.link_faults++;
1272	} else {
1273		if (link_ok)
1274			t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1275				     F_TXACTENABLE | F_RXEN);
1276
1277		pi->link_fault = 0;
1278		lc->link_ok = (unsigned char)link_ok;
1279		lc->speed = speed < 0 ? SPEED_INVALID : speed;
1280		lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1281		t3_os_link_fault(adapter, port_id, link_ok);
1282	}
1283}
1284
1285/**
1286 *	t3_link_start - apply link configuration to MAC/PHY
1287 *	@phy: the PHY to setup
1288 *	@mac: the MAC to setup
1289 *	@lc: the requested link configuration
1290 *
1291 *	Set up a port's MAC and PHY according to a desired link configuration.
1292 *	- If the PHY can auto-negotiate first decide what to advertise, then
1293 *	  enable/disable auto-negotiation as desired, and reset.
1294 *	- If the PHY does not auto-negotiate just reset it.
1295 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1296 *	  otherwise do it later based on the outcome of auto-negotiation.
1297 */
1298int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1299{
1300	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1301
1302	lc->link_ok = 0;
1303	if (lc->supported & SUPPORTED_Autoneg) {
1304		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1305		if (fc) {
1306			lc->advertising |= ADVERTISED_Asym_Pause;
1307			if (fc & PAUSE_RX)
1308				lc->advertising |= ADVERTISED_Pause;
1309		}
1310		phy->ops->advertise(phy, lc->advertising);
1311
1312		if (lc->autoneg == AUTONEG_DISABLE) {
1313			lc->speed = lc->requested_speed;
1314			lc->duplex = lc->requested_duplex;
1315			lc->fc = (unsigned char)fc;
1316			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1317						   fc);
1318			/* Also disables autoneg */
1319			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1320		} else
1321			phy->ops->autoneg_enable(phy);
1322	} else {
1323		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1324		lc->fc = (unsigned char)fc;
1325		phy->ops->reset(phy, 0);
1326	}
1327	return 0;
1328}
1329
1330/**
1331 *	t3_set_vlan_accel - control HW VLAN extraction
1332 *	@adapter: the adapter
1333 *	@ports: bitmap of adapter ports to operate on
1334 *	@on: enable (1) or disable (0) HW VLAN extraction
1335 *
1336 *	Enables or disables HW extraction of VLAN tags for the given port.
1337 */
1338void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1339{
1340	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1341			 ports << S_VLANEXTRACTIONENABLE,
1342			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1343}
1344
1345struct intr_info {
1346	unsigned int mask;	/* bits to check in interrupt status */
1347	const char *msg;	/* message to print or NULL */
1348	short stat_idx;		/* stat counter to increment or -1 */
1349	unsigned short fatal;	/* whether the condition reported is fatal */
1350};
1351
1352/**
1353 *	t3_handle_intr_status - table driven interrupt handler
1354 *	@adapter: the adapter that generated the interrupt
1355 *	@reg: the interrupt status register to process
1356 *	@mask: a mask to apply to the interrupt status
1357 *	@acts: table of interrupt actions
1358 *	@stats: statistics counters tracking interrupt occurrences
1359 *
1360 *	A table driven interrupt handler that applies a set of masks to an
1361 *	interrupt status word and performs the corresponding actions if the
1362 *	interrupts described by the mask have occurred.  The actions include
1363 *	optionally printing a warning or alert message, and optionally
1364 *	incrementing a stat counter.  The table is terminated by an entry
1365 *	specifying mask 0.  Returns the number of fatal interrupt conditions.
1366 */
1367static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1368				 unsigned int mask,
1369				 const struct intr_info *acts,
1370				 unsigned long *stats)
1371{
1372	int fatal = 0;
1373	unsigned int status = t3_read_reg(adapter, reg) & mask;
1374
1375	for (; acts->mask; ++acts) {
1376		if (!(status & acts->mask))
1377			continue;
1378		if (acts->fatal) {
1379			fatal++;
1380			CH_ALERT(adapter, "%s (0x%x)\n",
1381				 acts->msg, status & acts->mask);
1382			status &= ~acts->mask;
1383		} else if (acts->msg)
1384			CH_WARN(adapter, "%s (0x%x)\n",
1385				acts->msg, status & acts->mask);
1386		if (acts->stat_idx >= 0)
1387			stats[acts->stat_idx]++;
1388	}
1389	if (status)		/* clear processed interrupts */
1390		t3_write_reg(adapter, reg, status);
1391	return fatal;
1392}
1393
1394#define SGE_INTR_MASK (F_RSPQDISABLED | \
1395		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
1396		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
1397		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1398		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1399		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1400		       F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
1401		       F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
1402		       F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
1403		       F_LOPIODRBDROPERR)
1404#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1405		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1406		       F_NFASRCHFAIL)
1407#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1408#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1409		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1410		       F_TXFIFO_UNDERRUN)
1411#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1412			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1413			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1414			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1415			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1416			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1417#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1418			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1419			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1420			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
1421			F_TXPARERR | V_BISTERR(M_BISTERR))
1422#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
1423			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
1424			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
1425#define ULPTX_INTR_MASK 0xfc
1426#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
1427			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1428			 F_ZERO_SWITCH_ERROR)
1429#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1430		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1431		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1432	 	       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
1433		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
1434		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
1435		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
1436		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
1437#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1438			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1439			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1440#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1441			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1442			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1443#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1444		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1445		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1446		       V_MCAPARERRENB(M_MCAPARERRENB))
1447#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
1448#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1449		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1450		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1451		      F_MPS0 | F_CPL_SWITCH)
1452/*
1453 * Interrupt handler for the PCIX1 module.
1454 */
1455static void pci_intr_handler(struct adapter *adapter)
1456{
1457	static const struct intr_info pcix1_intr_info[] = {
1458		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
1459		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
1460		{F_RCVTARABT, "PCI received target abort", -1, 1},
1461		{F_RCVMSTABT, "PCI received master abort", -1, 1},
1462		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
1463		{F_DETPARERR, "PCI detected parity error", -1, 1},
1464		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
1465		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
1466		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
1467		 1},
1468		{F_DETCORECCERR, "PCI correctable ECC error",
1469		 STAT_PCI_CORR_ECC, 0},
1470		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
1471		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1472		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1473		 1},
1474		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1475		 1},
1476		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1477		 1},
1478		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1479		 "error", -1, 1},
1480		{0}
1481	};
1482
1483	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1484				  pcix1_intr_info, adapter->irq_stats))
1485		t3_fatal_err(adapter);
1486}
1487
1488/*
1489 * Interrupt handler for the PCIE module.
1490 */
1491static void pcie_intr_handler(struct adapter *adapter)
1492{
1493	static const struct intr_info pcie_intr_info[] = {
1494		{F_PEXERR, "PCI PEX error", -1, 1},
1495		{F_UNXSPLCPLERRR,
1496		 "PCI unexpected split completion DMA read error", -1, 1},
1497		{F_UNXSPLCPLERRC,
1498		 "PCI unexpected split completion DMA command error", -1, 1},
1499		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1500		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
1501		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
1502		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
1503		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1504		 "PCI MSI-X table/PBA parity error", -1, 1},
1505		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
1506		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
1507		{F_RXPARERR, "PCI Rx parity error", -1, 1},
1508		{F_TXPARERR, "PCI Tx parity error", -1, 1},
1509		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
1510		{0}
1511	};
1512
1513	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1514		CH_ALERT(adapter, "PEX error code 0x%x\n",
1515			 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1516
1517	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1518				  pcie_intr_info, adapter->irq_stats))
1519		t3_fatal_err(adapter);
1520}
1521
1522/*
1523 * TP interrupt handler.
1524 */
1525static void tp_intr_handler(struct adapter *adapter)
1526{
1527	static const struct intr_info tp_intr_info[] = {
1528		{0xffffff, "TP parity error", -1, 1},
1529		{0x1000000, "TP out of Rx pages", -1, 1},
1530		{0x2000000, "TP out of Tx pages", -1, 1},
1531		{0}
1532	};
1533
1534	static const struct intr_info tp_intr_info_t3c[] = {
1535		{0x1fffffff, "TP parity error", -1, 1},
1536		{F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
1537		{F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
1538		{0}
1539	};
1540
1541	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1542				  adapter->params.rev < T3_REV_C ?
1543				  tp_intr_info : tp_intr_info_t3c, NULL))
1544		t3_fatal_err(adapter);
1545}
1546
1547/*
1548 * CIM interrupt handler.
1549 */
1550static void cim_intr_handler(struct adapter *adapter)
1551{
1552	static const struct intr_info cim_intr_info[] = {
1553		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
1554		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
1555		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
1556		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
1557		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
1558		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
1559		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
1560		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
1561		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
1562		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
1563		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
1564		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
1565		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
1566		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
1567		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
1568		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
1569		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
1570		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
1571		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
1572		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
1573		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
1574		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
1575		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
1576		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
1577		{0}
1578	};
1579
1580	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1581				  cim_intr_info, NULL))
1582		t3_fatal_err(adapter);
1583}
1584
1585/*
1586 * ULP RX interrupt handler.
1587 */
1588static void ulprx_intr_handler(struct adapter *adapter)
1589{
1590	static const struct intr_info ulprx_intr_info[] = {
1591		{F_PARERRDATA, "ULP RX data parity error", -1, 1},
1592		{F_PARERRPCMD, "ULP RX command parity error", -1, 1},
1593		{F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
1594		{F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
1595		{F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
1596		{F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
1597		{F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
1598		{F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
1599		{0}
1600	};
1601
1602	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1603				  ulprx_intr_info, NULL))
1604		t3_fatal_err(adapter);
1605}
1606
1607/*
1608 * ULP TX interrupt handler.
1609 */
1610static void ulptx_intr_handler(struct adapter *adapter)
1611{
1612	static const struct intr_info ulptx_intr_info[] = {
1613		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1614		 STAT_ULP_CH0_PBL_OOB, 0},
1615		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1616		 STAT_ULP_CH1_PBL_OOB, 0},
1617		{0xfc, "ULP TX parity error", -1, 1},
1618		{0}
1619	};
1620
1621	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1622				  ulptx_intr_info, adapter->irq_stats))
1623		t3_fatal_err(adapter);
1624}
1625
1626#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1627	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1628	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1629	F_ICSPI1_TX_FRAMING_ERROR)
1630#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1631	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1632	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1633	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1634
1635/*
1636 * PM TX interrupt handler.
1637 */
1638static void pmtx_intr_handler(struct adapter *adapter)
1639{
1640	static const struct intr_info pmtx_intr_info[] = {
1641		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1642		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1643		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1644		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1645		 "PMTX ispi parity error", -1, 1},
1646		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1647		 "PMTX ospi parity error", -1, 1},
1648		{0}
1649	};
1650
1651	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1652				  pmtx_intr_info, NULL))
1653		t3_fatal_err(adapter);
1654}
1655
1656#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1657	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1658	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1659	F_IESPI1_TX_FRAMING_ERROR)
1660#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1661	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1662	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1663	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1664
1665/*
1666 * PM RX interrupt handler.
1667 */
1668static void pmrx_intr_handler(struct adapter *adapter)
1669{
1670	static const struct intr_info pmrx_intr_info[] = {
1671		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1672		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1673		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1674		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1675		 "PMRX ispi parity error", -1, 1},
1676		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1677		 "PMRX ospi parity error", -1, 1},
1678		{0}
1679	};
1680
1681	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1682				  pmrx_intr_info, NULL))
1683		t3_fatal_err(adapter);
1684}
1685
1686/*
1687 * CPL switch interrupt handler.
1688 */
1689static void cplsw_intr_handler(struct adapter *adapter)
1690{
1691	static const struct intr_info cplsw_intr_info[] = {
1692		{F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
1693		{F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
1694		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1695		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1696		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1697		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1698		{0}
1699	};
1700
1701	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1702				  cplsw_intr_info, NULL))
1703		t3_fatal_err(adapter);
1704}
1705
1706/*
1707 * MPS interrupt handler.
1708 */
1709static void mps_intr_handler(struct adapter *adapter)
1710{
1711	static const struct intr_info mps_intr_info[] = {
1712		{0x1ff, "MPS parity error", -1, 1},
1713		{0}
1714	};
1715
1716	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1717				  mps_intr_info, NULL))
1718		t3_fatal_err(adapter);
1719}
1720
1721#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1722
1723/*
1724 * MC7 interrupt handler.
1725 */
1726static void mc7_intr_handler(struct mc7 *mc7)
1727{
1728	struct adapter *adapter = mc7->adapter;
1729	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1730
1731	if (cause & F_CE) {
1732		mc7->stats.corr_err++;
1733		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1734			"data 0x%x 0x%x 0x%x\n", mc7->name,
1735			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1736			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1737			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1738			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1739	}
1740
1741	if (cause & F_UE) {
1742		mc7->stats.uncorr_err++;
1743		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1744			 "data 0x%x 0x%x 0x%x\n", mc7->name,
1745			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1746			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1747			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1748			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1749	}
1750
1751	if (G_PE(cause)) {
1752		mc7->stats.parity_err++;
1753		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1754			 mc7->name, G_PE(cause));
1755	}
1756
1757	if (cause & F_AE) {
1758		u32 addr = 0;
1759
1760		if (adapter->params.rev > 0)
1761			addr = t3_read_reg(adapter,
1762					   mc7->offset + A_MC7_ERR_ADDR);
1763		mc7->stats.addr_err++;
1764		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1765			 mc7->name, addr);
1766	}
1767
1768	if (cause & MC7_INTR_FATAL)
1769		t3_fatal_err(adapter);
1770
1771	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1772}
1773
1774#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1775			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1776/*
1777 * XGMAC interrupt handler.
1778 */
1779static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1780{
1781	struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1782	/*
1783	 * We mask out interrupt causes for which we're not taking interrupts.
1784	 * This allows us to use polling logic to monitor some of the other
1785	 * conditions when taking interrupts would impose too much load on the
1786	 * system.
1787	 */
1788	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
1789		    ~F_RXFIFO_OVERFLOW;
1790
1791	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1792		mac->stats.tx_fifo_parity_err++;
1793		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1794	}
1795	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1796		mac->stats.rx_fifo_parity_err++;
1797		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1798	}
1799	if (cause & F_TXFIFO_UNDERRUN)
1800		mac->stats.tx_fifo_urun++;
1801	if (cause & F_RXFIFO_OVERFLOW)
1802		mac->stats.rx_fifo_ovfl++;
1803	if (cause & V_SERDES_LOS(M_SERDES_LOS))
1804		mac->stats.serdes_signal_loss++;
1805	if (cause & F_XAUIPCSCTCERR)
1806		mac->stats.xaui_pcs_ctc_err++;
1807	if (cause & F_XAUIPCSALIGNCHANGE)
1808		mac->stats.xaui_pcs_align_change++;
1809	if (cause & F_XGM_INT) {
1810		t3_set_reg_field(adap,
1811				 A_XGM_INT_ENABLE + mac->offset,
1812				 F_XGM_INT, 0);
1813		mac->stats.link_faults++;
1814
1815		t3_os_link_fault_handler(adap, idx);
1816	}
1817
1818	if (cause & XGM_INTR_FATAL)
1819		t3_fatal_err(adap);
1820
1821	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1822	return cause != 0;
1823}
1824
1825/*
1826 * Interrupt handler for PHY events.
1827 */
1828int t3_phy_intr_handler(struct adapter *adapter)
1829{
1830	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1831
1832	for_each_port(adapter, i) {
1833		struct port_info *p = adap2pinfo(adapter, i);
1834
1835		if (!(p->phy.caps & SUPPORTED_IRQ))
1836			continue;
1837
1838		if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
1839			int phy_cause = p->phy.ops->intr_handler(&p->phy);
1840
1841			if (phy_cause & cphy_cause_link_change)
1842				t3_link_changed(adapter, i);
1843			if (phy_cause & cphy_cause_fifo_error)
1844				p->phy.fifo_errors++;
1845			if (phy_cause & cphy_cause_module_change)
1846				t3_os_phymod_changed(adapter, i);
1847		}
1848	}
1849
1850	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1851	return 0;
1852}
1853
1854/*
1855 * T3 slow path (non-data) interrupt handler.
1856 */
1857int t3_slow_intr_handler(struct adapter *adapter)
1858{
1859	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1860
1861	cause &= adapter->slow_intr_mask;
1862	if (!cause)
1863		return 0;
1864	if (cause & F_PCIM0) {
1865		if (is_pcie(adapter))
1866			pcie_intr_handler(adapter);
1867		else
1868			pci_intr_handler(adapter);
1869	}
1870	if (cause & F_SGE3)
1871		t3_sge_err_intr_handler(adapter);
1872	if (cause & F_MC7_PMRX)
1873		mc7_intr_handler(&adapter->pmrx);
1874	if (cause & F_MC7_PMTX)
1875		mc7_intr_handler(&adapter->pmtx);
1876	if (cause & F_MC7_CM)
1877		mc7_intr_handler(&adapter->cm);
1878	if (cause & F_CIM)
1879		cim_intr_handler(adapter);
1880	if (cause & F_TP1)
1881		tp_intr_handler(adapter);
1882	if (cause & F_ULP2_RX)
1883		ulprx_intr_handler(adapter);
1884	if (cause & F_ULP2_TX)
1885		ulptx_intr_handler(adapter);
1886	if (cause & F_PM1_RX)
1887		pmrx_intr_handler(adapter);
1888	if (cause & F_PM1_TX)
1889		pmtx_intr_handler(adapter);
1890	if (cause & F_CPL_SWITCH)
1891		cplsw_intr_handler(adapter);
1892	if (cause & F_MPS0)
1893		mps_intr_handler(adapter);
1894	if (cause & F_MC5A)
1895		t3_mc5_intr_handler(&adapter->mc5);
1896	if (cause & F_XGMAC0_0)
1897		mac_intr_handler(adapter, 0);
1898	if (cause & F_XGMAC0_1)
1899		mac_intr_handler(adapter, 1);
1900	if (cause & F_T3DBG)
1901		t3_os_ext_intr_handler(adapter);
1902
1903	/* Clear the interrupts just processed. */
1904	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1905	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
1906	return 1;
1907}
1908
1909static unsigned int calc_gpio_intr(struct adapter *adap)
1910{
1911	unsigned int i, gpi_intr = 0;
1912
1913	for_each_port(adap, i)
1914		if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1915		    adapter_info(adap)->gpio_intr[i])
1916			gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1917	return gpi_intr;
1918}
1919
1920/**
1921 *	t3_intr_enable - enable interrupts
1922 *	@adapter: the adapter whose interrupts should be enabled
1923 *
1924 *	Enable interrupts by setting the interrupt enable registers of the
1925 *	various HW modules and then enabling the top-level interrupt
1926 *	concentrator.
1927 */
1928void t3_intr_enable(struct adapter *adapter)
1929{
1930	static const struct addr_val_pair intr_en_avp[] = {
1931		{A_SG_INT_ENABLE, SGE_INTR_MASK},
1932		{A_MC7_INT_ENABLE, MC7_INTR_MASK},
1933		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1934		 MC7_INTR_MASK},
1935		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1936		 MC7_INTR_MASK},
1937		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1938		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1939		{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1940		{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1941		{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1942		{A_MPS_INT_ENABLE, MPS_INTR_MASK},
1943	};
1944
1945	adapter->slow_intr_mask = PL_INTR_MASK;
1946
1947	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1948	t3_write_reg(adapter, A_TP_INT_ENABLE,
1949		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
1950
1951	if (adapter->params.rev > 0) {
1952		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1953			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1954		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1955			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1956			     F_PBL_BOUND_ERR_CH1);
1957	} else {
1958		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1959		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1960	}
1961
1962	t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
1963
1964	if (is_pcie(adapter))
1965		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1966	else
1967		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1968	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1969	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
1970}
1971
1972/**
1973 *	t3_intr_disable - disable a card's interrupts
1974 *	@adapter: the adapter whose interrupts should be disabled
1975 *
1976 *	Disable interrupts.  We only disable the top-level interrupt
1977 *	concentrator and the SGE data interrupts.
1978 */
1979void t3_intr_disable(struct adapter *adapter)
1980{
1981	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1982	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
1983	adapter->slow_intr_mask = 0;
1984}
1985
1986/**
1987 *	t3_intr_clear - clear all interrupts
1988 *	@adapter: the adapter whose interrupts should be cleared
1989 *
1990 *	Clears all interrupts.
1991 */
1992void t3_intr_clear(struct adapter *adapter)
1993{
1994	static const unsigned int cause_reg_addr[] = {
1995		A_SG_INT_CAUSE,
1996		A_SG_RSPQ_FL_STATUS,
1997		A_PCIX_INT_CAUSE,
1998		A_MC7_INT_CAUSE,
1999		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2000		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2001		A_CIM_HOST_INT_CAUSE,
2002		A_TP_INT_CAUSE,
2003		A_MC5_DB_INT_CAUSE,
2004		A_ULPRX_INT_CAUSE,
2005		A_ULPTX_INT_CAUSE,
2006		A_CPL_INTR_CAUSE,
2007		A_PM1_TX_INT_CAUSE,
2008		A_PM1_RX_INT_CAUSE,
2009		A_MPS_INT_CAUSE,
2010		A_T3DBG_INT_CAUSE,
2011	};
2012	unsigned int i;
2013
2014	/* Clear PHY and MAC interrupts for each port. */
2015	for_each_port(adapter, i)
2016	    t3_port_intr_clear(adapter, i);
2017
2018	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
2019		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
2020
2021	if (is_pcie(adapter))
2022		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
2023	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
2024	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
2025}
2026
2027void t3_xgm_intr_enable(struct adapter *adapter, int idx)
2028{
2029	struct port_info *pi = adap2pinfo(adapter, idx);
2030
2031	t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
2032		     XGM_EXTRA_INTR_MASK);
2033}
2034
2035void t3_xgm_intr_disable(struct adapter *adapter, int idx)
2036{
2037	struct port_info *pi = adap2pinfo(adapter, idx);
2038
2039	t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
2040		     0x7ff);
2041}
2042
2043/**
2044 *	t3_port_intr_enable - enable port-specific interrupts
2045 *	@adapter: associated adapter
2046 *	@idx: index of port whose interrupts should be enabled
2047 *
2048 *	Enable port-specific (i.e., MAC and PHY) interrupts for the given
2049 *	adapter port.
2050 */
2051void t3_port_intr_enable(struct adapter *adapter, int idx)
2052{
2053	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2054
2055	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
2056	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2057	phy->ops->intr_enable(phy);
2058}
2059
2060/**
2061 *	t3_port_intr_disable - disable port-specific interrupts
2062 *	@adapter: associated adapter
2063 *	@idx: index of port whose interrupts should be disabled
2064 *
2065 *	Disable port-specific (i.e., MAC and PHY) interrupts for the given
2066 *	adapter port.
2067 */
2068void t3_port_intr_disable(struct adapter *adapter, int idx)
2069{
2070	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2071
2072	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
2073	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2074	phy->ops->intr_disable(phy);
2075}
2076
2077/**
2078 *	t3_port_intr_clear - clear port-specific interrupts
2079 *	@adapter: associated adapter
2080 *	@idx: index of port whose interrupts to clear
2081 *
2082 *	Clear port-specific (i.e., MAC and PHY) interrupts for the given
2083 *	adapter port.
2084 */
2085static void t3_port_intr_clear(struct adapter *adapter, int idx)
2086{
2087	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2088
2089	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
2090	t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
2091	phy->ops->intr_clear(phy);
2092}
2093
2094#define SG_CONTEXT_CMD_ATTEMPTS 100
2095
2096/**
2097 * 	t3_sge_write_context - write an SGE context
2098 * 	@adapter: the adapter
2099 * 	@id: the context id
2100 * 	@type: the context type
2101 *
2102 * 	Program an SGE context with the values already loaded in the
2103 * 	CONTEXT_DATA? registers.
2104 */
2105static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
2106				unsigned int type)
2107{
2108	if (type == F_RESPONSEQ) {
2109		/*
2110		 * Can't write the Response Queue Context bits for
2111		 * Interrupt Armed or the Reserve bits after the chip
2112		 * has been initialized out of reset.  Writing to these
2113		 * bits can confuse the hardware.
2114		 */
2115		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2116		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2117		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
2118		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2119	} else {
2120		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2121		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2122		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2123		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2124	}
2125	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2126		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2127	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2128			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2129}
2130
2131/**
2132 *	clear_sge_ctxt - completely clear an SGE context
2133 *	@adap: the adapter
2134 *	@id: the context id
2135 *	@type: the context type
2136 *
2137 *	Completely clear an SGE context.  Used predominantly at post-reset
2138 *	initialization.  Note in particular that we don't skip writing to any
2139 *	"sensitive bits" in the contexts the way that t3_sge_write_context()
2140 *	does ...
2141 */
2142static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
2143			  unsigned int type)
2144{
2145	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2146	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2147	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2148	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2149	t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
2150	t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
2151	t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
2152	t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
2153	t3_write_reg(adap, A_SG_CONTEXT_CMD,
2154		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2155	return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2156			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2157}
2158
2159/**
2160 *	t3_sge_init_ecntxt - initialize an SGE egress context
2161 *	@adapter: the adapter to configure
2162 *	@id: the context id
2163 *	@gts_enable: whether to enable GTS for the context
2164 *	@type: the egress context type
2165 *	@respq: associated response queue
2166 *	@base_addr: base address of queue
2167 *	@size: number of queue entries
2168 *	@token: uP token
2169 *	@gen: initial generation value for the context
2170 *	@cidx: consumer pointer
2171 *
2172 *	Initialize an SGE egress context and make it ready for use.  If the
2173 *	platform allows concurrent context operations, the caller is
2174 *	responsible for appropriate locking.
2175 */
2176int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
2177		       enum sge_context_type type, int respq, u64 base_addr,
2178		       unsigned int size, unsigned int token, int gen,
2179		       unsigned int cidx)
2180{
2181	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2182
2183	if (base_addr & 0xfff)	/* must be 4K aligned */
2184		return -EINVAL;
2185	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2186		return -EBUSY;
2187
2188	base_addr >>= 12;
2189	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2190		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2191	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2192		     V_EC_BASE_LO(base_addr & 0xffff));
2193	base_addr >>= 16;
2194	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
2195	base_addr >>= 32;
2196	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2197		     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
2198		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2199		     F_EC_VALID);
2200	return t3_sge_write_context(adapter, id, F_EGRESS);
2201}
2202
2203/**
2204 *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2205 *	@adapter: the adapter to configure
2206 *	@id: the context id
2207 *	@gts_enable: whether to enable GTS for the context
2208 *	@base_addr: base address of queue
2209 *	@size: number of queue entries
2210 *	@bsize: size of each buffer for this queue
2211 *	@cong_thres: threshold to signal congestion to upstream producers
2212 *	@gen: initial generation value for the context
2213 *	@cidx: consumer pointer
2214 *
2215 *	Initialize an SGE free list context and make it ready for use.  The
2216 *	caller is responsible for ensuring only one context operation occurs
2217 *	at a time.
2218 */
2219int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
2220			int gts_enable, u64 base_addr, unsigned int size,
2221			unsigned int bsize, unsigned int cong_thres, int gen,
2222			unsigned int cidx)
2223{
2224	if (base_addr & 0xfff)	/* must be 4K aligned */
2225		return -EINVAL;
2226	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2227		return -EBUSY;
2228
2229	base_addr >>= 12;
2230	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
2231	base_addr >>= 32;
2232	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2233		     V_FL_BASE_HI((u32) base_addr) |
2234		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2235	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2236		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2237		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2238	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2239		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2240		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2241	return t3_sge_write_context(adapter, id, F_FREELIST);
2242}
2243
2244/**
2245 *	t3_sge_init_rspcntxt - initialize an SGE response queue context
2246 *	@adapter: the adapter to configure
2247 *	@id: the context id
2248 *	@irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2249 *	@base_addr: base address of queue
2250 *	@size: number of queue entries
2251 *	@fl_thres: threshold for selecting the normal or jumbo free list
2252 *	@gen: initial generation value for the context
2253 *	@cidx: consumer pointer
2254 *
2255 *	Initialize an SGE response queue context and make it ready for use.
2256 *	The caller is responsible for ensuring only one context operation
2257 *	occurs at a time.
2258 */
2259int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
2260			 int irq_vec_idx, u64 base_addr, unsigned int size,
2261			 unsigned int fl_thres, int gen, unsigned int cidx)
2262{
2263	unsigned int intr = 0;
2264
2265	if (base_addr & 0xfff)	/* must be 4K aligned */
2266		return -EINVAL;
2267	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2268		return -EBUSY;
2269
2270	base_addr >>= 12;
2271	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2272		     V_CQ_INDEX(cidx));
2273	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2274	base_addr >>= 32;
2275	if (irq_vec_idx >= 0)
2276		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2277	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2278		     V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2279	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2280	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2281}
2282
2283/**
2284 *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
2285 *	@adapter: the adapter to configure
2286 *	@id: the context id
2287 *	@base_addr: base address of queue
2288 *	@size: number of queue entries
2289 *	@rspq: response queue for async notifications
2290 *	@ovfl_mode: CQ overflow mode
2291 *	@credits: completion queue credits
2292 *	@credit_thres: the credit threshold
2293 *
2294 *	Initialize an SGE completion queue context and make it ready for use.
2295 *	The caller is responsible for ensuring only one context operation
2296 *	occurs at a time.
2297 */
2298int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2299			unsigned int size, int rspq, int ovfl_mode,
2300			unsigned int credits, unsigned int credit_thres)
2301{
2302	if (base_addr & 0xfff)	/* must be 4K aligned */
2303		return -EINVAL;
2304	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2305		return -EBUSY;
2306
2307	base_addr >>= 12;
2308	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2309	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2310	base_addr >>= 32;
2311	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2312		     V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2313		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2314		     V_CQ_ERR(ovfl_mode));
2315	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2316		     V_CQ_CREDIT_THRES(credit_thres));
2317	return t3_sge_write_context(adapter, id, F_CQ);
2318}
2319
2320/**
2321 *	t3_sge_enable_ecntxt - enable/disable an SGE egress context
2322 *	@adapter: the adapter
2323 *	@id: the egress context id
2324 *	@enable: enable (1) or disable (0) the context
2325 *
2326 *	Enable or disable an SGE egress context.  The caller is responsible for
2327 *	ensuring only one context operation occurs at a time.
2328 */
2329int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2330{
2331	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2332		return -EBUSY;
2333
2334	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2335	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2336	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2337	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2338	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2339	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2340		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2341	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2342			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2343}
2344
2345/**
2346 *	t3_sge_disable_fl - disable an SGE free-buffer list
2347 *	@adapter: the adapter
2348 *	@id: the free list context id
2349 *
2350 *	Disable an SGE free-buffer list.  The caller is responsible for
2351 *	ensuring only one context operation occurs at a time.
2352 */
2353int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
2354{
2355	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2356		return -EBUSY;
2357
2358	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2359	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2360	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2361	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2362	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2363	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2364		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2365	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2366			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2367}
2368
2369/**
2370 *	t3_sge_disable_rspcntxt - disable an SGE response queue
2371 *	@adapter: the adapter
2372 *	@id: the response queue context id
2373 *
2374 *	Disable an SGE response queue.  The caller is responsible for
2375 *	ensuring only one context operation occurs at a time.
2376 */
2377int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2378{
2379	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2380		return -EBUSY;
2381
2382	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2383	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2384	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2385	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2386	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2387	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2388		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2389	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2390			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2391}
2392
2393/**
2394 *	t3_sge_disable_cqcntxt - disable an SGE completion queue
2395 *	@adapter: the adapter
2396 *	@id: the completion queue context id
2397 *
2398 *	Disable an SGE completion queue.  The caller is responsible for
2399 *	ensuring only one context operation occurs at a time.
2400 */
2401int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2402{
2403	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2404		return -EBUSY;
2405
2406	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2407	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2408	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2409	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2410	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2411	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2412		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2413	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2414			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2415}
2416
2417/**
2418 *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
2419 *	@adapter: the adapter
2420 *	@id: the context id
2421 *	@op: the operation to perform
2422 *	@credits: credit value to write
2423 *
2424 *	Perform the selected operation on an SGE completion queue context.
2425 *	The caller is responsible for ensuring only one context operation
2426 *	occurs at a time.
2427 */
2428int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2429		      unsigned int credits)
2430{
2431	u32 val;
2432
2433	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2434		return -EBUSY;
2435
2436	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2437	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2438		     V_CONTEXT(id) | F_CQ);
2439	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2440				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2441		return -EIO;
2442
2443	if (op >= 2 && op < 7) {
2444		if (adapter->params.rev > 0)
2445			return G_CQ_INDEX(val);
2446
2447		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2448			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2449		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2450				    F_CONTEXT_CMD_BUSY, 0,
2451				    SG_CONTEXT_CMD_ATTEMPTS, 1))
2452			return -EIO;
2453		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2454	}
2455	return 0;
2456}
2457
2458/**
2459 *	t3_config_rss - configure Rx packet steering
2460 *	@adapter: the adapter
2461 *	@rss_config: RSS settings (written to TP_RSS_CONFIG)
2462 *	@cpus: values for the CPU lookup table (0xff terminated)
2463 *	@rspq: values for the response queue lookup table (0xffff terminated)
2464 *
2465 *	Programs the receive packet steering logic.  @cpus and @rspq provide
2466 *	the values for the CPU and response queue lookup tables.  If they
2467 *	provide fewer values than the size of the tables the supplied values
2468 *	are used repeatedly until the tables are fully populated.
2469 */
2470void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2471		   const u8 * cpus, const u16 *rspq)
2472{
2473	int i, j, cpu_idx = 0, q_idx = 0;
2474
2475	if (cpus)
2476		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2477			u32 val = i << 16;
2478
2479			for (j = 0; j < 2; ++j) {
2480				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2481				if (cpus[cpu_idx] == 0xff)
2482					cpu_idx = 0;
2483			}
2484			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2485		}
2486
2487	if (rspq)
2488		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2489			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2490				     (i << 16) | rspq[q_idx++]);
2491			if (rspq[q_idx] == 0xffff)
2492				q_idx = 0;
2493		}
2494
2495	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2496}
2497
2498/**
2499 *	t3_tp_set_offload_mode - put TP in NIC/offload mode
2500 *	@adap: the adapter
2501 *	@enable: 1 to select offload mode, 0 for regular NIC
2502 *
2503 *	Switches TP to NIC/offload mode.
2504 */
2505void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2506{
2507	if (is_offload(adap) || !enable)
2508		t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2509				 V_NICMODE(!enable));
2510}
2511
2512/**
2513 *	pm_num_pages - calculate the number of pages of the payload memory
2514 *	@mem_size: the size of the payload memory
2515 *	@pg_size: the size of each payload memory page
2516 *
2517 *	Calculate the number of pages, each of the given size, that fit in a
2518 *	memory of the specified size, respecting the HW requirement that the
2519 *	number of pages must be a multiple of 24.
2520 */
2521static inline unsigned int pm_num_pages(unsigned int mem_size,
2522					unsigned int pg_size)
2523{
2524	unsigned int n = mem_size / pg_size;
2525
2526	return n - n % 24;
2527}
2528
2529#define mem_region(adap, start, size, reg) \
2530	t3_write_reg((adap), A_ ## reg, (start)); \
2531	start += size
2532
2533/**
2534 *	partition_mem - partition memory and configure TP memory settings
2535 *	@adap: the adapter
2536 *	@p: the TP parameters
2537 *
2538 *	Partitions context and payload memory and configures TP's memory
2539 *	registers.
2540 */
2541static void partition_mem(struct adapter *adap, const struct tp_params *p)
2542{
2543	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2544	unsigned int timers = 0, timers_shift = 22;
2545
2546	if (adap->params.rev > 0) {
2547		if (tids <= 16 * 1024) {
2548			timers = 1;
2549			timers_shift = 16;
2550		} else if (tids <= 64 * 1024) {
2551			timers = 2;
2552			timers_shift = 18;
2553		} else if (tids <= 256 * 1024) {
2554			timers = 3;
2555			timers_shift = 20;
2556		}
2557	}
2558
2559	t3_write_reg(adap, A_TP_PMM_SIZE,
2560		     p->chan_rx_size | (p->chan_tx_size >> 16));
2561
2562	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2563	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2564	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2565	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2566			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2567
2568	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2569	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2570	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2571
2572	pstructs = p->rx_num_pgs + p->tx_num_pgs;
2573	/* Add a bit of headroom and make multiple of 24 */
2574	pstructs += 48;
2575	pstructs -= pstructs % 24;
2576	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2577
2578	m = tids * TCB_SIZE;
2579	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2580	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2581	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2582	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2583	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2584	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2585	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2586	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2587
2588	m = (m + 4095) & ~0xfff;
2589	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2590	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2591
2592	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2593	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2594	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2595	if (tids < m)
2596		adap->params.mc5.nservers += m - tids;
2597}
2598
2599static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2600				  u32 val)
2601{
2602	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2603	t3_write_reg(adap, A_TP_PIO_DATA, val);
2604}
2605
2606static void tp_config(struct adapter *adap, const struct tp_params *p)
2607{
2608	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2609		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2610		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2611	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2612		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2613		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2614	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2615		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2616		     V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
2617		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2618	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2619			 F_IPV6ENABLE | F_NICMODE);
2620	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2621	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2622	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2623			 adap->params.rev > 0 ? F_ENABLEESND :
2624			 F_T3A_ENABLEESND);
2625
2626	t3_set_reg_field(adap, A_TP_PC_CONFIG,
2627			 F_ENABLEEPCMDAFULL,
2628			 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
2629			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2630	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2631			 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2632			 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
2633	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2634	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
2635
2636	if (adap->params.rev > 0) {
2637		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2638		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2639				 F_TXPACEAUTO);
2640		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2641		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2642	} else
2643		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2644
2645	if (adap->params.rev == T3_REV_C)
2646		t3_set_reg_field(adap, A_TP_PC_CONFIG,
2647				 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2648				 V_TABLELATENCYDELTA(4));
2649
2650	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2651	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2652	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2653	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2654}
2655
2656/* Desired TP timer resolution in usec */
2657#define TP_TMR_RES 50
2658
2659/* TCP timer values in ms */
2660#define TP_DACK_TIMER 50
2661#define TP_RTO_MIN    250
2662
2663/**
2664 *	tp_set_timers - set TP timing parameters
2665 *	@adap: the adapter to set
2666 *	@core_clk: the core clock frequency in Hz
2667 *
2668 *	Set TP's timing parameters, such as the various timer resolutions and
2669 *	the TCP timer values.
2670 */
2671static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2672{
2673	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2674	unsigned int dack_re = fls(core_clk / 5000) - 1;	/* 200us */
2675	unsigned int tstamp_re = fls(core_clk / 1000);	/* 1ms, at least */
2676	unsigned int tps = core_clk >> tre;
2677
2678	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2679		     V_DELAYEDACKRESOLUTION(dack_re) |
2680		     V_TIMESTAMPRESOLUTION(tstamp_re));
2681	t3_write_reg(adap, A_TP_DACK_TIMER,
2682		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2683	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2684	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2685	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2686	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2687	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2688		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2689		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2690		     V_KEEPALIVEMAX(9));
2691
2692#define SECONDS * tps
2693
2694	t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2695	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2696	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2697	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2698	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2699	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2700	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2701	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2702	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2703
2704#undef SECONDS
2705}
2706
2707/**
2708 *	t3_tp_set_coalescing_size - set receive coalescing size
2709 *	@adap: the adapter
2710 *	@size: the receive coalescing size
2711 *	@psh: whether a set PSH bit should deliver coalesced data
2712 *
2713 *	Set the receive coalescing size and PSH bit handling.
2714 */
2715static int t3_tp_set_coalescing_size(struct adapter *adap,
2716				     unsigned int size, int psh)
2717{
2718	u32 val;
2719
2720	if (size > MAX_RX_COALESCING_LEN)
2721		return -EINVAL;
2722
2723	val = t3_read_reg(adap, A_TP_PARA_REG3);
2724	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2725
2726	if (size) {
2727		val |= F_RXCOALESCEENABLE;
2728		if (psh)
2729			val |= F_RXCOALESCEPSHEN;
2730		size = min(MAX_RX_COALESCING_LEN, size);
2731		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2732			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2733	}
2734	t3_write_reg(adap, A_TP_PARA_REG3, val);
2735	return 0;
2736}
2737
2738/**
2739 *	t3_tp_set_max_rxsize - set the max receive size
2740 *	@adap: the adapter
2741 *	@size: the max receive size
2742 *
2743 *	Set TP's max receive size.  This is the limit that applies when
2744 *	receive coalescing is disabled.
2745 */
2746static void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2747{
2748	t3_write_reg(adap, A_TP_PARA_REG7,
2749		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2750}
2751
2752static void init_mtus(unsigned short mtus[])
2753{
2754	/*
2755	 * See draft-mathis-plpmtud-00.txt for the values.  The min is 88 so
2756	 * it can accommodate max size TCP/IP headers when SACK and timestamps
2757	 * are enabled and still have at least 8 bytes of payload.
2758	 */
2759	mtus[0] = 88;
2760	mtus[1] = 88;
2761	mtus[2] = 256;
2762	mtus[3] = 512;
2763	mtus[4] = 576;
2764	mtus[5] = 1024;
2765	mtus[6] = 1280;
2766	mtus[7] = 1492;
2767	mtus[8] = 1500;
2768	mtus[9] = 2002;
2769	mtus[10] = 2048;
2770	mtus[11] = 4096;
2771	mtus[12] = 4352;
2772	mtus[13] = 8192;
2773	mtus[14] = 9000;
2774	mtus[15] = 9600;
2775}
2776
2777/*
2778 * Initial congestion control parameters.
2779 */
2780static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2781{
2782	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2783	a[9] = 2;
2784	a[10] = 3;
2785	a[11] = 4;
2786	a[12] = 5;
2787	a[13] = 6;
2788	a[14] = 7;
2789	a[15] = 8;
2790	a[16] = 9;
2791	a[17] = 10;
2792	a[18] = 14;
2793	a[19] = 17;
2794	a[20] = 21;
2795	a[21] = 25;
2796	a[22] = 30;
2797	a[23] = 35;
2798	a[24] = 45;
2799	a[25] = 60;
2800	a[26] = 80;
2801	a[27] = 100;
2802	a[28] = 200;
2803	a[29] = 300;
2804	a[30] = 400;
2805	a[31] = 500;
2806
2807	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2808	b[9] = b[10] = 1;
2809	b[11] = b[12] = 2;
2810	b[13] = b[14] = b[15] = b[16] = 3;
2811	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2812	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2813	b[28] = b[29] = 6;
2814	b[30] = b[31] = 7;
2815}
2816
2817/* The minimum additive increment value for the congestion control table */
2818#define CC_MIN_INCR 2U
2819
2820/**
2821 *	t3_load_mtus - write the MTU and congestion control HW tables
2822 *	@adap: the adapter
2823 *	@mtus: the unrestricted values for the MTU table
2824 *	@alpha: the values for the congestion control alpha parameter
2825 *	@beta: the values for the congestion control beta parameter
2826 *	@mtu_cap: the maximum permitted effective MTU
2827 *
2828 *	Write the MTU table with the supplied MTUs capping each at &mtu_cap.
2829 *	Update the high-speed congestion control table with the supplied alpha,
2830 * 	beta, and MTUs.
2831 */
2832void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2833		  unsigned short alpha[NCCTRL_WIN],
2834		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2835{
2836	static const unsigned int avg_pkts[NCCTRL_WIN] = {
2837		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2838		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2839		28672, 40960, 57344, 81920, 114688, 163840, 229376
2840	};
2841
2842	unsigned int i, w;
2843
2844	for (i = 0; i < NMTUS; ++i) {
2845		unsigned int mtu = min(mtus[i], mtu_cap);
2846		unsigned int log2 = fls(mtu);
2847
2848		if (!(mtu & ((1 << log2) >> 2)))	/* round */
2849			log2--;
2850		t3_write_reg(adap, A_TP_MTU_TABLE,
2851			     (i << 24) | (log2 << 16) | mtu);
2852
2853		for (w = 0; w < NCCTRL_WIN; ++w) {
2854			unsigned int inc;
2855
2856			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2857				  CC_MIN_INCR);
2858
2859			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2860				     (w << 16) | (beta[w] << 13) | inc);
2861		}
2862	}
2863}
2864
2865/**
2866 *	t3_tp_get_mib_stats - read TP's MIB counters
2867 *	@adap: the adapter
2868 *	@tps: holds the returned counter values
2869 *
2870 *	Returns the values of TP's MIB counters.
2871 */
2872void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2873{
2874	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2875			 sizeof(*tps) / sizeof(u32), 0);
2876}
2877
2878#define ulp_region(adap, name, start, len) \
2879	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2880	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2881		     (start) + (len) - 1); \
2882	start += len
2883
2884#define ulptx_region(adap, name, start, len) \
2885	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2886	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2887		     (start) + (len) - 1)
2888
2889static void ulp_config(struct adapter *adap, const struct tp_params *p)
2890{
2891	unsigned int m = p->chan_rx_size;
2892
2893	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2894	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2895	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2896	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2897	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2898	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2899	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2900	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2901}
2902
2903/**
2904 *	t3_set_proto_sram - set the contents of the protocol sram
2905 *	@adap: the adapter
2906 *	@data: the protocol image
2907 *
2908 *	Write the contents of the protocol SRAM.
2909 */
2910int t3_set_proto_sram(struct adapter *adap, const u8 *data)
2911{
2912	int i;
2913	const __be32 *buf = (const __be32 *)data;
2914
2915	for (i = 0; i < PROTO_SRAM_LINES; i++) {
2916		t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
2917		t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
2918		t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
2919		t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
2920		t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
2921
2922		t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2923		if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2924			return -EIO;
2925	}
2926	t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
2927
2928	return 0;
2929}
2930
2931void t3_config_trace_filter(struct adapter *adapter,
2932			    const struct trace_params *tp, int filter_index,
2933			    int invert, int enable)
2934{
2935	u32 addr, key[4], mask[4];
2936
2937	key[0] = tp->sport | (tp->sip << 16);
2938	key[1] = (tp->sip >> 16) | (tp->dport << 16);
2939	key[2] = tp->dip;
2940	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2941
2942	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2943	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2944	mask[2] = tp->dip_mask;
2945	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2946
2947	if (invert)
2948		key[3] |= (1 << 29);
2949	if (enable)
2950		key[3] |= (1 << 28);
2951
2952	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2953	tp_wr_indirect(adapter, addr++, key[0]);
2954	tp_wr_indirect(adapter, addr++, mask[0]);
2955	tp_wr_indirect(adapter, addr++, key[1]);
2956	tp_wr_indirect(adapter, addr++, mask[1]);
2957	tp_wr_indirect(adapter, addr++, key[2]);
2958	tp_wr_indirect(adapter, addr++, mask[2]);
2959	tp_wr_indirect(adapter, addr++, key[3]);
2960	tp_wr_indirect(adapter, addr, mask[3]);
2961	t3_read_reg(adapter, A_TP_PIO_DATA);
2962}
2963
2964/**
2965 *	t3_config_sched - configure a HW traffic scheduler
2966 *	@adap: the adapter
2967 *	@kbps: target rate in Kbps
2968 *	@sched: the scheduler index
2969 *
2970 *	Configure a HW scheduler for the target rate
2971 */
2972int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2973{
2974	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2975	unsigned int clk = adap->params.vpd.cclk * 1000;
2976	unsigned int selected_cpt = 0, selected_bpt = 0;
2977
2978	if (kbps > 0) {
2979		kbps *= 125;	/* -> bytes */
2980		for (cpt = 1; cpt <= 255; cpt++) {
2981			tps = clk / cpt;
2982			bpt = (kbps + tps / 2) / tps;
2983			if (bpt > 0 && bpt <= 255) {
2984				v = bpt * tps;
2985				delta = v >= kbps ? v - kbps : kbps - v;
2986				if (delta <= mindelta) {
2987					mindelta = delta;
2988					selected_cpt = cpt;
2989					selected_bpt = bpt;
2990				}
2991			} else if (selected_cpt)
2992				break;
2993		}
2994		if (!selected_cpt)
2995			return -EINVAL;
2996	}
2997	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2998		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2999	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3000	if (sched & 1)
3001		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3002	else
3003		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3004	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3005	return 0;
3006}
3007
3008static int tp_init(struct adapter *adap, const struct tp_params *p)
3009{
3010	int busy = 0;
3011
3012	tp_config(adap, p);
3013	t3_set_vlan_accel(adap, 3, 0);
3014
3015	if (is_offload(adap)) {
3016		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3017		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3018		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3019				       0, 1000, 5);
3020		if (busy)
3021			CH_ERR(adap, "TP initialization timed out\n");
3022	}
3023
3024	if (!busy)
3025		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3026	return busy;
3027}
3028
3029/*
3030 * Perform the bits of HW initialization that are dependent on the Tx
3031 * channels being used.
3032 */
3033static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
3034{
3035	int i;
3036
3037	if (chan_map != 3) {                                 /* one channel */
3038		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3039		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3040		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3041			     (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3042					      F_TPTXPORT1EN | F_PORT1ACTIVE));
3043		t3_write_reg(adap, A_PM1_TX_CFG,
3044			     chan_map == 1 ? 0xffffffff : 0);
3045	} else {                                             /* two channels */
3046		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3047		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3048		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3049			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3050		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3051			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3052			     F_ENFORCEPKT);
3053		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3054		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3055		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3056			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3057		for (i = 0; i < 16; i++)
3058			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3059				     (i << 16) | 0x1010);
3060	}
3061}
3062
3063static int calibrate_xgm(struct adapter *adapter)
3064{
3065	if (uses_xaui(adapter)) {
3066		unsigned int v, i;
3067
3068		for (i = 0; i < 5; ++i) {
3069			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3070			t3_read_reg(adapter, A_XGM_XAUI_IMP);
3071			msleep(1);
3072			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3073			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3074				t3_write_reg(adapter, A_XGM_XAUI_IMP,
3075					     V_XAUIIMP(G_CALIMP(v) >> 2));
3076				return 0;
3077			}
3078		}
3079		CH_ERR(adapter, "MAC calibration failed\n");
3080		return -1;
3081	} else {
3082		t3_write_reg(adapter, A_XGM_RGMII_IMP,
3083			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3084		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3085				 F_XGM_IMPSETUPDATE);
3086	}
3087	return 0;
3088}
3089
3090static void calibrate_xgm_t3b(struct adapter *adapter)
3091{
3092	if (!uses_xaui(adapter)) {
3093		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3094			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3095		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3096		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3097				 F_XGM_IMPSETUPDATE);
3098		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3099				 0);
3100		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3101		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
3102	}
3103}
3104
3105struct mc7_timing_params {
3106	unsigned char ActToPreDly;
3107	unsigned char ActToRdWrDly;
3108	unsigned char PreCyc;
3109	unsigned char RefCyc[5];
3110	unsigned char BkCyc;
3111	unsigned char WrToRdDly;
3112	unsigned char RdToWrDly;
3113};
3114
3115/*
3116 * Write a value to a register and check that the write completed.  These
3117 * writes normally complete in a cycle or two, so one read should suffice.
3118 * The very first read exists to flush the posted write to the device.
3119 */
3120static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3121{
3122	t3_write_reg(adapter, addr, val);
3123	t3_read_reg(adapter, addr);	/* flush */
3124	if (!(t3_read_reg(adapter, addr) & F_BUSY))
3125		return 0;
3126	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3127	return -EIO;
3128}
3129
3130static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3131{
3132	static const unsigned int mc7_mode[] = {
3133		0x632, 0x642, 0x652, 0x432, 0x442
3134	};
3135	static const struct mc7_timing_params mc7_timings[] = {
3136		{12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3137		{12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3138		{12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3139		{9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3140		{9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3141	};
3142
3143	u32 val;
3144	unsigned int width, density, slow, attempts;
3145	struct adapter *adapter = mc7->adapter;
3146	const struct mc7_timing_params *p = &mc7_timings[mem_type];
3147
3148	if (!mc7->size)
3149		return 0;
3150
3151	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3152	slow = val & F_SLOW;
3153	width = G_WIDTH(val);
3154	density = G_DEN(val);
3155
3156	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3157	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
3158	msleep(1);
3159
3160	if (!slow) {
3161		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3162		t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3163		msleep(1);
3164		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3165		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3166			CH_ERR(adapter, "%s MC7 calibration timed out\n",
3167			       mc7->name);
3168			goto out_fail;
3169		}
3170	}
3171
3172	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3173		     V_ACTTOPREDLY(p->ActToPreDly) |
3174		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3175		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3176		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3177
3178	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3179		     val | F_CLKEN | F_TERM150);
3180	t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
3181
3182	if (!slow)
3183		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
3184				 F_DLLENB);
3185	udelay(1);
3186
3187	val = slow ? 3 : 6;
3188	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3189	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3190	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3191	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3192		goto out_fail;
3193
3194	if (!slow) {
3195		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3196		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
3197		udelay(5);
3198	}
3199
3200	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3201	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3202	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3203	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3204		       mc7_mode[mem_type]) ||
3205	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3206	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3207		goto out_fail;
3208
3209	/* clock value is in KHz */
3210	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;	/* ns */
3211	mc7_clock /= 1000000;	/* KHz->MHz, ns->us */
3212
3213	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3214		     F_PERREFEN | V_PREREFDIV(mc7_clock));
3215	t3_read_reg(adapter, mc7->offset + A_MC7_REF);	/* flush */
3216
3217	t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
3218	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3219	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3220	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3221		     (mc7->size << width) - 1);
3222	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3223	t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);	/* flush */
3224
3225	attempts = 50;
3226	do {
3227		msleep(250);
3228		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3229	} while ((val & F_BUSY) && --attempts);
3230	if (val & F_BUSY) {
3231		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3232		goto out_fail;
3233	}
3234
3235	/* Enable normal memory accesses. */
3236	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
3237	return 0;
3238
3239out_fail:
3240	return -1;
3241}
3242
3243static void config_pcie(struct adapter *adap)
3244{
3245	static const u16 ack_lat[4][6] = {
3246		{237, 416, 559, 1071, 2095, 4143},
3247		{128, 217, 289, 545, 1057, 2081},
3248		{73, 118, 154, 282, 538, 1050},
3249		{67, 107, 86, 150, 278, 534}
3250	};
3251	static const u16 rpl_tmr[4][6] = {
3252		{711, 1248, 1677, 3213, 6285, 12429},
3253		{384, 651, 867, 1635, 3171, 6243},
3254		{219, 354, 462, 846, 1614, 3150},
3255		{201, 321, 258, 450, 834, 1602}
3256	};
3257
3258	u16 val, devid;
3259	unsigned int log2_width, pldsize;
3260	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3261
3262	pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL, &val);
3263	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3264
3265	pci_read_config_word(adap->pdev, 0x2, &devid);
3266	if (devid == 0x37) {
3267		pcie_capability_write_word(adap->pdev, PCI_EXP_DEVCTL,
3268					   val & ~PCI_EXP_DEVCTL_READRQ &
3269					   ~PCI_EXP_DEVCTL_PAYLOAD);
3270		pldsize = 0;
3271	}
3272
3273	pcie_capability_read_word(adap->pdev, PCI_EXP_LNKCTL, &val);
3274
3275	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3276	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3277	    G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3278	log2_width = fls(adap->params.pci.width) - 1;
3279	acklat = ack_lat[log2_width][pldsize];
3280	if (val & PCI_EXP_LNKCTL_ASPM_L0S)	/* check LOsEnable */
3281		acklat += fst_trn_tx * 4;
3282	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3283
3284	if (adap->params.rev == 0)
3285		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3286				 V_T3A_ACKLAT(M_T3A_ACKLAT),
3287				 V_T3A_ACKLAT(acklat));
3288	else
3289		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3290				 V_ACKLAT(acklat));
3291
3292	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3293			 V_REPLAYLMT(rpllmt));
3294
3295	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3296	t3_set_reg_field(adap, A_PCIE_CFG, 0,
3297			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
3298			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
3299}
3300
3301/*
3302 * Initialize and configure T3 HW modules.  This performs the
3303 * initialization steps that need to be done once after a card is reset.
3304 * MAC and PHY initialization is handled separarely whenever a port is enabled.
3305 *
3306 * fw_params are passed to FW and their value is platform dependent.  Only the
3307 * top 8 bits are available for use, the rest must be 0.
3308 */
3309int t3_init_hw(struct adapter *adapter, u32 fw_params)
3310{
3311	int err = -EIO, attempts, i;
3312	const struct vpd_params *vpd = &adapter->params.vpd;
3313
3314	if (adapter->params.rev > 0)
3315		calibrate_xgm_t3b(adapter);
3316	else if (calibrate_xgm(adapter))
3317		goto out_err;
3318
3319	if (vpd->mclk) {
3320		partition_mem(adapter, &adapter->params.tp);
3321
3322		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3323		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3324		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3325		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3326				adapter->params.mc5.nfilters,
3327				adapter->params.mc5.nroutes))
3328			goto out_err;
3329
3330		for (i = 0; i < 32; i++)
3331			if (clear_sge_ctxt(adapter, i, F_CQ))
3332				goto out_err;
3333	}
3334
3335	if (tp_init(adapter, &adapter->params.tp))
3336		goto out_err;
3337
3338	t3_tp_set_coalescing_size(adapter,
3339				  min(adapter->params.sge.max_pkt_size,
3340				      MAX_RX_COALESCING_LEN), 1);
3341	t3_tp_set_max_rxsize(adapter,
3342			     min(adapter->params.sge.max_pkt_size, 16384U));
3343	ulp_config(adapter, &adapter->params.tp);
3344
3345	if (is_pcie(adapter))
3346		config_pcie(adapter);
3347	else
3348		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3349				 F_DMASTOPEN | F_CLIDECEN);
3350
3351	if (adapter->params.rev == T3_REV_C)
3352		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3353				 F_CFG_CQE_SOP_MASK);
3354
3355	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3356	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3357	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3358	chan_init_hw(adapter, adapter->params.chan_map);
3359	t3_sge_init(adapter, &adapter->params.sge);
3360	t3_set_reg_field(adapter, A_PL_RST, 0, F_FATALPERREN);
3361
3362	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
3363
3364	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3365	t3_write_reg(adapter, A_CIM_BOOT_CFG,
3366		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3367	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */
3368
3369	attempts = 100;
3370	do {			/* wait for uP to initialize */
3371		msleep(20);
3372	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3373	if (!attempts) {
3374		CH_ERR(adapter, "uP initialization timed out\n");
3375		goto out_err;
3376	}
3377
3378	err = 0;
3379out_err:
3380	return err;
3381}
3382
3383/**
3384 *	get_pci_mode - determine a card's PCI mode
3385 *	@adapter: the adapter
3386 *	@p: where to store the PCI settings
3387 *
3388 *	Determines a card's PCI mode and associated parameters, such as speed
3389 *	and width.
3390 */
3391static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
3392{
3393	static unsigned short speed_map[] = { 33, 66, 100, 133 };
3394	u32 pci_mode;
3395
3396	if (pci_is_pcie(adapter->pdev)) {
3397		u16 val;
3398
3399		p->variant = PCI_VARIANT_PCIE;
3400		pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
3401		p->width = (val >> 4) & 0x3f;
3402		return;
3403	}
3404
3405	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3406	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3407	p->width = (pci_mode & F_64BIT) ? 64 : 32;
3408	pci_mode = G_PCIXINITPAT(pci_mode);
3409	if (pci_mode == 0)
3410		p->variant = PCI_VARIANT_PCI;
3411	else if (pci_mode < 4)
3412		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3413	else if (pci_mode < 8)
3414		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3415	else
3416		p->variant = PCI_VARIANT_PCIX_266_MODE2;
3417}
3418
3419/**
3420 *	init_link_config - initialize a link's SW state
3421 *	@lc: structure holding the link state
3422 *	@caps: information about the current card
3423 *
3424 *	Initializes the SW state maintained for each link, including the link's
3425 *	capabilities and default speed/duplex/flow-control/autonegotiation
3426 *	settings.
3427 */
3428static void init_link_config(struct link_config *lc, unsigned int caps)
3429{
3430	lc->supported = caps;
3431	lc->requested_speed = lc->speed = SPEED_INVALID;
3432	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3433	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3434	if (lc->supported & SUPPORTED_Autoneg) {
3435		lc->advertising = lc->supported;
3436		lc->autoneg = AUTONEG_ENABLE;
3437		lc->requested_fc |= PAUSE_AUTONEG;
3438	} else {
3439		lc->advertising = 0;
3440		lc->autoneg = AUTONEG_DISABLE;
3441	}
3442}
3443
3444/**
3445 *	mc7_calc_size - calculate MC7 memory size
3446 *	@cfg: the MC7 configuration
3447 *
3448 *	Calculates the size of an MC7 memory in bytes from the value of its
3449 *	configuration register.
3450 */
3451static unsigned int mc7_calc_size(u32 cfg)
3452{
3453	unsigned int width = G_WIDTH(cfg);
3454	unsigned int banks = !!(cfg & F_BKS) + 1;
3455	unsigned int org = !!(cfg & F_ORG) + 1;
3456	unsigned int density = G_DEN(cfg);
3457	unsigned int MBs = ((256 << density) * banks) / (org << width);
3458
3459	return MBs << 20;
3460}
3461
3462static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3463		     unsigned int base_addr, const char *name)
3464{
3465	u32 cfg;
3466
3467	mc7->adapter = adapter;
3468	mc7->name = name;
3469	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3470	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3471	mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3472	mc7->width = G_WIDTH(cfg);
3473}
3474
3475static void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3476{
3477	u16 devid;
3478
3479	mac->adapter = adapter;
3480	pci_read_config_word(adapter->pdev, 0x2, &devid);
3481
3482	if (devid == 0x37 && !adapter->params.vpd.xauicfg[1])
3483		index = 0;
3484	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3485	mac->nucast = 1;
3486
3487	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3488		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3489			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3490		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3491				 F_ENRGMII, 0);
3492	}
3493}
3494
3495static void early_hw_init(struct adapter *adapter,
3496			  const struct adapter_info *ai)
3497{
3498	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3499
3500	mi1_init(adapter, ai);
3501	t3_write_reg(adapter, A_I2C_CFG,	/* set for 80KHz */
3502		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3503	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3504		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3505	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3506	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
3507
3508	if (adapter->params.rev == 0 || !uses_xaui(adapter))
3509		val |= F_ENRGMII;
3510
3511	/* Enable MAC clocks so we can access the registers */
3512	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3513	t3_read_reg(adapter, A_XGM_PORT_CFG);
3514
3515	val |= F_CLKDIVRESET_;
3516	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3517	t3_read_reg(adapter, A_XGM_PORT_CFG);
3518	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3519	t3_read_reg(adapter, A_XGM_PORT_CFG);
3520}
3521
3522/*
3523 * Reset the adapter.
3524 * Older PCIe cards lose their config space during reset, PCI-X
3525 * ones don't.
3526 */
3527int t3_reset_adapter(struct adapter *adapter)
3528{
3529	int i, save_and_restore_pcie =
3530	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3531	uint16_t devid = 0;
3532
3533	if (save_and_restore_pcie)
3534		pci_save_state(adapter->pdev);
3535	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3536
3537	/*
3538	 * Delay. Give Some time to device to reset fully.
3539	 * XXX The delay time should be modified.
3540	 */
3541	for (i = 0; i < 10; i++) {
3542		msleep(50);
3543		pci_read_config_word(adapter->pdev, 0x00, &devid);
3544		if (devid == 0x1425)
3545			break;
3546	}
3547
3548	if (devid != 0x1425)
3549		return -1;
3550
3551	if (save_and_restore_pcie)
3552		pci_restore_state(adapter->pdev);
3553	return 0;
3554}
3555
3556static int init_parity(struct adapter *adap)
3557{
3558	int i, err, addr;
3559
3560	if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3561		return -EBUSY;
3562
3563	for (err = i = 0; !err && i < 16; i++)
3564		err = clear_sge_ctxt(adap, i, F_EGRESS);
3565	for (i = 0xfff0; !err && i <= 0xffff; i++)
3566		err = clear_sge_ctxt(adap, i, F_EGRESS);
3567	for (i = 0; !err && i < SGE_QSETS; i++)
3568		err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3569	if (err)
3570		return err;
3571
3572	t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3573	for (i = 0; i < 4; i++)
3574		for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3575			t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3576				     F_IBQDBGWR | V_IBQDBGQID(i) |
3577				     V_IBQDBGADDR(addr));
3578			err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3579					      F_IBQDBGBUSY, 0, 2, 1);
3580			if (err)
3581				return err;
3582		}
3583	return 0;
3584}
3585
3586/*
3587 * Initialize adapter SW state for the various HW modules, set initial values
3588 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3589 * interface.
3590 */
3591int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
3592		    int reset)
3593{
3594	int ret;
3595	unsigned int i, j = -1;
3596
3597	get_pci_mode(adapter, &adapter->params.pci);
3598
3599	adapter->params.info = ai;
3600	adapter->params.nports = ai->nports0 + ai->nports1;
3601	adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
3602	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3603	/*
3604	 * We used to only run the "adapter check task" once a second if
3605	 * we had PHYs which didn't support interrupts (we would check
3606	 * their link status once a second).  Now we check other conditions
3607	 * in that routine which could potentially impose a very high
3608	 * interrupt load on the system.  As such, we now always scan the
3609	 * adapter state once a second ...
3610	 */
3611	adapter->params.linkpoll_period = 10;
3612	adapter->params.stats_update_period = is_10G(adapter) ?
3613	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3614	adapter->params.pci.vpd_cap_addr =
3615	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3616	if (!adapter->params.pci.vpd_cap_addr)
3617		return -ENODEV;
3618	ret = get_vpd_params(adapter, &adapter->params.vpd);
3619	if (ret < 0)
3620		return ret;
3621
3622	if (reset && t3_reset_adapter(adapter))
3623		return -1;
3624
3625	t3_sge_prep(adapter, &adapter->params.sge);
3626
3627	if (adapter->params.vpd.mclk) {
3628		struct tp_params *p = &adapter->params.tp;
3629
3630		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3631		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3632		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3633
3634		p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
3635		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3636		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3637		p->cm_size = t3_mc7_size(&adapter->cm);
3638		p->chan_rx_size = p->pmrx_size / 2;	/* only 1 Rx channel */
3639		p->chan_tx_size = p->pmtx_size / p->nchan;
3640		p->rx_pg_size = 64 * 1024;
3641		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3642		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3643		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3644		p->ntimer_qs = p->cm_size >= (128 << 20) ||
3645		    adapter->params.rev > 0 ? 12 : 6;
3646	}
3647
3648	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3649				  t3_mc7_size(&adapter->pmtx) &&
3650				  t3_mc7_size(&adapter->cm);
3651
3652	if (is_offload(adapter)) {
3653		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3654		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3655		    DEFAULT_NFILTERS : 0;
3656		adapter->params.mc5.nroutes = 0;
3657		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3658
3659		init_mtus(adapter->params.mtus);
3660		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3661	}
3662
3663	early_hw_init(adapter, ai);
3664	ret = init_parity(adapter);
3665	if (ret)
3666		return ret;
3667
3668	for_each_port(adapter, i) {
3669		u8 hw_addr[6];
3670		const struct port_type_info *pti;
3671		struct port_info *p = adap2pinfo(adapter, i);
3672
3673		while (!adapter->params.vpd.port_type[++j])
3674			;
3675
3676		pti = &port_types[adapter->params.vpd.port_type[j]];
3677		if (!pti->phy_prep) {
3678			CH_ALERT(adapter, "Invalid port type index %d\n",
3679				 adapter->params.vpd.port_type[j]);
3680			return -EINVAL;
3681		}
3682
3683		p->phy.mdio.dev = adapter->port[i];
3684		ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3685				    ai->mdio_ops);
3686		if (ret)
3687			return ret;
3688		mac_prep(&p->mac, adapter, j);
3689
3690		/*
3691		 * The VPD EEPROM stores the base Ethernet address for the
3692		 * card.  A port's address is derived from the base by adding
3693		 * the port's index to the base's low octet.
3694		 */
3695		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3696		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3697
3698		eth_hw_addr_set(adapter->port[i], hw_addr);
3699		init_link_config(&p->link_config, p->phy.caps);
3700		p->phy.ops->power_down(&p->phy, 1);
3701
3702		/*
3703		 * If the PHY doesn't support interrupts for link status
3704		 * changes, schedule a scan of the adapter links at least
3705		 * once a second.
3706		 */
3707		if (!(p->phy.caps & SUPPORTED_IRQ) &&
3708		    adapter->params.linkpoll_period > 10)
3709			adapter->params.linkpoll_period = 10;
3710	}
3711
3712	return 0;
3713}
3714
3715void t3_led_ready(struct adapter *adapter)
3716{
3717	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3718			 F_GPIO0_OUT_VAL);
3719}
3720
3721int t3_replay_prep_adapter(struct adapter *adapter)
3722{
3723	const struct adapter_info *ai = adapter->params.info;
3724	unsigned int i, j = -1;
3725	int ret;
3726
3727	early_hw_init(adapter, ai);
3728	ret = init_parity(adapter);
3729	if (ret)
3730		return ret;
3731
3732	for_each_port(adapter, i) {
3733		const struct port_type_info *pti;
3734		struct port_info *p = adap2pinfo(adapter, i);
3735
3736		while (!adapter->params.vpd.port_type[++j])
3737			;
3738
3739		pti = &port_types[adapter->params.vpd.port_type[j]];
3740		ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL);
3741		if (ret)
3742			return ret;
3743		p->phy.ops->power_down(&p->phy, 1);
3744	}
3745
3746	return 0;
3747}
3748
3749