/* niu.c: Neptune ethernet driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/log2.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/io.h>
#include <linux/of_device.h>

#include "niu.h"

#define DRV_MODULE_NAME		"niu"
#define DRV_MODULE_VERSION	"1.1"
#define DRV_MODULE_RELDATE	"Apr 22, 2010"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("NIU ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

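/* Fallback 64-bit MMIO accessors for platforms that lack native
 * readq/writeq.  Each 64-bit access is performed as two 32-bit
 * accesses (low word at the base offset, high word at +4), so these
 * are not atomic with respect to the hardware.
 */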
#ifndef readq
static u64 readq(void __iomem *reg)
{
	return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
}

static void writeq(u64 val, void __iomem *reg)
{
	writel(val & 0xffffffff, reg);
	writel(val >> 32, reg + 0x4UL);
}
#endif

static DEFINE_PCI_DEVICE_TABLE(niu_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
	{}
};

MODULE_DEVICE_TABLE(pci, niu_pci_tbl);

#define NIU_TX_TIMEOUT			(5 * HZ)

#define nr64(reg)		readq(np->regs + (reg))
#define nw64(reg, val)		writeq((val), np->regs + (reg))

#define nr64_mac(reg)		readq(np->mac_regs + (reg))
#define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))

#define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
#define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))

#define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
#define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))

#define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
#define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))

#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int niu_debug;
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");

#define niu_lock_parent(np, flags) \
	spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
	spin_unlock_irqrestore(&np->parent->lock, flags)

static int serdes_init_10g_serdes(struct niu *np);

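/* Poll a MAC register until the given bits read back as clear,
 * waiting 'delay' microseconds between reads for at most 'limit'
 * iterations.  Returns 0 on success or -ENODEV on timeout.
 */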
static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_mac(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;

	nw64_mac(reg, bits);
	err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_mac(reg));
	return err;
}

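/* Wrappers around the __niu_*wait* helpers that reject a non-positive
 * LIMIT or a negative DELAY at compile time; both arguments must be
 * constant expressions at every call site for BUILD_BUG_ON to work.
 */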
#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_ipp(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;
	u64 val;

	val = nr64_ipp(reg);
	val |= bits;
	nw64_ipp(reg, val);

	err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_ipp(reg));
	return err;
}

#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
				 u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
})

static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
				    u64 bits, int limit, int delay,
				    const char *reg_name)
{
	int err;

	nw64(reg, bits);
	err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64(reg));
	return err;
}

#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

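/* Arm (or disarm) interrupts for one logical device group by writing
 * the group's timer value, plus the ARM bit when 'on' is set, into
 * its LDG_IMGMT register.
 */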
static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
{
	u64 val = (u64) lp->timer;

	if (on)
		val |= LDG_IMGMT_ARM;

	nw64(LDG_IMGMT(lp->ldg_num), val);
}

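/* Mask or unmask the interrupt for a single logical device number.
 * The per-LDN mask bits are split across two register banks: LD_IM0
 * covers LDNs 0-63 and LD_IM1 covers the rest.
 */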
static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
{
	unsigned long mask_reg, bits;
	u64 val;

	if (ldn < 0 || ldn > LDN_MAX)
		return -EINVAL;

	if (ldn < 64) {
		mask_reg = LD_IM0(ldn);
		bits = LD_IM0_MASK;
	} else {
		mask_reg = LD_IM1(ldn - 64);
		bits = LD_IM1_MASK;
	}

	val = nr64(mask_reg);
	if (on)
		val &= ~bits;
	else
		val |= bits;
	nw64(mask_reg, val);

	return 0;
}

static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
{
	struct niu_parent *parent = np->parent;
	int i;

	for (i = 0; i <= LDN_MAX; i++) {
		int err;

		if (parent->ldg_map[i] != lp->ldg_num)
			continue;

		err = niu_ldn_irq_enable(np, i, on);
		if (err)
			return err;
	}
	return 0;
}

static int niu_enable_interrupts(struct niu *np, int on)
{
	int i;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];
		int err;

		err = niu_enable_ldn_in_ldg(np, lp, on);
		if (err)
			return err;
	}
	for (i = 0; i < np->num_ldg; i++)
		niu_ldg_rearm(np, &np->ldg[i], on);

	return 0;
}

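/* The parent's port_phy word packs a 2-bit port type per port;
 * phy_encode() shifts a type into the slot for the given port and
 * phy_decode() extracts it again.
 */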
static u32 phy_encode(u32 type, int port)
{
	return (type << (port * 2));
}

static u32 phy_decode(u32 val, int port)
{
	return (val >> (port * 2)) & PORT_TYPE_MASK;
}

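/* Wait for the MIF to finish the MDIO frame currently in flight:
 * poll MIF_FRAME_OUTPUT until the turnaround bit is set, then return
 * the frame's 16-bit data field, or -ENODEV on timeout.
 */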
static int mdio_wait(struct niu *np)
{
	int limit = 1000;
	u64 val;

	while (--limit > 0) {
		val = nr64(MIF_FRAME_OUTPUT);
		if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
			return val & MIF_FRAME_OUTPUT_DATA;

		udelay(10);
	}

	return -ENODEV;
}

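/* Clause-45 style MDIO access: each read or write is a two-frame
 * sequence, an address frame selecting the register within the
 * device followed by the actual read or write frame.
 */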
static int mdio_read(struct niu *np, int port, int dev, int reg)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
	return mdio_wait(np);
}

static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}

static int mii_read(struct niu *np, int port, int reg)
{
	nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
	return mdio_wait(np);
}

static int mii_write(struct niu *np, int port, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}

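/* The 32-bit ESR2 per-lane TX/RX configuration words are split
 * across pairs of 16-bit MDIO registers: the _L register holds the
 * low half and the _H register the high half.
 */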
static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_TX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_TX_CFG_H(channel),
				 val >> 16);
	return err;
}

static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_RX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_RX_CFG_H(channel),
				 val >> 16);
	return err;
}

/* Mode is always 10G fiber.  */
static int serdes_init_niu_10g_fiber(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg;
	unsigned long i;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		int err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		int err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	return 0;
}

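/* Bring the on-chip SERDES up in 1G mode: enable the PLL at the 8x
 * multiplier, program all four lanes at half rate, then poll
 * ESR_INT_SIGNALS until the port reports both signal-ready and
 * signal-detect.
 */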
static int serdes_init_niu_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 pll_cfg, pll_sts;
	int max_retry = 100;
	u64 uninitialized_var(sig), mask, val;
	u32 tx_cfg, rx_cfg;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
		  PLL_TX_CFG_RATE_HALF);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_RATE_HALF);

	if (np->port == 0)
		rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 1G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
			   np->port, __func__);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
			   np->port, __func__);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}

	return 0;
}

static int serdes_init_niu_10g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
	int max_retry = 100;
	u64 uninitialized_var(sig), mask, val;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 10G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
			   np->port, __func__);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
			   np->port, __func__);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	/* check if serdes is ready */

	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n",
			np->port, (int)(sig & mask), (int)val);

		/* 10G failed, try initializing at 1G */
		err = serdes_init_niu_1g_serdes(np);
		if (!err) {
			np->flags &= ~NIU_FLAGS_10G;
			np->mac_xcvr = MAC_XCVR_PCS;
		} else {
			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
				   np->port);
			return -ENODEV;
		}
	}
	return 0;
}

static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_CTRL_H(chan));
		if (err >= 0)
			*val |= ((err & 0xffff) << 16);
		err = 0;
	}
	return err;
}

static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_GLUE_CTRL0_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_GLUE_CTRL0_H(chan));
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_read_reset(struct niu *np, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_RXTX_RESET_CTRL_L);
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_RESET_CTRL_H);
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_CTRL_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_RXTX_CTRL_H(chan), (val >> 16));
	return err;
}

static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_GLUE_CTRL0_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_GLUE_CTRL0_H(chan), (val >> 16));
	return err;
}

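/* Pulse the ESR RX/TX reset controls (assert, then deassert, with
 * 200us settling delays between steps) and verify that the reset
 * register reads back as zero afterwards.
 */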
static int esr_reset(struct niu *np)
{
	u32 uninitialized_var(reset);
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0x0000);
	if (err)
		return err;
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0x0000);
	if (err)
		return err;
	udelay(200);

	err = esr_read_reset(np, &reset);
	if (err)
		return err;
	if (reset != 0) {
		netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n",
			   np->port, reset);
		return -ENODEV;
	}

	return 0;
}

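/* 10G SERDES bring-up via the ENET_SERDES control/test registers:
 * program per-lane RX/TX and glue parameters on all four lanes,
 * reset the ESR, then require the port's full set of ready/detect
 * bits in ESR_INT_SIGNALS (tolerating absence only for hotplug
 * PHYs).
 */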
static int serdes_init_10g(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;

	switch (np->port) {
	case 0:
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		break;
	case 1:
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	err = esr_reset(np);
	if (err)
		return err;

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
			np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
			return 0;
		}
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}
	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
		np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
	return 0;
}

static int serdes_init_1g(struct niu *np)
{
	u64 val;

	val = nr64(ENET_SERDES_1_PLL_CFG);
	val &= ~ENET_SERDES_PLL_FBDIV2;
	switch (np->port) {
	case 0:
		val |= ENET_SERDES_PLL_HRATE0;
		break;
	case 1:
		val |= ENET_SERDES_PLL_HRATE1;
		break;
	case 2:
		val |= ENET_SERDES_PLL_HRATE2;
		break;
	case 3:
		val |= ENET_SERDES_PLL_HRATE3;
		break;
	default:
		return -EINVAL;
	}
	nw64(ENET_SERDES_1_PLL_CFG, val);

	return 0;
}

static int serdes_init_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;
	u64 reset_val, val_rd;

	val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
		ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
		ENET_SERDES_PLL_FBDIV0;
	switch (np->port) {
	case 0:
		reset_val = ENET_SERDES_RESET_0;
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		pll_cfg = ENET_SERDES_0_PLL_CFG;
		break;
	case 1:
		reset_val = ENET_SERDES_RESET_1;
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		pll_cfg = ENET_SERDES_1_PLL_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ENET_SERDES_RESET, reset_val);
	mdelay(20);
	val_rd = nr64(ENET_SERDES_RESET);
	val_rd &= ~reset_val;
	nw64(pll_cfg, val);
	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);
	nw64(ENET_SERDES_RESET, val_rd);
	mdelay(2000);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}

	return 0;
}

static int link_status_1g_serdes(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	int link_up;
	u64 val;
	u16 current_speed;
	unsigned long flags;
	u8 current_duplex;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	val = nr64_pcs(PCS_MII_STAT);

	if (val & PCS_MII_STAT_LINK_STATUS) {
		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}

	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return 0;
}

static int link_status_10g_serdes(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	struct niu_link_config *lp = &np->link_config;
	int link_up = 0;
	int link_ok = 1;
	u64 val, val2;
	u16 current_speed;
	u8 current_duplex;

	if (!(np->flags & NIU_FLAGS_10G))
		return link_status_1g_serdes(np, link_up_p);

	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	spin_lock_irqsave(&np->lock, flags);

	val = nr64_xpcs(XPCS_STATUS(0));
	val2 = nr64_mac(XMAC_INTER2);
	if (val2 & 0x01000000)
		link_ok = 0;

	if ((val & 0x1000ULL) && link_ok) {
		link_up = 1;
		current_speed = SPEED_10000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);
	*link_up_p = link_up;
	return 0;
}

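/* Read the standard MII status and advertisement registers and fold
 * them into link_config: the supported and advertised feature masks
 * plus the negotiated (or forced) speed and duplex.  Callers take
 * np->lock around this.
 */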
static int link_status_mii(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	int err;
	int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus;
	int supported, advertising, active_speed, active_duplex;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (unlikely(err < 0))
		return err;
	bmcr = err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (unlikely(err < 0))
		return err;
	bmsr = err;

	err = mii_read(np, np->phy_addr, MII_ADVERTISE);
	if (unlikely(err < 0))
		return err;
	advert = err;

	err = mii_read(np, np->phy_addr, MII_LPA);
	if (unlikely(err < 0))
		return err;
	lpa = err;

	if (likely(bmsr & BMSR_ESTATEN)) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (unlikely(err < 0))
			return err;
		estatus = err;

		err = mii_read(np, np->phy_addr, MII_CTRL1000);
		if (unlikely(err < 0))
			return err;
		ctrl1000 = err;

		err = mii_read(np, np->phy_addr, MII_STAT1000);
		if (unlikely(err < 0))
			return err;
		stat1000 = err;
	} else
		estatus = ctrl1000 = stat1000 = 0;

	supported = 0;
	if (bmsr & BMSR_ANEGCAPABLE)
		supported |= SUPPORTED_Autoneg;
	if (bmsr & BMSR_10HALF)
		supported |= SUPPORTED_10baseT_Half;
	if (bmsr & BMSR_10FULL)
		supported |= SUPPORTED_10baseT_Full;
	if (bmsr & BMSR_100HALF)
		supported |= SUPPORTED_100baseT_Half;
	if (bmsr & BMSR_100FULL)
		supported |= SUPPORTED_100baseT_Full;
	if (estatus & ESTATUS_1000_THALF)
		supported |= SUPPORTED_1000baseT_Half;
	if (estatus & ESTATUS_1000_TFULL)
		supported |= SUPPORTED_1000baseT_Full;
	lp->supported = supported;

	advertising = 0;
	if (advert & ADVERTISE_10HALF)
		advertising |= ADVERTISED_10baseT_Half;
	if (advert & ADVERTISE_10FULL)
		advertising |= ADVERTISED_10baseT_Full;
	if (advert & ADVERTISE_100HALF)
		advertising |= ADVERTISED_100baseT_Half;
	if (advert & ADVERTISE_100FULL)
		advertising |= ADVERTISED_100baseT_Full;
	if (ctrl1000 & ADVERTISE_1000HALF)
		advertising |= ADVERTISED_1000baseT_Half;
	if (ctrl1000 & ADVERTISE_1000FULL)
		advertising |= ADVERTISED_1000baseT_Full;

	if (bmcr & BMCR_ANENABLE) {
		int neg, neg1000;

		lp->active_autoneg = 1;
		advertising |= ADVERTISED_Autoneg;

		neg = advert & lpa;
		neg1000 = (ctrl1000 << 2) & stat1000;

		if (neg1000 & (LPA_1000FULL | LPA_1000HALF))
			active_speed = SPEED_1000;
		else if (neg & LPA_100)
			active_speed = SPEED_100;
		else if (neg & (LPA_10HALF | LPA_10FULL))
			active_speed = SPEED_10;
		else
			active_speed = SPEED_INVALID;

		if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX))
			active_duplex = DUPLEX_FULL;
		else if (active_speed != SPEED_INVALID)
			active_duplex = DUPLEX_HALF;
		else
			active_duplex = DUPLEX_INVALID;
	} else {
		lp->active_autoneg = 0;

		if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100))
			active_speed = SPEED_1000;
		else if (bmcr & BMCR_SPEED100)
			active_speed = SPEED_100;
		else
			active_speed = SPEED_10;

		if (bmcr & BMCR_FULLDPLX)
			active_duplex = DUPLEX_FULL;
		else
			active_duplex = DUPLEX_HALF;
	}

	lp->active_advertising = advertising;
	lp->active_speed = active_speed;
	lp->active_duplex = active_duplex;
	*link_up_p = !!(bmsr & BMSR_LSTATUS);

	return 0;
}

static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	u16 current_speed, bmsr;
	unsigned long flags;
	u8 current_duplex;
	int err, link_up;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		goto out;

	bmsr = err;
	if (bmsr & BMSR_LSTATUS) {
		u16 adv, lpa, common, estat;

		err = mii_read(np, np->phy_addr, MII_ADVERTISE);
		if (err < 0)
			goto out;
		adv = err;

		err = mii_read(np, np->phy_addr, MII_LPA);
		if (err < 0)
			goto out;
		lpa = err;

		common = adv & lpa;

		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			goto out;
		estat = err;
		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	err = 0;

out:
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return err;
}

static int link_status_1g(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&np->lock, flags);

	err = link_status_mii(np, link_up_p);
	lp->supported |= SUPPORTED_TP;
	lp->active_advertising |= ADVERTISED_TP;

	spin_unlock_irqrestore(&np->lock, flags);
	return err;
}

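/* Soft-reset the Broadcom 870x PHY: set BMCR_RESET through the PHYXS
 * device and poll for the bit to self-clear, giving up after 1000
 * reads.
 */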
static int bcm8704_reset(struct niu *np)
{
	int err, limit;

	err = mdio_read(np, np->phy_addr,
			BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
	if (err < 0 || err == 0xffff)
		return err;
	err |= BMCR_RESET;
	err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		err = mdio_read(np, np->phy_addr,
				BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n",
			   np->port, (err & 0xffff));
		return -ENODEV;
	}
	return 0;
}

/* When written, certain PHY registers need to be read back twice
 * in order for the bits to settle properly.
 */
static int bcm8704_user_dev3_readback(struct niu *np, int reg)
{
	int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	return 0;
}

static int bcm8706_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err |= USER_ODIG_CTRL_RESV2;
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}

static int bcm8704_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
			 (USER_CONTROL_OPTXRST_LVL |
			  USER_CONTROL_OPBIASFLT_LVL |
			  USER_CONTROL_OBTMPFLT_LVL |
			  USER_CONTROL_OPPRFLT_LVL |
			  USER_CONTROL_OPTXFLT_LVL |
			  USER_CONTROL_OPRXLOS_LVL |
			  USER_CONTROL_OPRXFLT_LVL |
			  USER_CONTROL_OPTXON_LVL |
			  (0x3f << USER_CONTROL_RES1_SHIFT)));
	if (err)
		return err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
			 (USER_PMD_TX_CTL_XFP_CLKEN |
			  (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
			  (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
			  USER_PMD_TX_CTL_TSCK_LPWREN));
	if (err)
		return err;

	err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
	if (err)
		return err;
	err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}

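/* LED handling for the Marvell 88X2011: the activity LED behavior
 * and blink rate live in the USER_DEV2 LED control registers and are
 * updated read-modify-write.
 */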
static int mrvl88x2011_act_led(struct niu *np, int val)
{
	int	err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_8_TO_11_CTL);
	if (err < 0)
		return err;

	err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT, MRVL88X2011_LED_CTL_MASK);
	err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT, val);

	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			  MRVL88X2011_LED_8_TO_11_CTL, err);
}

static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
{
	int	err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_BLINK_CTL);
	if (err >= 0) {
		err &= ~MRVL88X2011_LED_BLKRATE_MASK;
		err |= (rate << 4);

		err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
				 MRVL88X2011_LED_BLINK_CTL, err);
	}

	return err;
}

static int xcvr_init_10g_mrvl88x2011(struct niu *np)
{
	int	err;

	/* Set LED functions */
	err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
	if (err)
		return err;

	/* led activity */
	err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_GENERAL_CTL);
	if (err < 0)
		return err;

	err |= MRVL88X2011_ENA_XFPREFCLK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			 MRVL88X2011_GENERAL_CTL, err);
	if (err < 0)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_CTL_1);
	if (err < 0)
		return err;

	if (np->link_config.loopback_mode == LOOPBACK_MAC)
		err |= MRVL88X2011_LOOPBACK;
	else
		err &= ~MRVL88X2011_LOOPBACK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			 MRVL88X2011_PMA_PMD_CTL_1, err);
	if (err < 0)
		return err;

	/* Enable PMD  */
	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			  MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
}

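/* Diagnostic pass over the Broadcom 870x: log a few PMA/PMD, user,
 * and PHYXS status registers, then use the analog status and TX
 * alarm values to flag a disconnected/bad cable or a bad/missing
 * optical module.  Those two registers are read twice each since
 * these user-device registers need a double read to settle (see
 * bcm8704_user_dev3_readback above).
 */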
static int xcvr_diag_bcm870x(struct niu *np)
{
	u16 analog_stat0, tx_alarm_status;
	int err = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			MII_STAT1000);
	if (err < 0)
		return err;
	pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
	if (err < 0)
		return err;
	pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			MII_NWAYTEST);
	if (err < 0)
		return err;
	pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	analog_stat0 = err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	tx_alarm_status = err;

	if (analog_stat0 != 0x03fc) {
		if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
			pr_info("Port %u cable not connected or bad cable\n",
				np->port);
		} else if (analog_stat0 == 0x639c) {
			pr_info("Port %u optical module is bad or missing\n",
				np->port);
		}
	}

	return 0;
}

static int xcvr_10g_set_lb_bcm870x(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			MII_BMCR);
	if (err < 0)
		return err;

	err &= ~BMCR_LOOPBACK;

	if (lp->loopback_mode == LOOPBACK_MAC)
		err |= BMCR_LOOPBACK;

	err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g_bcm8706(struct niu *np)
{
	int err = 0;
	u64 val;

	if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
	    (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
		return err;

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = bcm8704_reset(np);
	if (err)
		return err;

	err = xcvr_10g_set_lb_bcm870x(np);
	if (err)
		return err;

	err = bcm8706_init_user_dev3(np);
	if (err)
		return err;

	err = xcvr_diag_bcm870x(np);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g_bcm8704(struct niu *np)
{
	int err;

	err = bcm8704_reset(np);
	if (err)
		return err;

	err = bcm8704_init_user_dev3(np);
	if (err)
		return err;

	err = xcvr_10g_set_lb_bcm870x(np);
	if (err)
		return err;

	err = xcvr_diag_bcm870x(np);
	if (err)
		return err;

	return 0;
}

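/* Transceiver init entry point for 10G: force the XMAC LED on, put
 * the MIF into indirect mode, then dispatch on the probed PHY ID to
 * the Marvell or Broadcom init path.
 */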
static int xcvr_init_10g(struct niu *np)
{
	int phy_id, err;
	u64 val;

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	phy_id = phy_decode(np->parent->port_phy, np->port);
	phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];

	/* handle different phy types */
	switch (phy_id & NIU_PHY_ID_MASK) {
	case NIU_PHY_ID_MRVL88X2011:
		err = xcvr_init_10g_mrvl88x2011(np);
		break;

	default: /* bcom 8704 */
		err = xcvr_init_10g_bcm8704(np);
		break;
	}

	return err;
}

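/* Reset the MII PHY via BMCR and poll (with 500us spacing) for the
 * reset bit to self-clear.
 */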
static int mii_reset(struct niu *np)
{
	int limit, err;

	err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		udelay(500);
		err = mii_read(np, np->phy_addr, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n",
			   np->port, err);
		return -ENODEV;
	}

	return 0;
}

static int xcvr_init_1g_rgmii(struct niu *np)
{
	int err;
	u64 val;
	u16 bmcr, bmsr, estat;

	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (bmsr & BMSR_ESTATEN) {
		u16 ctrl1000 = 0;

		if (estat & ESTATUS_1000_TFULL)
			ctrl1000 |= ADVERTISE_1000FULL;
		err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
		if (err)
			return err;
	}

	bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (err < 0)
		return err;
	bmcr = mii_read(np, np->phy_addr, MII_BMCR);

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;

	return 0;
}

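/* Common MII/GMII setup: reset the PHY, then program BMCR and the
 * advertisement registers either for autonegotiation (filtered by
 * what the PHY's BMSR/ESTATUS say it supports) or for a forced
 * speed/duplex, rejecting combinations the PHY cannot do.
 */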
static int mii_init_common(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 bmcr, bmsr, adv, estat;
	int err;

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (lp->loopback_mode == LOOPBACK_MAC) {
		bmcr |= BMCR_LOOPBACK;
		if (lp->active_speed == SPEED_1000)
			bmcr |= BMCR_SPEED1000;
		if (lp->active_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 aux;

		aux = (BCM5464R_AUX_CTL_EXT_LB |
		       BCM5464R_AUX_CTL_WRITE_1);
		err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
		if (err)
			return err;
	}

	if (lp->autoneg) {
		u16 ctrl1000;

		adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
		if ((bmsr & BMSR_10HALF) &&
			(lp->advertising & ADVERTISED_10baseT_Half))
			adv |= ADVERTISE_10HALF;
		if ((bmsr & BMSR_10FULL) &&
			(lp->advertising & ADVERTISED_10baseT_Full))
			adv |= ADVERTISE_10FULL;
		if ((bmsr & BMSR_100HALF) &&
			(lp->advertising & ADVERTISED_100baseT_Half))
			adv |= ADVERTISE_100HALF;
		if ((bmsr & BMSR_100FULL) &&
			(lp->advertising & ADVERTISED_100baseT_Full))
			adv |= ADVERTISE_100FULL;
		err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
		if (err)
			return err;

		if (likely(bmsr & BMSR_ESTATEN)) {
			ctrl1000 = 0;
			if ((estat & ESTATUS_1000_THALF) &&
				(lp->advertising & ADVERTISED_1000baseT_Half))
				ctrl1000 |= ADVERTISE_1000HALF;
			if ((estat & ESTATUS_1000_TFULL) &&
				(lp->advertising & ADVERTISED_1000baseT_Full))
				ctrl1000 |= ADVERTISE_1000FULL;
			err = mii_write(np, np->phy_addr,
					MII_CTRL1000, ctrl1000);
			if (err)
				return err;
		}

		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		/* !lp->autoneg */
		int fulldpx;

		if (lp->duplex == DUPLEX_FULL) {
			bmcr |= BMCR_FULLDPLX;
			fulldpx = 1;
		} else if (lp->duplex == DUPLEX_HALF)
			fulldpx = 0;
		else
			return -EINVAL;

		if (lp->speed == SPEED_1000) {
			/* if X-full requested while not supported, or
			   X-half requested while not supported... */
			if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) ||
				(!fulldpx && !(estat & ESTATUS_1000_THALF)))
				return -EINVAL;
			bmcr |= BMCR_SPEED1000;
		} else if (lp->speed == SPEED_100) {
			if ((fulldpx && !(bmsr & BMSR_100FULL)) ||
				(!fulldpx && !(bmsr & BMSR_100HALF)))
				return -EINVAL;
			bmcr |= BMCR_SPEED100;
		} else if (lp->speed == SPEED_10) {
			if ((fulldpx && !(bmsr & BMSR_10FULL)) ||
				(!fulldpx && !(bmsr & BMSR_10HALF)))
				return -EINVAL;
		} else
			return -EINVAL;
	}

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_1g(struct niu *np)
{
	u64 val;

	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	return mii_init_common(np);
}

static int niu_xcvr_init(struct niu *np)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->xcvr_init)
		err = ops->xcvr_init(np);

	return err;
}

static int niu_serdes_init(struct niu *np)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->serdes_init)
		err = ops->serdes_init(np);

	return err;
}

static void niu_init_xif(struct niu *);
static void niu_handle_led(struct niu *, int status);

static int niu_link_status_common(struct niu *np, int link_up)
{
	struct niu_link_config *lp = &np->link_config;
	struct net_device *dev = np->dev;
	unsigned long flags;

	if (!netif_carrier_ok(dev) && link_up) {
		netif_info(np, link, dev, "Link is up at %s, %s duplex\n",
			   lp->active_speed == SPEED_10000 ? "10Gb/sec" :
			   lp->active_speed == SPEED_1000 ? "1Gb/sec" :
			   lp->active_speed == SPEED_100 ? "100Mbit/sec" :
			   "10Mbit/sec",
			   lp->active_duplex == DUPLEX_FULL ? "full" : "half");

		spin_lock_irqsave(&np->lock, flags);
		niu_init_xif(np);
		niu_handle_led(np, 1);
		spin_unlock_irqrestore(&np->lock, flags);

		netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev) && !link_up) {
		netif_warn(np, link, dev, "Link is down\n");
		spin_lock_irqsave(&np->lock, flags);
		niu_handle_led(np, 0);
		spin_unlock_irqrestore(&np->lock, flags);
		netif_carrier_off(dev);
	}

	return 0;
}

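/* Link status for the Marvell 88X2011: require PMA/PMD link OK, PCS
 * link OK (read twice), and all four XGXS lanes aligned before
 * reporting link-up, and mirror the result onto the activity LED.
 */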
static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
{
	int err, link_up, pma_status, pcs_status;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_10G_PMD_STATUS_2);
	if (err < 0)
		goto out;

	/* Check PMA/PMD Register: 1.0001.2 == 1 */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check PCS Register: 3.0001.2 == 1, read twice */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check XGXS Register: 4.0018.[0-3,12] */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
			MRVL88X2011_10G_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
		    0x800))
		link_up = (pma_status && pcs_status) ? 1 : 0;

	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;
out:
	mrvl88x2011_act_led(np, (link_up ?
				 MRVL88X2011_LED_CTL_PCS_ACT :
				 MRVL88X2011_LED_CTL_OFF));

	*link_up_p = link_up;
	return err;
}

static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
{
	int err, link_up;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (err < 0 || err == 0xffff)
		goto out;
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (err < 0)
		goto out;

	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err < 0)
		goto out;
	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_PATTEST |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		err = 0;
		np->link_config.active_speed = SPEED_INVALID;
		np->link_config.active_duplex = DUPLEX_INVALID;
		goto out;
	}

	link_up = 1;
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;

out:
	*link_up_p = link_up;
	return err;
}

static int link_status_10g_bcom(struct niu *np, int *link_up_p)
{
	int err, link_up;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (err < 0)
		goto out;
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (err < 0)
		goto out;
	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		err = 0;
		goto out;
	}

	link_up = 1;
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;

out:
	*link_up_p = link_up;
	return err;
}

static int link_status_10g(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	int err = -EINVAL;

	spin_lock_irqsave(&np->lock, flags);

	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
		int phy_id;

		phy_id = phy_decode(np->parent->port_phy, np->port);
		phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];

		/* handle different phy types */
		switch (phy_id & NIU_PHY_ID_MASK) {
		case NIU_PHY_ID_MRVL88X2011:
			err = link_status_10g_mrvl(np, link_up_p);
			break;

		default: /* bcom 8704 */
			err = link_status_10g_bcom(np, link_up_p);
			break;
		}
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return err;
}

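/* Infer whether the hotplug PHY is present from the SERDES state:
 * present only when every ready/detect bit for this port is set in
 * ESR_INT_SIGNALS.
 */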
2114static int niu_10g_phy_present(struct niu *np)
2115{
2116	u64 sig, mask, val;
2117
2118	sig = nr64(ESR_INT_SIGNALS);
2119	switch (np->port) {
2120	case 0:
2121		mask = ESR_INT_SIGNALS_P0_BITS;
2122		val = (ESR_INT_SRDY0_P0 |
2123		       ESR_INT_DET0_P0 |
2124		       ESR_INT_XSRDY_P0 |
2125		       ESR_INT_XDP_P0_CH3 |
2126		       ESR_INT_XDP_P0_CH2 |
2127		       ESR_INT_XDP_P0_CH1 |
2128		       ESR_INT_XDP_P0_CH0);
2129		break;
2130
2131	case 1:
2132		mask = ESR_INT_SIGNALS_P1_BITS;
2133		val = (ESR_INT_SRDY0_P1 |
2134		       ESR_INT_DET0_P1 |
2135		       ESR_INT_XSRDY_P1 |
2136		       ESR_INT_XDP_P1_CH3 |
2137		       ESR_INT_XDP_P1_CH2 |
2138		       ESR_INT_XDP_P1_CH1 |
2139		       ESR_INT_XDP_P1_CH0);
2140		break;
2141
2142	default:
2143		return 0;
2144	}
2145
2146	if ((sig & mask) != val)
2147		return 0;
2148	return 1;
2149}
2150
2151static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
2152{
2153	unsigned long flags;
2154	int err = 0;
2155	int phy_present;
2156	int phy_present_prev;
2157
2158	spin_lock_irqsave(&np->lock, flags);
2159
2160	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
2161		phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
2162			1 : 0;
2163		phy_present = niu_10g_phy_present(np);
2164		if (phy_present != phy_present_prev) {
2165			/* state change */
2166			if (phy_present) {
2167				/* A NEM was just plugged in */
2168				np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
2169				if (np->phy_ops->xcvr_init)
2170					err = np->phy_ops->xcvr_init(np);
2171				if (err) {
2172					err = mdio_read(np, np->phy_addr,
2173						BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
2174					if (err == 0xffff) {
2175						/* No mdio, back-to-back XAUI */
2176						goto out;
2177					}
2178					/* debounce */
2179					np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
2180				}
2181			} else {
2182				np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
2183				*link_up_p = 0;
2184				netif_warn(np, link, np->dev,
2185					   "Hotplug PHY Removed\n");
2186			}
2187		}
2188out:
2189		if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) {
2190			err = link_status_10g_bcm8706(np, link_up_p);
2191			if (err == 0xffff) {
2192				/* No mdio, back-to-back XAUI: it is C10NEM */
2193				*link_up_p = 1;
2194				np->link_config.active_speed = SPEED_10000;
2195				np->link_config.active_duplex = DUPLEX_FULL;
2196			}
2197		}
2198	}
2199
2200	spin_unlock_irqrestore(&np->lock, flags);
2201
2202	return 0;
2203}
2204
2205static int niu_link_status(struct niu *np, int *link_up_p)
2206{
2207	const struct niu_phy_ops *ops = np->phy_ops;
2208	int err;
2209
2210	err = 0;
2211	if (ops->link_status)
2212		err = ops->link_status(np, link_up_p);
2213
2214	return err;
2215}
2216
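/* Periodic link poll: the handler re-arms itself, re-checking the
 * link every 5 seconds while the carrier is up and every second
 * while it is down.
 */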
2217static void niu_timer(unsigned long __opaque)
2218{
2219	struct niu *np = (struct niu *) __opaque;
2220	unsigned long off;
2221	int err, link_up;
2222
2223	err = niu_link_status(np, &link_up);
2224	if (!err)
2225		niu_link_status_common(np, link_up);
2226
2227	if (netif_carrier_ok(np->dev))
2228		off = 5 * HZ;
2229	else
2230		off = 1 * HZ;
2231	np->timer.expires = jiffies + off;
2232
2233	add_timer(&np->timer);
2234}
2235
2236static const struct niu_phy_ops phy_ops_10g_serdes = {
2237	.serdes_init		= serdes_init_10g_serdes,
2238	.link_status		= link_status_10g_serdes,
2239};
2240
2241static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
2242	.serdes_init		= serdes_init_niu_10g_serdes,
2243	.link_status		= link_status_10g_serdes,
2244};
2245
2246static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
2247	.serdes_init		= serdes_init_niu_1g_serdes,
2248	.link_status		= link_status_1g_serdes,
2249};
2250
2251static const struct niu_phy_ops phy_ops_1g_rgmii = {
2252	.xcvr_init		= xcvr_init_1g_rgmii,
2253	.link_status		= link_status_1g_rgmii,
2254};
2255
2256static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
2257	.serdes_init		= serdes_init_niu_10g_fiber,
2258	.xcvr_init		= xcvr_init_10g,
2259	.link_status		= link_status_10g,
2260};
2261
2262static const struct niu_phy_ops phy_ops_10g_fiber = {
2263	.serdes_init		= serdes_init_10g,
2264	.xcvr_init		= xcvr_init_10g,
2265	.link_status		= link_status_10g,
2266};
2267
2268static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
2269	.serdes_init		= serdes_init_10g,
2270	.xcvr_init		= xcvr_init_10g_bcm8706,
2271	.link_status		= link_status_10g_hotplug,
2272};
2273
2274static const struct niu_phy_ops phy_ops_niu_10g_hotplug = {
2275	.serdes_init		= serdes_init_niu_10g_fiber,
2276	.xcvr_init		= xcvr_init_10g_bcm8706,
2277	.link_status		= link_status_10g_hotplug,
2278};
2279
2280static const struct niu_phy_ops phy_ops_10g_copper = {
2281	.serdes_init		= serdes_init_10g,
2282	.link_status		= link_status_10g,
2283};
2284
2285static const struct niu_phy_ops phy_ops_1g_fiber = {
2286	.serdes_init		= serdes_init_1g,
2287	.xcvr_init		= xcvr_init_1g,
2288	.link_status		= link_status_1g,
2289};
2290
2291static const struct niu_phy_ops phy_ops_1g_copper = {
2292	.xcvr_init		= xcvr_init_1g,
2293	.link_status		= link_status_1g,
2294};
2295
2296struct niu_phy_template {
2297	const struct niu_phy_ops	*ops;
2298	u32				phy_addr_base;
2299};
2300
2301static const struct niu_phy_template phy_template_niu_10g_fiber = {
2302	.ops		= &phy_ops_10g_fiber_niu,
2303	.phy_addr_base	= 16,
2304};
2305
2306static const struct niu_phy_template phy_template_niu_10g_serdes = {
2307	.ops		= &phy_ops_10g_serdes_niu,
2308	.phy_addr_base	= 0,
2309};
2310
2311static const struct niu_phy_template phy_template_niu_1g_serdes = {
2312	.ops		= &phy_ops_1g_serdes_niu,
2313	.phy_addr_base	= 0,
2314};
2315
2316static const struct niu_phy_template phy_template_10g_fiber = {
2317	.ops		= &phy_ops_10g_fiber,
2318	.phy_addr_base	= 8,
2319};
2320
2321static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
2322	.ops		= &phy_ops_10g_fiber_hotplug,
2323	.phy_addr_base	= 8,
2324};
2325
2326static const struct niu_phy_template phy_template_niu_10g_hotplug = {
2327	.ops		= &phy_ops_niu_10g_hotplug,
2328	.phy_addr_base	= 8,
2329};
2330
2331static const struct niu_phy_template phy_template_10g_copper = {
2332	.ops		= &phy_ops_10g_copper,
2333	.phy_addr_base	= 10,
2334};
2335
2336static const struct niu_phy_template phy_template_1g_fiber = {
2337	.ops		= &phy_ops_1g_fiber,
2338	.phy_addr_base	= 0,
2339};
2340
2341static const struct niu_phy_template phy_template_1g_copper = {
2342	.ops		= &phy_ops_1g_copper,
2343	.phy_addr_base	= 0,
2344};
2345
2346static const struct niu_phy_template phy_template_1g_rgmii = {
2347	.ops		= &phy_ops_1g_rgmii,
2348	.phy_addr_base	= 0,
2349};
2350
2351static const struct niu_phy_template phy_template_10g_serdes = {
2352	.ops		= &phy_ops_10g_serdes,
2353	.phy_addr_base	= 0,
2354};
2355
2356static int niu_atca_port_num[4] = {
2357	0, 0, 11, 10
2358};
2359
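/* Bring up the 10G SERDES on port 0 or 1: program the PLL divider,
 * the signal-detect/emphasis/level-adjust controls and optional PHY
 * loopback, condition all four lanes, then verify the expected
 * ready/detect bits in ESR_INT_SIGNALS.  If the 10G signal check
 * fails, the port is re-initialized as a 1G SERDES before giving up.
 */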
2360static int serdes_init_10g_serdes(struct niu *np)
2361{
2362	struct niu_link_config *lp = &np->link_config;
2363	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
2364	u64 ctrl_val, test_cfg_val, sig, mask, val;
2365	u64 reset_val;
2366
2367	switch (np->port) {
2368	case 0:
2369		reset_val =  ENET_SERDES_RESET_0;
2370		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
2371		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
2372		pll_cfg = ENET_SERDES_0_PLL_CFG;
2373		break;
2374	case 1:
2375		reset_val =  ENET_SERDES_RESET_1;
2376		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
2377		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
2378		pll_cfg = ENET_SERDES_1_PLL_CFG;
2379		break;
2380
2381	default:
2382		return -EINVAL;
2383	}
2384	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
2385		    ENET_SERDES_CTRL_SDET_1 |
2386		    ENET_SERDES_CTRL_SDET_2 |
2387		    ENET_SERDES_CTRL_SDET_3 |
2388		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
2389		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
2390		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
2391		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
2392		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
2393		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
2394		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
2395		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
2396	test_cfg_val = 0;
2397
2398	if (lp->loopback_mode == LOOPBACK_PHY) {
2399		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
2400				  ENET_SERDES_TEST_MD_0_SHIFT) |
2401				 (ENET_TEST_MD_PAD_LOOPBACK <<
2402				  ENET_SERDES_TEST_MD_1_SHIFT) |
2403				 (ENET_TEST_MD_PAD_LOOPBACK <<
2404				  ENET_SERDES_TEST_MD_2_SHIFT) |
2405				 (ENET_TEST_MD_PAD_LOOPBACK <<
2406				  ENET_SERDES_TEST_MD_3_SHIFT));
2407	}
2408
2409	esr_reset(np);
2410	nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
2411	nw64(ctrl_reg, ctrl_val);
2412	nw64(test_cfg_reg, test_cfg_val);
2413
2414	/* Initialize all 4 lanes of the SERDES.  */
2415	for (i = 0; i < 4; i++) {
2416		u32 rxtx_ctrl, glue0;
2417		int err;
2418
2419		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
2420		if (err)
2421			return err;
2422		err = esr_read_glue0(np, i, &glue0);
2423		if (err)
2424			return err;
2425
2426		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
2427		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
2428			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
2429
2430		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
2431			   ESR_GLUE_CTRL0_THCNT |
2432			   ESR_GLUE_CTRL0_BLTIME);
2433		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
2434			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
2435			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
2436			  (BLTIME_300_CYCLES <<
2437			   ESR_GLUE_CTRL0_BLTIME_SHIFT));
2438
2439		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
2440		if (err)
2441			return err;
2442		err = esr_write_glue0(np, i, glue0);
2443		if (err)
2444			return err;
2445	}
2446
2448	sig = nr64(ESR_INT_SIGNALS);
2449	switch (np->port) {
2450	case 0:
2451		mask = ESR_INT_SIGNALS_P0_BITS;
2452		val = (ESR_INT_SRDY0_P0 |
2453		       ESR_INT_DET0_P0 |
2454		       ESR_INT_XSRDY_P0 |
2455		       ESR_INT_XDP_P0_CH3 |
2456		       ESR_INT_XDP_P0_CH2 |
2457		       ESR_INT_XDP_P0_CH1 |
2458		       ESR_INT_XDP_P0_CH0);
2459		break;
2460
2461	case 1:
2462		mask = ESR_INT_SIGNALS_P1_BITS;
2463		val = (ESR_INT_SRDY0_P1 |
2464		       ESR_INT_DET0_P1 |
2465		       ESR_INT_XSRDY_P1 |
2466		       ESR_INT_XDP_P1_CH3 |
2467		       ESR_INT_XDP_P1_CH2 |
2468		       ESR_INT_XDP_P1_CH1 |
2469		       ESR_INT_XDP_P1_CH0);
2470		break;
2471
2472	default:
2473		return -EINVAL;
2474	}
2475
2476	if ((sig & mask) != val) {
2477		int err;
2478		err = serdes_init_1g_serdes(np);
2479		if (!err) {
2480			np->flags &= ~NIU_FLAGS_10G;
2481			np->mac_xcvr = MAC_XCVR_PCS;
2482		} else {
2483			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
2484				   np->port);
2485			return -ENODEV;
2486		}
2487	}
2488
2489	return 0;
2490}
2491
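/* Map the NIU_FLAGS_{10G,FIBER,XCVR_SERDES} combination, platform
 * type and port number to a PHY template.  The final MDIO address is
 * the template's phy_addr_base plus a per-port offset; e.g. 1G
 * copper on a VF_P0 platform uses base 0, offset 10 + (port ^ 3).
 */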
2492static int niu_determine_phy_disposition(struct niu *np)
2493{
2494	struct niu_parent *parent = np->parent;
2495	u8 plat_type = parent->plat_type;
2496	const struct niu_phy_template *tp;
2497	u32 phy_addr_off = 0;
2498
2499	if (plat_type == PLAT_TYPE_NIU) {
2500		switch (np->flags &
2501			(NIU_FLAGS_10G |
2502			 NIU_FLAGS_FIBER |
2503			 NIU_FLAGS_XCVR_SERDES)) {
2504		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
2505			/* 10G Serdes */
2506			tp = &phy_template_niu_10g_serdes;
2507			break;
2508		case NIU_FLAGS_XCVR_SERDES:
2509			/* 1G Serdes */
2510			tp = &phy_template_niu_1g_serdes;
2511			break;
2512		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
2513			/* 10G Fiber */
2514		default:
2515			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
2516				tp = &phy_template_niu_10g_hotplug;
2517				if (np->port == 0)
2518					phy_addr_off = 8;
2519				if (np->port == 1)
2520					phy_addr_off = 12;
2521			} else {
2522				tp = &phy_template_niu_10g_fiber;
2523				phy_addr_off += np->port;
2524			}
2525			break;
2526		}
2527	} else {
2528		switch (np->flags &
2529			(NIU_FLAGS_10G |
2530			 NIU_FLAGS_FIBER |
2531			 NIU_FLAGS_XCVR_SERDES)) {
2532		case 0:
2533			/* 1G copper */
2534			tp = &phy_template_1g_copper;
2535			if (plat_type == PLAT_TYPE_VF_P0)
2536				phy_addr_off = 10;
2537			else if (plat_type == PLAT_TYPE_VF_P1)
2538				phy_addr_off = 26;
2539
2540			phy_addr_off += (np->port ^ 0x3);
2541			break;
2542
2543		case NIU_FLAGS_10G:
2544			/* 10G copper */
2545			tp = &phy_template_10g_copper;
2546			break;
2547
2548		case NIU_FLAGS_FIBER:
2549			/* 1G fiber */
2550			tp = &phy_template_1g_fiber;
2551			break;
2552
2553		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
2554			/* 10G fiber */
2555			tp = &phy_template_10g_fiber;
2556			if (plat_type == PLAT_TYPE_VF_P0 ||
2557			    plat_type == PLAT_TYPE_VF_P1)
2558				phy_addr_off = 8;
2559			phy_addr_off += np->port;
2560			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
2561				tp = &phy_template_10g_fiber_hotplug;
2562				if (np->port == 0)
2563					phy_addr_off = 8;
2564				if (np->port == 1)
2565					phy_addr_off = 12;
2566			}
2567			break;
2568
2569		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
2570		case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
2571		case NIU_FLAGS_XCVR_SERDES:
2572			switch (np->port) {
2573			case 0:
2574			case 1:
2575				tp = &phy_template_10g_serdes;
2576				break;
2577			case 2:
2578			case 3:
2579				tp = &phy_template_1g_rgmii;
2580				break;
2581			default:
2582				return -EINVAL;
2584			}
2585			phy_addr_off = niu_atca_port_num[np->port];
2586			break;
2587
2588		default:
2589			return -EINVAL;
2590		}
2591	}
2592
2593	np->phy_ops = tp->ops;
2594	np->phy_addr = tp->phy_addr_base + phy_addr_off;
2595
2596	return 0;
2597}
2598
2599static int niu_init_link(struct niu *np)
2600{
2601	struct niu_parent *parent = np->parent;
2602	int err, ignore;
2603
2604	if (parent->plat_type == PLAT_TYPE_NIU) {
2605		err = niu_xcvr_init(np);
2606		if (err)
2607			return err;
2608		msleep(200);
2609	}
2610	err = niu_serdes_init(np);
2611	if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY))
2612		return err;
2613	msleep(200);
2614	err = niu_xcvr_init(np);
2615	if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY))
2616		niu_link_status(np, &ignore);
2617	return 0;
2618}
2619
2620static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
2621{
2622	u16 reg0 = addr[4] << 8 | addr[5];
2623	u16 reg1 = addr[2] << 8 | addr[3];
2624	u16 reg2 = addr[0] << 8 | addr[1];
2625
2626	if (np->flags & NIU_FLAGS_XMAC) {
2627		nw64_mac(XMAC_ADDR0, reg0);
2628		nw64_mac(XMAC_ADDR1, reg1);
2629		nw64_mac(XMAC_ADDR2, reg2);
2630	} else {
2631		nw64_mac(BMAC_ADDR0, reg0);
2632		nw64_mac(BMAC_ADDR1, reg1);
2633		nw64_mac(BMAC_ADDR2, reg2);
2634	}
2635}
2636
2637static int niu_num_alt_addr(struct niu *np)
2638{
2639	if (np->flags & NIU_FLAGS_XMAC)
2640		return XMAC_NUM_ALT_ADDR;
2641	else
2642		return BMAC_NUM_ALT_ADDR;
2643}
2644
2645static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
2646{
2647	u16 reg0 = addr[4] << 8 | addr[5];
2648	u16 reg1 = addr[2] << 8 | addr[3];
2649	u16 reg2 = addr[0] << 8 | addr[1];
2650
2651	if (index >= niu_num_alt_addr(np))
2652		return -EINVAL;
2653
2654	if (np->flags & NIU_FLAGS_XMAC) {
2655		nw64_mac(XMAC_ALT_ADDR0(index), reg0);
2656		nw64_mac(XMAC_ALT_ADDR1(index), reg1);
2657		nw64_mac(XMAC_ALT_ADDR2(index), reg2);
2658	} else {
2659		nw64_mac(BMAC_ALT_ADDR0(index), reg0);
2660		nw64_mac(BMAC_ALT_ADDR1(index), reg1);
2661		nw64_mac(BMAC_ALT_ADDR2(index), reg2);
2662	}
2663
2664	return 0;
2665}
2666
2667static int niu_enable_alt_mac(struct niu *np, int index, int on)
2668{
2669	unsigned long reg;
2670	u64 val, mask;
2671
2672	if (index >= niu_num_alt_addr(np))
2673		return -EINVAL;
2674
2675	if (np->flags & NIU_FLAGS_XMAC) {
2676		reg = XMAC_ADDR_CMPEN;
2677		mask = 1 << index;
2678	} else {
2679		reg = BMAC_ADDR_CMPEN;
2680		mask = 1 << (index + 1);
2681	}
2682
2683	val = nr64_mac(reg);
2684	if (on)
2685		val |= mask;
2686	else
2687		val &= ~mask;
2688	nw64_mac(reg, val);
2689
2690	return 0;
2691}
2692
2693static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
2694				   int num, int mac_pref)
2695{
2696	u64 val = nr64_mac(reg);
2697	val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
2698	val |= num;
2699	if (mac_pref)
2700		val |= HOST_INFO_MPR;
2701	nw64_mac(reg, val);
2702}
2703
2704static int __set_rdc_table_num(struct niu *np,
2705			       int xmac_index, int bmac_index,
2706			       int rdc_table_num, int mac_pref)
2707{
2708	unsigned long reg;
2709
2710	if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
2711		return -EINVAL;
2712	if (np->flags & NIU_FLAGS_XMAC)
2713		reg = XMAC_HOST_INFO(xmac_index);
2714	else
2715		reg = BMAC_HOST_INFO(bmac_index);
2716	__set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
2717	return 0;
2718}
2719
2720static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
2721					 int mac_pref)
2722{
2723	return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
2724}
2725
2726static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
2727					   int mac_pref)
2728{
2729	return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
2730}
2731
2732static int niu_set_alt_mac_rdc_table(struct niu *np, int idx,
2733				     int table_num, int mac_pref)
2734{
2735	if (idx >= niu_num_alt_addr(np))
2736		return -EINVAL;
2737	return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
2738}
2739
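/* Keep each half of a VLAN table entry even-parity: PARITY0 covers
 * the ports 0/1 byte (0x00ff) and PARITY1 the ports 2/3 byte
 * (0xff00).  If the covered byte holds an odd number of ones the
 * parity bit is set, so byte plus parity always has even weight.
 */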
2740static u64 vlan_entry_set_parity(u64 reg_val)
2741{
2742	u64 port01_mask;
2743	u64 port23_mask;
2744
2745	port01_mask = 0x00ff;
2746	port23_mask = 0xff00;
2747
2748	if (hweight64(reg_val & port01_mask) & 1)
2749		reg_val |= ENET_VLAN_TBL_PARITY0;
2750	else
2751		reg_val &= ~ENET_VLAN_TBL_PARITY0;
2752
2753	if (hweight64(reg_val & port23_mask) & 1)
2754		reg_val |= ENET_VLAN_TBL_PARITY1;
2755	else
2756		reg_val &= ~ENET_VLAN_TBL_PARITY1;
2757
2758	return reg_val;
2759}
2760
2761static void vlan_tbl_write(struct niu *np, unsigned long index,
2762			   int port, int vpr, int rdc_table)
2763{
2764	u64 reg_val = nr64(ENET_VLAN_TBL(index));
2765
2766	reg_val &= ~((ENET_VLAN_TBL_VPR |
2767		      ENET_VLAN_TBL_VLANRDCTBLN) <<
2768		     ENET_VLAN_TBL_SHIFT(port));
2769	if (vpr)
2770		reg_val |= (ENET_VLAN_TBL_VPR <<
2771			    ENET_VLAN_TBL_SHIFT(port));
2772	reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port));
2773
2774	reg_val = vlan_entry_set_parity(reg_val);
2775
2776	nw64(ENET_VLAN_TBL(index), reg_val);
2777}
2778
2779static void vlan_tbl_clear(struct niu *np)
2780{
2781	int i;
2782
2783	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++)
2784		nw64(ENET_VLAN_TBL(i), 0);
2785}
2786
2787static int tcam_wait_bit(struct niu *np, u64 bit)
2788{
2789	int limit = 1000;
2790
2791	while (--limit > 0) {
2792		if (nr64(TCAM_CTL) & bit)
2793			break;
2794		udelay(1);
2795	}
2796	if (limit <= 0)
2797		return -ENODEV;
2798
2799	return 0;
2800}
2801
2802static int tcam_flush(struct niu *np, int index)
2803{
2804	nw64(TCAM_KEY_0, 0x00);
2805	nw64(TCAM_KEY_MASK_0, 0xff);
2806	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
2807
2808	return tcam_wait_bit(np, TCAM_CTL_STAT);
2809}
2810
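/* All TCAM accesses follow the same protocol: load the key and mask
 * staging registers, issue the read/write command plus entry index
 * through TCAM_CTL, then poll TCAM_CTL_STAT until the hardware
 * signals completion (tcam_wait_bit() gives up after ~1ms).
 */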
2812static int tcam_write(struct niu *np, int index,
2813		      u64 *key, u64 *mask)
2814{
2815	nw64(TCAM_KEY_0, key[0]);
2816	nw64(TCAM_KEY_1, key[1]);
2817	nw64(TCAM_KEY_2, key[2]);
2818	nw64(TCAM_KEY_3, key[3]);
2819	nw64(TCAM_KEY_MASK_0, mask[0]);
2820	nw64(TCAM_KEY_MASK_1, mask[1]);
2821	nw64(TCAM_KEY_MASK_2, mask[2]);
2822	nw64(TCAM_KEY_MASK_3, mask[3]);
2823	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
2824
2825	return tcam_wait_bit(np, TCAM_CTL_STAT);
2826}
2827
2829static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
2830{
2831	nw64(TCAM_KEY_1, assoc_data);
2832	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));
2833
2834	return tcam_wait_bit(np, TCAM_CTL_STAT);
2835}
2836
2837static void tcam_enable(struct niu *np, int on)
2838{
2839	u64 val = nr64(FFLP_CFG_1);
2840
2841	if (on)
2842		val &= ~FFLP_CFG_1_TCAM_DIS;
2843	else
2844		val |= FFLP_CFG_1_TCAM_DIS;
2845	nw64(FFLP_CFG_1, val);
2846}
2847
2848static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
2849{
2850	u64 val = nr64(FFLP_CFG_1);
2851
2852	val &= ~(FFLP_CFG_1_FFLPINITDONE |
2853		 FFLP_CFG_1_CAMLAT |
2854		 FFLP_CFG_1_CAMRATIO);
2855	val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
2856	val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
2857	nw64(FFLP_CFG_1, val);
2858
2859	val = nr64(FFLP_CFG_1);
2860	val |= FFLP_CFG_1_FFLPINITDONE;
2861	nw64(FFLP_CFG_1, val);
2862}
2863
2864static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
2865				      int on)
2866{
2867	unsigned long reg;
2868	u64 val;
2869
2870	if (class < CLASS_CODE_ETHERTYPE1 ||
2871	    class > CLASS_CODE_ETHERTYPE2)
2872		return -EINVAL;
2873
2874	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
2875	val = nr64(reg);
2876	if (on)
2877		val |= L2_CLS_VLD;
2878	else
2879		val &= ~L2_CLS_VLD;
2880	nw64(reg, val);
2881
2882	return 0;
2883}
2884
2886static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
2887				     int on)
2888{
2889	unsigned long reg;
2890	u64 val;
2891
2892	if (class < CLASS_CODE_USER_PROG1 ||
2893	    class > CLASS_CODE_USER_PROG4)
2894		return -EINVAL;
2895
2896	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
2897	val = nr64(reg);
2898	if (on)
2899		val |= L3_CLS_VALID;
2900	else
2901		val &= ~L3_CLS_VALID;
2902	nw64(reg, val);
2903
2904	return 0;
2905}
2906
2907static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
2908				  int ipv6, u64 protocol_id,
2909				  u64 tos_mask, u64 tos_val)
2910{
2911	unsigned long reg;
2912	u64 val;
2913
2914	if (class < CLASS_CODE_USER_PROG1 ||
2915	    class > CLASS_CODE_USER_PROG4 ||
2916	    (protocol_id & ~(u64)0xff) != 0 ||
2917	    (tos_mask & ~(u64)0xff) != 0 ||
2918	    (tos_val & ~(u64)0xff) != 0)
2919		return -EINVAL;
2920
2921	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
2922	val = nr64(reg);
2923	val &= ~(L3_CLS_IPVER | L3_CLS_PID |
2924		 L3_CLS_TOSMASK | L3_CLS_TOS);
2925	if (ipv6)
2926		val |= L3_CLS_IPVER;
2927	val |= (protocol_id << L3_CLS_PID_SHIFT);
2928	val |= (tos_mask << L3_CLS_TOSMASK_SHIFT);
2929	val |= (tos_val << L3_CLS_TOS_SHIFT);
2930	nw64(reg, val);
2931
2932	return 0;
2933}
2934
2935static int tcam_early_init(struct niu *np)
2936{
2937	unsigned long i;
2938	int err;
2939
2940	tcam_enable(np, 0);
2941	tcam_set_lat_and_ratio(np,
2942			       DEFAULT_TCAM_LATENCY,
2943			       DEFAULT_TCAM_ACCESS_RATIO);
2944	for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) {
2945		err = tcam_user_eth_class_enable(np, i, 0);
2946		if (err)
2947			return err;
2948	}
2949	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) {
2950		err = tcam_user_ip_class_enable(np, i, 0);
2951		if (err)
2952			return err;
2953	}
2954
2955	return 0;
2956}
2957
2958static int tcam_flush_all(struct niu *np)
2959{
2960	unsigned long i;
2961
2962	for (i = 0; i < np->parent->tcam_num_entries; i++) {
2963		int err = tcam_flush(np, i);
2964		if (err)
2965			return err;
2966	}
2967	return 0;
2968}
2969
2970static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
2971{
2972	return ((u64)index | (num_entries == 1 ?
2973			      HASH_TBL_ADDR_AUTOINC : 0));
2974}
2975
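/* FCRAM hash table access: HASH_TBL_ADDR selects the byte index
 * within the partition (hash_addr_regval() also sets the
 * auto-increment bit for single-entry accesses), then each write to
 * HASH_TBL_DATA stores one 8-byte entry.
 */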
2977static int hash_write(struct niu *np, unsigned long partition,
2978		      unsigned long index, unsigned long num_entries,
2979		      u64 *data)
2980{
2981	u64 val = hash_addr_regval(index, num_entries);
2982	unsigned long i;
2983
2984	if (partition >= FCRAM_NUM_PARTITIONS ||
2985	    index + (num_entries * 8) > FCRAM_SIZE)
2986		return -EINVAL;
2987
2988	nw64(HASH_TBL_ADDR(partition), val);
2989	for (i = 0; i < num_entries; i++)
2990		nw64(HASH_TBL_DATA(partition), data[i]);
2991
2992	return 0;
2993}
2994
2995static void fflp_reset(struct niu *np)
2996{
2997	u64 val;
2998
2999	nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
3000	udelay(10);
3001	nw64(FFLP_CFG_1, 0);
3002
3003	val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
3004	nw64(FFLP_CFG_1, val);
3005}
3006
3007static void fflp_set_timings(struct niu *np)
3008{
3009	u64 val = nr64(FFLP_CFG_1);
3010
3011	val &= ~FFLP_CFG_1_FFLPINITDONE;
3012	val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
3013	nw64(FFLP_CFG_1, val);
3014
3015	val = nr64(FFLP_CFG_1);
3016	val |= FFLP_CFG_1_FFLPINITDONE;
3017	nw64(FFLP_CFG_1, val);
3018
3019	val = nr64(FCRAM_REF_TMR);
3020	val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
3021	val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
3022	val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
3023	nw64(FCRAM_REF_TMR, val);
3024}
3025
3026static int fflp_set_partition(struct niu *np, u64 partition,
3027			      u64 mask, u64 base, int enable)
3028{
3029	unsigned long reg;
3030	u64 val;
3031
3032	if (partition >= FCRAM_NUM_PARTITIONS ||
3033	    (mask & ~(u64)0x1f) != 0 ||
3034	    (base & ~(u64)0x1f) != 0)
3035		return -EINVAL;
3036
3037	reg = FLW_PRT_SEL(partition);
3038
3039	val = nr64(reg);
3040	val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
3041	val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
3042	val |= (base << FLW_PRT_SEL_BASE_SHIFT);
3043	if (enable)
3044		val |= FLW_PRT_SEL_EXT;
3045	nw64(reg, val);
3046
3047	return 0;
3048}
3049
3050static int fflp_disable_all_partitions(struct niu *np)
3051{
3052	unsigned long i;
3053
3054	for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
3055		int err = fflp_set_partition(np, i, 0, 0, 0);
3056		if (err)
3057			return err;
3058	}
3059	return 0;
3060}
3061
3062static void fflp_llcsnap_enable(struct niu *np, int on)
3063{
3064	u64 val = nr64(FFLP_CFG_1);
3065
3066	if (on)
3067		val |= FFLP_CFG_1_LLCSNAP;
3068	else
3069		val &= ~FFLP_CFG_1_LLCSNAP;
3070	nw64(FFLP_CFG_1, val);
3071}
3072
3073static void fflp_errors_enable(struct niu *np, int on)
3074{
3075	u64 val = nr64(FFLP_CFG_1);
3076
3077	if (on)
3078		val &= ~FFLP_CFG_1_ERRORDIS;
3079	else
3080		val |= FFLP_CFG_1_ERRORDIS;
3081	nw64(FFLP_CFG_1, val);
3082}
3083
3084static int fflp_hash_clear(struct niu *np)
3085{
3086	struct fcram_hash_ipv4 ent;
3087	unsigned long i;
3088
3089	/* IPV4 hash entry with valid bit clear, rest is don't care.  */
3090	memset(&ent, 0, sizeof(ent));
3091	ent.header = HASH_HEADER_EXT;
3092
3093	for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
3094		int err = hash_write(np, 0, i, 1, (u64 *) &ent);
3095		if (err)
3096			return err;
3097	}
3098	return 0;
3099}
3100
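/* One-time classifier bring-up, shared by all ports of a parent:
 * reset the FFLP and set FCRAM timings (non-NIU platforms only),
 * disable all flow partitions, park the TCAM disabled with default
 * latency/ratio, enable LLC-SNAP parsing, zero the hash polynomials,
 * flush every TCAM entry and clear the FCRAM hash and VLAN tables,
 * then set PARENT_FLGS_CLS_HWINIT so sibling ports skip this work.
 */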
3101static int fflp_early_init(struct niu *np)
3102{
3103	struct niu_parent *parent;
3104	unsigned long flags;
3105	int err;
3106
3107	niu_lock_parent(np, flags);
3108
3109	parent = np->parent;
3110	err = 0;
3111	if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
3112		if (np->parent->plat_type != PLAT_TYPE_NIU) {
3113			fflp_reset(np);
3114			fflp_set_timings(np);
3115			err = fflp_disable_all_partitions(np);
3116			if (err) {
3117				netif_printk(np, probe, KERN_DEBUG, np->dev,
3118					     "fflp_disable_all_partitions failed, err=%d\n",
3119					     err);
3120				goto out;
3121			}
3122		}
3123
3124		err = tcam_early_init(np);
3125		if (err) {
3126			netif_printk(np, probe, KERN_DEBUG, np->dev,
3127				     "tcam_early_init failed, err=%d\n", err);
3128			goto out;
3129		}
3130		fflp_llcsnap_enable(np, 1);
3131		fflp_errors_enable(np, 0);
3132		nw64(H1POLY, 0);
3133		nw64(H2POLY, 0);
3134
3135		err = tcam_flush_all(np);
3136		if (err) {
3137			netif_printk(np, probe, KERN_DEBUG, np->dev,
3138				     "tcam_flush_all failed, err=%d\n", err);
3139			goto out;
3140		}
3141		if (np->parent->plat_type != PLAT_TYPE_NIU) {
3142			err = fflp_hash_clear(np);
3143			if (err) {
3144				netif_printk(np, probe, KERN_DEBUG, np->dev,
3145					     "fflp_hash_clear failed, err=%d\n",
3146					     err);
3147				goto out;
3148			}
3149		}
3150
3151		vlan_tbl_clear(np);
3152
3153		parent->flags |= PARENT_FLGS_CLS_HWINIT;
3154	}
3155out:
3156	niu_unlock_parent(np, flags);
3157	return err;
3158}
3159
3160static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
3161{
3162	if (class_code < CLASS_CODE_USER_PROG1 ||
3163	    class_code > CLASS_CODE_SCTP_IPV6)
3164		return -EINVAL;
3165
3166	nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
3167	return 0;
3168}
3169
3170static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
3171{
3172	if (class_code < CLASS_CODE_USER_PROG1 ||
3173	    class_code > CLASS_CODE_SCTP_IPV6)
3174		return -EINVAL;
3175
3176	nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
3177	return 0;
3178}
3179
3180/* Entries for the ports are interleaved in the TCAM */
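/* E.g. with 4 ports, logical index 0 maps to hardware entry
 * tcam_top + 4, index 1 to tcam_top + 8, and so on; the (idx + 1)
 * skip leaves the first slot for the reserved IP fragment rule.
 */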
3181static u16 tcam_get_index(struct niu *np, u16 idx)
3182{
3183	/* One entry reserved for IP fragment rule */
3184	if (idx >= (np->clas.tcam_sz - 1))
3185		idx = 0;
3186	return (np->clas.tcam_top + ((idx+1) * np->parent->num_ports));
3187}
3188
3189static u16 tcam_get_size(struct niu *np)
3190{
3191	/* One entry reserved for IP fragment rule */
3192	return np->clas.tcam_sz - 1;
3193}
3194
3195static u16 tcam_get_valid_entry_cnt(struct niu *np)
3196{
3197	/* One entry reserved for IP fragment rule */
3198	return np->clas.tcam_valid_entries - 1;
3199}
3200
3201static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
3202			      u32 offset, u32 size)
3203{
3204	int i = skb_shinfo(skb)->nr_frags;
3205	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3206
3207	frag->page = page;
3208	frag->page_offset = offset;
3209	frag->size = size;
3210
3211	skb->len += size;
3212	skb->data_len += size;
3213	skb->truesize += size;
3214
3215	skb_shinfo(skb)->nr_frags = i + 1;
3216}
3217
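/* RX pages are tracked in a small chained hash table keyed by DMA
 * address: the page-aligned address is folded down to a bucket, the
 * mapped base is stashed in page->index, and chains are threaded
 * through the otherwise-unused page->mapping pointer instead of a
 * separate list node.
 */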
3218static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
3219{
3220	a >>= PAGE_SHIFT;
3221	a ^= (a >> ilog2(MAX_RBR_RING_SIZE));
3222
3223	return (a & (MAX_RBR_RING_SIZE - 1));
3224}
3225
3226static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
3227				    struct page ***link)
3228{
3229	unsigned int h = niu_hash_rxaddr(rp, addr);
3230	struct page *p, **pp;
3231
3232	addr &= PAGE_MASK;
3233	pp = &rp->rxhash[h];
3234	for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
3235		if (p->index == addr) {
3236			*link = pp;
3237			goto found;
3238		}
3239	}
3240	BUG();
3241
3242found:
3243	return p;
3244}
3245
3246static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
3247{
3248	unsigned int h = niu_hash_rxaddr(rp, base);
3249
3250	page->index = base;
3251	page->mapping = (struct address_space *) rp->rxhash[h];
3252	rp->rxhash[h] = page;
3253}
3254
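/* Map one page and carve it into rbr_blocks_per_page receive
 * blocks.  The page reference count is bumped by blocks_per_page - 1
 * up front so that each RBR block effectively owns a reference; the
 * final reference is dropped when the last block of the page is
 * consumed on the RX path.
 */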
3255static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
3256			    gfp_t mask, int start_index)
3257{
3258	struct page *page;
3259	u64 addr;
3260	int i;
3261
3262	page = alloc_page(mask);
3263	if (!page)
3264		return -ENOMEM;
3265
3266	addr = np->ops->map_page(np->device, page, 0,
3267				 PAGE_SIZE, DMA_FROM_DEVICE);
3268
3269	niu_hash_page(rp, page, addr);
3270	if (rp->rbr_blocks_per_page > 1)
3271		atomic_add(rp->rbr_blocks_per_page - 1,
3272			   &compound_head(page)->_count);
3273
3274	for (i = 0; i < rp->rbr_blocks_per_page; i++) {
3275		__le32 *rbr = &rp->rbr[start_index + i];
3276
3277		*rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
3278		addr += rp->rbr_block_size;
3279	}
3280
3281	return 0;
3282}
3283
3284static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
3285{
3286	int index = rp->rbr_index;
3287
3288	rp->rbr_pending++;
3289	if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
3290		int err = niu_rbr_add_page(np, rp, mask, index);
3291
3292		if (unlikely(err)) {
3293			rp->rbr_pending--;
3294			return;
3295		}
3296
3297		rp->rbr_index += rp->rbr_blocks_per_page;
3298		BUG_ON(rp->rbr_index > rp->rbr_table_size);
3299		if (rp->rbr_index == rp->rbr_table_size)
3300			rp->rbr_index = 0;
3301
3302		if (rp->rbr_pending >= rp->rbr_kick_thresh) {
3303			nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
3304			rp->rbr_pending = 0;
3305		}
3306	}
3307}
3308
3309static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
3310{
3311	unsigned int index = rp->rcr_index;
3312	int num_rcr = 0;
3313
3314	rp->rx_dropped++;
3315	while (1) {
3316		struct page *page, **link;
3317		u64 addr, val;
3318		u32 rcr_size;
3319
3320		num_rcr++;
3321
3322		val = le64_to_cpup(&rp->rcr[index]);
3323		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
3324			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
3325		page = niu_find_rxpage(rp, addr, &link);
3326
3327		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
3328					 RCR_ENTRY_PKTBUFSZ_SHIFT];
3329		if ((page->index + PAGE_SIZE) - rcr_size == addr) {
3330			*link = (struct page *) page->mapping;
3331			np->ops->unmap_page(np->device, page->index,
3332					    PAGE_SIZE, DMA_FROM_DEVICE);
3333			page->index = 0;
3334			page->mapping = NULL;
3335			__free_page(page);
3336			rp->rbr_refill_pending++;
3337		}
3338
3339		index = NEXT_RCR(rp, index);
3340		if (!(val & RCR_ENTRY_MULTI))
3341			break;
3343	}
3344	rp->rcr_index = index;
3345
3346	return num_rcr;
3347}
3348
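/* Assemble one packet from its RCR entries: a packet may span
 * several entries, chained by RCR_ENTRY_MULTI, each naming one
 * receive block.  Blocks are attached to the skb as page fragments,
 * checksum offload is honoured only for clean TCP/UDP packets, and
 * consuming the last block of a page queues it for RBR refill.
 */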
3349static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
3350			      struct rx_ring_info *rp)
3351{
3352	unsigned int index = rp->rcr_index;
3353	struct rx_pkt_hdr1 *rh;
3354	struct sk_buff *skb;
3355	int len, num_rcr;
3356
3357	skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
3358	if (unlikely(!skb))
3359		return niu_rx_pkt_ignore(np, rp);
3360
3361	num_rcr = 0;
3362	while (1) {
3363		struct page *page, **link;
3364		u32 rcr_size, append_size;
3365		u64 addr, val, off;
3366
3367		num_rcr++;
3368
3369		val = le64_to_cpup(&rp->rcr[index]);
3370
3371		len = (val & RCR_ENTRY_L2_LEN) >>
3372			RCR_ENTRY_L2_LEN_SHIFT;
3373		len -= ETH_FCS_LEN;
3374
3375		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
3376			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
3377		page = niu_find_rxpage(rp, addr, &link);
3378
3379		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
3380					 RCR_ENTRY_PKTBUFSZ_SHIFT];
3381
3382		off = addr & ~PAGE_MASK;
3383		append_size = rcr_size;
3384		if (num_rcr == 1) {
3385			int ptype;
3386
3387			ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
3388			if ((ptype == RCR_PKT_TYPE_TCP ||
3389			     ptype == RCR_PKT_TYPE_UDP) &&
3390			    !(val & (RCR_ENTRY_NOPORT |
3391				     RCR_ENTRY_ERROR)))
3392				skb->ip_summed = CHECKSUM_UNNECESSARY;
3393			else
3394				skb->ip_summed = CHECKSUM_NONE;
3395		} else if (!(val & RCR_ENTRY_MULTI))
3396			append_size = len - skb->len;
3397
3398		niu_rx_skb_append(skb, page, off, append_size);
3399		if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
3400			*link = (struct page *) page->mapping;
3401			np->ops->unmap_page(np->device, page->index,
3402					    PAGE_SIZE, DMA_FROM_DEVICE);
3403			page->index = 0;
3404			page->mapping = NULL;
3405			rp->rbr_refill_pending++;
3406		} else
3407			get_page(page);
3408
3409		index = NEXT_RCR(rp, index);
3410		if (!(val & RCR_ENTRY_MULTI))
3411			break;
3413	}
3414	rp->rcr_index = index;
3415
3416	len += sizeof(*rh);
3417	len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN);
3418	__pskb_pull_tail(skb, len);
3419
3420	rh = (struct rx_pkt_hdr1 *) skb->data;
3421	if (np->dev->features & NETIF_F_RXHASH)
3422		skb->rxhash = ((u32)rh->hashval2_0 << 24 |
3423			       (u32)rh->hashval2_1 << 16 |
3424			       (u32)rh->hashval1_1 << 8 |
3425			       (u32)rh->hashval1_2 << 0);
3426	skb_pull(skb, sizeof(*rh));
3427
3428	rp->rx_packets++;
3429	rp->rx_bytes += skb->len;
3430
3431	skb->protocol = eth_type_trans(skb, np->dev);
3432	skb_record_rx_queue(skb, rp->rx_channel);
3433	napi_gro_receive(napi, skb);
3434
3435	return num_rcr;
3436}
3437
3438static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
3439{
3440	int blocks_per_page = rp->rbr_blocks_per_page;
3441	int err, index = rp->rbr_index;
3442
3443	err = 0;
3444	while (index < (rp->rbr_table_size - blocks_per_page)) {
3445		err = niu_rbr_add_page(np, rp, mask, index);
3446		if (err)
3447			break;
3448
3449		index += blocks_per_page;
3450	}
3451
3452	rp->rbr_index = index;
3453	return err;
3454}
3455
3456static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
3457{
3458	int i;
3459
3460	for (i = 0; i < MAX_RBR_RING_SIZE; i++) {
3461		struct page *page;
3462
3463		page = rp->rxhash[i];
3464		while (page) {
3465			struct page *next = (struct page *) page->mapping;
3466			u64 base = page->index;
3467
3468			np->ops->unmap_page(np->device, base, PAGE_SIZE,
3469					    DMA_FROM_DEVICE);
3470			page->index = 0;
3471			page->mapping = NULL;
3472
3473			__free_page(page);
3474
3475			page = next;
3476		}
3477	}
3478
3479	for (i = 0; i < rp->rbr_table_size; i++)
3480		rp->rbr[i] = cpu_to_le32(0);
3481	rp->rbr_index = 0;
3482}
3483
3484static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
3485{
3486	struct tx_buff_info *tb = &rp->tx_buffs[idx];
3487	struct sk_buff *skb = tb->skb;
3488	struct tx_pkt_hdr *tp;
3489	u64 tx_flags;
3490	int i, len;
3491
3492	tp = (struct tx_pkt_hdr *) skb->data;
3493	tx_flags = le64_to_cpup(&tp->flags);
3494
3495	rp->tx_packets++;
3496	rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
3497			 ((tx_flags & TXHDR_PAD) / 2));
3498
3499	len = skb_headlen(skb);
3500	np->ops->unmap_single(np->device, tb->mapping,
3501			      len, DMA_TO_DEVICE);
3502
3503	if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
3504		rp->mark_pending--;
3505
3506	tb->skb = NULL;
3507	do {
3508		idx = NEXT_TX(rp, idx);
3509		len -= MAX_TX_DESC_LEN;
3510	} while (len > 0);
3511
3512	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3513		tb = &rp->tx_buffs[idx];
3514		BUG_ON(tb->skb != NULL);
3515		np->ops->unmap_page(np->device, tb->mapping,
3516				    skb_shinfo(skb)->frags[i].size,
3517				    DMA_TO_DEVICE);
3518		idx = NEXT_TX(rp, idx);
3519	}
3520
3521	dev_kfree_skb(skb);
3522
3523	return idx;
3524}
3525
3526#define NIU_TX_WAKEUP_THRESH(rp)		((rp)->pending / 4)
3527
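/* TX completion is tracked via the hardware packet counter in TX_CS
 * rather than per-descriptor done bits: the number of newly completed
 * packets is the counter delta since the last service, masked to the
 * counter field width so wraparound is handled.  For a 16-bit field,
 * e.g. last_pkt_cnt = 0xfff0 and a current count of 0x0004 gives
 * (0x0004 - 0xfff0) & 0xffff = 20 completed packets.
 */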
3528static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
3529{
3530	struct netdev_queue *txq;
3531	u16 pkt_cnt, tmp;
3532	int cons, index;
3533	u64 cs;
3534
3535	index = (rp - np->tx_rings);
3536	txq = netdev_get_tx_queue(np->dev, index);
3537
3538	cs = rp->tx_cs;
3539	if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
3540		goto out;
3541
3542	tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
3543	pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
3544		(TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);
3545
3546	rp->last_pkt_cnt = tmp;
3547
3548	cons = rp->cons;
3549
3550	netif_printk(np, tx_done, KERN_DEBUG, np->dev,
3551		     "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
3552
3553	while (pkt_cnt--)
3554		cons = release_tx_packet(np, rp, cons);
3555
3556	rp->cons = cons;
3557	smp_mb();
3558
3559out:
3560	if (unlikely(netif_tx_queue_stopped(txq) &&
3561		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
3562		__netif_tx_lock(txq, smp_processor_id());
3563		if (netif_tx_queue_stopped(txq) &&
3564		    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
3565			netif_tx_wake_queue(txq);
3566		__netif_tx_unlock(txq);
3567	}
3568}
3569
3570static inline void niu_sync_rx_discard_stats(struct niu *np,
3571					     struct rx_ring_info *rp,
3572					     const int limit)
3573{
3574	/* This elaborate scheme is needed for reading the RX discard
3575	 * counters, as they are only 16-bit and can overflow quickly,
3576	 * and because the overflow indication bit is not usable as
3577	 * the counter value does not wrap, but remains at max value
3578	 * 0xFFFF.
3579	 *
3580	 * In theory and in practice counters can be lost in between
3581	 * reading nr64() and clearing the counter nw64().  For this
3582	 * reason, the number of counter clearings nw64() is
3583	 * limited/reduced through the limit parameter.
3584	 */
3585	int rx_channel = rp->rx_channel;
3586	u32 misc, wred;
3587
3588	/* RXMISC (Receive Miscellaneous Discard Count) covers the
3589	 * following discard events: IPP (Input Port Process) drops,
3590	 * FFLP/TCAM drops, a full RCR (Receive Completion Ring), and
3591	 * an empty RBR (Receive Block Ring) prefetch buffer.
3592	 */
3593	misc = nr64(RXMISC(rx_channel));
3594	if (unlikely((misc & RXMISC_COUNT) > limit)) {
3595		nw64(RXMISC(rx_channel), 0);
3596		rp->rx_errors += misc & RXMISC_COUNT;
3597
3598		if (unlikely(misc & RXMISC_OFLOW))
3599			dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n",
3600				rx_channel);
3601
3602		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
3603			     "rx-%d: MISC drop=%u over=%u\n",
3604			     rx_channel, misc, misc-limit);
3605	}
3606
3607	/* WRED (Weighted Random Early Discard) by hardware */
3608	wred = nr64(RED_DIS_CNT(rx_channel));
3609	if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) {
3610		nw64(RED_DIS_CNT(rx_channel), 0);
3611		rp->rx_dropped += wred & RED_DIS_CNT_COUNT;
3612
3613		if (unlikely(wred & RED_DIS_CNT_OFLOW))
3614			dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel);
3615
3616		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
3617			     "rx-%d: WRED drop=%u over=%u\n",
3618			     rx_channel, wred, wred-limit);
3619	}
3620}
3621
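/* Service up to @budget packets on one RX channel: read the RCR
 * queue length from RCRSTAT_A, process that many completions, top up
 * the RBR if enough refills are pending, then acknowledge the
 * packets/pointers read through RX_DMA_CTL_STAT (with MEX set to
 * re-enable mailbox updates).
 */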
3622static int niu_rx_work(struct napi_struct *napi, struct niu *np,
3623		       struct rx_ring_info *rp, int budget)
3624{
3625	int qlen, rcr_done = 0, work_done = 0;
3626	struct rxdma_mailbox *mbox = rp->mbox;
3627	u64 stat;
3628
3629	stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
3630	qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
3631	mbox->rx_dma_ctl_stat = 0;
3632	mbox->rcrstat_a = 0;
3633
3634	netif_printk(np, rx_status, KERN_DEBUG, np->dev,
3635		     "%s(chan[%d]), stat[%llx] qlen=%d\n",
3636		     __func__, rp->rx_channel, (unsigned long long)stat, qlen);
3637
3638	rcr_done = work_done = 0;
3639	qlen = min(qlen, budget);
3640	while (work_done < qlen) {
3641		rcr_done += niu_process_rx_pkt(napi, np, rp);
3642		work_done++;
3643	}
3644
3645	if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
3646		unsigned int i;
3647
3648		for (i = 0; i < rp->rbr_refill_pending; i++)
3649			niu_rbr_refill(np, rp, GFP_ATOMIC);
3650		rp->rbr_refill_pending = 0;
3651	}
3652
3653	stat = (RX_DMA_CTL_STAT_MEX |
3654		((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
3655		((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));
3656
3657	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);
3658
3659	/* Only sync discard stats when qlen indicates potential for drops */
3660	if (qlen > 10)
3661		niu_sync_rx_discard_stats(np, rp, 0x7FFF);
3662
3663	return work_done;
3664}
3665
3666static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
3667{
3668	u64 v0 = lp->v0;
3669	u32 tx_vec = (v0 >> 32);
3670	u32 rx_vec = (v0 & 0xffffffff);
3671	int i, work_done = 0;
3672
3673	netif_printk(np, intr, KERN_DEBUG, np->dev,
3674		     "%s() v0[%016llx]\n", __func__, (unsigned long long)v0);
3675
3676	for (i = 0; i < np->num_tx_rings; i++) {
3677		struct tx_ring_info *rp = &np->tx_rings[i];
3678		if (tx_vec & (1 << rp->tx_channel))
3679			niu_tx_work(np, rp);
3680		nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
3681	}
3682
3683	for (i = 0; i < np->num_rx_rings; i++) {
3684		struct rx_ring_info *rp = &np->rx_rings[i];
3685
3686		if (rx_vec & (1 << rp->rx_channel)) {
3687			int this_work_done;
3688
3689			this_work_done = niu_rx_work(&lp->napi, np, rp,
3690						     budget);
3691
3692			budget -= this_work_done;
3693			work_done += this_work_done;
3694		}
3695		nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
3696	}
3697
3698	return work_done;
3699}
3700
3701static int niu_poll(struct napi_struct *napi, int budget)
3702{
3703	struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
3704	struct niu *np = lp->np;
3705	int work_done;
3706
3707	work_done = niu_poll_core(np, lp, budget);
3708
3709	if (work_done < budget) {
3710		napi_complete(napi);
3711		niu_ldg_rearm(np, lp, 1);
3712	}
3713	return work_done;
3714}
3715
3716static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
3717				  u64 stat)
3718{
3719	netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel);
3720
3721	if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
3722		pr_cont("RBR_TMOUT ");
3723	if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
3724		pr_cont("RSP_CNT ");
3725	if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
3726		pr_cont("BYTE_EN_BUS ");
3727	if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
3728		pr_cont("RSP_DAT ");
3729	if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
3730		pr_cont("RCR_ACK ");
3731	if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
3732		pr_cont("RCR_SHA_PAR ");
3733	if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
3734		pr_cont("RBR_PRE_PAR ");
3735	if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
3736		pr_cont("CONFIG ");
3737	if (stat & RX_DMA_CTL_STAT_RCRINCON)
3738		pr_cont("RCRINCON ");
3739	if (stat & RX_DMA_CTL_STAT_RCRFULL)
3740		pr_cont("RCRFULL ");
3741	if (stat & RX_DMA_CTL_STAT_RBRFULL)
3742		pr_cont("RBRFULL ");
3743	if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
3744		pr_cont("RBRLOGPAGE ");
3745	if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
3746		pr_cont("CFIGLOGPAGE ");
3747	if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
3748		pr_cont("DC_FIFO ");
3749
3750	pr_cont(")\n");
3751}
3752
3753static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
3754{
3755	u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
3756	int err = 0;
3757
3759	if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
3760		    RX_DMA_CTL_STAT_PORT_FATAL))
3761		err = -EINVAL;
3762
3763	if (err) {
3764		netdev_err(np->dev, "RX channel %u error, stat[%llx]\n",
3765			   rp->rx_channel,
3766			   (unsigned long long) stat);
3767
3768		niu_log_rxchan_errors(np, rp, stat);
3769	}
3770
3771	nw64(RX_DMA_CTL_STAT(rp->rx_channel),
3772	     stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);
3773
3774	return err;
3775}
3776
3777static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
3778				  u64 cs)
3779{
3780	netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel);
3781
3782	if (cs & TX_CS_MBOX_ERR)
3783		pr_cont("MBOX ");
3784	if (cs & TX_CS_PKT_SIZE_ERR)
3785		pr_cont("PKT_SIZE ");
3786	if (cs & TX_CS_TX_RING_OFLOW)
3787		pr_cont("TX_RING_OFLOW ");
3788	if (cs & TX_CS_PREF_BUF_PAR_ERR)
3789		pr_cont("PREF_BUF_PAR ");
3790	if (cs & TX_CS_NACK_PREF)
3791		pr_cont("NACK_PREF ");
3792	if (cs & TX_CS_NACK_PKT_RD)
3793		pr_cont("NACK_PKT_RD ");
3794	if (cs & TX_CS_CONF_PART_ERR)
3795		pr_cont("CONF_PART ");
3796	if (cs & TX_CS_PKT_PRT_ERR)
3797		pr_cont("PKT_PTR ");
3798
3799	pr_cont(")\n");
3800}
3801
3802static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
3803{
3804	u64 cs, logh, logl;
3805
3806	cs = nr64(TX_CS(rp->tx_channel));
3807	logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
3808	logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
3809
3810	netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n",
3811		   rp->tx_channel,
3812		   (unsigned long long)cs,
3813		   (unsigned long long)logh,
3814		   (unsigned long long)logl);
3815
3816	niu_log_txchan_errors(np, rp, cs);
3817
3818	return -ENODEV;
3819}
3820
3821static int niu_mif_interrupt(struct niu *np)
3822{
3823	u64 mif_status = nr64(MIF_STATUS);
3824	int phy_mdint = 0;
3825
3826	if (np->flags & NIU_FLAGS_XMAC) {
3827		u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);
3828
3829		if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT)
3830			phy_mdint = 1;
3831	}
3832
3833	netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n",
3834		   (unsigned long long)mif_status, phy_mdint);
3835
3836	return -ENODEV;
3837}
3838
3839static void niu_xmac_interrupt(struct niu *np)
3840{
3841	struct niu_xmac_stats *mp = &np->mac_stats.xmac;
3842	u64 val;
3843
3844	val = nr64_mac(XTXMAC_STATUS);
3845	if (val & XTXMAC_STATUS_FRAME_CNT_EXP)
3846		mp->tx_frames += TXMAC_FRM_CNT_COUNT;
3847	if (val & XTXMAC_STATUS_BYTE_CNT_EXP)
3848		mp->tx_bytes += TXMAC_BYTE_CNT_COUNT;
3849	if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR)
3850		mp->tx_fifo_errors++;
3851	if (val & XTXMAC_STATUS_TXMAC_OFLOW)
3852		mp->tx_overflow_errors++;
3853	if (val & XTXMAC_STATUS_MAX_PSIZE_ERR)
3854		mp->tx_max_pkt_size_errors++;
3855	if (val & XTXMAC_STATUS_TXMAC_UFLOW)
3856		mp->tx_underflow_errors++;
3857
3858	val = nr64_mac(XRXMAC_STATUS);
3859	if (val & XRXMAC_STATUS_LCL_FLT_STATUS)
3860		mp->rx_local_faults++;
3861	if (val & XRXMAC_STATUS_RFLT_DET)
3862		mp->rx_remote_faults++;
3863	if (val & XRXMAC_STATUS_LFLT_CNT_EXP)
3864		mp->rx_link_faults += LINK_FAULT_CNT_COUNT;
3865	if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP)
3866		mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT;
3867	if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP)
3868		mp->rx_frags += RXMAC_FRAG_CNT_COUNT;
3869	if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP)
3870		mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
3871	if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
3872		mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
3875	if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
3876		mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
3877	if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
3878		mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT;
3879	if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP)
3880		mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT;
3881	if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP)
3882		mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT;
3883	if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP)
3884		mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT;
3885	if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP)
3886		mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT;
3887	if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP)
3888		mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT;
3889	if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP)
3890		mp->rx_octets += RXMAC_BT_CNT_COUNT;
3891	if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP)
3892		mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT;
3893	if (val & XRXMAC_STATUS_LENERR_CNT_EXP)
3894		mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT;
3895	if (val & XRXMAC_STATUS_CRCERR_CNT_EXP)
3896		mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT;
3897	if (val & XRXMAC_STATUS_RXUFLOW)
3898		mp->rx_underflows++;
3899	if (val & XRXMAC_STATUS_RXOFLOW)
3900		mp->rx_overflows++;
3901
3902	val = nr64_mac(XMAC_FC_STAT);
3903	if (val & XMAC_FC_STAT_TX_MAC_NPAUSE)
3904		mp->pause_off_state++;
3905	if (val & XMAC_FC_STAT_TX_MAC_PAUSE)
3906		mp->pause_on_state++;
3907	if (val & XMAC_FC_STAT_RX_MAC_RPAUSE)
3908		mp->pause_received++;
3909}
3910
3911static void niu_bmac_interrupt(struct niu *np)
3912{
3913	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
3914	u64 val;
3915
3916	val = nr64_mac(BTXMAC_STATUS);
3917	if (val & BTXMAC_STATUS_UNDERRUN)
3918		mp->tx_underflow_errors++;
3919	if (val & BTXMAC_STATUS_MAX_PKT_ERR)
3920		mp->tx_max_pkt_size_errors++;
3921	if (val & BTXMAC_STATUS_BYTE_CNT_EXP)
3922		mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT;
3923	if (val & BTXMAC_STATUS_FRAME_CNT_EXP)
3924		mp->tx_frames += BTXMAC_FRM_CNT_COUNT;
3925
3926	val = nr64_mac(BRXMAC_STATUS);
3927	if (val & BRXMAC_STATUS_OVERFLOW)
3928		mp->rx_overflows++;
3929	if (val & BRXMAC_STATUS_FRAME_CNT_EXP)
3930		mp->rx_frames += BRXMAC_FRAME_CNT_COUNT;
3931	if (val & BRXMAC_STATUS_ALIGN_ERR_EXP)
3932		mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
3933	if (val & BRXMAC_STATUS_CRC_ERR_EXP)
3934		mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
3935	if (val & BRXMAC_STATUS_LEN_ERR_EXP)
3936		mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT;
3937
3938	val = nr64_mac(BMAC_CTRL_STATUS);
3939	if (val & BMAC_CTRL_STATUS_NOPAUSE)
3940		mp->pause_off_state++;
3941	if (val & BMAC_CTRL_STATUS_PAUSE)
3942		mp->pause_on_state++;
3943	if (val & BMAC_CTRL_STATUS_PAUSE_RECV)
3944		mp->pause_received++;
3945}
3946
3947static int niu_mac_interrupt(struct niu *np)
3948{
3949	if (np->flags & NIU_FLAGS_XMAC)
3950		niu_xmac_interrupt(np);
3951	else
3952		niu_bmac_interrupt(np);
3953
3954	return 0;
3955}
3956
3957static void niu_log_device_error(struct niu *np, u64 stat)
3958{
3959	netdev_err(np->dev, "Core device errors ( ");
3960
3961	if (stat & SYS_ERR_MASK_META2)
3962		pr_cont("META2 ");
3963	if (stat & SYS_ERR_MASK_META1)
3964		pr_cont("META1 ");
3965	if (stat & SYS_ERR_MASK_PEU)
3966		pr_cont("PEU ");
3967	if (stat & SYS_ERR_MASK_TXC)
3968		pr_cont("TXC ");
3969	if (stat & SYS_ERR_MASK_RDMC)
3970		pr_cont("RDMC ");
3971	if (stat & SYS_ERR_MASK_TDMC)
3972		pr_cont("TDMC ");
3973	if (stat & SYS_ERR_MASK_ZCP)
3974		pr_cont("ZCP ");
3975	if (stat & SYS_ERR_MASK_FFLP)
3976		pr_cont("FFLP ");
3977	if (stat & SYS_ERR_MASK_IPP)
3978		pr_cont("IPP ");
3979	if (stat & SYS_ERR_MASK_MAC)
3980		pr_cont("MAC ");
3981	if (stat & SYS_ERR_MASK_SMX)
3982		pr_cont("SMX ");
3983
3984	pr_cont(")\n");
3985}
3986
3987static int niu_device_error(struct niu *np)
3988{
3989	u64 stat = nr64(SYS_ERR_STAT);
3990
3991	netdev_err(np->dev, "Core device error, stat[%llx]\n",
3992		   (unsigned long long)stat);
3993
3994	niu_log_device_error(np, stat);
3995
3996	return -ENODEV;
3997}
3998
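/* Slow path for error interrupts.  As decoded below, the low 32 bits
 * of v1 carry per-channel RX error flags and bits 32-62 per-channel
 * TX error flags; bit 63 of v0/v1 indicates a MIF interrupt, and v2
 * carries the MAC (0x01ef) and core device (0x0210) summary bits.
 * If any handler reports a fatal error, interrupts are disabled.
 */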
3999static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp,
4000			      u64 v0, u64 v1, u64 v2)
4001{
4003	int i, err = 0;
4004
4005	lp->v0 = v0;
4006	lp->v1 = v1;
4007	lp->v2 = v2;
4008
4009	if (v1 & 0x00000000ffffffffULL) {
4010		u32 rx_vec = (v1 & 0xffffffff);
4011
4012		for (i = 0; i < np->num_rx_rings; i++) {
4013			struct rx_ring_info *rp = &np->rx_rings[i];
4014
4015			if (rx_vec & (1 << rp->rx_channel)) {
4016				int r = niu_rx_error(np, rp);
4017				if (r) {
4018					err = r;
4019				} else {
4020					if (!v0)
4021						nw64(RX_DMA_CTL_STAT(rp->rx_channel),
4022						     RX_DMA_CTL_STAT_MEX);
4023				}
4024			}
4025		}
4026	}
4027	if (v1 & 0x7fffffff00000000ULL) {
4028		u32 tx_vec = (v1 >> 32) & 0x7fffffff;
4029
4030		for (i = 0; i < np->num_tx_rings; i++) {
4031			struct tx_ring_info *rp = &np->tx_rings[i];
4032
4033			if (tx_vec & (1 << rp->tx_channel)) {
4034				int r = niu_tx_error(np, rp);
4035				if (r)
4036					err = r;
4037			}
4038		}
4039	}
4040	if ((v0 | v1) & 0x8000000000000000ULL) {
4041		int r = niu_mif_interrupt(np);
4042		if (r)
4043			err = r;
4044	}
4045	if (v2) {
4046		if (v2 & 0x01ef) {
4047			int r = niu_mac_interrupt(np);
4048			if (r)
4049				err = r;
4050		}
4051		if (v2 & 0x0210) {
4052			int r = niu_device_error(np);
4053			if (r)
4054				err = r;
4055		}
4056	}
4057
4058	if (err)
4059		niu_enable_interrupts(np, 0);
4060
4061	return err;
4062}
4063
4064static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
4065			    int ldn)
4066{
4067	struct rxdma_mailbox *mbox = rp->mbox;
4068	u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
4069
4070	stat_write = (RX_DMA_CTL_STAT_RCRTHRES |
4071		      RX_DMA_CTL_STAT_RCRTO);
4072	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
4073
4074	netif_printk(np, intr, KERN_DEBUG, np->dev,
4075		     "%s() stat[%llx]\n", __func__, (unsigned long long)stat);
4076}
4077
4078static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
4079			    int ldn)
4080{
4081	rp->tx_cs = nr64(TX_CS(rp->tx_channel));
4082
4083	netif_printk(np, intr, KERN_DEBUG, np->dev,
4084		     "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs);
4085}
4086
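/* Fast path: before NAPI is scheduled, mask (LD_IM0_MASK) the
 * interrupt of every RX/TX logical device mapped to this group and
 * latch the status of the channels flagged in v0 for the poll loop;
 * niu_poll_core() unmasks each one again as it is serviced.
 */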
4087static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
4088{
4089	struct niu_parent *parent = np->parent;
4090	u32 rx_vec, tx_vec;
4091	int i;
4092
4093	tx_vec = (v0 >> 32);
4094	rx_vec = (v0 & 0xffffffff);
4095
4096	for (i = 0; i < np->num_rx_rings; i++) {
4097		struct rx_ring_info *rp = &np->rx_rings[i];
4098		int ldn = LDN_RXDMA(rp->rx_channel);
4099
4100		if (parent->ldg_map[ldn] != ldg)
4101			continue;
4102
4103		nw64(LD_IM0(ldn), LD_IM0_MASK);
4104		if (rx_vec & (1 << rp->rx_channel))
4105			niu_rxchan_intr(np, rp, ldn);
4106	}
4107
4108	for (i = 0; i < np->num_tx_rings; i++) {
4109		struct tx_ring_info *rp = &np->tx_rings[i];
4110		int ldn = LDN_TXDMA(rp->tx_channel);
4111
4112		if (parent->ldg_map[ldn] != ldg)
4113			continue;
4114
4115		nw64(LD_IM0(ldn), LD_IM0_MASK);
4116		if (tx_vec & (1 << rp->tx_channel))
4117			niu_txchan_intr(np, rp, ldn);
4118	}
4119}
4120
4121static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
4122			      u64 v0, u64 v1, u64 v2)
4123{
4124	if (likely(napi_schedule_prep(&lp->napi))) {
4125		lp->v0 = v0;
4126		lp->v1 = v1;
4127		lp->v2 = v2;
4128		__niu_fastpath_interrupt(np, lp->ldg_num, v0);
4129		__napi_schedule(&lp->napi);
4130	}
4131}
4132
4133static irqreturn_t niu_interrupt(int irq, void *dev_id)
4134{
4135	struct niu_ldg *lp = dev_id;
4136	struct niu *np = lp->np;
4137	int ldg = lp->ldg_num;
4138	unsigned long flags;
4139	u64 v0, v1, v2;
4140
4141	if (netif_msg_intr(np))
4142		printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)",
4143		       __func__, lp, ldg);
4144
4145	spin_lock_irqsave(&np->lock, flags);
4146
4147	v0 = nr64(LDSV0(ldg));
4148	v1 = nr64(LDSV1(ldg));
4149	v2 = nr64(LDSV2(ldg));
4150
4151	if (netif_msg_intr(np))
4152		pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n",
4153		       (unsigned long long) v0,
4154		       (unsigned long long) v1,
4155		       (unsigned long long) v2);
4156
4157	if (unlikely(!v0 && !v1 && !v2)) {
4158		spin_unlock_irqrestore(&np->lock, flags);
4159		return IRQ_NONE;
4160	}
4161
4162	if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) {
4163		int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
4164		if (err)
4165			goto out;
4166	}
4167	if (likely(v0 & ~((u64)1 << LDN_MIF)))
4168		niu_schedule_napi(np, lp, v0, v1, v2);
4169	else
4170		niu_ldg_rearm(np, lp, 1);
4171out:
4172	spin_unlock_irqrestore(&np->lock, flags);
4173
4174	return IRQ_HANDLED;
4175}
4176
4177static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp)
4178{
4179	if (rp->mbox) {
4180		np->ops->free_coherent(np->device,
4181				       sizeof(struct rxdma_mailbox),
4182				       rp->mbox, rp->mbox_dma);
4183		rp->mbox = NULL;
4184	}
4185	if (rp->rcr) {
4186		np->ops->free_coherent(np->device,
4187				       MAX_RCR_RING_SIZE * sizeof(__le64),
4188				       rp->rcr, rp->rcr_dma);
4189		rp->rcr = NULL;
4190		rp->rcr_table_size = 0;
4191		rp->rcr_index = 0;
4192	}
4193	if (rp->rbr) {
4194		niu_rbr_free(np, rp);
4195
4196		np->ops->free_coherent(np->device,
4197				       MAX_RBR_RING_SIZE * sizeof(__le32),
4198				       rp->rbr, rp->rbr_dma);
4199		rp->rbr = NULL;
4200		rp->rbr_table_size = 0;
4201		rp->rbr_index = 0;
4202	}
4203	kfree(rp->rxhash);
4204	rp->rxhash = NULL;
4205}
4206
4207static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp)
4208{
4209	if (rp->mbox) {
4210		np->ops->free_coherent(np->device,
4211				       sizeof(struct txdma_mailbox),
4212				       rp->mbox, rp->mbox_dma);
4213		rp->mbox = NULL;
4214	}
4215	if (rp->descr) {
4216		int i;
4217
4218		for (i = 0; i < MAX_TX_RING_SIZE; i++) {
4219			if (rp->tx_buffs[i].skb)
4220				(void) release_tx_packet(np, rp, i);
4221		}
4222
4223		np->ops->free_coherent(np->device,
4224				       MAX_TX_RING_SIZE * sizeof(__le64),
4225				       rp->descr, rp->descr_dma);
4226		rp->descr = NULL;
4227		rp->pending = 0;
4228		rp->prod = 0;
4229		rp->cons = 0;
4230		rp->wrap_bit = 0;
4231	}
4232}
4233
4234static void niu_free_channels(struct niu *np)
4235{
4236	int i;
4237
4238	if (np->rx_rings) {
4239		for (i = 0; i < np->num_rx_rings; i++) {
4240			struct rx_ring_info *rp = &np->rx_rings[i];
4241
4242			niu_free_rx_ring_info(np, rp);
4243		}
4244		kfree(np->rx_rings);
4245		np->rx_rings = NULL;
4246		np->num_rx_rings = 0;
4247	}
4248
4249	if (np->tx_rings) {
4250		for (i = 0; i < np->num_tx_rings; i++) {
4251			struct tx_ring_info *rp = &np->tx_rings[i];
4252
4253			niu_free_tx_ring_info(np, rp);
4254		}
4255		kfree(np->tx_rings);
4256		np->tx_rings = NULL;
4257		np->num_tx_rings = 0;
4258	}
4259}
4260
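/* RX ring allocation.  The hardware expects the mailbox, RCR, and RBR
 * tables to sit on 64-byte boundaries, hence the alignment checks on
 * each coherent allocation below.
 */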
4261static int niu_alloc_rx_ring_info(struct niu *np,
4262				  struct rx_ring_info *rp)
4263{
4264	BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);
4265
4266	rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *),
4267			     GFP_KERNEL);
4268	if (!rp->rxhash)
4269		return -ENOMEM;
4270
4271	rp->mbox = np->ops->alloc_coherent(np->device,
4272					   sizeof(struct rxdma_mailbox),
4273					   &rp->mbox_dma, GFP_KERNEL);
4274	if (!rp->mbox)
4275		return -ENOMEM;
4276	if ((unsigned long)rp->mbox & (64UL - 1)) {
4277		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n",
4278			   rp->mbox);
4279		return -EINVAL;
4280	}
4281
4282	rp->rcr = np->ops->alloc_coherent(np->device,
4283					  MAX_RCR_RING_SIZE * sizeof(__le64),
4284					  &rp->rcr_dma, GFP_KERNEL);
4285	if (!rp->rcr)
4286		return -ENOMEM;
4287	if ((unsigned long)rp->rcr & (64UL - 1)) {
4288		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n",
4289			   rp->rcr);
4290		return -EINVAL;
4291	}
4292	rp->rcr_table_size = MAX_RCR_RING_SIZE;
4293	rp->rcr_index = 0;
4294
4295	rp->rbr = np->ops->alloc_coherent(np->device,
4296					  MAX_RBR_RING_SIZE * sizeof(__le32),
4297					  &rp->rbr_dma, GFP_KERNEL);
4298	if (!rp->rbr)
4299		return -ENOMEM;
4300	if ((unsigned long)rp->rbr & (64UL - 1)) {
4301		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n",
4302			   rp->rbr);
4303		return -EINVAL;
4304	}
4305	rp->rbr_table_size = MAX_RBR_RING_SIZE;
4306	rp->rbr_index = 0;
4307	rp->rbr_pending = 0;
4308
4309	return 0;
4310}
4311
4312static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
4313{
4314	int mtu = np->dev->mtu;
4315
4316	/* These values are recommended by the HW designers for fair
4317	 * utilization of DRR amongst the rings.
4318	 */
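	/* E.g. a 1500-byte MTU gives a 1532-byte burst, while a
	 * 9000-byte jumbo MTU is clamped to the 4096-byte ceiling.
	 */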
4319	rp->max_burst = mtu + 32;
4320	if (rp->max_burst > 4096)
4321		rp->max_burst = 4096;
4322}
4323
4324static int niu_alloc_tx_ring_info(struct niu *np,
4325				  struct tx_ring_info *rp)
4326{
4327	BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64);
4328
4329	rp->mbox = np->ops->alloc_coherent(np->device,
4330					   sizeof(struct txdma_mailbox),
4331					   &rp->mbox_dma, GFP_KERNEL);
4332	if (!rp->mbox)
4333		return -ENOMEM;
4334	if ((unsigned long)rp->mbox & (64UL - 1)) {
4335		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n",
4336			   rp->mbox);
4337		return -EINVAL;
4338	}
4339
4340	rp->descr = np->ops->alloc_coherent(np->device,
4341					    MAX_TX_RING_SIZE * sizeof(__le64),
4342					    &rp->descr_dma, GFP_KERNEL);
4343	if (!rp->descr)
4344		return -ENOMEM;
4345	if ((unsigned long)rp->descr & (64UL - 1)) {
4346		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n",
4347			   rp->descr);
4348		return -EINVAL;
4349	}
4350
4351	rp->pending = MAX_TX_RING_SIZE;
4352	rp->prod = 0;
4353	rp->cons = 0;
4354	rp->wrap_bit = 0;
4355
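	/* Set the MARK bit (which asks the chip to report TX status)
	 * roughly once per quarter of the ring.
	 */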
4356	rp->mark_freq = rp->pending / 4;
4357
4358	niu_set_max_burst(np, rp);
4359
4360	return 0;
4361}
4362
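/* Choose the RBR geometry: the block size is one page, capped at the
 * largest size the hardware can encode (32K, i.e. bss <= 15), and the
 * four RBR buffer-size tiers run from 256 bytes up to a full block,
 * with the middle tier widened when a jumbo MTU is in use.
 */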
4363static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
4364{
4365	u16 bss;
4366
4367	bss = min(PAGE_SHIFT, 15);
4368
4369	rp->rbr_block_size = 1 << bss;
4370	rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);
4371
4372	rp->rbr_sizes[0] = 256;
4373	rp->rbr_sizes[1] = 1024;
4374	if (np->dev->mtu > ETH_DATA_LEN) {
4375		switch (PAGE_SIZE) {
4376		case 4 * 1024:
4377			rp->rbr_sizes[2] = 4096;
4378			break;
4379
4380		default:
4381			rp->rbr_sizes[2] = 8192;
4382			break;
4383		}
4384	} else {
4385		rp->rbr_sizes[2] = 2048;
4386	}
4387	rp->rbr_sizes[3] = rp->rbr_block_size;
4388}
4389
4390static int niu_alloc_channels(struct niu *np)
4391{
4392	struct niu_parent *parent = np->parent;
4393	int first_rx_channel, first_tx_channel;
4394	int i, port, err;
4395
4396	port = np->port;
4397	first_rx_channel = first_tx_channel = 0;
4398	for (i = 0; i < port; i++) {
4399		first_rx_channel += parent->rxchan_per_port[i];
4400		first_tx_channel += parent->txchan_per_port[i];
4401	}
4402
4403	np->num_rx_rings = parent->rxchan_per_port[port];
4404	np->num_tx_rings = parent->txchan_per_port[port];
4405
4406	np->dev->real_num_tx_queues = np->num_tx_rings;
4407
4408	np->rx_rings = kzalloc(np->num_rx_rings * sizeof(struct rx_ring_info),
4409			       GFP_KERNEL);
4410	err = -ENOMEM;
4411	if (!np->rx_rings)
4412		goto out_err;
4413
4414	for (i = 0; i < np->num_rx_rings; i++) {
4415		struct rx_ring_info *rp = &np->rx_rings[i];
4416
4417		rp->np = np;
4418		rp->rx_channel = first_rx_channel + i;
4419
4420		err = niu_alloc_rx_ring_info(np, rp);
4421		if (err)
4422			goto out_err;
4423
4424		niu_size_rbr(np, rp);
4425
4426		rp->nonsyn_window = 64;
4427		rp->nonsyn_threshold = rp->rcr_table_size - 64;
4428		rp->syn_window = 64;
4429		rp->syn_threshold = rp->rcr_table_size - 64;
4430		rp->rcr_pkt_threshold = 16;
4431		rp->rcr_timeout = 8;
4432		rp->rbr_kick_thresh = RBR_REFILL_MIN;
4433		if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page)
4434			rp->rbr_kick_thresh = rp->rbr_blocks_per_page;
4435
4436		err = niu_rbr_fill(np, rp, GFP_KERNEL);
4437		if (err)
4438			goto out_err;
4439	}
4440
4441	np->tx_rings = kzalloc(np->num_tx_rings * sizeof(struct tx_ring_info),
4442			       GFP_KERNEL);
4443	err = -ENOMEM;
4444	if (!np->tx_rings)
4445		goto out_err;
4446
4447	for (i = 0; i < np->num_tx_rings; i++) {
4448		struct tx_ring_info *rp = &np->tx_rings[i];
4449
4450		rp->np = np;
4451		rp->tx_channel = first_tx_channel + i;
4452
4453		err = niu_alloc_tx_ring_info(np, rp);
4454		if (err)
4455			goto out_err;
4456	}
4457
4458	return 0;
4459
4460out_err:
4461	niu_free_channels(np);
4462	return err;
4463}
4464
4465static int niu_tx_cs_sng_poll(struct niu *np, int channel)
4466{
4467	int limit = 1000;
4468
4469	while (--limit > 0) {
4470		u64 val = nr64(TX_CS(channel));
4471		if (val & TX_CS_SNG_STATE)
4472			return 0;
4473	}
4474	return -ENODEV;
4475}
4476
4477static int niu_tx_channel_stop(struct niu *np, int channel)
4478{
4479	u64 val = nr64(TX_CS(channel));
4480
4481	val |= TX_CS_STOP_N_GO;
4482	nw64(TX_CS(channel), val);
4483
4484	return niu_tx_cs_sng_poll(np, channel);
4485}
4486
4487static int niu_tx_cs_reset_poll(struct niu *np, int channel)
4488{
4489	int limit = 1000;
4490
4491	while (--limit > 0) {
4492		u64 val = nr64(TX_CS(channel));
4493		if (!(val & TX_CS_RST))
4494			return 0;
4495	}
4496	return -ENODEV;
4497}
4498
4499static int niu_tx_channel_reset(struct niu *np, int channel)
4500{
4501	u64 val = nr64(TX_CS(channel));
4502	int err;
4503
4504	val |= TX_CS_RST;
4505	nw64(TX_CS(channel), val);
4506
4507	err = niu_tx_cs_reset_poll(np, channel);
4508	if (!err)
4509		nw64(TX_RING_KICK(channel), 0);
4510
4511	return err;
4512}
4513
4514static int niu_tx_channel_lpage_init(struct niu *np, int channel)
4515{
4516	u64 val;
4517
4518	nw64(TX_LOG_MASK1(channel), 0);
4519	nw64(TX_LOG_VAL1(channel), 0);
4520	nw64(TX_LOG_MASK2(channel), 0);
4521	nw64(TX_LOG_VAL2(channel), 0);
4522	nw64(TX_LOG_PAGE_RELO1(channel), 0);
4523	nw64(TX_LOG_PAGE_RELO2(channel), 0);
4524	nw64(TX_LOG_PAGE_HDL(channel), 0);
4525
4526	val  = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
4527	val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);
4528	nw64(TX_LOG_PAGE_VLD(channel), val);
4529
4531	return 0;
4532}
4533
4534static void niu_txc_enable_port(struct niu *np, int on)
4535{
4536	unsigned long flags;
4537	u64 val, mask;
4538
4539	niu_lock_parent(np, flags);
4540	val = nr64(TXC_CONTROL);
4541	mask = (u64)1 << np->port;
4542	if (on) {
4543		val |= TXC_CONTROL_ENABLE | mask;
4544	} else {
4545		val &= ~mask;
4546		if ((val & ~TXC_CONTROL_ENABLE) == 0)
4547			val &= ~TXC_CONTROL_ENABLE;
4548	}
4549	nw64(TXC_CONTROL, val);
4550	niu_unlock_parent(np, flags);
4551}
4552
4553static void niu_txc_set_imask(struct niu *np, u64 imask)
4554{
4555	unsigned long flags;
4556	u64 val;
4557
4558	niu_lock_parent(np, flags);
4559	val = nr64(TXC_INT_MASK);
4560	val &= ~TXC_INT_MASK_VAL(np->port);
4561	val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
	nw64(TXC_INT_MASK, val);
4562	niu_unlock_parent(np, flags);
4563}
4564
4565static void niu_txc_port_dma_enable(struct niu *np, int on)
4566{
4567	u64 val = 0;
4568
4569	if (on) {
4570		int i;
4571
4572		for (i = 0; i < np->num_tx_rings; i++)
4573			val |= (1 << np->tx_rings[i].tx_channel);
4574	}
4575	nw64(TXC_PORT_DMA(np->port), val);
4576}
4577
4578static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
4579{
4580	int err, channel = rp->tx_channel;
4581	u64 val, ring_len;
4582
4583	err = niu_tx_channel_stop(np, channel);
4584	if (err)
4585		return err;
4586
4587	err = niu_tx_channel_reset(np, channel);
4588	if (err)
4589		return err;
4590
4591	err = niu_tx_channel_lpage_init(np, channel);
4592	if (err)
4593		return err;
4594
4595	nw64(TXC_DMA_MAX(channel), rp->max_burst);
4596	nw64(TX_ENT_MSK(channel), 0);
4597
4598	if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
4599			      TX_RNG_CFIG_STADDR)) {
4600		netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n",
4601			   channel, (unsigned long long)rp->descr_dma);
4602		return -EINVAL;
4603	}
4604
4605	/* The length field in TX_RNG_CFIG is measured in 64-byte
4606	 * blocks.  Each of the rp->pending descriptors in our ring is
4607	 * 8 bytes, so eight of them fill one block; dividing the
4608	 * descriptor count by 8 yields the value the chip wants
4609	 * (e.g. 256 / 8 = 32).
	 */
4610	ring_len = (rp->pending / 8);
4611
4612	val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
4613	       rp->descr_dma);
4614	nw64(TX_RNG_CFIG(channel), val);
4615
4616	if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
4617	    ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
4618		netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n",
4619			    channel, (unsigned long long)rp->mbox_dma);
4620		return -EINVAL;
4621	}
4622	nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
4623	nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);
4624
4625	nw64(TX_CS(channel), 0);
4626
4627	rp->last_pkt_cnt = 0;
4628
4629	return 0;
4630}
4631
4632static void niu_init_rdc_groups(struct niu *np)
4633{
4634	struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
4635	int i, first_table_num = tp->first_table_num;
4636
4637	for (i = 0; i < tp->num_tables; i++) {
4638		struct rdc_table *tbl = &tp->tables[i];
4639		int this_table = first_table_num + i;
4640		int slot;
4641
4642		for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++)
4643			nw64(RDC_TBL(this_table, slot),
4644			     tbl->rxdma_channel[slot]);
4645	}
4646
4647	nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
4648}
4649
4650static void niu_init_drr_weight(struct niu *np)
4651{
4652	int type = phy_decode(np->parent->port_phy, np->port);
4653	u64 val;
4654
4655	switch (type) {
4656	case PORT_TYPE_10G:
4657		val = PT_DRR_WEIGHT_DEFAULT_10G;
4658		break;
4659
4660	case PORT_TYPE_1G:
4661	default:
4662		val = PT_DRR_WEIGHT_DEFAULT_1G;
4663		break;
4664	}
4665	nw64(PT_DRR_WT(np->port), val);
4666}
4667
4668static int niu_init_hostinfo(struct niu *np)
4669{
4670	struct niu_parent *parent = np->parent;
4671	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
4672	int i, err, num_alt = niu_num_alt_addr(np);
4673	int first_rdc_table = tp->first_table_num;
4674
4675	err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
4676	if (err)
4677		return err;
4678
4679	err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
4680	if (err)
4681		return err;
4682
4683	for (i = 0; i < num_alt; i++) {
4684		err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
4685		if (err)
4686			return err;
4687	}
4688
4689	return 0;
4690}
4691
4692static int niu_rx_channel_reset(struct niu *np, int channel)
4693{
4694	return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
4695				      RXDMA_CFIG1_RST, 1000, 10,
4696				      "RXDMA_CFIG1");
4697}
4698
4699static int niu_rx_channel_lpage_init(struct niu *np, int channel)
4700{
4701	u64 val;
4702
4703	nw64(RX_LOG_MASK1(channel), 0);
4704	nw64(RX_LOG_VAL1(channel), 0);
4705	nw64(RX_LOG_MASK2(channel), 0);
4706	nw64(RX_LOG_VAL2(channel), 0);
4707	nw64(RX_LOG_PAGE_RELO1(channel), 0);
4708	nw64(RX_LOG_PAGE_RELO2(channel), 0);
4709	nw64(RX_LOG_PAGE_HDL(channel), 0);
4710
4711	val  = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
4712	val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
4713	nw64(RX_LOG_PAGE_VLD(channel), val);
4714
4715	return 0;
4716}
4717
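/* Program the per-channel WRED (weighted random early discard)
 * parameters: separate window/threshold pairs for SYN and non-SYN
 * traffic, packed into the RDC_RED_PARA register.
 */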
4718static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
4719{
4720	u64 val;
4721
4722	val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
4723	       ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
4724	       ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
4725	       ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
4726	nw64(RDC_RED_PARA(rp->rx_channel), val);
4727}
4728
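/* Encode the RBR block size and the three smaller buffer-size tiers
 * into RBR_CFIG_B, setting the VLD bit for each tier; sizes the
 * hardware cannot represent are rejected with -EINVAL.
 */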
4729static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
4730{
4731	u64 val = 0;
4732
4733	*ret = 0;
4734	switch (rp->rbr_block_size) {
4735	case 4 * 1024:
4736		val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT);
4737		break;
4738	case 8 * 1024:
4739		val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT);
4740		break;
4741	case 16 * 1024:
4742		val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT);
4743		break;
4744	case 32 * 1024:
4745		val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT);
4746		break;
4747	default:
4748		return -EINVAL;
4749	}
4750	val |= RBR_CFIG_B_VLD2;
4751	switch (rp->rbr_sizes[2]) {
4752	case 2 * 1024:
4753		val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT);
4754		break;
4755	case 4 * 1024:
4756		val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT);
4757		break;
4758	case 8 * 1024:
4759		val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT);
4760		break;
4761	case 16 * 1024:
4762		val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT);
4763		break;
4764
4765	default:
4766		return -EINVAL;
4767	}
4768	val |= RBR_CFIG_B_VLD1;
4769	switch (rp->rbr_sizes[1]) {
4770	case 1 * 1024:
4771		val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT);
4772		break;
4773	case 2 * 1024:
4774		val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT);
4775		break;
4776	case 4 * 1024:
4777		val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT);
4778		break;
4779	case 8 * 1024:
4780		val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT);
4781		break;
4782
4783	default:
4784		return -EINVAL;
4785	}
4786	val |= RBR_CFIG_B_VLD0;
4787	switch (rp->rbr_sizes[0]) {
4788	case 256:
4789		val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT);
4790		break;
4791	case 512:
4792		val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT);
4793		break;
4794	case 1 * 1024:
4795		val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT);
4796		break;
4797	case 2 * 1024:
4798		val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT);
4799		break;
4800
4801	default:
4802		return -EINVAL;
4803	}
4804
4805	*ret = val;
4806	return 0;
4807}
4808
4809static int niu_enable_rx_channel(struct niu *np, int channel, int on)
4810{
4811	u64 val = nr64(RXDMA_CFIG1(channel));
4812	int limit;
4813
4814	if (on)
4815		val |= RXDMA_CFIG1_EN;
4816	else
4817		val &= ~RXDMA_CFIG1_EN;
4818	nw64(RXDMA_CFIG1(channel), val);
4819
4820	limit = 1000;
4821	while (--limit > 0) {
4822		if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST)
4823			break;
4824		udelay(10);
4825	}
4826	if (limit <= 0)
4827		return -ENODEV;
4828	return 0;
4829}
4830
4831static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
4832{
4833	int err, channel = rp->rx_channel;
4834	u64 val;
4835
4836	err = niu_rx_channel_reset(np, channel);
4837	if (err)
4838		return err;
4839
4840	err = niu_rx_channel_lpage_init(np, channel);
4841	if (err)
4842		return err;
4843
4844	niu_rx_channel_wred_init(np, rp);
4845
4846	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY);
4847	nw64(RX_DMA_CTL_STAT(channel),
4848	     (RX_DMA_CTL_STAT_MEX |
4849	      RX_DMA_CTL_STAT_RCRTHRES |
4850	      RX_DMA_CTL_STAT_RCRTO |
4851	      RX_DMA_CTL_STAT_RBR_EMPTY));
4852	nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
4853	nw64(RXDMA_CFIG2(channel),
4854	     ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) |
4855	      RXDMA_CFIG2_FULL_HDR));
4856	nw64(RBR_CFIG_A(channel),
4857	     ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
4858	     (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
4859	err = niu_compute_rbr_cfig_b(rp, &val);
4860	if (err)
4861		return err;
4862	nw64(RBR_CFIG_B(channel), val);
4863	nw64(RCRCFIG_A(channel),
4864	     ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
4865	     (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
4866	nw64(RCRCFIG_B(channel),
4867	     ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
4868	     RCRCFIG_B_ENTOUT |
4869	     ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));
4870
4871	err = niu_enable_rx_channel(np, channel, 1);
4872	if (err)
4873		return err;
4874
4875	nw64(RBR_KICK(channel), rp->rbr_index);
4876
4877	val = nr64(RX_DMA_CTL_STAT(channel));
4878	val |= RX_DMA_CTL_STAT_RBR_EMPTY;
4879	nw64(RX_DMA_CTL_STAT(channel), val);
4880
4881	return 0;
4882}
4883
4884static int niu_init_rx_channels(struct niu *np)
4885{
4886	unsigned long flags;
4887	u64 seed = jiffies_64;
4888	int err, i;
4889
4890	niu_lock_parent(np, flags);
4891	nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
4892	nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL));
4893	niu_unlock_parent(np, flags);
4894
4896	niu_init_rdc_groups(np);
4897	niu_init_drr_weight(np);
4898
4899	err = niu_init_hostinfo(np);
4900	if (err)
4901		return err;
4902
4903	for (i = 0; i < np->num_rx_rings; i++) {
4904		struct rx_ring_info *rp = &np->rx_rings[i];
4905
4906		err = niu_init_one_rx_channel(np, rp);
4907		if (err)
4908			return err;
4909	}
4910
4911	return 0;
4912}
4913
4914static int niu_set_ip_frag_rule(struct niu *np)
4915{
4916	struct niu_parent *parent = np->parent;
4917	struct niu_classifier *cp = &np->clas;
4918	struct niu_tcam_entry *tp;
4919	int index, err;
4920
4921	index = cp->tcam_top;
4922	tp = &parent->tcam[index];
4923
4924	/* Note that the noport bit is the same in both IPv4- and
4925	 * IPv6-format TCAM entries.
4926	 */
4927	memset(tp, 0, sizeof(*tp));
4928	tp->key[1] = TCAM_V4KEY1_NOPORT;
4929	tp->key_mask[1] = TCAM_V4KEY1_NOPORT;
4930	tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
4931			  ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT));
4932	err = tcam_write(np, index, tp->key, tp->key_mask);
4933	if (err)
4934		return err;
4935	err = tcam_assoc_write(np, index, tp->assoc_data);
4936	if (err)
4937		return err;
4938	tp->valid = 1;
4939	cp->tcam_valid_entries++;
4940
4941	return 0;
4942}
4943
4944static int niu_init_classifier_hw(struct niu *np)
4945{
4946	struct niu_parent *parent = np->parent;
4947	struct niu_classifier *cp = &np->clas;
4948	int i, err;
4949
4950	nw64(H1POLY, cp->h1_init);
4951	nw64(H2POLY, cp->h2_init);
4952
4953	err = niu_init_hostinfo(np);
4954	if (err)
4955		return err;
4956
4957	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) {
4958		struct niu_vlan_rdc *vp = &cp->vlan_mappings[i];
4959
4960		vlan_tbl_write(np, i, np->port,
4961			       vp->vlan_pref, vp->rdc_num);
4962	}
4963
4964	for (i = 0; i < cp->num_alt_mac_mappings; i++) {
4965		struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i];
4966
4967		err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
4968						ap->rdc_num, ap->mac_pref);
4969		if (err)
4970			return err;
4971	}
4972
4973	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
4974		int index = i - CLASS_CODE_USER_PROG1;
4975
4976		err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
4977		if (err)
4978			return err;
4979		err = niu_set_flow_key(np, i, parent->flow_key[index]);
4980		if (err)
4981			return err;
4982	}
4983
4984	err = niu_set_ip_frag_rule(np);
4985	if (err)
4986		return err;
4987
4988	tcam_enable(np, 1);
4989
4990	return 0;
4991}
4992
4993static int niu_zcp_write(struct niu *np, int index, u64 *data)
4994{
4995	nw64(ZCP_RAM_DATA0, data[0]);
4996	nw64(ZCP_RAM_DATA1, data[1]);
4997	nw64(ZCP_RAM_DATA2, data[2]);
4998	nw64(ZCP_RAM_DATA3, data[3]);
4999	nw64(ZCP_RAM_DATA4, data[4]);
5000	nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL);
5001	nw64(ZCP_RAM_ACC,
5002	     (ZCP_RAM_ACC_WRITE |
5003	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
5004	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
5005
5006	return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
5007				   1000, 100);
5008}
5009
5010static int niu_zcp_read(struct niu *np, int index, u64 *data)
5011{
5012	int err;
5013
5014	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
5015				  1000, 100);
5016	if (err) {
5017		netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
5018			   (unsigned long long)nr64(ZCP_RAM_ACC));
5019		return err;
5020	}
5021
5022	nw64(ZCP_RAM_ACC,
5023	     (ZCP_RAM_ACC_READ |
5024	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
5025	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
5026
5027	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
5028				  1000, 100);
5029	if (err) {
5030		netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
5031			   (unsigned long long)nr64(ZCP_RAM_ACC));
5032		return err;
5033	}
5034
5035	data[0] = nr64(ZCP_RAM_DATA0);
5036	data[1] = nr64(ZCP_RAM_DATA1);
5037	data[2] = nr64(ZCP_RAM_DATA2);
5038	data[3] = nr64(ZCP_RAM_DATA3);
5039	data[4] = nr64(ZCP_RAM_DATA4);
5040
5041	return 0;
5042}
5043
5044static void niu_zcp_cfifo_reset(struct niu *np)
5045{
5046	u64 val = nr64(RESET_CFIFO);
5047
5048	val |= RESET_CFIFO_RST(np->port);
5049	nw64(RESET_CFIFO, val);
5050	udelay(10);
5051
5052	val &= ~RESET_CFIFO_RST(np->port);
5053	nw64(RESET_CFIFO, val);
5054}
5055
5056static int niu_init_zcp(struct niu *np)
5057{
5058	u64 data[5], rbuf[5];
5059	int i, max, err;
5060
5061	if (np->parent->plat_type != PLAT_TYPE_NIU) {
5062		if (np->port == 0 || np->port == 1)
5063			max = ATLAS_P0_P1_CFIFO_ENTRIES;
5064		else
5065			max = ATLAS_P2_P3_CFIFO_ENTRIES;
5066	} else
5067		max = NIU_CFIFO_ENTRIES;
5068
5069	data[0] = 0;
5070	data[1] = 0;
5071	data[2] = 0;
5072	data[3] = 0;
5073	data[4] = 0;
5074
5075	for (i = 0; i < max; i++) {
5076		err = niu_zcp_write(np, i, data);
5077		if (err)
5078			return err;
5079		err = niu_zcp_read(np, i, rbuf);
5080		if (err)
5081			return err;
5082	}
5083
5084	niu_zcp_cfifo_reset(np);
5085	nw64(CFIFO_ECC(np->port), 0);
5086	nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL);
5087	(void) nr64(ZCP_INT_STAT);
5088	nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL);
5089
5090	return 0;
5091}
5092
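/* The IPP DFIFO is accessed by PIO: raise the PIO write-enable bit in
 * IPP_CFIG, set the FIFO pointer, move the five 64-bit data words, and
 * drop the enable bit again.
 */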
5093static void niu_ipp_write(struct niu *np, int index, u64 *data)
5094{
5095	u64 val = nr64_ipp(IPP_CFIG);
5096
5097	nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W);
5098	nw64_ipp(IPP_DFIFO_WR_PTR, index);
5099	nw64_ipp(IPP_DFIFO_WR0, data[0]);
5100	nw64_ipp(IPP_DFIFO_WR1, data[1]);
5101	nw64_ipp(IPP_DFIFO_WR2, data[2]);
5102	nw64_ipp(IPP_DFIFO_WR3, data[3]);
5103	nw64_ipp(IPP_DFIFO_WR4, data[4]);
5104	nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W);
5105}
5106
5107static void niu_ipp_read(struct niu *np, int index, u64 *data)
5108{
5109	nw64_ipp(IPP_DFIFO_RD_PTR, index);
5110	data[0] = nr64_ipp(IPP_DFIFO_RD0);
5111	data[1] = nr64_ipp(IPP_DFIFO_RD1);
5112	data[2] = nr64_ipp(IPP_DFIFO_RD2);
5113	data[3] = nr64_ipp(IPP_DFIFO_RD3);
5114	data[4] = nr64_ipp(IPP_DFIFO_RD4);
5115}
5116
5117static int niu_ipp_reset(struct niu *np)
5118{
5119	return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
5120					  1000, 100, "IPP_CFIG");
5121}
5122
5123static int niu_init_ipp(struct niu *np)
5124{
5125	u64 data[5], rbuf[5], val;
5126	int i, max, err;
5127
5128	if (np->parent->plat_type != PLAT_TYPE_NIU) {
5129		if (np->port == 0 || np->port == 1)
5130			max = ATLAS_P0_P1_DFIFO_ENTRIES;
5131		else
5132			max = ATLAS_P2_P3_DFIFO_ENTRIES;
5133	} else
5134		max = NIU_DFIFO_ENTRIES;
5135
5136	data[0] = 0;
5137	data[1] = 0;
5138	data[2] = 0;
5139	data[3] = 0;
5140	data[4] = 0;
5141
5142	for (i = 0; i < max; i++) {
5143		niu_ipp_write(np, i, data);
5144		niu_ipp_read(np, i, rbuf);
5145	}
5146
5147	(void) nr64_ipp(IPP_INT_STAT);
5148	(void) nr64_ipp(IPP_INT_STAT);
5149
5150	err = niu_ipp_reset(np);
5151	if (err)
5152		return err;
5153
5154	(void) nr64_ipp(IPP_PKT_DIS);
5155	(void) nr64_ipp(IPP_BAD_CS_CNT);
5156	(void) nr64_ipp(IPP_ECC);
5157
5158	(void) nr64_ipp(IPP_INT_STAT);
5159
5160	nw64_ipp(IPP_MSK, ~IPP_MSK_ALL);
5161
5162	val = nr64_ipp(IPP_CFIG);
5163	val &= ~IPP_CFIG_IP_MAX_PKT;
5164	val |= (IPP_CFIG_IPP_ENABLE |
5165		IPP_CFIG_DFIFO_ECC_EN |
5166		IPP_CFIG_DROP_BAD_CRC |
5167		IPP_CFIG_CKSUM_EN |
5168		(0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT));
5169	nw64_ipp(IPP_CFIG, val);
5170
5171	return 0;
5172}
5173
5174static void niu_handle_led(struct niu *np, int status)
5175{
5176	u64 val;
5177	val = nr64_mac(XMAC_CONFIG);
5178
5179	if ((np->flags & NIU_FLAGS_10G) != 0 &&
5180	    (np->flags & NIU_FLAGS_FIBER) != 0) {
5181		if (status) {
5182			val |= XMAC_CONFIG_LED_POLARITY;
5183			val &= ~XMAC_CONFIG_FORCE_LED_ON;
5184		} else {
5185			val |= XMAC_CONFIG_FORCE_LED_ON;
5186			val &= ~XMAC_CONFIG_LED_POLARITY;
5187		}
5188	}
5189
5190	nw64_mac(XMAC_CONFIG, val);
5191}
5192
5193static void niu_init_xif_xmac(struct niu *np)
5194{
5195	struct niu_link_config *lp = &np->link_config;
5196	u64 val;
5197
5198	if (np->flags & NIU_FLAGS_XCVR_SERDES) {
5199		val = nr64(MIF_CONFIG);
5200		val |= MIF_CONFIG_ATCA_GE;
5201		nw64(MIF_CONFIG, val);
5202	}
5203
5204	val = nr64_mac(XMAC_CONFIG);
5205	val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
5206
5207	val |= XMAC_CONFIG_TX_OUTPUT_EN;
5208
5209	if (lp->loopback_mode == LOOPBACK_MAC) {
5210		val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
5211		val |= XMAC_CONFIG_LOOPBACK;
5212	} else {
5213		val &= ~XMAC_CONFIG_LOOPBACK;
5214	}
5215
5216	if (np->flags & NIU_FLAGS_10G) {
5217		val &= ~XMAC_CONFIG_LFS_DISABLE;
5218	} else {
5219		val |= XMAC_CONFIG_LFS_DISABLE;
5220		if (!(np->flags & NIU_FLAGS_FIBER) &&
5221		    !(np->flags & NIU_FLAGS_XCVR_SERDES))
5222			val |= XMAC_CONFIG_1G_PCS_BYPASS;
5223		else
5224			val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
5225	}
5226
5227	val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
5228
5229	if (lp->active_speed == SPEED_100)
5230		val |= XMAC_CONFIG_SEL_CLK_25MHZ;
5231	else
5232		val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;
5233
5234	nw64_mac(XMAC_CONFIG, val);
5235
5236	val = nr64_mac(XMAC_CONFIG);
5237	val &= ~XMAC_CONFIG_MODE_MASK;
5238	if (np->flags & NIU_FLAGS_10G) {
5239		val |= XMAC_CONFIG_MODE_XGMII;
5240	} else {
5241		if (lp->active_speed == SPEED_1000)
5242			val |= XMAC_CONFIG_MODE_GMII;
5243		else
5244			val |= XMAC_CONFIG_MODE_MII;
5245	}
5246
5247	nw64_mac(XMAC_CONFIG, val);
5248}
5249
5250static void niu_init_xif_bmac(struct niu *np)
5251{
5252	struct niu_link_config *lp = &np->link_config;
5253	u64 val;
5254
5255	val = BMAC_XIF_CONFIG_TX_OUTPUT_EN;
5256
5257	if (lp->loopback_mode == LOOPBACK_MAC)
5258		val |= BMAC_XIF_CONFIG_MII_LOOPBACK;
5259	else
5260		val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK;
5261
5262	if (lp->active_speed == SPEED_1000)
5263		val |= BMAC_XIF_CONFIG_GMII_MODE;
5264	else
5265		val &= ~BMAC_XIF_CONFIG_GMII_MODE;
5266
5267	val &= ~(BMAC_XIF_CONFIG_LINK_LED |
5268		 BMAC_XIF_CONFIG_LED_POLARITY);
5269
5270	if (!(np->flags & NIU_FLAGS_10G) &&
5271	    !(np->flags & NIU_FLAGS_FIBER) &&
5272	    lp->active_speed == SPEED_100)
5273		val |= BMAC_XIF_CONFIG_25MHZ_CLOCK;
5274	else
5275		val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK;
5276
5277	nw64_mac(BMAC_XIF_CONFIG, val);
5278}
5279
5280static void niu_init_xif(struct niu *np)
5281{
5282	if (np->flags & NIU_FLAGS_XMAC)
5283		niu_init_xif_xmac(np);
5284	else
5285		niu_init_xif_bmac(np);
5286}
5287
5288static void niu_pcs_mii_reset(struct niu *np)
5289{
5290	int limit = 1000;
5291	u64 val = nr64_pcs(PCS_MII_CTL);
5292	val |= PCS_MII_CTL_RST;
5293	nw64_pcs(PCS_MII_CTL, val);
5294	while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) {
5295		udelay(100);
5296		val = nr64_pcs(PCS_MII_CTL);
5297	}
5298}
5299
5300static void niu_xpcs_reset(struct niu *np)
5301{
5302	int limit = 1000;
5303	u64 val = nr64_xpcs(XPCS_CONTROL1);
5304	val |= XPCS_CONTROL1_RESET;
5305	nw64_xpcs(XPCS_CONTROL1, val);
5306	while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) {
5307		udelay(100);
5308		val = nr64_xpcs(XPCS_CONTROL1);
5309	}
5310}
5311
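/* Bring up the PCS layer appropriate to the port: 1G fiber and 1G
 * SERDES use the PCS MII block, every 10G variant goes through the
 * XPCS (XMAC only), and 1G copper or RGMII fiber selects the plain
 * MII datapath.
 */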
5312static int niu_init_pcs(struct niu *np)
5313{
5314	struct niu_link_config *lp = &np->link_config;
5315	u64 val;
5316
5317	switch (np->flags & (NIU_FLAGS_10G |
5318			     NIU_FLAGS_FIBER |
5319			     NIU_FLAGS_XCVR_SERDES)) {
5320	case NIU_FLAGS_FIBER:
5321		/* 1G fiber */
5322		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
5323		nw64_pcs(PCS_DPATH_MODE, 0);
5324		niu_pcs_mii_reset(np);
5325		break;
5326
5327	case NIU_FLAGS_10G:
5328	case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
5329	case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
5330		/* 10G SERDES */
5331		if (!(np->flags & NIU_FLAGS_XMAC))
5332			return -EINVAL;
5333
5334		/* 10G copper or fiber */
5335		val = nr64_mac(XMAC_CONFIG);
5336		val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
5337		nw64_mac(XMAC_CONFIG, val);
5338
5339		niu_xpcs_reset(np);
5340
5341		val = nr64_xpcs(XPCS_CONTROL1);
5342		if (lp->loopback_mode == LOOPBACK_PHY)
5343			val |= XPCS_CONTROL1_LOOPBACK;
5344		else
5345			val &= ~XPCS_CONTROL1_LOOPBACK;
5346		nw64_xpcs(XPCS_CONTROL1, val);
5347
5348		nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
5349		(void) nr64_xpcs(XPCS_SYMERR_CNT01);
5350		(void) nr64_xpcs(XPCS_SYMERR_CNT23);
5351		break;
5352
5354	case NIU_FLAGS_XCVR_SERDES:
5355		/* 1G SERDES */
5356		niu_pcs_mii_reset(np);
5357		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
5358		nw64_pcs(PCS_DPATH_MODE, 0);
5359		break;
5360
5361	case 0:
5362		/* 1G copper */
5363	case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
5364		/* 1G RGMII FIBER */
5365		nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
5366		niu_pcs_mii_reset(np);
5367		break;
5368
5369	default:
5370		return -EINVAL;
5371	}
5372
5373	return 0;
5374}
5375
5376static int niu_reset_tx_xmac(struct niu *np)
5377{
5378	return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
5379					  (XTXMAC_SW_RST_REG_RS |
5380					   XTXMAC_SW_RST_SOFT_RST),
5381					  1000, 100, "XTXMAC_SW_RST");
5382}
5383
5384static int niu_reset_tx_bmac(struct niu *np)
5385{
5386	int limit;
5387
5388	nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
5389	limit = 1000;
5390	while (--limit >= 0) {
5391		if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
5392			break;
5393		udelay(100);
5394	}
5395	if (limit < 0) {
5396		dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
5397			np->port,
5398			(unsigned long long) nr64_mac(BTXMAC_SW_RST));
5399		return -ENODEV;
5400	}
5401
5402	return 0;
5403}
5404
5405static int niu_reset_tx_mac(struct niu *np)
5406{
5407	if (np->flags & NIU_FLAGS_XMAC)
5408		return niu_reset_tx_xmac(np);
5409	else
5410		return niu_reset_tx_bmac(np);
5411}
5412
5413static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
5414{
5415	u64 val;
5416
5417	val = nr64_mac(XMAC_MIN);
5418	val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
5419		 XMAC_MIN_RX_MIN_PKT_SIZE);
5420	val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
5421	val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
5422	nw64_mac(XMAC_MIN, val);
5423
5424	nw64_mac(XMAC_MAX, max);
5425
5426	nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);
5427
5428	val = nr64_mac(XMAC_IPG);
5429	if (np->flags & NIU_FLAGS_10G) {
5430		val &= ~XMAC_IPG_IPG_XGMII;
5431		val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
5432	} else {
5433		val &= ~XMAC_IPG_IPG_MII_GMII;
5434		val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
5435	}
5436	nw64_mac(XMAC_IPG, val);
5437
5438	val = nr64_mac(XMAC_CONFIG);
5439	val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
5440		 XMAC_CONFIG_STRETCH_MODE |
5441		 XMAC_CONFIG_VAR_MIN_IPG_EN |
5442		 XMAC_CONFIG_TX_ENABLE);
5443	nw64_mac(XMAC_CONFIG, val);
5444
5445	nw64_mac(TXMAC_FRM_CNT, 0);
5446	nw64_mac(TXMAC_BYTE_CNT, 0);
5447}
5448
5449static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
5450{
5451	u64 val;
5452
5453	nw64_mac(BMAC_MIN_FRAME, min);
5454	nw64_mac(BMAC_MAX_FRAME, max);
5455
5456	nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
5457	nw64_mac(BMAC_CTRL_TYPE, 0x8808);
5458	nw64_mac(BMAC_PREAMBLE_SIZE, 7);
5459
5460	val = nr64_mac(BTXMAC_CONFIG);
5461	val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
5462		 BTXMAC_CONFIG_ENABLE);
5463	nw64_mac(BTXMAC_CONFIG, val);
5464}
5465
5466static void niu_init_tx_mac(struct niu *np)
5467{
5468	u64 min, max;
5469
5470	min = 64;
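	/* 1522 covers a VLAN-tagged Ethernet frame (1518 + 4 bytes);
	 * 9216 is the jumbo limit used once the MTU exceeds ETH_DATA_LEN.
	 */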
5471	if (np->dev->mtu > ETH_DATA_LEN)
5472		max = 9216;
5473	else
5474		max = 1522;
5475
5476	/* The XMAC_MIN register only accepts values for TX min which
5477	 * have the low 3 bits cleared.
5478	 */
5479	BUG_ON(min & 0x7);
5480
5481	if (np->flags & NIU_FLAGS_XMAC)
5482		niu_init_tx_xmac(np, min, max);
5483	else
5484		niu_init_tx_bmac(np, min, max);
5485}
5486
5487static int niu_reset_rx_xmac(struct niu *np)
5488{
5489	int limit;
5490
5491	nw64_mac(XRXMAC_SW_RST,
5492		 XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
5493	limit = 1000;
5494	while (--limit >= 0) {
5495		if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
5496						 XRXMAC_SW_RST_SOFT_RST)))
5497			break;
5498		udelay(100);
5499	}
5500	if (limit < 0) {
5501		dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
5502			np->port,
5503			(unsigned long long) nr64_mac(XRXMAC_SW_RST));
5504		return -ENODEV;
5505	}
5506
5507	return 0;
5508}
5509
5510static int niu_reset_rx_bmac(struct niu *np)
5511{
5512	int limit;
5513
5514	nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
5515	limit = 1000;
5516	while (--limit >= 0) {
5517		if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
5518			break;
5519		udelay(100);
5520	}
5521	if (limit < 0) {
5522		dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
5523			np->port,
5524			(unsigned long long) nr64_mac(BRXMAC_SW_RST));
5525		return -ENODEV;
5526	}
5527
5528	return 0;
5529}
5530
5531static int niu_reset_rx_mac(struct niu *np)
5532{
5533	if (np->flags & NIU_FLAGS_XMAC)
5534		return niu_reset_rx_xmac(np);
5535	else
5536		return niu_reset_rx_bmac(np);
5537}
5538
5539static void niu_init_rx_xmac(struct niu *np)
5540{
5541	struct niu_parent *parent = np->parent;
5542	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
5543	int first_rdc_table = tp->first_table_num;
5544	unsigned long i;
5545	u64 val;
5546
5547	nw64_mac(XMAC_ADD_FILT0, 0);
5548	nw64_mac(XMAC_ADD_FILT1, 0);
5549	nw64_mac(XMAC_ADD_FILT2, 0);
5550	nw64_mac(XMAC_ADD_FILT12_MASK, 0);
5551	nw64_mac(XMAC_ADD_FILT00_MASK, 0);
5552	for (i = 0; i < MAC_NUM_HASH; i++)
5553		nw64_mac(XMAC_HASH_TBL(i), 0);
5554	nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
5555	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
5556	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
5557
5558	val = nr64_mac(XMAC_CONFIG);
5559	val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
5560		 XMAC_CONFIG_PROMISCUOUS |
5561		 XMAC_CONFIG_PROMISC_GROUP |
5562		 XMAC_CONFIG_ERR_CHK_DIS |
5563		 XMAC_CONFIG_RX_CRC_CHK_DIS |
5564		 XMAC_CONFIG_RESERVED_MULTICAST |
5565		 XMAC_CONFIG_RX_CODEV_CHK_DIS |
5566		 XMAC_CONFIG_ADDR_FILTER_EN |
5567		 XMAC_CONFIG_RCV_PAUSE_ENABLE |
5568		 XMAC_CONFIG_STRIP_CRC |
5569		 XMAC_CONFIG_PASS_FLOW_CTRL |
5570		 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
5571	val |= (XMAC_CONFIG_HASH_FILTER_EN);
5572	nw64_mac(XMAC_CONFIG, val);
5573
5574	nw64_mac(RXMAC_BT_CNT, 0);
5575	nw64_mac(RXMAC_BC_FRM_CNT, 0);
5576	nw64_mac(RXMAC_MC_FRM_CNT, 0);
5577	nw64_mac(RXMAC_FRAG_CNT, 0);
5578	nw64_mac(RXMAC_HIST_CNT1, 0);
5579	nw64_mac(RXMAC_HIST_CNT2, 0);
5580	nw64_mac(RXMAC_HIST_CNT3, 0);
5581	nw64_mac(RXMAC_HIST_CNT4, 0);
5582	nw64_mac(RXMAC_HIST_CNT5, 0);
5583	nw64_mac(RXMAC_HIST_CNT6, 0);
5584	nw64_mac(RXMAC_HIST_CNT7, 0);
5585	nw64_mac(RXMAC_MPSZER_CNT, 0);
5586	nw64_mac(RXMAC_CRC_ER_CNT, 0);
5587	nw64_mac(RXMAC_CD_VIO_CNT, 0);
5588	nw64_mac(LINK_FAULT_CNT, 0);
5589}
5590
5591static void niu_init_rx_bmac(struct niu *np)
5592{
5593	struct niu_parent *parent = np->parent;
5594	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
5595	int first_rdc_table = tp->first_table_num;
5596	unsigned long i;
5597	u64 val;
5598
5599	nw64_mac(BMAC_ADD_FILT0, 0);
5600	nw64_mac(BMAC_ADD_FILT1, 0);
5601	nw64_mac(BMAC_ADD_FILT2, 0);
5602	nw64_mac(BMAC_ADD_FILT12_MASK, 0);
5603	nw64_mac(BMAC_ADD_FILT00_MASK, 0);
5604	for (i = 0; i < MAC_NUM_HASH; i++)
5605		nw64_mac(BMAC_HASH_TBL(i), 0);
5606	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
5607	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
5608	nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);
5609
5610	val = nr64_mac(BRXMAC_CONFIG);
5611	val &= ~(BRXMAC_CONFIG_ENABLE |
5612		 BRXMAC_CONFIG_STRIP_PAD |
5613		 BRXMAC_CONFIG_STRIP_FCS |
5614		 BRXMAC_CONFIG_PROMISC |
5615		 BRXMAC_CONFIG_PROMISC_GRP |
5616		 BRXMAC_CONFIG_ADDR_FILT_EN |
5617		 BRXMAC_CONFIG_DISCARD_DIS);
5618	val |= (BRXMAC_CONFIG_HASH_FILT_EN);
5619	nw64_mac(BRXMAC_CONFIG, val);
5620
5621	val = nr64_mac(BMAC_ADDR_CMPEN);
5622	val |= BMAC_ADDR_CMPEN_EN0;
5623	nw64_mac(BMAC_ADDR_CMPEN, val);
5624}
5625
5626static void niu_init_rx_mac(struct niu *np)
5627{
5628	niu_set_primary_mac(np, np->dev->dev_addr);
5629
5630	if (np->flags & NIU_FLAGS_XMAC)
5631		niu_init_rx_xmac(np);
5632	else
5633		niu_init_rx_bmac(np);
5634}
5635
5636static void niu_enable_tx_xmac(struct niu *np, int on)
5637{
5638	u64 val = nr64_mac(XMAC_CONFIG);
5639
5640	if (on)
5641		val |= XMAC_CONFIG_TX_ENABLE;
5642	else
5643		val &= ~XMAC_CONFIG_TX_ENABLE;
5644	nw64_mac(XMAC_CONFIG, val);
5645}
5646
5647static void niu_enable_tx_bmac(struct niu *np, int on)
5648{
5649	u64 val = nr64_mac(BTXMAC_CONFIG);
5650
5651	if (on)
5652		val |= BTXMAC_CONFIG_ENABLE;
5653	else
5654		val &= ~BTXMAC_CONFIG_ENABLE;
5655	nw64_mac(BTXMAC_CONFIG, val);
5656}
5657
5658static void niu_enable_tx_mac(struct niu *np, int on)
5659{
5660	if (np->flags & NIU_FLAGS_XMAC)
5661		niu_enable_tx_xmac(np, on);
5662	else
5663		niu_enable_tx_bmac(np, on);
5664}
5665
5666static void niu_enable_rx_xmac(struct niu *np, int on)
5667{
5668	u64 val = nr64_mac(XMAC_CONFIG);
5669
5670	val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
5671		 XMAC_CONFIG_PROMISCUOUS);
5672
5673	if (np->flags & NIU_FLAGS_MCAST)
5674		val |= XMAC_CONFIG_HASH_FILTER_EN;
5675	if (np->flags & NIU_FLAGS_PROMISC)
5676		val |= XMAC_CONFIG_PROMISCUOUS;
5677
5678	if (on)
5679		val |= XMAC_CONFIG_RX_MAC_ENABLE;
5680	else
5681		val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
5682	nw64_mac(XMAC_CONFIG, val);
5683}
5684
5685static void niu_enable_rx_bmac(struct niu *np, int on)
5686{
5687	u64 val = nr64_mac(BRXMAC_CONFIG);
5688
5689	val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
5690		 BRXMAC_CONFIG_PROMISC);
5691
5692	if (np->flags & NIU_FLAGS_MCAST)
5693		val |= BRXMAC_CONFIG_HASH_FILT_EN;
5694	if (np->flags & NIU_FLAGS_PROMISC)
5695		val |= BRXMAC_CONFIG_PROMISC;
5696
5697	if (on)
5698		val |= BRXMAC_CONFIG_ENABLE;
5699	else
5700		val &= ~BRXMAC_CONFIG_ENABLE;
5701	nw64_mac(BRXMAC_CONFIG, val);
5702}
5703
5704static void niu_enable_rx_mac(struct niu *np, int on)
5705{
5706	if (np->flags & NIU_FLAGS_XMAC)
5707		niu_enable_rx_xmac(np, on);
5708	else
5709		niu_enable_rx_bmac(np, on);
5710}
5711
5712static int niu_init_mac(struct niu *np)
5713{
5714	int err;
5715
5716	niu_init_xif(np);
5717	err = niu_init_pcs(np);
5718	if (err)
5719		return err;
5720
5721	err = niu_reset_tx_mac(np);
5722	if (err)
5723		return err;
5724	niu_init_tx_mac(np);
5725	err = niu_reset_rx_mac(np);
5726	if (err)
5727		return err;
5728	niu_init_rx_mac(np);
5729
5730	/* This looks hokey, but the RX MAC reset we just did will
5731	 * undo some of the state we set up in niu_init_tx_mac(), so we
5732	 * have to call it again.  In particular, the RX MAC reset will
5733	 * set the XMAC_MAX register back to its default value.
5734	 */
5735	niu_init_tx_mac(np);
5736	niu_enable_tx_mac(np, 1);
5737
5738	niu_enable_rx_mac(np, 1);
5739
5740	return 0;
5741}
5742
5743static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
5744{
5745	(void) niu_tx_channel_stop(np, rp->tx_channel);
5746}
5747
5748static void niu_stop_tx_channels(struct niu *np)
5749{
5750	int i;
5751
5752	for (i = 0; i < np->num_tx_rings; i++) {
5753		struct tx_ring_info *rp = &np->tx_rings[i];
5754
5755		niu_stop_one_tx_channel(np, rp);
5756	}
5757}
5758
5759static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
5760{
5761	(void) niu_tx_channel_reset(np, rp->tx_channel);
5762}
5763
5764static void niu_reset_tx_channels(struct niu *np)
5765{
5766	int i;
5767
5768	for (i = 0; i < np->num_tx_rings; i++) {
5769		struct tx_ring_info *rp = &np->tx_rings[i];
5770
5771		niu_reset_one_tx_channel(np, rp);
5772	}
5773}
5774
5775static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
5776{
5777	(void) niu_enable_rx_channel(np, rp->rx_channel, 0);
5778}
5779
5780static void niu_stop_rx_channels(struct niu *np)
5781{
5782	int i;
5783
5784	for (i = 0; i < np->num_rx_rings; i++) {
5785		struct rx_ring_info *rp = &np->rx_rings[i];
5786
5787		niu_stop_one_rx_channel(np, rp);
5788	}
5789}
5790
5791static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
5792{
5793	int channel = rp->rx_channel;
5794
5795	(void) niu_rx_channel_reset(np, channel);
5796	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
5797	nw64(RX_DMA_CTL_STAT(channel), 0);
5798	(void) niu_enable_rx_channel(np, channel, 0);
5799}
5800
5801static void niu_reset_rx_channels(struct niu *np)
5802{
5803	int i;
5804
5805	for (i = 0; i < np->num_rx_rings; i++) {
5806		struct rx_ring_info *rp = &np->rx_rings[i];
5807
5808		niu_reset_one_rx_channel(np, rp);
5809	}
5810}
5811
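/* Drain and disable the IPP block: poll until the DFIFO read and
 * write pointers meet (the FIFO is empty), then clear the enable
 * bits and issue a soft reset.
 */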
5812static void niu_disable_ipp(struct niu *np)
5813{
5814	u64 rd, wr, val;
5815	int limit;
5816
5817	rd = nr64_ipp(IPP_DFIFO_RD_PTR);
5818	wr = nr64_ipp(IPP_DFIFO_WR_PTR);
5819	limit = 100;
5820	while (--limit >= 0 && (rd != wr)) {
5821		rd = nr64_ipp(IPP_DFIFO_RD_PTR);
5822		wr = nr64_ipp(IPP_DFIFO_WR_PTR);
5823	}
5824	if (limit < 0 &&
5825	    (rd != 0 && wr != 1)) {
5826		netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
5827			   (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR),
5828			   (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR));
5829	}
5830
5831	val = nr64_ipp(IPP_CFIG);
5832	val &= ~(IPP_CFIG_IPP_ENABLE |
5833		 IPP_CFIG_DFIFO_ECC_EN |
5834		 IPP_CFIG_DROP_BAD_CRC |
5835		 IPP_CFIG_CKSUM_EN);
5836	nw64_ipp(IPP_CFIG, val);
5837
5838	(void) niu_ipp_reset(np);
5839}
5840
5841static int niu_init_hw(struct niu *np)
5842{
5843	int i, err;
5844
5845	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n");
5846	niu_txc_enable_port(np, 1);
5847	niu_txc_port_dma_enable(np, 1);
5848	niu_txc_set_imask(np, 0);
5849
5850	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n");
5851	for (i = 0; i < np->num_tx_rings; i++) {
5852		struct tx_ring_info *rp = &np->tx_rings[i];
5853
5854		err = niu_init_one_tx_channel(np, rp);
5855		if (err)
5856			return err;
5857	}
5858
5859	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n");
5860	err = niu_init_rx_channels(np);
5861	if (err)
5862		goto out_uninit_tx_channels;
5863
5864	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n");
5865	err = niu_init_classifier_hw(np);
5866	if (err)
5867		goto out_uninit_rx_channels;
5868
5869	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n");
5870	err = niu_init_zcp(np);
5871	if (err)
5872		goto out_uninit_rx_channels;
5873
5874	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n");
5875	err = niu_init_ipp(np);
5876	if (err)
5877		goto out_uninit_rx_channels;
5878
5879	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n");
5880	err = niu_init_mac(np);
5881	if (err)
5882		goto out_uninit_ipp;
5883
5884	return 0;
5885
5886out_uninit_ipp:
5887	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n");
5888	niu_disable_ipp(np);
5889
5890out_uninit_rx_channels:
5891	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n");
5892	niu_stop_rx_channels(np);
5893	niu_reset_rx_channels(np);
5894
5895out_uninit_tx_channels:
5896	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n");
5897	niu_stop_tx_channels(np);
5898	niu_reset_tx_channels(np);
5899
5900	return err;
5901}
5902
5903static void niu_stop_hw(struct niu *np)
5904{
5905	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
5906	niu_enable_interrupts(np, 0);
5907
5908	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
5909	niu_enable_rx_mac(np, 0);
5910
5911	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
5912	niu_disable_ipp(np);
5913
5914	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n");
5915	niu_stop_tx_channels(np);
5916
5917	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
5918	niu_stop_rx_channels(np);
5919
5920	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
5921	niu_reset_tx_channels(np);
5922
5923	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
5924	niu_reset_rx_channels(np);
5925}
5926
5927static void niu_set_irq_name(struct niu *np)
5928{
5929	int port = np->port;
5930	int i, j = 1;
5931
5932	sprintf(np->irq_name[0], "%s:MAC", np->dev->name);
5933
5934	if (port == 0) {
5935		sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
5936		sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);
5937		j = 3;
5938	}
5939
5940	for (i = 0; i < np->num_ldg - j; i++) {
5941		if (i < np->num_rx_rings)
5942			sprintf(np->irq_name[i+j], "%s-rx-%d",
5943				np->dev->name, i);
5944		else if (i < np->num_tx_rings + np->num_rx_rings)
5945			sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
5946				i - np->num_rx_rings);
5947	}
5948}
5949
5950static int niu_request_irq(struct niu *np)
5951{
5952	int i, j, err;
5953
5954	niu_set_irq_name(np);
5955
5956	err = 0;
5957	for (i = 0; i < np->num_ldg; i++) {
5958		struct niu_ldg *lp = &np->ldg[i];
5959
5960		err = request_irq(lp->irq, niu_interrupt,
5961				  IRQF_SHARED | IRQF_SAMPLE_RANDOM,
5962				  np->irq_name[i], lp);
5963		if (err)
5964			goto out_free_irqs;
5966	}
5967
5968	return 0;
5969
5970out_free_irqs:
5971	for (j = 0; j < i; j++) {
5972		struct niu_ldg *lp = &np->ldg[j];
5973
5974		free_irq(lp->irq, lp);
5975	}
5976	return err;
5977}
5978
5979static void niu_free_irq(struct niu *np)
5980{
5981	int i;
5982
5983	for (i = 0; i < np->num_ldg; i++) {
5984		struct niu_ldg *lp = &np->ldg[i];
5985
5986		free_irq(lp->irq, lp);
5987	}
5988}
5989
5990static void niu_enable_napi(struct niu *np)
5991{
5992	int i;
5993
5994	for (i = 0; i < np->num_ldg; i++)
5995		napi_enable(&np->ldg[i].napi);
5996}
5997
5998static void niu_disable_napi(struct niu *np)
5999{
6000	int i;
6001
6002	for (i = 0; i < np->num_ldg; i++)
6003		napi_disable(&np->ldg[i].napi);
6004}
6005
6006static int niu_open(struct net_device *dev)
6007{
6008	struct niu *np = netdev_priv(dev);
6009	int err;
6010
6011	netif_carrier_off(dev);
6012
6013	err = niu_alloc_channels(np);
6014	if (err)
6015		goto out_err;
6016
6017	err = niu_enable_interrupts(np, 0);
6018	if (err)
6019		goto out_free_channels;
6020
6021	err = niu_request_irq(np);
6022	if (err)
6023		goto out_free_channels;
6024
6025	niu_enable_napi(np);
6026
6027	spin_lock_irq(&np->lock);
6028
6029	err = niu_init_hw(np);
6030	if (!err) {
6031		init_timer(&np->timer);
6032		np->timer.expires = jiffies + HZ;
6033		np->timer.data = (unsigned long) np;
6034		np->timer.function = niu_timer;
6035
6036		err = niu_enable_interrupts(np, 1);
6037		if (err)
6038			niu_stop_hw(np);
6039	}
6040
6041	spin_unlock_irq(&np->lock);
6042
6043	if (err) {
6044		niu_disable_napi(np);
6045		goto out_free_irq;
6046	}
6047
6048	netif_tx_start_all_queues(dev);
6049
6050	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
6051		netif_carrier_on(dev);
6052
6053	add_timer(&np->timer);
6054
6055	return 0;
6056
6057out_free_irq:
6058	niu_free_irq(np);
6059
6060out_free_channels:
6061	niu_free_channels(np);
6062
6063out_err:
6064	return err;
6065}
6066
6067static void niu_full_shutdown(struct niu *np, struct net_device *dev)
6068{
6069	cancel_work_sync(&np->reset_task);
6070
6071	niu_disable_napi(np);
6072	netif_tx_stop_all_queues(dev);
6073
6074	del_timer_sync(&np->timer);
6075
6076	spin_lock_irq(&np->lock);
6077
6078	niu_stop_hw(np);
6079
6080	spin_unlock_irq(&np->lock);
6081}
6082
6083static int niu_close(struct net_device *dev)
6084{
6085	struct niu *np = netdev_priv(dev);
6086
6087	niu_full_shutdown(np, dev);
6088
6089	niu_free_irq(np);
6090
6091	niu_free_channels(np);
6092
6093	niu_handle_led(np, 0);
6094
6095	return 0;
6096}
6097
6098static void niu_sync_xmac_stats(struct niu *np)
6099{
6100	struct niu_xmac_stats *mp = &np->mac_stats.xmac;
6101
6102	mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
6103	mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);
6104
6105	mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
6106	mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
6107	mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
6108	mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
6109	mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
6110	mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
6111	mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
6112	mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
6113	mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
6114	mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
6115	mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
6116	mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
6117	mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
6118	mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
6119	mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
6120	mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
6121}
6122
6123static void niu_sync_bmac_stats(struct niu *np)
6124{
6125	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
6126
6127	mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
6128	mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);
6129
6130	mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
6131	mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
6132	mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
6133	mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
6134}
6135
6136static void niu_sync_mac_stats(struct niu *np)
6137{
6138	if (np->flags & NIU_FLAGS_XMAC)
6139		niu_sync_xmac_stats(np);
6140	else
6141		niu_sync_bmac_stats(np);
6142}
6143
6144static void niu_get_rx_stats(struct niu *np)
6145{
6146	unsigned long pkts, dropped, errors, bytes;
6147	int i;
6148
6149	pkts = dropped = errors = bytes = 0;
6150	for (i = 0; i < np->num_rx_rings; i++) {
6151		struct rx_ring_info *rp = &np->rx_rings[i];
6152
6153		niu_sync_rx_discard_stats(np, rp, 0);
6154
6155		pkts += rp->rx_packets;
6156		bytes += rp->rx_bytes;
6157		dropped += rp->rx_dropped;
6158		errors += rp->rx_errors;
6159	}
6160	np->dev->stats.rx_packets = pkts;
6161	np->dev->stats.rx_bytes = bytes;
6162	np->dev->stats.rx_dropped = dropped;
6163	np->dev->stats.rx_errors = errors;
6164}
6165
6166static void niu_get_tx_stats(struct niu *np)
6167{
6168	unsigned long pkts, errors, bytes;
6169	int i;
6170
6171	pkts = errors = bytes = 0;
6172	for (i = 0; i < np->num_tx_rings; i++) {
6173		struct tx_ring_info *rp = &np->tx_rings[i];
6174
6175		pkts += rp->tx_packets;
6176		bytes += rp->tx_bytes;
6177		errors += rp->tx_errors;
6178	}
6179	np->dev->stats.tx_packets = pkts;
6180	np->dev->stats.tx_bytes = bytes;
6181	np->dev->stats.tx_errors = errors;
6182}
6183
6184static struct net_device_stats *niu_get_stats(struct net_device *dev)
6185{
6186	struct niu *np = netdev_priv(dev);
6187
6188	niu_get_rx_stats(np);
6189	niu_get_tx_stats(np);
6190
6191	return &dev->stats;
6192}
6193
6194static void niu_load_hash_xmac(struct niu *np, u16 *hash)
6195{
6196	int i;
6197
6198	for (i = 0; i < 16; i++)
6199		nw64_mac(XMAC_HASH_TBL(i), hash[i]);
6200}
6201
6202static void niu_load_hash_bmac(struct niu *np, u16 *hash)
6203{
6204	int i;
6205
6206	for (i = 0; i < 16; i++)
6207		nw64_mac(BMAC_HASH_TBL(i), hash[i]);
6208}
6209
6210static void niu_load_hash(struct niu *np, u16 *hash)
6211{
6212	if (np->flags & NIU_FLAGS_XMAC)
6213		niu_load_hash_xmac(np, hash);
6214	else
6215		niu_load_hash_bmac(np, hash);
6216}
6217
6218static void niu_set_rx_mode(struct net_device *dev)
6219{
6220	struct niu *np = netdev_priv(dev);
6221	int i, alt_cnt, err;
6222	struct netdev_hw_addr *ha;
6223	unsigned long flags;
6224	u16 hash[16] = { 0, };
6225
6226	spin_lock_irqsave(&np->lock, flags);
6227	niu_enable_rx_mac(np, 0);
6228
6229	np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
6230	if (dev->flags & IFF_PROMISC)
6231		np->flags |= NIU_FLAGS_PROMISC;
6232	if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev)))
6233		np->flags |= NIU_FLAGS_MCAST;
6234
6235	alt_cnt = netdev_uc_count(dev);
6236	if (alt_cnt > niu_num_alt_addr(np)) {
6237		alt_cnt = 0;
6238		np->flags |= NIU_FLAGS_PROMISC;
6239	}
6240
6241	if (alt_cnt) {
6242		int index = 0;
6243
6244		netdev_for_each_uc_addr(ha, dev) {
6245			err = niu_set_alt_mac(np, index, ha->addr);
6246			if (err)
6247				netdev_warn(dev, "Error %d adding alt mac %d\n",
6248					    err, index);
6249			err = niu_enable_alt_mac(np, index, 1);
6250			if (err)
6251				netdev_warn(dev, "Error %d enabling alt mac %d\n",
6252					    err, index);
6253
6254			index++;
6255		}
6256	} else {
6257		int alt_start;
6258		if (np->flags & NIU_FLAGS_XMAC)
6259			alt_start = 0;
6260		else
6261			alt_start = 1;
6262		for (i = alt_start; i < niu_num_alt_addr(np); i++) {
6263			err = niu_enable_alt_mac(np, i, 0);
6264			if (err)
6265				netdev_warn(dev, "Error %d disabling alt mac %d\n",
6266					    err, i);
6267		}
6268	}
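	/* Build the 256-bit multicast hash filter: the top eight bits
	 * of the little-endian CRC-32 of each address select one bit
	 * spread across the sixteen 16-bit hash-table registers.
	 */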
6269	if (dev->flags & IFF_ALLMULTI) {
6270		for (i = 0; i < 16; i++)
6271			hash[i] = 0xffff;
6272	} else if (!netdev_mc_empty(dev)) {
6273		netdev_for_each_mc_addr(ha, dev) {
6274			u32 crc = ether_crc_le(ETH_ALEN, ha->addr);
6275
6276			crc >>= 24;
6277			hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
6278		}
6279	}
6280
6281	if (np->flags & NIU_FLAGS_MCAST)
6282		niu_load_hash(np, hash);
6283
6284	niu_enable_rx_mac(np, 1);
6285	spin_unlock_irqrestore(&np->lock, flags);
6286}
6287
6288static int niu_set_mac_addr(struct net_device *dev, void *p)
6289{
6290	struct niu *np = netdev_priv(dev);
6291	struct sockaddr *addr = p;
6292	unsigned long flags;
6293
6294	if (!is_valid_ether_addr(addr->sa_data))
6295		return -EINVAL;
6296
6297	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
6298
6299	if (!netif_running(dev))
6300		return 0;
6301
6302	spin_lock_irqsave(&np->lock, flags);
6303	niu_enable_rx_mac(np, 0);
6304	niu_set_primary_mac(np, dev->dev_addr);
6305	niu_enable_rx_mac(np, 1);
6306	spin_unlock_irqrestore(&np->lock, flags);
6307
6308	return 0;
6309}
6310
6311static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6312{
6313	return -EOPNOTSUPP;
6314}
6315
6316static void niu_netif_stop(struct niu *np)
6317{
6318	np->dev->trans_start = jiffies;	/* prevent tx timeout */
6319
6320	niu_disable_napi(np);
6321
6322	netif_tx_disable(np->dev);
6323}
6324
6325static void niu_netif_start(struct niu *np)
6326{
6327	/* NOTE: unconditional netif_wake_queue is only appropriate
6328	 * so long as all callers are assured to have free tx slots
6329	 * (such as after niu_init_hw).
6330	 */
6331	netif_tx_wake_all_queues(np->dev);
6332
6333	niu_enable_napi(np);
6334
6335	niu_enable_interrupts(np, 1);
6336}
6337
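/* Rebuild all ring state after a reset without freeing pages: RBR
 * entries are regenerated from the pages still chained on the rxhash
 * table (linked through page->mapping), the remainder are refilled
 * fresh, and any in-flight TX packets are released.
 */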
6338static void niu_reset_buffers(struct niu *np)
6339{
6340	int i, j, k, err;
6341
6342	if (np->rx_rings) {
6343		for (i = 0; i < np->num_rx_rings; i++) {
6344			struct rx_ring_info *rp = &np->rx_rings[i];
6345
6346			for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
6347				struct page *page;
6348
6349				page = rp->rxhash[j];
6350				while (page) {
6351					struct page *next =
6352						(struct page *) page->mapping;
6353					u64 base = page->index;
6354					base = base >> RBR_DESCR_ADDR_SHIFT;
6355					rp->rbr[k++] = cpu_to_le32(base);
6356					page = next;
6357				}
6358			}
6359			for (; k < MAX_RBR_RING_SIZE; k++) {
6360				err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
6361				if (unlikely(err))
6362					break;
6363			}
6364
6365			rp->rbr_index = rp->rbr_table_size - 1;
6366			rp->rcr_index = 0;
6367			rp->rbr_pending = 0;
6368			rp->rbr_refill_pending = 0;
6369		}
6370	}
6371	if (np->tx_rings) {
6372		for (i = 0; i < np->num_tx_rings; i++) {
6373			struct tx_ring_info *rp = &np->tx_rings[i];
6374
6375			for (j = 0; j < MAX_TX_RING_SIZE; j++) {
6376				if (rp->tx_buffs[j].skb)
6377					(void) release_tx_packet(np, rp, j);
6378			}
6379
6380			rp->pending = MAX_TX_RING_SIZE;
6381			rp->prod = 0;
6382			rp->cons = 0;
6383			rp->wrap_bit = 0;
6384		}
6385	}
6386}
6387
6388static void niu_reset_task(struct work_struct *work)
6389{
6390	struct niu *np = container_of(work, struct niu, reset_task);
6391	unsigned long flags;
6392	int err;
6393
6394	spin_lock_irqsave(&np->lock, flags);
6395	if (!netif_running(np->dev)) {
6396		spin_unlock_irqrestore(&np->lock, flags);
6397		return;
6398	}
6399
6400	spin_unlock_irqrestore(&np->lock, flags);
6401
6402	del_timer_sync(&np->timer);
6403
6404	niu_netif_stop(np);
6405
6406	spin_lock_irqsave(&np->lock, flags);
6407
6408	niu_stop_hw(np);
6409
6410	spin_unlock_irqrestore(&np->lock, flags);
6411
6412	niu_reset_buffers(np);
6413
6414	spin_lock_irqsave(&np->lock, flags);
6415
6416	err = niu_init_hw(np);
6417	if (!err) {
6418		np->timer.expires = jiffies + HZ;
6419		add_timer(&np->timer);
6420		niu_netif_start(np);
6421	}
6422
6423	spin_unlock_irqrestore(&np->lock, flags);
6424}
6425
6426static void niu_tx_timeout(struct net_device *dev)
6427{
6428	struct niu *np = netdev_priv(dev);
6429
6430	dev_err(np->device, "%s: Transmit timed out, resetting\n",
6431		dev->name);
6432
6433	schedule_work(&np->reset_task);
6434}
6435
6436static void niu_set_txd(struct tx_ring_info *rp, int index,
6437			u64 mapping, u64 len, u64 mark,
6438			u64 n_frags)
6439{
6440	__le64 *desc = &rp->descr[index];
6441
6442	*desc = cpu_to_le64(mark |
6443			    (n_frags << TX_DESC_NUM_PTR_SHIFT) |
6444			    (len << TX_DESC_TR_LEN_SHIFT) |
6445			    (mapping & TX_DESC_SAD));
6446}
6447
6448static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
6449				u64 pad_bytes, u64 len)
6450{
6451	u16 eth_proto, eth_proto_inner;
6452	u64 csum_bits, l3off, ihl, ret;
6453	u8 ip_proto;
6454	int ipv6;
6455
6456	eth_proto = be16_to_cpu(ehdr->h_proto);
6457	eth_proto_inner = eth_proto;
6458	if (eth_proto == ETH_P_8021Q) {
6459		struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
6460		__be16 val = vp->h_vlan_encapsulated_proto;
6461
6462		eth_proto_inner = be16_to_cpu(val);
6463	}
6464
6465	ipv6 = ihl = 0;
6466	switch (skb->protocol) {
6467	case cpu_to_be16(ETH_P_IP):
6468		ip_proto = ip_hdr(skb)->protocol;
6469		ihl = ip_hdr(skb)->ihl;
6470		break;
6471	case cpu_to_be16(ETH_P_IPV6):
6472		ip_proto = ipv6_hdr(skb)->nexthdr;
6473		ihl = (40 >> 2);
6474		ipv6 = 1;
6475		break;
6476	default:
6477		ip_proto = ihl = 0;
6478		break;
6479	}
6480
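	/* The L4START/L4STUFF (and L3START below) fields appear to be in
	 * 16-bit units, hence the divide by two.  The skb offsets still
	 * include the internal header and alignment pad pushed in front of
	 * the frame, so subtract those back out before scaling.
	 */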
6481	csum_bits = TXHDR_CSUM_NONE;
6482	if (skb->ip_summed == CHECKSUM_PARTIAL) {
6483		u64 start, stuff;
6484
6485		csum_bits = (ip_proto == IPPROTO_TCP ?
6486			     TXHDR_CSUM_TCP :
6487			     (ip_proto == IPPROTO_UDP ?
6488			      TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));
6489
6490		start = skb_transport_offset(skb) -
6491			(pad_bytes + sizeof(struct tx_pkt_hdr));
6492		stuff = start + skb->csum_offset;
6493
6494		csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
6495		csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
6496	}
6497
6498	l3off = skb_network_offset(skb) -
6499		(pad_bytes + sizeof(struct tx_pkt_hdr));
6500
6501	ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
6502	       (len << TXHDR_LEN_SHIFT) |
6503	       ((l3off / 2) << TXHDR_L3START_SHIFT) |
6504	       (ihl << TXHDR_IHL_SHIFT) |
6505	       ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) |
6506	       ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
6507	       (ipv6 ? TXHDR_IP_VER : 0) |
6508	       csum_bits);
6509
6510	return ret;
6511}
6512
6513static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
6514				  struct net_device *dev)
6515{
6516	struct niu *np = netdev_priv(dev);
6517	unsigned long align, headroom;
6518	struct netdev_queue *txq;
6519	struct tx_ring_info *rp;
6520	struct tx_pkt_hdr *tp;
6521	unsigned int len, nfg;
6522	struct ethhdr *ehdr;
6523	int prod, i, tlen;
6524	u64 mapping, mrk;
6525
6526	i = skb_get_queue_mapping(skb);
6527	rp = &np->tx_rings[i];
6528	txq = netdev_get_tx_queue(dev, i);
6529
6530	if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
6531		netif_tx_stop_queue(txq);
6532		dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name);
6533		rp->tx_errors++;
6534		return NETDEV_TX_BUSY;
6535	}
6536
6537	if (skb->len < ETH_ZLEN) {
6538		unsigned int pad_bytes = ETH_ZLEN - skb->len;
6539
6540		if (skb_pad(skb, pad_bytes))
6541			goto out;
6542		skb_put(skb, pad_bytes);
6543	}
6544
6545	len = sizeof(struct tx_pkt_hdr) + 15;
6546	if (skb_headroom(skb) < len) {
6547		struct sk_buff *skb_new;
6548
6549		skb_new = skb_realloc_headroom(skb, len);
6550		if (!skb_new) {
6551			rp->tx_errors++;
6552			goto out_drop;
6553		}
6554		kfree_skb(skb);
6555		skb = skb_new;
6556	} else
6557		skb_orphan(skb);
6558
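	/* Note how far skb->data sits past a 16-byte boundary, then push
	 * room for that pad plus the internal tx_pkt_hdr; the pad count is
	 * fed to niu_compute_tx_flags() below as the TXHDR_PAD value.
	 */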
6559	align = ((unsigned long) skb->data & (16 - 1));
6560	headroom = align + sizeof(struct tx_pkt_hdr);
6561
6562	ehdr = (struct ethhdr *) skb->data;
6563	tp = (struct tx_pkt_hdr *) skb_push(skb, headroom);
6564
6565	len = skb->len - sizeof(struct tx_pkt_hdr);
6566	tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
6567	tp->resv = 0;
6568
6569	len = skb_headlen(skb);
6570	mapping = np->ops->map_single(np->device, skb->data,
6571				      len, DMA_TO_DEVICE);
6572
6573	prod = rp->prod;
6574
6575	rp->tx_buffs[prod].skb = skb;
6576	rp->tx_buffs[prod].mapping = mapping;
6577
6578	mrk = TX_DESC_SOP;
6579	if (++rp->mark_counter == rp->mark_freq) {
6580		rp->mark_counter = 0;
6581		mrk |= TX_DESC_MARK;
6582		rp->mark_pending++;
6583	}
6584
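	/* Count the descriptors this packet will consume: one per
	 * MAX_TX_DESC_LEN chunk of the linear area plus one per page
	 * fragment.  The total lands in the SOP descriptor's NUM_PTR
	 * field via niu_set_txd().
	 */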
6585	tlen = len;
6586	nfg = skb_shinfo(skb)->nr_frags;
6587	while (tlen > 0) {
6588		tlen -= MAX_TX_DESC_LEN;
6589		nfg++;
6590	}
6591
6592	while (len > 0) {
6593		unsigned int this_len = len;
6594
6595		if (this_len > MAX_TX_DESC_LEN)
6596			this_len = MAX_TX_DESC_LEN;
6597
6598		niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
6599		mrk = nfg = 0;
6600
6601		prod = NEXT_TX(rp, prod);
6602		mapping += this_len;
6603		len -= this_len;
6604	}
6605
6606	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6607		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6608
6609		len = frag->size;
6610		mapping = np->ops->map_page(np->device, frag->page,
6611					    frag->page_offset, len,
6612					    DMA_TO_DEVICE);
6613
6614		rp->tx_buffs[prod].skb = NULL;
6615		rp->tx_buffs[prod].mapping = mapping;
6616
6617		niu_set_txd(rp, prod, mapping, len, 0, 0);
6618
6619		prod = NEXT_TX(rp, prod);
6620	}
6621
6622	if (prod < rp->prod)
6623		rp->wrap_bit ^= TX_RING_KICK_WRAP;
6624	rp->prod = prod;
6625
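	/* Kick the hardware: the register takes the new producer index
	 * shifted by 3 (descriptors are 8 bytes, so this is the ring byte
	 * offset) plus a wrap bit that toggles each time the index rolls
	 * over.
	 */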
6626	nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));
6627
6628	if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
6629		netif_tx_stop_queue(txq);
6630		if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
6631			netif_tx_wake_queue(txq);
6632	}
6633
6634out:
6635	return NETDEV_TX_OK;
6636
6637out_drop:
6638	rp->tx_errors++;
6639	kfree_skb(skb);
6640	goto out;
6641}
6642
6643static int niu_change_mtu(struct net_device *dev, int new_mtu)
6644{
6645	struct niu *np = netdev_priv(dev);
6646	int err, orig_jumbo, new_jumbo;
6647
6648	if (new_mtu < 68 || new_mtu > NIU_MAX_MTU)
6649		return -EINVAL;
6650
6651	orig_jumbo = (dev->mtu > ETH_DATA_LEN);
6652	new_jumbo = (new_mtu > ETH_DATA_LEN);
6653
6654	dev->mtu = new_mtu;
6655
6656	if (!netif_running(dev) ||
6657	    (orig_jumbo == new_jumbo))
6658		return 0;
6659
6660	niu_full_shutdown(np, dev);
6661
6662	niu_free_channels(np);
6663
6664	niu_enable_napi(np);
6665
6666	err = niu_alloc_channels(np);
6667	if (err)
6668		return err;
6669
6670	spin_lock_irq(&np->lock);
6671
6672	err = niu_init_hw(np);
6673	if (!err) {
6674		init_timer(&np->timer);
6675		np->timer.expires = jiffies + HZ;
6676		np->timer.data = (unsigned long) np;
6677		np->timer.function = niu_timer;
6678
6679		err = niu_enable_interrupts(np, 1);
6680		if (err)
6681			niu_stop_hw(np);
6682	}
6683
6684	spin_unlock_irq(&np->lock);
6685
6686	if (!err) {
6687		netif_tx_start_all_queues(dev);
6688		if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
6689			netif_carrier_on(dev);
6690
6691		add_timer(&np->timer);
6692	}
6693
6694	return err;
6695}
6696
6697static void niu_get_drvinfo(struct net_device *dev,
6698			    struct ethtool_drvinfo *info)
6699{
6700	struct niu *np = netdev_priv(dev);
6701	struct niu_vpd *vpd = &np->vpd;
6702
6703	strcpy(info->driver, DRV_MODULE_NAME);
6704	strcpy(info->version, DRV_MODULE_VERSION);
6705	sprintf(info->fw_version, "%d.%d",
6706		vpd->fcode_major, vpd->fcode_minor);
6707	if (np->parent->plat_type != PLAT_TYPE_NIU)
6708		strcpy(info->bus_info, pci_name(np->pdev));
6709}
6710
6711static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6712{
6713	struct niu *np = netdev_priv(dev);
6714	struct niu_link_config *lp;
6715
6716	lp = &np->link_config;
6717
6718	memset(cmd, 0, sizeof(*cmd));
6719	cmd->phy_address = np->phy_addr;
6720	cmd->supported = lp->supported;
6721	cmd->advertising = lp->active_advertising;
6722	cmd->autoneg = lp->active_autoneg;
6723	cmd->speed = lp->active_speed;
6724	cmd->duplex = lp->active_duplex;
6725	cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
6726	cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ?
6727		XCVR_EXTERNAL : XCVR_INTERNAL;
6728
6729	return 0;
6730}
6731
6732static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6733{
6734	struct niu *np = netdev_priv(dev);
6735	struct niu_link_config *lp = &np->link_config;
6736
6737	lp->advertising = cmd->advertising;
6738	lp->speed = cmd->speed;
6739	lp->duplex = cmd->duplex;
6740	lp->autoneg = cmd->autoneg;
6741	return niu_init_link(np);
6742}
6743
6744static u32 niu_get_msglevel(struct net_device *dev)
6745{
6746	struct niu *np = netdev_priv(dev);
6747	return np->msg_enable;
6748}
6749
6750static void niu_set_msglevel(struct net_device *dev, u32 value)
6751{
6752	struct niu *np = netdev_priv(dev);
6753	np->msg_enable = value;
6754}
6755
6756static int niu_nway_reset(struct net_device *dev)
6757{
6758	struct niu *np = netdev_priv(dev);
6759
6760	if (np->link_config.autoneg)
6761		return niu_init_link(np);
6762
6763	return 0;
6764}
6765
6766static int niu_get_eeprom_len(struct net_device *dev)
6767{
6768	struct niu *np = netdev_priv(dev);
6769
6770	return np->eeprom_len;
6771}
6772
6773static int niu_get_eeprom(struct net_device *dev,
6774			  struct ethtool_eeprom *eeprom, u8 *data)
6775{
6776	struct niu *np = netdev_priv(dev);
6777	u32 offset, len, val;
6778
6779	offset = eeprom->offset;
6780	len = eeprom->len;
6781
6782	if (offset + len < offset)
6783		return -EINVAL;
6784	if (offset >= np->eeprom_len)
6785		return -EINVAL;
6786	if (offset + len > np->eeprom_len)
6787		len = eeprom->len = np->eeprom_len - offset;
6788
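	/* The SPROM is only addressable as 32-bit words through the
	 * ESPC_NCR registers, so read a misaligned head byte-wise, then
	 * whole words, then any tail bytes.
	 */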
6789	if (offset & 3) {
6790		u32 b_offset, b_count;
6791
6792		b_offset = offset & 3;
6793		b_count = 4 - b_offset;
6794		if (b_count > len)
6795			b_count = len;
6796
6797		val = nr64(ESPC_NCR((offset - b_offset) / 4));
6798		memcpy(data, ((char *)&val) + b_offset, b_count);
6799		data += b_count;
6800		len -= b_count;
6801		offset += b_count;
6802	}
6803	while (len >= 4) {
6804		val = nr64(ESPC_NCR(offset / 4));
6805		memcpy(data, &val, 4);
6806		data += 4;
6807		len -= 4;
6808		offset += 4;
6809	}
6810	if (len) {
6811		val = nr64(ESPC_NCR(offset / 4));
6812		memcpy(data, &val, len);
6813	}
6814	return 0;
6815}
6816
6817static void niu_ethflow_to_l3proto(int flow_type, u8 *pid)
6818{
6819	switch (flow_type) {
6820	case TCP_V4_FLOW:
6821	case TCP_V6_FLOW:
6822		*pid = IPPROTO_TCP;
6823		break;
6824	case UDP_V4_FLOW:
6825	case UDP_V6_FLOW:
6826		*pid = IPPROTO_UDP;
6827		break;
6828	case SCTP_V4_FLOW:
6829	case SCTP_V6_FLOW:
6830		*pid = IPPROTO_SCTP;
6831		break;
6832	case AH_V4_FLOW:
6833	case AH_V6_FLOW:
6834		*pid = IPPROTO_AH;
6835		break;
6836	case ESP_V4_FLOW:
6837	case ESP_V6_FLOW:
6838		*pid = IPPROTO_ESP;
6839		break;
6840	default:
6841		*pid = 0;
6842		break;
6843	}
6844}
6845
6846static int niu_class_to_ethflow(u64 class, int *flow_type)
6847{
6848	switch (class) {
6849	case CLASS_CODE_TCP_IPV4:
6850		*flow_type = TCP_V4_FLOW;
6851		break;
6852	case CLASS_CODE_UDP_IPV4:
6853		*flow_type = UDP_V4_FLOW;
6854		break;
6855	case CLASS_CODE_AH_ESP_IPV4:
6856		*flow_type = AH_V4_FLOW;
6857		break;
6858	case CLASS_CODE_SCTP_IPV4:
6859		*flow_type = SCTP_V4_FLOW;
6860		break;
6861	case CLASS_CODE_TCP_IPV6:
6862		*flow_type = TCP_V6_FLOW;
6863		break;
6864	case CLASS_CODE_UDP_IPV6:
6865		*flow_type = UDP_V6_FLOW;
6866		break;
6867	case CLASS_CODE_AH_ESP_IPV6:
6868		*flow_type = AH_V6_FLOW;
6869		break;
6870	case CLASS_CODE_SCTP_IPV6:
6871		*flow_type = SCTP_V6_FLOW;
6872		break;
6873	case CLASS_CODE_USER_PROG1:
6874	case CLASS_CODE_USER_PROG2:
6875	case CLASS_CODE_USER_PROG3:
6876	case CLASS_CODE_USER_PROG4:
6877		*flow_type = IP_USER_FLOW;
6878		break;
6879	default:
6880		return -EINVAL;
6881	}
6882
6883	return 0;
6884}
6885
6886static int niu_ethflow_to_class(int flow_type, u64 *class)
6887{
6888	switch (flow_type) {
6889	case TCP_V4_FLOW:
6890		*class = CLASS_CODE_TCP_IPV4;
6891		break;
6892	case UDP_V4_FLOW:
6893		*class = CLASS_CODE_UDP_IPV4;
6894		break;
6895	case AH_V4_FLOW:
6896	case ESP_V4_FLOW:
6897		*class = CLASS_CODE_AH_ESP_IPV4;
6898		break;
6899	case SCTP_V4_FLOW:
6900		*class = CLASS_CODE_SCTP_IPV4;
6901		break;
6902	case TCP_V6_FLOW:
6903		*class = CLASS_CODE_TCP_IPV6;
6904		break;
6905	case UDP_V6_FLOW:
6906		*class = CLASS_CODE_UDP_IPV6;
6907		break;
6908	case AH_V6_FLOW:
6909	case ESP_V6_FLOW:
6910		*class = CLASS_CODE_AH_ESP_IPV6;
6911		break;
6912	case SCTP_V6_FLOW:
6913		*class = CLASS_CODE_SCTP_IPV6;
6914		break;
6915	default:
6916		return 0;
6917	}
6918
6919	return 1;
6920}
6921
6922static u64 niu_flowkey_to_ethflow(u64 flow_key)
6923{
6924	u64 ethflow = 0;
6925
6926	if (flow_key & FLOW_KEY_L2DA)
6927		ethflow |= RXH_L2DA;
6928	if (flow_key & FLOW_KEY_VLAN)
6929		ethflow |= RXH_VLAN;
6930	if (flow_key & FLOW_KEY_IPSA)
6931		ethflow |= RXH_IP_SRC;
6932	if (flow_key & FLOW_KEY_IPDA)
6933		ethflow |= RXH_IP_DST;
6934	if (flow_key & FLOW_KEY_PROTO)
6935		ethflow |= RXH_L3_PROTO;
6936	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT))
6937		ethflow |= RXH_L4_B_0_1;
6938	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT))
6939		ethflow |= RXH_L4_B_2_3;
6940
6941	return ethflow;
6943}
6944
6945static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
6946{
6947	u64 key = 0;
6948
6949	if (ethflow & RXH_L2DA)
6950		key |= FLOW_KEY_L2DA;
6951	if (ethflow & RXH_VLAN)
6952		key |= FLOW_KEY_VLAN;
6953	if (ethflow & RXH_IP_SRC)
6954		key |= FLOW_KEY_IPSA;
6955	if (ethflow & RXH_IP_DST)
6956		key |= FLOW_KEY_IPDA;
6957	if (ethflow & RXH_L3_PROTO)
6958		key |= FLOW_KEY_PROTO;
6959	if (ethflow & RXH_L4_B_0_1)
6960		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT);
6961	if (ethflow & RXH_L4_B_2_3)
6962		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT);
6963
6964	*flow_key = key;
6965
6966	return 1;
6968}
6969
6970static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
6971{
6972	u64 class;
6973
6974	nfc->data = 0;
6975
6976	if (!niu_ethflow_to_class(nfc->flow_type, &class))
6977		return -EINVAL;
6978
6979	if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
6980	    TCAM_KEY_DISC)
6981		nfc->data = RXH_DISCARD;
6982	else
6983		nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
6984						      CLASS_CODE_USER_PROG1]);
6985	return 0;
6986}
6987
6988static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp,
6989					struct ethtool_rx_flow_spec *fsp)
6990{
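	/* Extract host-order values from the TCAM key/mask words first,
	 * then convert the ethtool fields to big-endian in place.
	 */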
6992	fsp->h_u.tcp_ip4_spec.ip4src = (tp->key[3] & TCAM_V4KEY3_SADDR) >>
6993		TCAM_V4KEY3_SADDR_SHIFT;
6994	fsp->h_u.tcp_ip4_spec.ip4dst = (tp->key[3] & TCAM_V4KEY3_DADDR) >>
6995		TCAM_V4KEY3_DADDR_SHIFT;
6996	fsp->m_u.tcp_ip4_spec.ip4src = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >>
6997		TCAM_V4KEY3_SADDR_SHIFT;
6998	fsp->m_u.tcp_ip4_spec.ip4dst = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >>
6999		TCAM_V4KEY3_DADDR_SHIFT;
7000
7001	fsp->h_u.tcp_ip4_spec.ip4src =
7002		cpu_to_be32(fsp->h_u.tcp_ip4_spec.ip4src);
7003	fsp->m_u.tcp_ip4_spec.ip4src =
7004		cpu_to_be32(fsp->m_u.tcp_ip4_spec.ip4src);
7005	fsp->h_u.tcp_ip4_spec.ip4dst =
7006		cpu_to_be32(fsp->h_u.tcp_ip4_spec.ip4dst);
7007	fsp->m_u.tcp_ip4_spec.ip4dst =
7008		cpu_to_be32(fsp->m_u.tcp_ip4_spec.ip4dst);
7009
7010	fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >>
7011		TCAM_V4KEY2_TOS_SHIFT;
7012	fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >>
7013		TCAM_V4KEY2_TOS_SHIFT;
7014
7015	switch (fsp->flow_type) {
7016	case TCP_V4_FLOW:
7017	case UDP_V4_FLOW:
7018	case SCTP_V4_FLOW:
7019		fsp->h_u.tcp_ip4_spec.psrc =
7020			((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
7021			 TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
7022		fsp->h_u.tcp_ip4_spec.pdst =
7023			((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
7024			 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
7025		fsp->m_u.tcp_ip4_spec.psrc =
7026			((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
7027			 TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
7028		fsp->m_u.tcp_ip4_spec.pdst =
7029			((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
7030			 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
7031
7032		fsp->h_u.tcp_ip4_spec.psrc =
7033			cpu_to_be16(fsp->h_u.tcp_ip4_spec.psrc);
7034		fsp->h_u.tcp_ip4_spec.pdst =
7035			cpu_to_be16(fsp->h_u.tcp_ip4_spec.pdst);
7036		fsp->m_u.tcp_ip4_spec.psrc =
7037			cpu_to_be16(fsp->m_u.tcp_ip4_spec.psrc);
7038		fsp->m_u.tcp_ip4_spec.pdst =
7039			cpu_to_be16(fsp->m_u.tcp_ip4_spec.pdst);
7040		break;
7041	case AH_V4_FLOW:
7042	case ESP_V4_FLOW:
7043		fsp->h_u.ah_ip4_spec.spi =
7044			(tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
7045			TCAM_V4KEY2_PORT_SPI_SHIFT;
7046		fsp->m_u.ah_ip4_spec.spi =
7047			(tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
7048			TCAM_V4KEY2_PORT_SPI_SHIFT;
7049
7050		fsp->h_u.ah_ip4_spec.spi =
7051			cpu_to_be32(fsp->h_u.ah_ip4_spec.spi);
7052		fsp->m_u.ah_ip4_spec.spi =
7053			cpu_to_be32(fsp->m_u.ah_ip4_spec.spi);
7054		break;
7055	case IP_USER_FLOW:
7056		fsp->h_u.usr_ip4_spec.l4_4_bytes =
7057			(tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
7058			TCAM_V4KEY2_PORT_SPI_SHIFT;
7059		fsp->m_u.usr_ip4_spec.l4_4_bytes =
7060			(tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
7061			TCAM_V4KEY2_PORT_SPI_SHIFT;
7062
7063		fsp->h_u.usr_ip4_spec.l4_4_bytes =
7064			cpu_to_be32(fsp->h_u.usr_ip4_spec.l4_4_bytes);
7065		fsp->m_u.usr_ip4_spec.l4_4_bytes =
7066			cpu_to_be32(fsp->m_u.usr_ip4_spec.l4_4_bytes);
7067
7068		fsp->h_u.usr_ip4_spec.proto =
7069			(tp->key[2] & TCAM_V4KEY2_PROTO) >>
7070			TCAM_V4KEY2_PROTO_SHIFT;
7071		fsp->m_u.usr_ip4_spec.proto =
7072			(tp->key_mask[2] & TCAM_V4KEY2_PROTO) >>
7073			TCAM_V4KEY2_PROTO_SHIFT;
7074
7075		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
7076		break;
7077	default:
7078		break;
7079	}
7080}
7081
7082static int niu_get_ethtool_tcam_entry(struct niu *np,
7083				      struct ethtool_rxnfc *nfc)
7084{
7085	struct niu_parent *parent = np->parent;
7086	struct niu_tcam_entry *tp;
7087	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
7088	u16 idx;
7089	u64 class;
7090	int ret = 0;
7091
7092	idx = tcam_get_index(np, (u16)nfc->fs.location);
7093
7094	tp = &parent->tcam[idx];
7095	if (!tp->valid) {
7096		netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n",
7097			    parent->index, (u16)nfc->fs.location, idx);
7098		return -EINVAL;
7099	}
7100
7101	/* fill the flow spec entry */
7102	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
7103		TCAM_V4KEY0_CLASS_CODE_SHIFT;
7104	ret = niu_class_to_ethflow(class, &fsp->flow_type);
7105
7106	if (ret < 0) {
7107		netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
7108			    parent->index);
7109		ret = -EINVAL;
7110		goto out;
7111	}
7112
7113	if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) {
7114		u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >>
7115			TCAM_V4KEY2_PROTO_SHIFT;
7116		if (proto == IPPROTO_ESP) {
7117			if (fsp->flow_type == AH_V4_FLOW)
7118				fsp->flow_type = ESP_V4_FLOW;
7119			else
7120				fsp->flow_type = ESP_V6_FLOW;
7121		}
7122	}
7123
7124	switch (fsp->flow_type) {
7125	case TCP_V4_FLOW:
7126	case UDP_V4_FLOW:
7127	case SCTP_V4_FLOW:
7128	case AH_V4_FLOW:
7129	case ESP_V4_FLOW:
7130		niu_get_ip4fs_from_tcam_key(tp, fsp);
7131		break;
7132	case TCP_V6_FLOW:
7133	case UDP_V6_FLOW:
7134	case SCTP_V6_FLOW:
7135	case AH_V6_FLOW:
7136	case ESP_V6_FLOW:
7137		/* Not yet implemented */
7138		ret = -EINVAL;
7139		break;
7140	case IP_USER_FLOW:
7141		niu_get_ip4fs_from_tcam_key(tp, fsp);
7142		break;
7143	default:
7144		ret = -EINVAL;
7145		break;
7146	}
7147
7148	if (ret < 0)
7149		goto out;
7150
7151	if (tp->assoc_data & TCAM_ASSOCDATA_DISC)
7152		fsp->ring_cookie = RX_CLS_FLOW_DISC;
7153	else
7154		fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >>
7155			TCAM_ASSOCDATA_OFFSET_SHIFT;
7156
7157	/* put the tcam size here */
7158	nfc->data = tcam_get_size(np);
7159out:
7160	return ret;
7161}
7162
7163static int niu_get_ethtool_tcam_all(struct niu *np,
7164				    struct ethtool_rxnfc *nfc,
7165				    u32 *rule_locs)
7166{
7167	struct niu_parent *parent = np->parent;
7168	struct niu_tcam_entry *tp;
7169	int i, idx, cnt;
7170	unsigned long flags;
7171	int ret = 0;
7172
7173	/* put the tcam size here */
7174	nfc->data = tcam_get_size(np);
7175
7176	niu_lock_parent(np, flags);
7177	for (cnt = 0, i = 0; i < nfc->data; i++) {
7178		idx = tcam_get_index(np, i);
7179		tp = &parent->tcam[idx];
7180		if (!tp->valid)
7181			continue;
7182		if (cnt == nfc->rule_cnt) {
7183			ret = -EMSGSIZE;
7184			break;
7185		}
7186		rule_locs[cnt] = i;
7187		cnt++;
7188	}
7189	niu_unlock_parent(np, flags);
7190
7191	return ret;
7192}
7193
7194static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
7195		       void *rule_locs)
7196{
7197	struct niu *np = netdev_priv(dev);
7198	int ret = 0;
7199
7200	switch (cmd->cmd) {
7201	case ETHTOOL_GRXFH:
7202		ret = niu_get_hash_opts(np, cmd);
7203		break;
7204	case ETHTOOL_GRXRINGS:
7205		cmd->data = np->num_rx_rings;
7206		break;
7207	case ETHTOOL_GRXCLSRLCNT:
7208		cmd->rule_cnt = tcam_get_valid_entry_cnt(np);
7209		break;
7210	case ETHTOOL_GRXCLSRULE:
7211		ret = niu_get_ethtool_tcam_entry(np, cmd);
7212		break;
7213	case ETHTOOL_GRXCLSRLALL:
7214		ret = niu_get_ethtool_tcam_all(np, cmd, (u32 *)rule_locs);
7215		break;
7216	default:
7217		ret = -EINVAL;
7218		break;
7219	}
7220
7221	return ret;
7222}
7223
7224static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
7225{
7226	u64 class;
7227	u64 flow_key = 0;
7228	unsigned long flags;
7229
7230	if (!niu_ethflow_to_class(nfc->flow_type, &class))
7231		return -EINVAL;
7232
7233	if (class < CLASS_CODE_USER_PROG1 ||
7234	    class > CLASS_CODE_SCTP_IPV6)
7235		return -EINVAL;
7236
7237	if (nfc->data & RXH_DISCARD) {
7238		niu_lock_parent(np, flags);
7239		flow_key = np->parent->tcam_key[class -
7240					       CLASS_CODE_USER_PROG1];
7241		flow_key |= TCAM_KEY_DISC;
7242		nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
7243		np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key;
7244		niu_unlock_parent(np, flags);
7245		return 0;
7246	} else {
7247		/* Discard was set before, but is not set now */
7248		if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
7249		    TCAM_KEY_DISC) {
7250			niu_lock_parent(np, flags);
7251			flow_key = np->parent->tcam_key[class -
7252					       CLASS_CODE_USER_PROG1];
7253			flow_key &= ~TCAM_KEY_DISC;
7254			nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1),
7255			     flow_key);
7256			np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] =
7257				flow_key;
7258			niu_unlock_parent(np, flags);
7259		}
7260	}
7261
7262	if (!niu_ethflow_to_flowkey(nfc->data, &flow_key))
7263		return -EINVAL;
7264
7265	niu_lock_parent(np, flags);
7266	nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
7267	np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key;
7268	niu_unlock_parent(np, flags);
7269
7270	return 0;
7271}
7272
7273static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp,
7274				       struct niu_tcam_entry *tp,
7275				       int l2_rdc_tab, u64 class)
7276{
7277	u8 pid = 0;
7278	u32 sip, dip, sipm, dipm, spi, spim;
7279	u16 sport, dport, spm, dpm;
7280
7281	sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src);
7282	sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src);
7283	dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst);
7284	dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst);
7285
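	/* key[0] holds the class code and key[1] the L2 RDC table number;
	 * key[3] packs the source address in its upper half and the
	 * destination address in its lower half.  key[2] collects TOS,
	 * protocol and the L4 ports/SPI below.
	 */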
7286	tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT;
7287	tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE;
7288	tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT;
7289	tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM;
7290
7291	tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT;
7292	tp->key[3] |= dip;
7293
7294	tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT;
7295	tp->key_mask[3] |= dipm;
7296
7297	tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos <<
7298		       TCAM_V4KEY2_TOS_SHIFT);
7299	tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos <<
7300			    TCAM_V4KEY2_TOS_SHIFT);
7301	switch (fsp->flow_type) {
7302	case TCP_V4_FLOW:
7303	case UDP_V4_FLOW:
7304	case SCTP_V4_FLOW:
7305		sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
7306		spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
7307		dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
7308		dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
7309
7310		tp->key[2] |= (((u64)sport << 16) | dport);
7311		tp->key_mask[2] |= (((u64)spm << 16) | dpm);
7312		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
7313		break;
7314	case AH_V4_FLOW:
7315	case ESP_V4_FLOW:
7316		spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi);
7317		spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi);
7318
7319		tp->key[2] |= spi;
7320		tp->key_mask[2] |= spim;
7321		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
7322		break;
7323	case IP_USER_FLOW:
7324		spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes);
7325		spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes);
7326
7327		tp->key[2] |= spi;
7328		tp->key_mask[2] |= spim;
7329		pid = fsp->h_u.usr_ip4_spec.proto;
7330		break;
7331	default:
7332		break;
7333	}
7334
7335	tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT);
7336	if (pid)
7337		tp->key_mask[2] |= TCAM_V4KEY2_PROTO;
7339}
7340
7341static int niu_add_ethtool_tcam_entry(struct niu *np,
7342				      struct ethtool_rxnfc *nfc)
7343{
7344	struct niu_parent *parent = np->parent;
7345	struct niu_tcam_entry *tp;
7346	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
7347	struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port];
7348	int l2_rdc_table = rdc_table->first_table_num;
7349	u16 idx;
7350	u64 class;
7351	unsigned long flags;
7352	int err, ret;
7353
7354	ret = 0;
7355
7356	/* Bounds-check the u32 location before truncating it to u16. */
7357	if (nfc->fs.location >= tcam_get_size(np))
7358		return -EINVAL;
	idx = (u16)nfc->fs.location;
7359
7360	if (fsp->flow_type == IP_USER_FLOW) {
7361		int i;
7362		int add_usr_cls = 0;
7363		int ipv6 = 0;
7364		struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec;
7365		struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec;
7366
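		/* The programmable user IP classifier slots are shared:
		 * reuse one already set up for this protocol (bumping its
		 * refcount), otherwise claim a free slot.
		 */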
7367		niu_lock_parent(np, flags);
7368
7369		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
7370			if (parent->l3_cls[i]) {
7371				if (uspec->proto == parent->l3_cls_pid[i]) {
7372					class = parent->l3_cls[i];
7373					parent->l3_cls_refcnt[i]++;
7374					add_usr_cls = 1;
7375					break;
7376				}
7377			} else {
7378				/* Program new user IP class */
7379				switch (i) {
7380				case 0:
7381					class = CLASS_CODE_USER_PROG1;
7382					break;
7383				case 1:
7384					class = CLASS_CODE_USER_PROG2;
7385					break;
7386				case 2:
7387					class = CLASS_CODE_USER_PROG3;
7388					break;
7389				case 3:
7390					class = CLASS_CODE_USER_PROG4;
7391					break;
7392				default:
7393					break;
7394				}
7395				if (uspec->ip_ver == ETH_RX_NFC_IP6)
7396					ipv6 = 1;
7397				ret = tcam_user_ip_class_set(np, class, ipv6,
7398							     uspec->proto,
7399							     uspec->tos,
7400							     umask->tos);
7401				if (ret)
7402					goto out;
7403
7404				ret = tcam_user_ip_class_enable(np, class, 1);
7405				if (ret)
7406					goto out;
7407				parent->l3_cls[i] = class;
7408				parent->l3_cls_pid[i] = uspec->proto;
7409				parent->l3_cls_refcnt[i]++;
7410				add_usr_cls = 1;
7411				break;
7412			}
7413		}
7414		if (!add_usr_cls) {
7415			netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n",
7416				    parent->index, __func__, uspec->proto);
7417			ret = -EINVAL;
7418			goto out;
7419		}
7420		niu_unlock_parent(np, flags);
7421	} else {
7422		if (!niu_ethflow_to_class(fsp->flow_type, &class))
7423			return -EINVAL;
7425	}
7426
7427	niu_lock_parent(np, flags);
7428
7429	idx = tcam_get_index(np, idx);
7430	tp = &parent->tcam[idx];
7431
7432	memset(tp, 0, sizeof(*tp));
7433
7434	/* fill in the tcam key and mask */
7435	switch (fsp->flow_type) {
7436	case TCP_V4_FLOW:
7437	case UDP_V4_FLOW:
7438	case SCTP_V4_FLOW:
7439	case AH_V4_FLOW:
7440	case ESP_V4_FLOW:
7441		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
7442		break;
7443	case TCP_V6_FLOW:
7444	case UDP_V6_FLOW:
7445	case SCTP_V6_FLOW:
7446	case AH_V6_FLOW:
7447	case ESP_V6_FLOW:
7448		/* Not yet implemented */
7449		netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n",
7450			    parent->index, __func__, fsp->flow_type);
7451		ret = -EINVAL;
7452		goto out;
7453	case IP_USER_FLOW:
7454		if (fsp->h_u.usr_ip4_spec.ip_ver == ETH_RX_NFC_IP4) {
7455			niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table,
7456						   class);
7457		} else {
7458			/* Not yet implemented */
7459			netdev_info(np->dev, "niu%d: In %s(): usr flow for IPv6 not implemented\n",
7460				    parent->index, __func__);
7461			ret = -EINVAL;
7462			goto out;
7463		}
7464		break;
7465	default:
7466		netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
7467			    parent->index, __func__, fsp->flow_type);
7468		ret = -EINVAL;
7469		goto out;
7470	}
7471
7472	/* fill in the assoc data */
7473	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
7474		tp->assoc_data = TCAM_ASSOCDATA_DISC;
7475	} else {
7476		if (fsp->ring_cookie >= np->num_rx_rings) {
7477			netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n",
7478				    parent->index, __func__,
7479				    (long long)fsp->ring_cookie);
7480			ret = -EINVAL;
7481			goto out;
7482		}
7483		tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
7484				  (fsp->ring_cookie <<
7485				   TCAM_ASSOCDATA_OFFSET_SHIFT));
7486	}
7487
7488	err = tcam_write(np, idx, tp->key, tp->key_mask);
7489	if (err) {
7490		ret = -EINVAL;
7491		goto out;
7492	}
7493	err = tcam_assoc_write(np, idx, tp->assoc_data);
7494	if (err) {
7495		ret = -EINVAL;
7496		goto out;
7497	}
7498
7499	/* validate the entry */
7500	tp->valid = 1;
7501	np->clas.tcam_valid_entries++;
7502out:
7503	niu_unlock_parent(np, flags);
7504
7505	return ret;
7506}
7507
7508static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc)
7509{
7510	struct niu_parent *parent = np->parent;
7511	struct niu_tcam_entry *tp;
7512	u16 idx;
7513	unsigned long flags;
7514	u64 class;
7515	int ret = 0;
7516
7517	if (loc >= tcam_get_size(np))
7518		return -EINVAL;
7519
7520	niu_lock_parent(np, flags);
7521
7522	idx = tcam_get_index(np, loc);
7523	tp = &parent->tcam[idx];
7524
7525	/* if the entry is of a user-defined class, then update it */
7526	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
7527		TCAM_V4KEY0_CLASS_CODE_SHIFT;
7528
7529	if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) {
7530		int i;
7531		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
7532			if (parent->l3_cls[i] == class) {
7533				parent->l3_cls_refcnt[i]--;
7534				if (!parent->l3_cls_refcnt[i]) {
7535					/* disable class */
7536					ret = tcam_user_ip_class_enable(np,
7537									class,
7538									0);
7539					if (ret)
7540						goto out;
7541					parent->l3_cls[i] = 0;
7542					parent->l3_cls_pid[i] = 0;
7543				}
7544				break;
7545			}
7546		}
7547		if (i == NIU_L3_PROG_CLS) {
7548			netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n",
7549				    parent->index, __func__,
7550				    (unsigned long long)class);
7551			ret = -EINVAL;
7552			goto out;
7553		}
7554	}
7555
7556	ret = tcam_flush(np, idx);
7557	if (ret)
7558		goto out;
7559
7560	/* invalidate the entry */
7561	tp->valid = 0;
7562	np->clas.tcam_valid_entries--;
7563out:
7564	niu_unlock_parent(np, flags);
7565
7566	return ret;
7567}
7568
7569static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
7570{
7571	struct niu *np = netdev_priv(dev);
7572	int ret = 0;
7573
7574	switch (cmd->cmd) {
7575	case ETHTOOL_SRXFH:
7576		ret = niu_set_hash_opts(np, cmd);
7577		break;
7578	case ETHTOOL_SRXCLSRLINS:
7579		ret = niu_add_ethtool_tcam_entry(np, cmd);
7580		break;
7581	case ETHTOOL_SRXCLSRLDEL:
7582		ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location);
7583		break;
7584	default:
7585		ret = -EINVAL;
7586		break;
7587	}
7588
7589	return ret;
7590}
7591
7592static const struct {
7593	const char string[ETH_GSTRING_LEN];
7594} niu_xmac_stat_keys[] = {
7595	{ "tx_frames" },
7596	{ "tx_bytes" },
7597	{ "tx_fifo_errors" },
7598	{ "tx_overflow_errors" },
7599	{ "tx_max_pkt_size_errors" },
7600	{ "tx_underflow_errors" },
7601	{ "rx_local_faults" },
7602	{ "rx_remote_faults" },
7603	{ "rx_link_faults" },
7604	{ "rx_align_errors" },
7605	{ "rx_frags" },
7606	{ "rx_mcasts" },
7607	{ "rx_bcasts" },
7608	{ "rx_hist_cnt1" },
7609	{ "rx_hist_cnt2" },
7610	{ "rx_hist_cnt3" },
7611	{ "rx_hist_cnt4" },
7612	{ "rx_hist_cnt5" },
7613	{ "rx_hist_cnt6" },
7614	{ "rx_hist_cnt7" },
7615	{ "rx_octets" },
7616	{ "rx_code_violations" },
7617	{ "rx_len_errors" },
7618	{ "rx_crc_errors" },
7619	{ "rx_underflows" },
7620	{ "rx_overflows" },
7621	{ "pause_off_state" },
7622	{ "pause_on_state" },
7623	{ "pause_received" },
7624};
7625
7626#define NUM_XMAC_STAT_KEYS	ARRAY_SIZE(niu_xmac_stat_keys)
7627
7628static const struct {
7629	const char string[ETH_GSTRING_LEN];
7630} niu_bmac_stat_keys[] = {
7631	{ "tx_underflow_errors" },
7632	{ "tx_max_pkt_size_errors" },
7633	{ "tx_bytes" },
7634	{ "tx_frames" },
7635	{ "rx_overflows" },
7636	{ "rx_frames" },
7637	{ "rx_align_errors" },
7638	{ "rx_crc_errors" },
7639	{ "rx_len_errors" },
7640	{ "pause_off_state" },
7641	{ "pause_on_state" },
7642	{ "pause_received" },
7643};
7644
7645#define NUM_BMAC_STAT_KEYS	ARRAY_SIZE(niu_bmac_stat_keys)
7646
7647static const struct {
7648	const char string[ETH_GSTRING_LEN];
7649} niu_rxchan_stat_keys[] = {
7650	{ "rx_channel" },
7651	{ "rx_packets" },
7652	{ "rx_bytes" },
7653	{ "rx_dropped" },
7654	{ "rx_errors" },
7655};
7656
7657#define NUM_RXCHAN_STAT_KEYS	ARRAY_SIZE(niu_rxchan_stat_keys)
7658
7659static const struct {
7660	const char string[ETH_GSTRING_LEN];
7661} niu_txchan_stat_keys[] = {
7662	{ "tx_channel" },
7663	{ "tx_packets" },
7664	{ "tx_bytes" },
7665	{ "tx_errors" },
7666};
7667
7668#define NUM_TXCHAN_STAT_KEYS	ARRAY_SIZE(niu_txchan_stat_keys)
7669
7670static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data)
7671{
7672	struct niu *np = netdev_priv(dev);
7673	int i;
7674
7675	if (stringset != ETH_SS_STATS)
7676		return;
7677
7678	if (np->flags & NIU_FLAGS_XMAC) {
7679		memcpy(data, niu_xmac_stat_keys,
7680		       sizeof(niu_xmac_stat_keys));
7681		data += sizeof(niu_xmac_stat_keys);
7682	} else {
7683		memcpy(data, niu_bmac_stat_keys,
7684		       sizeof(niu_bmac_stat_keys));
7685		data += sizeof(niu_bmac_stat_keys);
7686	}
7687	for (i = 0; i < np->num_rx_rings; i++) {
7688		memcpy(data, niu_rxchan_stat_keys,
7689		       sizeof(niu_rxchan_stat_keys));
7690		data += sizeof(niu_rxchan_stat_keys);
7691	}
7692	for (i = 0; i < np->num_tx_rings; i++) {
7693		memcpy(data, niu_txchan_stat_keys,
7694		       sizeof(niu_txchan_stat_keys));
7695		data += sizeof(niu_txchan_stat_keys);
7696	}
7697}
7698
7699static int niu_get_sset_count(struct net_device *dev, int stringset)
7700{
7701	struct niu *np = netdev_priv(dev);
7702
7703	if (stringset != ETH_SS_STATS)
7704		return -EINVAL;
7705
7706	return (np->flags & NIU_FLAGS_XMAC ?
7707		NUM_XMAC_STAT_KEYS :
7708		NUM_BMAC_STAT_KEYS) +
7709		(np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) +
7710		(np->num_tx_rings * NUM_TXCHAN_STAT_KEYS);
7711}
7712
7713static void niu_get_ethtool_stats(struct net_device *dev,
7714				  struct ethtool_stats *stats, u64 *data)
7715{
7716	struct niu *np = netdev_priv(dev);
7717	int i;
7718
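	/* The layout here must match niu_get_strings(): the MAC stat
	 * block first, then five u64s per RX channel and four per TX
	 * channel.
	 */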
7719	niu_sync_mac_stats(np);
7720	if (np->flags & NIU_FLAGS_XMAC) {
7721		memcpy(data, &np->mac_stats.xmac,
7722		       sizeof(struct niu_xmac_stats));
7723		data += (sizeof(struct niu_xmac_stats) / sizeof(u64));
7724	} else {
7725		memcpy(data, &np->mac_stats.bmac,
7726		       sizeof(struct niu_bmac_stats));
7727		data += (sizeof(struct niu_bmac_stats) / sizeof(u64));
7728	}
7729	for (i = 0; i < np->num_rx_rings; i++) {
7730		struct rx_ring_info *rp = &np->rx_rings[i];
7731
7732		niu_sync_rx_discard_stats(np, rp, 0);
7733
7734		data[0] = rp->rx_channel;
7735		data[1] = rp->rx_packets;
7736		data[2] = rp->rx_bytes;
7737		data[3] = rp->rx_dropped;
7738		data[4] = rp->rx_errors;
7739		data += 5;
7740	}
7741	for (i = 0; i < np->num_tx_rings; i++) {
7742		struct tx_ring_info *rp = &np->tx_rings[i];
7743
7744		data[0] = rp->tx_channel;
7745		data[1] = rp->tx_packets;
7746		data[2] = rp->tx_bytes;
7747		data[3] = rp->tx_errors;
7748		data += 4;
7749	}
7750}
7751
7752static u64 niu_led_state_save(struct niu *np)
7753{
7754	if (np->flags & NIU_FLAGS_XMAC)
7755		return nr64_mac(XMAC_CONFIG);
7756	else
7757		return nr64_mac(BMAC_XIF_CONFIG);
7758}
7759
7760static void niu_led_state_restore(struct niu *np, u64 val)
7761{
7762	if (np->flags & NIU_FLAGS_XMAC)
7763		nw64_mac(XMAC_CONFIG, val);
7764	else
7765		nw64_mac(BMAC_XIF_CONFIG, val);
7766}
7767
7768static void niu_force_led(struct niu *np, int on)
7769{
7770	u64 val, reg, bit;
7771
7772	if (np->flags & NIU_FLAGS_XMAC) {
7773		reg = XMAC_CONFIG;
7774		bit = XMAC_CONFIG_FORCE_LED_ON;
7775	} else {
7776		reg = BMAC_XIF_CONFIG;
7777		bit = BMAC_XIF_CONFIG_LINK_LED;
7778	}
7779
7780	val = nr64_mac(reg);
7781	if (on)
7782		val |= bit;
7783	else
7784		val &= ~bit;
7785	nw64_mac(reg, val);
7786}
7787
7788static int niu_phys_id(struct net_device *dev, u32 data)
7789{
7790	struct niu *np = netdev_priv(dev);
7791	u64 orig_led_state;
7792	int i;
7793
7794	if (!netif_running(dev))
7795		return -EAGAIN;
7796
7797	if (data == 0)
7798		data = 2;
7799
7800	orig_led_state = niu_led_state_save(np);
7801	for (i = 0; i < (data * 2); i++) {
7802		int on = ((i % 2) == 0);
7803
7804		niu_force_led(np, on);
7805
7806		if (msleep_interruptible(500))
7807			break;
7808	}
7809	niu_led_state_restore(np, orig_led_state);
7810
7811	return 0;
7812}
7813
7814static int niu_set_flags(struct net_device *dev, u32 data)
7815{
7816	return ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH);
7817}
7818
7819static const struct ethtool_ops niu_ethtool_ops = {
7820	.get_drvinfo		= niu_get_drvinfo,
7821	.get_link		= ethtool_op_get_link,
7822	.get_msglevel		= niu_get_msglevel,
7823	.set_msglevel		= niu_set_msglevel,
7824	.nway_reset		= niu_nway_reset,
7825	.get_eeprom_len		= niu_get_eeprom_len,
7826	.get_eeprom		= niu_get_eeprom,
7827	.get_settings		= niu_get_settings,
7828	.set_settings		= niu_set_settings,
7829	.get_strings		= niu_get_strings,
7830	.get_sset_count		= niu_get_sset_count,
7831	.get_ethtool_stats	= niu_get_ethtool_stats,
7832	.phys_id		= niu_phys_id,
7833	.get_rxnfc		= niu_get_nfc,
7834	.set_rxnfc		= niu_set_nfc,
7835	.set_flags		= niu_set_flags,
7836	.get_flags		= ethtool_op_get_flags,
7837};
7838
7839static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
7840			      int ldg, int ldn)
7841{
7842	if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX)
7843		return -EINVAL;
7844	if (ldn < 0 || ldn > LDN_MAX)
7845		return -EINVAL;
7846
7847	parent->ldg_map[ldn] = ldg;
7848
7849	if (np->parent->plat_type == PLAT_TYPE_NIU) {
7850		/* On N2 NIU, the ldn-->ldg assignments are set up and fixed by
7851		 * the firmware, and we're not supposed to change them.
7852		 * Validate the mapping, because if it's wrong we probably
7853		 * won't get any interrupts and that's painful to debug.
7854		 */
7855		if (nr64(LDG_NUM(ldn)) != ldg) {
7856			dev_err(np->device, "Port %u, mismatched LDG assignment for ldn %d, should be %d, is %llu\n",
7857				np->port, ldn, ldg,
7858				(unsigned long long) nr64(LDG_NUM(ldn)));
7859			return -EINVAL;
7860		}
7861	} else
7862		nw64(LDG_NUM(ldn), ldg);
7863
7864	return 0;
7865}
7866
7867static int niu_set_ldg_timer_res(struct niu *np, int res)
7868{
7869	if (res < 0 || res > LDG_TIMER_RES_VAL)
7870		return -EINVAL;
7871
7873	nw64(LDG_TIMER_RES, res);
7874
7875	return 0;
7876}
7877
7878static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector)
7879{
7880	if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) ||
7881	    (func < 0 || func > 3) ||
7882	    (vector < 0 || vector > 0x1f))
7883		return -EINVAL;
7884
7885	nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector);
7886
7887	return 0;
7888}
7889
7890static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr)
7891{
7892	u64 frame, frame_base = (ESPC_PIO_STAT_READ_START |
7893				 (addr << ESPC_PIO_STAT_ADDR_SHIFT));
7894	int limit;
7895
7896	if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT))
7897		return -EINVAL;
7898
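	/* Start a read and poll for completion.  Note the start/poll
	 * sequence is performed twice below; only the data returned by
	 * the second pass is consumed.
	 */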
7899	frame = frame_base;
7900	nw64(ESPC_PIO_STAT, frame);
7901	limit = 64;
7902	do {
7903		udelay(5);
7904		frame = nr64(ESPC_PIO_STAT);
7905		if (frame & ESPC_PIO_STAT_READ_END)
7906			break;
7907	} while (limit--);
7908	if (!(frame & ESPC_PIO_STAT_READ_END)) {
7909		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
7910			(unsigned long long) frame);
7911		return -ENODEV;
7912	}
7913
7914	frame = frame_base;
7915	nw64(ESPC_PIO_STAT, frame);
7916	limit = 64;
7917	do {
7918		udelay(5);
7919		frame = nr64(ESPC_PIO_STAT);
7920		if (frame & ESPC_PIO_STAT_READ_END)
7921			break;
7922	} while (limit--);
7923	if (!(frame & ESPC_PIO_STAT_READ_END)) {
7924		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
7925			(unsigned long long) frame);
7926		return -ENODEV;
7927	}
7928
7929	frame = nr64(ESPC_PIO_STAT);
7930	return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT;
7931}
7932
7933static int __devinit niu_pci_eeprom_read16(struct niu *np, u32 off)
7934{
7935	int err = niu_pci_eeprom_read(np, off);
7936	u16 val;
7937
7938	if (err < 0)
7939		return err;
7940	val = (err << 8);
7941	err = niu_pci_eeprom_read(np, off + 1);
7942	if (err < 0)
7943		return err;
7944	val |= (err & 0xff);
7945
7946	return val;
7947}
7948
7949static int __devinit niu_pci_eeprom_read16_swp(struct niu *np, u32 off)
7950{
7951	int err = niu_pci_eeprom_read(np, off);
7952	u16 val;
7953
7954	if (err < 0)
7955		return err;
7956
7957	val = (err & 0xff);
7958	err = niu_pci_eeprom_read(np, off + 1);
7959	if (err < 0)
7960		return err;
7961
7962	val |= (err & 0xff) << 8;
7963
7964	return val;
7965}
7966
7967static int __devinit niu_pci_vpd_get_propname(struct niu *np,
7968					      u32 off,
7969					      char *namebuf,
7970					      int namebuf_len)
7971{
7972	int i;
7973
7974	for (i = 0; i < namebuf_len; i++) {
7975		int err = niu_pci_eeprom_read(np, off + i);
7976		if (err < 0)
7977			return err;
7978		*namebuf++ = err;
7979		if (!err)
7980			break;
7981	}
7982	if (i >= namebuf_len)
7983		return -EINVAL;
7984
7985	return i + 1;
7986}
7987
7988static void __devinit niu_vpd_parse_version(struct niu *np)
7989{
7990	struct niu_vpd *vpd = &np->vpd;
7991	int len = strlen(vpd->version) + 1;
7992	const char *s = vpd->version;
7993	int i;
7994
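	/* Scan the free-form version string for an "FCode x.y" stanza and
	 * only trust the VPD if it meets the minimum major/minor version.
	 */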
7995	for (i = 0; i < len - 5; i++) {
7996		if (!strncmp(s + i, "FCode ", 6))
7997			break;
7998	}
7999	if (i >= len - 5)
8000		return;
8001
8002	s += i + 5;
8003	sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor);
8004
8005	netif_printk(np, probe, KERN_DEBUG, np->dev,
8006		     "VPD_SCAN: FCODE major(%d) minor(%d)\n",
8007		     vpd->fcode_major, vpd->fcode_minor);
8008	if (vpd->fcode_major > NIU_VPD_MIN_MAJOR ||
8009	    (vpd->fcode_major == NIU_VPD_MIN_MAJOR &&
8010	     vpd->fcode_minor >= NIU_VPD_MIN_MINOR))
8011		np->flags |= NIU_FLAGS_VPD_VALID;
8012}
8013
8014/* ESPC_PIO_EN_ENABLE must be set */
8015static int __devinit niu_pci_vpd_scan_props(struct niu *np,
8016					    u32 start, u32 end)
8017{
8018	unsigned int found_mask = 0;
8019#define FOUND_MASK_MODEL	0x00000001
8020#define FOUND_MASK_BMODEL	0x00000002
8021#define FOUND_MASK_VERS		0x00000004
8022#define FOUND_MASK_MAC		0x00000008
8023#define FOUND_MASK_NMAC		0x00000010
8024#define FOUND_MASK_PHY		0x00000020
8025#define FOUND_MASK_ALL		0x0000003f
8026
8027	netif_printk(np, probe, KERN_DEBUG, np->dev,
8028		     "VPD_SCAN: start[%x] end[%x]\n", start, end);
8029	while (start < end) {
8030		int len, err, instance, type, prop_len;
8031		char namebuf[64];
8032		u8 *prop_buf;
8033		int max_len;
8034
8035		if (found_mask == FOUND_MASK_ALL) {
8036			niu_vpd_parse_version(np);
8037			return 1;
8038		}
8039
8040		err = niu_pci_eeprom_read(np, start + 2);
8041		if (err < 0)
8042			return err;
8043		len = err;
8044		start += 3;
8045
8046		instance = niu_pci_eeprom_read(np, start);
8047		type = niu_pci_eeprom_read(np, start + 3);
8048		prop_len = niu_pci_eeprom_read(np, start + 4);
8049		err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
8050		if (err < 0)
8051			return err;
8052
8053		prop_buf = NULL;
8054		max_len = 0;
8055		if (!strcmp(namebuf, "model")) {
8056			prop_buf = np->vpd.model;
8057			max_len = NIU_VPD_MODEL_MAX;
8058			found_mask |= FOUND_MASK_MODEL;
8059		} else if (!strcmp(namebuf, "board-model")) {
8060			prop_buf = np->vpd.board_model;
8061			max_len = NIU_VPD_BD_MODEL_MAX;
8062			found_mask |= FOUND_MASK_BMODEL;
8063		} else if (!strcmp(namebuf, "version")) {
8064			prop_buf = np->vpd.version;
8065			max_len = NIU_VPD_VERSION_MAX;
8066			found_mask |= FOUND_MASK_VERS;
8067		} else if (!strcmp(namebuf, "local-mac-address")) {
8068			prop_buf = np->vpd.local_mac;
8069			max_len = ETH_ALEN;
8070			found_mask |= FOUND_MASK_MAC;
8071		} else if (!strcmp(namebuf, "num-mac-addresses")) {
8072			prop_buf = &np->vpd.mac_num;
8073			max_len = 1;
8074			found_mask |= FOUND_MASK_NMAC;
8075		} else if (!strcmp(namebuf, "phy-type")) {
8076			prop_buf = np->vpd.phy_type;
8077			max_len = NIU_VPD_PHY_TYPE_MAX;
8078			found_mask |= FOUND_MASK_PHY;
8079		}
8080
8081		if (max_len && prop_len > max_len) {
8082			dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len);
8083			return -EINVAL;
8084		}
8085
8086		if (prop_buf) {
8087			u32 off = start + 5 + err;
8088			int i;
8089
8090			netif_printk(np, probe, KERN_DEBUG, np->dev,
8091				     "VPD_SCAN: Reading in property [%s] len[%d]\n",
8092				     namebuf, prop_len);
8093			for (i = 0; i < prop_len; i++) {
				err = niu_pci_eeprom_read(np, off + i);
				if (err < 0)
					return err;
				*prop_buf++ = err;
			}
8095		}
8096
8097		start += len;
8098	}
8099
8100	return 0;
8101}
8102
8103/* ESPC_PIO_EN_ENABLE must be set */
8104static void __devinit niu_pci_vpd_fetch(struct niu *np, u32 start)
8105{
8106	u32 offset;
8107	int err;
8108
8109	err = niu_pci_eeprom_read16_swp(np, start + 1);
8110	if (err < 0)
8111		return;
8112
8113	offset = err + 3;
8114
8115	while (start + offset < ESPC_EEPROM_SIZE) {
8116		u32 here = start + offset;
8117		u32 end;
8118
8119		err = niu_pci_eeprom_read(np, here);
8120		if (err != 0x90)
8121			return;
8122
8123		err = niu_pci_eeprom_read16_swp(np, here + 1);
8124		if (err < 0)
8125			return;
8126
8127		here = start + offset + 3;
8128		end = start + offset + err;
8129
8130		offset += err;
8131
8132		err = niu_pci_vpd_scan_props(np, here, end);
8133		if (err < 0 || err == 1)
8134			return;
8135	}
8136}
8137
8138/* ESPC_PIO_EN_ENABLE must be set */
8139static u32 __devinit niu_pci_vpd_offset(struct niu *np)
8140{
8141	u32 start = 0, end = ESPC_EEPROM_SIZE, ret;
8142	int err;
8143
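	/* Walk the PCI expansion ROM images: verify the 0x55aa ROM
	 * signature and the "PCIR" data structure, skip non-OBP images
	 * (lengths are in 512-byte units), and return the offset of the
	 * VPD identifier-string tag (0x82).
	 */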
8144	while (start < end) {
8145		ret = start;
8146
8147		/* ROM header signature?  */
8148		err = niu_pci_eeprom_read16(np, start +  0);
8149		if (err != 0x55aa)
8150			return 0;
8151
8152		/* Apply offset to PCI data structure.  */
8153		err = niu_pci_eeprom_read16(np, start + 23);
8154		if (err < 0)
8155			return 0;
8156		start += err;
8157
8158		/* Check for "PCIR" signature.  */
8159		err = niu_pci_eeprom_read16(np, start +  0);
8160		if (err != 0x5043)
8161			return 0;
8162		err = niu_pci_eeprom_read16(np, start +  2);
8163		if (err != 0x4952)
8164			return 0;
8165
8166		/* Check for OBP image type.  */
8167		err = niu_pci_eeprom_read(np, start + 20);
8168		if (err < 0)
8169			return 0;
8170		if (err != 0x01) {
8171			err = niu_pci_eeprom_read(np, ret + 2);
8172			if (err < 0)
8173				return 0;
8174
8175			start = ret + (err * 512);
8176			continue;
8177		}
8178
8179		err = niu_pci_eeprom_read16_swp(np, start + 8);
8180		if (err < 0)
8181			return 0;	/* u32 return; 0 means no VPD found */
8182		ret += err;
8183
8184		err = niu_pci_eeprom_read(np, ret + 0);
8185		if (err != 0x82)
8186			return 0;
8187
8188		return ret;
8189	}
8190
8191	return 0;
8192}
8193
8194static int __devinit niu_phy_type_prop_decode(struct niu *np,
8195					      const char *phy_prop)
8196{
8197	if (!strcmp(phy_prop, "mif")) {
8198		/* 1G copper, MII */
8199		np->flags &= ~(NIU_FLAGS_FIBER |
8200			       NIU_FLAGS_10G);
8201		np->mac_xcvr = MAC_XCVR_MII;
8202	} else if (!strcmp(phy_prop, "xgf")) {
8203		/* 10G fiber, XPCS */
8204		np->flags |= (NIU_FLAGS_10G |
8205			      NIU_FLAGS_FIBER);
8206		np->mac_xcvr = MAC_XCVR_XPCS;
8207	} else if (!strcmp(phy_prop, "pcs")) {
8208		/* 1G fiber, PCS */
8209		np->flags &= ~NIU_FLAGS_10G;
8210		np->flags |= NIU_FLAGS_FIBER;
8211		np->mac_xcvr = MAC_XCVR_PCS;
8212	} else if (!strcmp(phy_prop, "xgc")) {
8213		/* 10G copper, XPCS */
8214		np->flags |= NIU_FLAGS_10G;
8215		np->flags &= ~NIU_FLAGS_FIBER;
8216		np->mac_xcvr = MAC_XCVR_XPCS;
8217	} else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
8218		/* 10G Serdes or 1G Serdes, default to 10G */
8219		np->flags |= NIU_FLAGS_10G;
8220		np->flags &= ~NIU_FLAGS_FIBER;
8221		np->flags |= NIU_FLAGS_XCVR_SERDES;
8222		np->mac_xcvr = MAC_XCVR_XPCS;
8223	} else {
8224		return -EINVAL;
8225	}
8226	return 0;
8227}
8228
8229static int niu_pci_vpd_get_nports(struct niu *np)
8230{
8231	int ports = 0;
8232
8233	if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
8234	    (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
8235	    (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
8236	    (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
8237	    (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) {
8238		ports = 4;
8239	} else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
8240		   (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
8241		   (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
8242		   (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
8243		ports = 2;
8244	}
8245
8246	return ports;
8247}
8248
8249static void __devinit niu_pci_vpd_validate(struct niu *np)
8250{
8251	struct net_device *dev = np->dev;
8252	struct niu_vpd *vpd = &np->vpd;
8253	u8 val8;
8254
8255	if (!is_valid_ether_addr(&vpd->local_mac[0])) {
8256		dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n");
8257
8258		np->flags &= ~NIU_FLAGS_VPD_VALID;
8259		return;
8260	}
8261
8262	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
8263	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
8264		np->flags |= NIU_FLAGS_10G;
8265		np->flags &= ~NIU_FLAGS_FIBER;
8266		np->flags |= NIU_FLAGS_XCVR_SERDES;
8267		np->mac_xcvr = MAC_XCVR_PCS;
8268		if (np->port > 1) {
8269			np->flags |= NIU_FLAGS_FIBER;
8270			np->flags &= ~NIU_FLAGS_10G;
8271		}
8272		if (np->flags & NIU_FLAGS_10G)
8273			np->mac_xcvr = MAC_XCVR_XPCS;
8274	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
8275		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
8276			      NIU_FLAGS_HOTPLUG_PHY);
8277	} else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
8278		dev_err(np->device, "Illegal phy string [%s]\n",
8279			np->vpd.phy_type);
8280		dev_err(np->device, "Falling back to SPROM\n");
8281		np->flags &= ~NIU_FLAGS_VPD_VALID;
8282		return;
8283	}
8284
8285	memcpy(dev->perm_addr, vpd->local_mac, ETH_ALEN);
8286
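	/* Each port uses base MAC + port number; propagate the carry into
	 * byte 4 if byte 5 wraps.
	 */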
8287	val8 = dev->perm_addr[5];
8288	dev->perm_addr[5] += np->port;
8289	if (dev->perm_addr[5] < val8)
8290		dev->perm_addr[4]++;
8291
8292	memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
8293}
8294
8295static int __devinit niu_pci_probe_sprom(struct niu *np)
8296{
8297	struct net_device *dev = np->dev;
8298	int len, i;
8299	u64 val, sum;
8300	u8 val8;
8301
8302	val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ);
8303	val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT;
8304	len = val / 4;
8305
8306	np->eeprom_len = len;
8307
8308	netif_printk(np, probe, KERN_DEBUG, np->dev,
8309		     "SPROM: Image size %llu\n", (unsigned long long)val);
8310
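	/* Every byte of the image participates in the checksum; the low
	 * byte of the sum must come out to 0xab for a valid SPROM.
	 */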
8311	sum = 0;
8312	for (i = 0; i < len; i++) {
8313		val = nr64(ESPC_NCR(i));
8314		sum += (val >>  0) & 0xff;
8315		sum += (val >>  8) & 0xff;
8316		sum += (val >> 16) & 0xff;
8317		sum += (val >> 24) & 0xff;
8318	}
8319	netif_printk(np, probe, KERN_DEBUG, np->dev,
8320		     "SPROM: Checksum %x\n", (int)(sum & 0xff));
8321	if ((sum & 0xff) != 0xab) {
8322		dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff));
8323		return -EINVAL;
8324	}
8325
8326	val = nr64(ESPC_PHY_TYPE);
8327	switch (np->port) {
8328	case 0:
8329		val8 = (val & ESPC_PHY_TYPE_PORT0) >>
8330			ESPC_PHY_TYPE_PORT0_SHIFT;
8331		break;
8332	case 1:
8333		val8 = (val & ESPC_PHY_TYPE_PORT1) >>
8334			ESPC_PHY_TYPE_PORT1_SHIFT;
8335		break;
8336	case 2:
8337		val8 = (val & ESPC_PHY_TYPE_PORT2) >>
8338			ESPC_PHY_TYPE_PORT2_SHIFT;
8339		break;
8340	case 3:
8341		val8 = (val & ESPC_PHY_TYPE_PORT3) >>
8342			ESPC_PHY_TYPE_PORT3_SHIFT;
8343		break;
8344	default:
8345		dev_err(np->device, "Bogus port number %u\n",
8346			np->port);
8347		return -EINVAL;
8348	}
8349	netif_printk(np, probe, KERN_DEBUG, np->dev,
8350		     "SPROM: PHY type %x\n", val8);
8351
8352	switch (val8) {
8353	case ESPC_PHY_TYPE_1G_COPPER:
8354		/* 1G copper, MII */
8355		np->flags &= ~(NIU_FLAGS_FIBER |
8356			       NIU_FLAGS_10G);
8357		np->mac_xcvr = MAC_XCVR_MII;
8358		break;
8359
8360	case ESPC_PHY_TYPE_1G_FIBER:
8361		/* 1G fiber, PCS */
8362		np->flags &= ~NIU_FLAGS_10G;
8363		np->flags |= NIU_FLAGS_FIBER;
8364		np->mac_xcvr = MAC_XCVR_PCS;
8365		break;
8366
8367	case ESPC_PHY_TYPE_10G_COPPER:
8368		/* 10G copper, XPCS */
8369		np->flags |= NIU_FLAGS_10G;
8370		np->flags &= ~NIU_FLAGS_FIBER;
8371		np->mac_xcvr = MAC_XCVR_XPCS;
8372		break;
8373
8374	case ESPC_PHY_TYPE_10G_FIBER:
8375		/* 10G fiber, XPCS */
8376		np->flags |= (NIU_FLAGS_10G |
8377			      NIU_FLAGS_FIBER);
8378		np->mac_xcvr = MAC_XCVR_XPCS;
8379		break;
8380
8381	default:
8382		dev_err(np->device, "Bogus SPROM phy type %u\n", val8);
8383		return -EINVAL;
8384	}
8385
8386	val = nr64(ESPC_MAC_ADDR0);
8387	netif_printk(np, probe, KERN_DEBUG, np->dev,
8388		     "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
8389	dev->perm_addr[0] = (val >>  0) & 0xff;
8390	dev->perm_addr[1] = (val >>  8) & 0xff;
8391	dev->perm_addr[2] = (val >> 16) & 0xff;
8392	dev->perm_addr[3] = (val >> 24) & 0xff;
8393
8394	val = nr64(ESPC_MAC_ADDR1);
8395	netif_printk(np, probe, KERN_DEBUG, np->dev,
8396		     "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
8397	dev->perm_addr[4] = (val >>  0) & 0xff;
8398	dev->perm_addr[5] = (val >>  8) & 0xff;
8399
8400	if (!is_valid_ether_addr(&dev->perm_addr[0])) {
8401		dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
8402			dev->perm_addr);
8403		return -EINVAL;
8404	}
8405
8406	val8 = dev->perm_addr[5];
8407	dev->perm_addr[5] += np->port;
8408	if (dev->perm_addr[5] < val8)
8409		dev->perm_addr[4]++;
8410
8411	memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
8412
8413	val = nr64(ESPC_MOD_STR_LEN);
8414	netif_printk(np, probe, KERN_DEBUG, np->dev,
8415		     "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val);
8416	if (val >= 8 * 4)
8417		return -EINVAL;
8418
8419	for (i = 0; i < val; i += 4) {
8420		u64 tmp = nr64(ESPC_NCR(5 + (i / 4)));
8421
8422		np->vpd.model[i + 3] = (tmp >>  0) & 0xff;
8423		np->vpd.model[i + 2] = (tmp >>  8) & 0xff;
8424		np->vpd.model[i + 1] = (tmp >> 16) & 0xff;
8425		np->vpd.model[i + 0] = (tmp >> 24) & 0xff;
8426	}
8427	np->vpd.model[val] = '\0';
8428
8429	val = nr64(ESPC_BD_MOD_STR_LEN);
8430	netif_printk(np, probe, KERN_DEBUG, np->dev,
8431		     "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val);
8432	if (val >= 4 * 4)
8433		return -EINVAL;
8434
8435	for (i = 0; i < val; i += 4) {
8436		u64 tmp = nr64(ESPC_NCR(14 + (i / 4)));
8437
8438		np->vpd.board_model[i + 3] = (tmp >>  0) & 0xff;
8439		np->vpd.board_model[i + 2] = (tmp >>  8) & 0xff;
8440		np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff;
8441		np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff;
8442	}
8443	np->vpd.board_model[val] = '\0';
8444
8445	np->vpd.mac_num =
8446		nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
8447	netif_printk(np, probe, KERN_DEBUG, np->dev,
8448		     "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num);
8449
8450	return 0;
8451}
8452
8453static int __devinit niu_get_and_validate_port(struct niu *np)
8454{
8455	struct niu_parent *parent = np->parent;
8456
8457	if (np->port <= 1)
8458		np->flags |= NIU_FLAGS_XMAC;
8459
8460	if (!parent->num_ports) {
8461		if (parent->plat_type == PLAT_TYPE_NIU) {
8462			parent->num_ports = 2;
8463		} else {
8464			parent->num_ports = niu_pci_vpd_get_nports(np);
8465			if (!parent->num_ports) {
8466				/* Fall back to SPROM as last resort.
8467				 * This will fail on most cards.
8468				 */
8469				parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
8470					ESPC_NUM_PORTS_MACS_VAL;
8471
8472				/* All of the current probing methods fail on
8473				 * Maramba on-board parts.
8474				 */
8475				if (!parent->num_ports)
8476					parent->num_ports = 4;
8477			}
8478		}
8479	}
8480
8481	if (np->port >= parent->num_ports)
8482		return -ENODEV;
8483
8484	return 0;
8485}
8486
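/* Record one discovered PHY in the probe table.  Negative device IDs
 * mean the MDIO/MII read failed (no PHY at that address) and are
 * silently skipped.  Only PHY models this driver knows how to drive
 * are accepted: BCM8704/BCM8706 and MRVL88X2011 for 10G PMA/PMD and
 * PCS, and BCM5464R for 1G MII.
 */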
8487static int __devinit phy_record(struct niu_parent *parent,
8488				struct phy_probe_info *p,
8489				int dev_id_1, int dev_id_2, u8 phy_port,
8490				int type)
8491{
8492	u32 id = (dev_id_1 << 16) | dev_id_2;
8493	u8 idx;
8494
8495	if (dev_id_1 < 0 || dev_id_2 < 0)
8496		return 0;
8497	if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
8498		if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
8499		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) &&
8500		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706))
8501			return 0;
8502	} else {
8503		if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
8504			return 0;
8505	}
8506
8507	pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
8508		parent->index, id,
8509		type == PHY_TYPE_PMA_PMD ? "PMA/PMD" :
8510		type == PHY_TYPE_PCS ? "PCS" : "MII",
8511		phy_port);
8512
8513	if (p->cur[type] >= NIU_MAX_PORTS) {
8514		pr_err("Too many PHY ports\n");
8515		return -EINVAL;
8516	}
8517	idx = p->cur[type];
8518	p->phy_id[type][idx] = id;
8519	p->phy_port[type][idx] = phy_port;
8520	p->cur[type] = idx + 1;
8521	return 0;
8522}
8523
8524static int __devinit port_has_10g(struct phy_probe_info *p, int port)
8525{
8526	int i;
8527
8528	for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) {
8529		if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port)
8530			return 1;
8531	}
8532	for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) {
8533		if (p->phy_port[PHY_TYPE_PCS][i] == port)
8534			return 1;
8535	}
8536
8537	return 0;
8538}
8539
8540static int __devinit count_10g_ports(struct phy_probe_info *p, int *lowest)
8541{
8542	int port, cnt;
8543
8544	cnt = 0;
8545	*lowest = 32;
8546	for (port = 8; port < 32; port++) {
8547		if (port_has_10g(p, port)) {
8548			if (!cnt)
8549				*lowest = port;
8550			cnt++;
8551		}
8552	}
8553
8554	return cnt;
8555}
8556
8557static int __devinit count_1g_ports(struct phy_probe_info *p, int *lowest)
8558{
8559	*lowest = 32;
8560	if (p->cur[PHY_TYPE_MII])
8561		*lowest = p->phy_port[PHY_TYPE_MII][0];
8562
8563	return p->cur[PHY_TYPE_MII];
8564}
8565
8566static void __devinit niu_n2_divide_channels(struct niu_parent *parent)
8567{
8568	int num_ports = parent->num_ports;
8569	int i;
8570
8571	for (i = 0; i < num_ports; i++) {
8572		parent->rxchan_per_port[i] = (16 / num_ports);
8573		parent->txchan_per_port[i] = (16 / num_ports);
8574
8575		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
8576			parent->index, i,
8577			parent->rxchan_per_port[i],
8578			parent->txchan_per_port[i]);
8579	}
8580}
8581
8582static void __devinit niu_divide_channels(struct niu_parent *parent,
8583					  int num_10g, int num_1g)
8584{
8585	int num_ports = parent->num_ports;
8586	int rx_chans_per_10g, rx_chans_per_1g;
8587	int tx_chans_per_10g, tx_chans_per_1g;
8588	int i, tot_rx, tot_tx;
8589
8590	if (!num_10g || !num_1g) {
8591		rx_chans_per_10g = rx_chans_per_1g =
8592			(NIU_NUM_RXCHAN / num_ports);
8593		tx_chans_per_10g = tx_chans_per_1g =
8594			(NIU_NUM_TXCHAN / num_ports);
8595	} else {
8596		rx_chans_per_1g = NIU_NUM_RXCHAN / 8;
8597		rx_chans_per_10g = (NIU_NUM_RXCHAN -
8598				    (rx_chans_per_1g * num_1g)) /
8599			num_10g;
8600
8601		tx_chans_per_1g = NIU_NUM_TXCHAN / 6;
8602		tx_chans_per_10g = (NIU_NUM_TXCHAN -
8603				    (tx_chans_per_1g * num_1g)) /
8604			num_10g;
8605	}
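	/* Worked example, assuming the niu.h channel counts
	 * NIU_NUM_RXCHAN == 16 and NIU_NUM_TXCHAN == 24: for a mixed
	 * 2 x 10G + 2 x 1G configuration each 1G port gets
	 * 16/8 == 2 RX and 24/6 == 4 TX channels, and each 10G port
	 * gets (16 - 4)/2 == 6 RX and (24 - 8)/2 == 8 TX channels,
	 * accounting for exactly all 16 RX and 24 TX channels.
	 */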
8606
8607	tot_rx = tot_tx = 0;
8608	for (i = 0; i < num_ports; i++) {
8609		int type = phy_decode(parent->port_phy, i);
8610
8611		if (type == PORT_TYPE_10G) {
8612			parent->rxchan_per_port[i] = rx_chans_per_10g;
8613			parent->txchan_per_port[i] = tx_chans_per_10g;
8614		} else {
8615			parent->rxchan_per_port[i] = rx_chans_per_1g;
8616			parent->txchan_per_port[i] = tx_chans_per_1g;
8617		}
8618		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
8619			parent->index, i,
8620			parent->rxchan_per_port[i],
8621			parent->txchan_per_port[i]);
8622		tot_rx += parent->rxchan_per_port[i];
8623		tot_tx += parent->txchan_per_port[i];
8624	}
8625
8626	if (tot_rx > NIU_NUM_RXCHAN) {
8627		pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n",
8628		       parent->index, tot_rx);
8629		for (i = 0; i < num_ports; i++)
8630			parent->rxchan_per_port[i] = 1;
8631	}
8632	if (tot_tx > NIU_NUM_TXCHAN) {
8633		pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n",
8634		       parent->index, tot_tx);
8635		for (i = 0; i < num_ports; i++)
8636			parent->txchan_per_port[i] = 1;
8637	}
8638	if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
8639		pr_warning("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
8640			   parent->index, tot_rx, tot_tx);
8641	}
8642}
8643
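/* Carve the RDC (receive DMA channel) tables up evenly between the
 * ports, then fill each table by cycling round-robin over the port's
 * own RX channels so every table slot maps to a channel the port
 * actually owns.  The port's first channel also becomes its default
 * RDC.
 */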
8644static void __devinit niu_divide_rdc_groups(struct niu_parent *parent,
8645					    int num_10g, int num_1g)
8646{
8647	int i, num_ports = parent->num_ports;
8648	int rdc_group, rdc_groups_per_port;
8649	int rdc_channel_base;
8650
8651	rdc_group = 0;
8652	rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports;
8653
8654	rdc_channel_base = 0;
8655
8656	for (i = 0; i < num_ports; i++) {
8657		struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i];
8658		int grp, num_channels = parent->rxchan_per_port[i];
8659		int this_channel_offset;
8660
8661		tp->first_table_num = rdc_group;
8662		tp->num_tables = rdc_groups_per_port;
8663		this_channel_offset = 0;
8664		for (grp = 0; grp < tp->num_tables; grp++) {
8665			struct rdc_table *rt = &tp->tables[grp];
8666			int slot;
8667
8668			pr_info("niu%d: Port %d RDC tbl(%d) [ ",
8669				parent->index, i, tp->first_table_num + grp);
8670			for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
8671				rt->rxdma_channel[slot] =
8672					rdc_channel_base + this_channel_offset;
8673
8674				pr_cont("%d ", rt->rxdma_channel[slot]);
8675
8676				if (++this_channel_offset == num_channels)
8677					this_channel_offset = 0;
8678			}
8679			pr_cont("]\n");
8680		}
8681
8682		parent->rdc_default[i] = rdc_channel_base;
8683
8684		rdc_channel_base += num_channels;
8685		rdc_group += rdc_groups_per_port;
8686	}
8687}
8688
8689static int __devinit fill_phy_probe_info(struct niu *np,
8690					 struct niu_parent *parent,
8691					 struct phy_probe_info *info)
8692{
8693	unsigned long flags;
8694	int port, err;
8695
8696	memset(info, 0, sizeof(*info));
8697
8698	/* Ports 0 through 7 are reserved for the on-board SERDES; probe the rest.  */
8699	niu_lock_parent(np, flags);
8700	err = 0;
8701	for (port = 8; port < 32; port++) {
8702		int dev_id_1, dev_id_2;
8703
8704		dev_id_1 = mdio_read(np, port,
8705				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1);
8706		dev_id_2 = mdio_read(np, port,
8707				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2);
8708		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
8709				 PHY_TYPE_PMA_PMD);
8710		if (err)
8711			break;
8712		dev_id_1 = mdio_read(np, port,
8713				     NIU_PCS_DEV_ADDR, MII_PHYSID1);
8714		dev_id_2 = mdio_read(np, port,
8715				     NIU_PCS_DEV_ADDR, MII_PHYSID2);
8716		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
8717				 PHY_TYPE_PCS);
8718		if (err)
8719			break;
8720		dev_id_1 = mii_read(np, port, MII_PHYSID1);
8721		dev_id_2 = mii_read(np, port, MII_PHYSID2);
8722		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
8723				 PHY_TYPE_MII);
8724		if (err)
8725			break;
8726	}
8727	niu_unlock_parent(np, flags);
8728
8729	return err;
8730}
8731
8732static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
8733{
8734	struct phy_probe_info *info = &parent->phy_probe_info;
8735	int lowest_10g, lowest_1g;
8736	int num_10g, num_1g;
8737	u32 val;
8738	int err;
8739
8740	num_10g = num_1g = 0;
8741
8742	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
8743	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
8744		num_10g = 0;
8745		num_1g = 2;
8746		parent->plat_type = PLAT_TYPE_ATCA_CP3220;
8747		parent->num_ports = 4;
8748		val = (phy_encode(PORT_TYPE_1G, 0) |
8749		       phy_encode(PORT_TYPE_1G, 1) |
8750		       phy_encode(PORT_TYPE_1G, 2) |
8751		       phy_encode(PORT_TYPE_1G, 3));
8752	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
8753		num_10g = 2;
8754		num_1g = 0;
8755		parent->num_ports = 2;
8756		val = (phy_encode(PORT_TYPE_10G, 0) |
8757		       phy_encode(PORT_TYPE_10G, 1));
8758	} else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
8759		   (parent->plat_type == PLAT_TYPE_NIU)) {
8760		/* this is the Monza case */
8761		if (np->flags & NIU_FLAGS_10G) {
8762			val = (phy_encode(PORT_TYPE_10G, 0) |
8763			       phy_encode(PORT_TYPE_10G, 1));
8764		} else {
8765			val = (phy_encode(PORT_TYPE_1G, 0) |
8766			       phy_encode(PORT_TYPE_1G, 1));
8767		}
8768	} else {
8769		err = fill_phy_probe_info(np, parent, info);
8770		if (err)
8771			return err;
8772
8773		num_10g = count_10g_ports(info, &lowest_10g);
8774		num_1g = count_1g_ports(info, &lowest_1g);
8775
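		/* The switch key packs the 10G port count into the high
		 * nibble and the 1G count into the low nibble; e.g. two
		 * 10G plus two 1G ports probes as 0x22, and a single
		 * 10G port as 0x10.
		 */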
8776		switch ((num_10g << 4) | num_1g) {
8777		case 0x24:
8778			if (lowest_1g == 10)
8779				parent->plat_type = PLAT_TYPE_VF_P0;
8780			else if (lowest_1g == 26)
8781				parent->plat_type = PLAT_TYPE_VF_P1;
8782			else
8783				goto unknown_vg_1g_port;
8784
8785			/* fallthru */
8786		case 0x22:
8787			val = (phy_encode(PORT_TYPE_10G, 0) |
8788			       phy_encode(PORT_TYPE_10G, 1) |
8789			       phy_encode(PORT_TYPE_1G, 2) |
8790			       phy_encode(PORT_TYPE_1G, 3));
8791			break;
8792
8793		case 0x20:
8794			val = (phy_encode(PORT_TYPE_10G, 0) |
8795			       phy_encode(PORT_TYPE_10G, 1));
8796			break;
8797
8798		case 0x10:
8799			val = phy_encode(PORT_TYPE_10G, np->port);
8800			break;
8801
8802		case 0x14:
8803			if (lowest_1g == 10)
8804				parent->plat_type = PLAT_TYPE_VF_P0;
8805			else if (lowest_1g == 26)
8806				parent->plat_type = PLAT_TYPE_VF_P1;
8807			else
8808				goto unknown_vg_1g_port;
8809
8810			/* fallthru */
8811		case 0x13:
8812			if ((lowest_10g & 0x7) == 0)
8813				val = (phy_encode(PORT_TYPE_10G, 0) |
8814				       phy_encode(PORT_TYPE_1G, 1) |
8815				       phy_encode(PORT_TYPE_1G, 2) |
8816				       phy_encode(PORT_TYPE_1G, 3));
8817			else
8818				val = (phy_encode(PORT_TYPE_1G, 0) |
8819				       phy_encode(PORT_TYPE_10G, 1) |
8820				       phy_encode(PORT_TYPE_1G, 2) |
8821				       phy_encode(PORT_TYPE_1G, 3));
8822			break;
8823
8824		case 0x04:
8825			if (lowest_1g == 10)
8826				parent->plat_type = PLAT_TYPE_VF_P0;
8827			else if (lowest_1g == 26)
8828				parent->plat_type = PLAT_TYPE_VF_P1;
8829			else
8830				goto unknown_vg_1g_port;
8831
8832			val = (phy_encode(PORT_TYPE_1G, 0) |
8833			       phy_encode(PORT_TYPE_1G, 1) |
8834			       phy_encode(PORT_TYPE_1G, 2) |
8835			       phy_encode(PORT_TYPE_1G, 3));
8836			break;
8837
8838		default:
8839			pr_err("Unsupported port config 10G[%d] 1G[%d]\n",
8840			       num_10g, num_1g);
8841			return -EINVAL;
8842		}
8843	}
8844
8845	parent->port_phy = val;
8846
8847	if (parent->plat_type == PLAT_TYPE_NIU)
8848		niu_n2_divide_channels(parent);
8849	else
8850		niu_divide_channels(parent, num_10g, num_1g);
8851
8852	niu_divide_rdc_groups(parent, num_10g, num_1g);
8853
8854	return 0;
8855
8856unknown_vg_1g_port:
8857	pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g);
8858	return -EINVAL;
8859}
8860
8861static int __devinit niu_probe_ports(struct niu *np)
8862{
8863	struct niu_parent *parent = np->parent;
8864	int err, i;
8865
8866	if (parent->port_phy == PORT_PHY_UNKNOWN) {
8867		err = walk_phys(np, parent);
8868		if (err)
8869			return err;
8870
8871		niu_set_ldg_timer_res(np, 2);
8872		for (i = 0; i <= LDN_MAX; i++)
8873			niu_ldn_irq_enable(np, i, 0);
8874	}
8875
8876	if (parent->port_phy == PORT_PHY_INVALID)
8877		return -EINVAL;
8878
8879	return 0;
8880}
8881
8882static int __devinit niu_classifier_swstate_init(struct niu *np)
8883{
8884	struct niu_classifier *cp = &np->clas;
8885
8886	cp->tcam_top = (u16) np->port;
8887	cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
8888	cp->h1_init = 0xffffffff;
8889	cp->h2_init = 0xffff;
8890
8891	return fflp_early_init(np);
8892}
8893
8894static void __devinit niu_link_config_init(struct niu *np)
8895{
8896	struct niu_link_config *lp = &np->link_config;
8897
8898	lp->advertising = (ADVERTISED_10baseT_Half |
8899			   ADVERTISED_10baseT_Full |
8900			   ADVERTISED_100baseT_Half |
8901			   ADVERTISED_100baseT_Full |
8902			   ADVERTISED_1000baseT_Half |
8903			   ADVERTISED_1000baseT_Full |
8904			   ADVERTISED_10000baseT_Full |
8905			   ADVERTISED_Autoneg);
8906	lp->speed = lp->active_speed = SPEED_INVALID;
8907	lp->duplex = DUPLEX_FULL;
8908	lp->active_duplex = DUPLEX_INVALID;
8909	lp->autoneg = 1;
8910	lp->loopback_mode = LOOPBACK_DISABLED;
8911}
8912
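/* Ports 0 and 1 sit behind the XMACs and ports 2 and 3 behind the
 * BMACs; the BMAC ports have no XPCS block, which is marked here with
 * a ~0UL offset.
 */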
8913static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np)
8914{
8915	switch (np->port) {
8916	case 0:
8917		np->mac_regs = np->regs + XMAC_PORT0_OFF;
8918		np->ipp_off  = 0x00000;
8919		np->pcs_off  = 0x04000;
8920		np->xpcs_off = 0x02000;
8921		break;
8922
8923	case 1:
8924		np->mac_regs = np->regs + XMAC_PORT1_OFF;
8925		np->ipp_off  = 0x08000;
8926		np->pcs_off  = 0x0a000;
8927		np->xpcs_off = 0x08000;
8928		break;
8929
8930	case 2:
8931		np->mac_regs = np->regs + BMAC_PORT2_OFF;
8932		np->ipp_off  = 0x04000;
8933		np->pcs_off  = 0x0e000;
8934		np->xpcs_off = ~0UL;
8935		break;
8936
8937	case 3:
8938		np->mac_regs = np->regs + BMAC_PORT3_OFF;
8939		np->ipp_off  = 0x0c000;
8940		np->pcs_off  = 0x12000;
8941		np->xpcs_off = ~0UL;
8942		break;
8943
8944	default:
8945		dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port);
8946		return -EINVAL;
8947	}
8948
8949	return 0;
8950}
8951
8952static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
8953{
8954	struct msix_entry msi_vec[NIU_NUM_LDG];
8955	struct niu_parent *parent = np->parent;
8956	struct pci_dev *pdev = np->pdev;
8957	int i, num_irqs, err;
8958	u8 first_ldg;
8959
8960	first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
8961	for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
8962		ldg_num_map[i] = first_ldg + i;
8963
8964	num_irqs = (parent->rxchan_per_port[np->port] +
8965		    parent->txchan_per_port[np->port] +
8966		    (np->port == 0 ? 3 : 1));
8967	BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));
8968
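	/* pci_enable_msix() returns 0 on success, a negative errno on
	 * hard failure, or a positive count of the vectors actually
	 * available; in the last case retry with that smaller count.
	 */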
8969retry:
8970	for (i = 0; i < num_irqs; i++) {
8971		msi_vec[i].vector = 0;
8972		msi_vec[i].entry = i;
8973	}
8974
8975	err = pci_enable_msix(pdev, msi_vec, num_irqs);
8976	if (err < 0) {
8977		np->flags &= ~NIU_FLAGS_MSIX;
8978		return;
8979	}
8980	if (err > 0) {
8981		num_irqs = err;
8982		goto retry;
8983	}
8984
8985	np->flags |= NIU_FLAGS_MSIX;
8986	for (i = 0; i < num_irqs; i++)
8987		np->ldg[i].irq = msi_vec[i].vector;
8988	np->num_ldg = num_irqs;
8989}
8990
8991static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
8992{
8993#ifdef CONFIG_SPARC64
8994	struct platform_device *op = np->op;
8995	const u32 *int_prop;
8996	int i;
8997
8998	int_prop = of_get_property(op->dev.of_node, "interrupts", NULL);
8999	if (!int_prop)
9000		return -ENODEV;
9001
9002	for (i = 0; i < op->archdata.num_irqs; i++) {
9003		ldg_num_map[i] = int_prop[i];
9004		np->ldg[i].irq = op->archdata.irqs[i];
9005	}
9006
9007	np->num_ldg = op->archdata.num_irqs;
9008
9009	return 0;
9010#else
9011	return -EINVAL;
9012#endif
9013}
9014
9015static int __devinit niu_ldg_init(struct niu *np)
9016{
9017	struct niu_parent *parent = np->parent;
9018	u8 ldg_num_map[NIU_NUM_LDG];
9019	int first_chan, num_chan;
9020	int i, err, ldg_rotor;
9021	u8 port;
9022
9023	np->num_ldg = 1;
9024	np->ldg[0].irq = np->dev->irq;
9025	if (parent->plat_type == PLAT_TYPE_NIU) {
9026		err = niu_n2_irq_init(np, ldg_num_map);
9027		if (err)
9028			return err;
9029	} else
9030		niu_try_msix(np, ldg_num_map);
9031
9032	port = np->port;
9033	for (i = 0; i < np->num_ldg; i++) {
9034		struct niu_ldg *lp = &np->ldg[i];
9035
9036		netif_napi_add(np->dev, &lp->napi, niu_poll, 64);
9037
9038		lp->np = np;
9039		lp->ldg_num = ldg_num_map[i];
9040		lp->timer = 2;
9041
9042		/* On N2 NIU the firmware has set up the SID mappings so they go
9043		 * to the correct values that will route the LDG to the proper
9044		 * interrupt in the NCU interrupt table.
9045		 */
9046		if (np->parent->plat_type != PLAT_TYPE_NIU) {
9047			err = niu_set_ldg_sid(np, lp->ldg_num, port, i);
9048			if (err)
9049				return err;
9050		}
9051	}
9052
9053	/* We adopt the LDG assignment ordering used by the N2 NIU
9054	 * 'interrupt' properties because that simplifies a lot of
9055	 * things.  This ordering is:
9056	 *
9057	 *	MAC
9058	 *	MIF	(if port zero)
9059	 *	SYSERR	(if port zero)
9060	 *	RX channels
9061	 *	TX channels
9062	 */
9063
9064	ldg_rotor = 0;
9065
9066	err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor],
9067				  LDN_MAC(port));
9068	if (err)
9069		return err;
9070
9071	ldg_rotor++;
9072	if (ldg_rotor == np->num_ldg)
9073		ldg_rotor = 0;
9074
9075	if (port == 0) {
9076		err = niu_ldg_assign_ldn(np, parent,
9077					 ldg_num_map[ldg_rotor],
9078					 LDN_MIF);
9079		if (err)
9080			return err;
9081
9082		ldg_rotor++;
9083		if (ldg_rotor == np->num_ldg)
9084			ldg_rotor = 0;
9085
9086		err = niu_ldg_assign_ldn(np, parent,
9087					 ldg_num_map[ldg_rotor],
9088					 LDN_DEVICE_ERROR);
9089		if (err)
9090			return err;
9091
9092		ldg_rotor++;
9093		if (ldg_rotor == np->num_ldg)
9094			ldg_rotor = 0;
9095
9096	}
9097
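	/* RX DMA channels are handed out to the ports consecutively
	 * (see niu_divide_rdc_groups), so this port's first channel is
	 * the sum of the channel counts of all lower-numbered ports;
	 * the TX loop below assumes the same consecutive layout.
	 */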
9098	first_chan = 0;
9099	for (i = 0; i < port; i++)
9100		first_chan += parent->rxchan_per_port[i];
9101	num_chan = parent->rxchan_per_port[port];
9102
9103	for (i = first_chan; i < (first_chan + num_chan); i++) {
9104		err = niu_ldg_assign_ldn(np, parent,
9105					 ldg_num_map[ldg_rotor],
9106					 LDN_RXDMA(i));
9107		if (err)
9108			return err;
9109		ldg_rotor++;
9110		if (ldg_rotor == np->num_ldg)
9111			ldg_rotor = 0;
9112	}
9113
9114	first_chan = 0;
9115	for (i = 0; i < port; i++)
9116		first_chan += parent->txchan_per_port[i];
9117	num_chan = parent->txchan_per_port[port];
9118	for (i = first_chan; i < (first_chan + num_chan); i++) {
9119		err = niu_ldg_assign_ldn(np, parent,
9120					 ldg_num_map[ldg_rotor],
9121					 LDN_TXDMA(i));
9122		if (err)
9123			return err;
9124		ldg_rotor++;
9125		if (ldg_rotor == np->num_ldg)
9126			ldg_rotor = 0;
9127	}
9128
9129	return 0;
9130}
9131
9132static void __devexit niu_ldg_free(struct niu *np)
9133{
9134	if (np->flags & NIU_FLAGS_MSIX)
9135		pci_disable_msix(np->pdev);
9136}
9137
9138static int __devinit niu_get_of_props(struct niu *np)
9139{
9140#ifdef CONFIG_SPARC64
9141	struct net_device *dev = np->dev;
9142	struct device_node *dp;
9143	const char *phy_type;
9144	const u8 *mac_addr;
9145	const char *model;
9146	int prop_len;
9147
9148	if (np->parent->plat_type == PLAT_TYPE_NIU)
9149		dp = np->op->dev.of_node;
9150	else
9151		dp = pci_device_to_OF_node(np->pdev);
9152
9153	phy_type = of_get_property(dp, "phy-type", &prop_len);
9154	if (!phy_type) {
9155		netdev_err(dev, "%s: OF node lacks phy-type property\n",
9156			   dp->full_name);
9157		return -EINVAL;
9158	}
9159
9160	if (!strcmp(phy_type, "none"))
9161		return -ENODEV;
9162
9163	strcpy(np->vpd.phy_type, phy_type);
9164
9165	if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
9166		netdev_err(dev, "%s: Illegal phy string [%s]\n",
9167			   dp->full_name, np->vpd.phy_type);
9168		return -EINVAL;
9169	}
9170
9171	mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
9172	if (!mac_addr) {
9173		netdev_err(dev, "%s: OF node lacks local-mac-address property\n",
9174			   dp->full_name);
9175		return -EINVAL;
9176	}
9177	if (prop_len != dev->addr_len) {
9178		netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n",
9179			   dp->full_name, prop_len);
9180	}
9181	memcpy(dev->perm_addr, mac_addr, dev->addr_len);
9182	if (!is_valid_ether_addr(&dev->perm_addr[0])) {
9183		netdev_err(dev, "%s: OF MAC address is invalid\n",
9184			   dp->full_name);
9185		netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->perm_addr);
9186		return -EINVAL;
9187	}
9188
9189	memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
9190
9191	model = of_get_property(dp, "model", &prop_len);
9192
9193	if (model)
9194		strcpy(np->vpd.model, model);
9195
9196	if (of_find_property(dp, "hot-swappable-phy", &prop_len)) {
9197		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
9198			NIU_FLAGS_HOTPLUG_PHY);
9199	}
9200
9201	return 0;
9202#else
9203	return -EINVAL;
9204#endif
9205}
9206
9207static int __devinit niu_get_invariants(struct niu *np)
9208{
9209	int err, have_props;
9210	u32 offset;
9211
9212	err = niu_get_of_props(np);
9213	if (err == -ENODEV)
9214		return err;
9215
9216	have_props = !err;
9217
9218	err = niu_init_mac_ipp_pcs_base(np);
9219	if (err)
9220		return err;
9221
9222	if (have_props) {
9223		err = niu_get_and_validate_port(np);
9224		if (err)
9225			return err;
9226
9227	} else  {
9228		if (np->parent->plat_type == PLAT_TYPE_NIU)
9229			return -EINVAL;
9230
9231		nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
9232		offset = niu_pci_vpd_offset(np);
9233		netif_printk(np, probe, KERN_DEBUG, np->dev,
9234			     "%s() VPD offset [%08x]\n", __func__, offset);
9235		if (offset)
9236			niu_pci_vpd_fetch(np, offset);
9237		nw64(ESPC_PIO_EN, 0);
9238
9239		if (np->flags & NIU_FLAGS_VPD_VALID) {
9240			niu_pci_vpd_validate(np);
9241			err = niu_get_and_validate_port(np);
9242			if (err)
9243				return err;
9244		}
9245
9246		if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
9247			err = niu_get_and_validate_port(np);
9248			if (err)
9249				return err;
9250			err = niu_pci_probe_sprom(np);
9251			if (err)
9252				return err;
9253		}
9254	}
9255
9256	err = niu_probe_ports(np);
9257	if (err)
9258		return err;
9259
9260	niu_ldg_init(np);
9261
9262	niu_classifier_swstate_init(np);
9263	niu_link_config_init(np);
9264
9265	err = niu_determine_phy_disposition(np);
9266	if (!err)
9267		err = niu_init_link(np);
9268
9269	return err;
9270}
9271
9272static LIST_HEAD(niu_parent_list);
9273static DEFINE_MUTEX(niu_parent_lock);
9274static int niu_parent_index;
9275
9276static ssize_t show_port_phy(struct device *dev,
9277			     struct device_attribute *attr, char *buf)
9278{
9279	struct platform_device *plat_dev = to_platform_device(dev);
9280	struct niu_parent *p = plat_dev->dev.platform_data;
9281	u32 port_phy = p->port_phy;
9282	char *orig_buf = buf;
9283	int i;
9284
9285	if (port_phy == PORT_PHY_UNKNOWN ||
9286	    port_phy == PORT_PHY_INVALID)
9287		return 0;
9288
9289	for (i = 0; i < p->num_ports; i++) {
9290		const char *type_str;
9291		int type;
9292
9293		type = phy_decode(port_phy, i);
9294		if (type == PORT_TYPE_10G)
9295			type_str = "10G";
9296		else
9297			type_str = "1G";
9298		buf += sprintf(buf,
9299			       (i == 0) ? "%s" : " %s",
9300			       type_str);
9301	}
9302	buf += sprintf(buf, "\n");
9303	return buf - orig_buf;
9304}
9305
9306static ssize_t show_plat_type(struct device *dev,
9307			      struct device_attribute *attr, char *buf)
9308{
9309	struct platform_device *plat_dev = to_platform_device(dev);
9310	struct niu_parent *p = plat_dev->dev.platform_data;
9311	const char *type_str;
9312
9313	switch (p->plat_type) {
9314	case PLAT_TYPE_ATLAS:
9315		type_str = "atlas";
9316		break;
9317	case PLAT_TYPE_NIU:
9318		type_str = "niu";
9319		break;
9320	case PLAT_TYPE_VF_P0:
9321		type_str = "vf_p0";
9322		break;
9323	case PLAT_TYPE_VF_P1:
9324		type_str = "vf_p1";
9325		break;
9326	default:
9327		type_str = "unknown";
9328		break;
9329	}
9330
9331	return sprintf(buf, "%s\n", type_str);
9332}
9333
9334static ssize_t __show_chan_per_port(struct device *dev,
9335				    struct device_attribute *attr, char *buf,
9336				    int rx)
9337{
9338	struct platform_device *plat_dev = to_platform_device(dev);
9339	struct niu_parent *p = plat_dev->dev.platform_data;
9340	char *orig_buf = buf;
9341	u8 *arr;
9342	int i;
9343
9344	arr = (rx ? p->rxchan_per_port : p->txchan_per_port);
9345
9346	for (i = 0; i < p->num_ports; i++) {
9347		buf += sprintf(buf,
9348			       (i == 0) ? "%d" : " %d",
9349			       arr[i]);
9350	}
9351	buf += sprintf(buf, "\n");
9352
9353	return buf - orig_buf;
9354}
9355
9356static ssize_t show_rxchan_per_port(struct device *dev,
9357				    struct device_attribute *attr, char *buf)
9358{
9359	return __show_chan_per_port(dev, attr, buf, 1);
9360}
9361
9362static ssize_t show_txchan_per_port(struct device *dev,
9363				    struct device_attribute *attr, char *buf)
9364{
9365	return __show_chan_per_port(dev, attr, buf, 0);
9366}
9367
9368static ssize_t show_num_ports(struct device *dev,
9369			      struct device_attribute *attr, char *buf)
9370{
9371	struct platform_device *plat_dev = to_platform_device(dev);
9372	struct niu_parent *p = plat_dev->dev.platform_data;
9373
9374	return sprintf(buf, "%d\n", p->num_ports);
9375}
9376
9377static struct device_attribute niu_parent_attributes[] = {
9378	__ATTR(port_phy, S_IRUGO, show_port_phy, NULL),
9379	__ATTR(plat_type, S_IRUGO, show_plat_type, NULL),
9380	__ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL),
9381	__ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL),
9382	__ATTR(num_ports, S_IRUGO, show_num_ports, NULL),
9383	{}
9384};
9385
9386static struct niu_parent * __devinit niu_new_parent(struct niu *np,
9387						    union niu_parent_id *id,
9388						    u8 ptype)
9389{
9390	struct platform_device *plat_dev;
9391	struct niu_parent *p;
9392	int i;
9393
9394	plat_dev = platform_device_register_simple("niu", niu_parent_index,
9395						   NULL, 0);
9396	if (IS_ERR(plat_dev))
9397		return NULL;
9398
9399	for (i = 0; attr_name(niu_parent_attributes[i]); i++) {
9400		int err = device_create_file(&plat_dev->dev,
9401					     &niu_parent_attributes[i]);
9402		if (err)
9403			goto fail_unregister;
9404	}
9405
9406	p = kzalloc(sizeof(*p), GFP_KERNEL);
9407	if (!p)
9408		goto fail_unregister;
9409
9410	p->index = niu_parent_index++;
9411
9412	plat_dev->dev.platform_data = p;
9413	p->plat_dev = plat_dev;
9414
9415	memcpy(&p->id, id, sizeof(*id));
9416	p->plat_type = ptype;
9417	INIT_LIST_HEAD(&p->list);
9418	atomic_set(&p->refcnt, 0);
9419	list_add(&p->list, &niu_parent_list);
9420	spin_lock_init(&p->lock);
9421
9422	p->rxdma_clock_divider = 7500;
9423
9424	p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
9425	if (p->plat_type == PLAT_TYPE_NIU)
9426		p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;
9427
9428	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
9429		int index = i - CLASS_CODE_USER_PROG1;
9430
9431		p->tcam_key[index] = TCAM_KEY_TSEL;
9432		p->flow_key[index] = (FLOW_KEY_IPSA |
9433				      FLOW_KEY_IPDA |
9434				      FLOW_KEY_PROTO |
9435				      (FLOW_KEY_L4_BYTE12 <<
9436				       FLOW_KEY_L4_0_SHIFT) |
9437				      (FLOW_KEY_L4_BYTE12 <<
9438				       FLOW_KEY_L4_1_SHIFT));
9439	}
9440
9441	for (i = 0; i < LDN_MAX + 1; i++)
9442		p->ldg_map[i] = LDG_INVALID;
9443
9444	return p;
9445
9446fail_unregister:
9447	platform_device_unregister(plat_dev);
9448	return NULL;
9449}
9450
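/* All ports of one physical chip share a single niu_parent, matched by
 * the chip's bus identity (PCI domain/bus/device or OF parent node).
 * The first port to probe creates it, and the refcount tears it down
 * when the last port goes away (see niu_put_parent).
 */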
9451static struct niu_parent * __devinit niu_get_parent(struct niu *np,
9452						    union niu_parent_id *id,
9453						    u8 ptype)
9454{
9455	struct niu_parent *p, *tmp;
9456	int port = np->port;
9457
9458	mutex_lock(&niu_parent_lock);
9459	p = NULL;
9460	list_for_each_entry(tmp, &niu_parent_list, list) {
9461		if (!memcmp(id, &tmp->id, sizeof(*id))) {
9462			p = tmp;
9463			break;
9464		}
9465	}
9466	if (!p)
9467		p = niu_new_parent(np, id, ptype);
9468
9469	if (p) {
9470		char port_name[6];
9471		int err;
9472
9473		sprintf(port_name, "port%d", port);
9474		err = sysfs_create_link(&p->plat_dev->dev.kobj,
9475					&np->device->kobj,
9476					port_name);
9477		if (!err) {
9478			p->ports[port] = np;
9479			atomic_inc(&p->refcnt);
9480		}
9481	}
9482	mutex_unlock(&niu_parent_lock);
9483
9484	return p;
9485}
9486
9487static void niu_put_parent(struct niu *np)
9488{
9489	struct niu_parent *p = np->parent;
9490	u8 port = np->port;
9491	char port_name[6];
9492
9493	BUG_ON(!p || p->ports[port] != np);
9494
9495	netif_printk(np, probe, KERN_DEBUG, np->dev,
9496		     "%s() port[%u]\n", __func__, port);
9497
9498	sprintf(port_name, "port%d", port);
9499
9500	mutex_lock(&niu_parent_lock);
9501
9502	sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);
9503
9504	p->ports[port] = NULL;
9505	np->parent = NULL;
9506
9507	if (atomic_dec_and_test(&p->refcnt)) {
9508		list_del(&p->list);
9509		platform_device_unregister(p->plat_dev);
9510	}
9511
9512	mutex_unlock(&niu_parent_lock);
9513}
9514
9515static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
9516				    u64 *handle, gfp_t flag)
9517{
9518	dma_addr_t dh;
9519	void *ret;
9520
9521	ret = dma_alloc_coherent(dev, size, &dh, flag);
9522	if (ret)
9523		*handle = dh;
9524	return ret;
9525}
9526
9527static void niu_pci_free_coherent(struct device *dev, size_t size,
9528				  void *cpu_addr, u64 handle)
9529{
9530	dma_free_coherent(dev, size, cpu_addr, handle);
9531}
9532
9533static u64 niu_pci_map_page(struct device *dev, struct page *page,
9534			    unsigned long offset, size_t size,
9535			    enum dma_data_direction direction)
9536{
9537	return dma_map_page(dev, page, offset, size, direction);
9538}
9539
9540static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
9541			       size_t size, enum dma_data_direction direction)
9542{
9543	dma_unmap_page(dev, dma_address, size, direction);
9544}
9545
9546static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
9547			      size_t size,
9548			      enum dma_data_direction direction)
9549{
9550	return dma_map_single(dev, cpu_addr, size, direction);
9551}
9552
9553static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
9554				 size_t size,
9555				 enum dma_data_direction direction)
9556{
9557	dma_unmap_single(dev, dma_address, size, direction);
9558}
9559
9560static const struct niu_ops niu_pci_ops = {
9561	.alloc_coherent	= niu_pci_alloc_coherent,
9562	.free_coherent	= niu_pci_free_coherent,
9563	.map_page	= niu_pci_map_page,
9564	.unmap_page	= niu_pci_unmap_page,
9565	.map_single	= niu_pci_map_single,
9566	.unmap_single	= niu_pci_unmap_single,
9567};
9568
9569static void __devinit niu_driver_version(void)
9570{
9571	static int niu_version_printed;
9572
9573	if (niu_version_printed++ == 0)
9574		pr_info("%s", version);
9575}
9576
9577static struct net_device * __devinit niu_alloc_and_init(
9578	struct device *gen_dev, struct pci_dev *pdev,
9579	struct platform_device *op, const struct niu_ops *ops,
9580	u8 port)
9581{
9582	struct net_device *dev;
9583	struct niu *np;
9584
9585	dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
9586	if (!dev) {
9587		dev_err(gen_dev, "Etherdev alloc failed, aborting\n");
9588		return NULL;
9589	}
9590
9591	SET_NETDEV_DEV(dev, gen_dev);
9592
9593	np = netdev_priv(dev);
9594	np->dev = dev;
9595	np->pdev = pdev;
9596	np->op = op;
9597	np->device = gen_dev;
9598	np->ops = ops;
9599
9600	np->msg_enable = niu_debug;
9601
9602	spin_lock_init(&np->lock);
9603	INIT_WORK(&np->reset_task, niu_reset_task);
9604
9605	np->port = port;
9606
9607	return dev;
9608}
9609
9610static const struct net_device_ops niu_netdev_ops = {
9611	.ndo_open		= niu_open,
9612	.ndo_stop		= niu_close,
9613	.ndo_start_xmit		= niu_start_xmit,
9614	.ndo_get_stats		= niu_get_stats,
9615	.ndo_set_multicast_list	= niu_set_rx_mode,
9616	.ndo_validate_addr	= eth_validate_addr,
9617	.ndo_set_mac_address	= niu_set_mac_addr,
9618	.ndo_do_ioctl		= niu_ioctl,
9619	.ndo_tx_timeout		= niu_tx_timeout,
9620	.ndo_change_mtu		= niu_change_mtu,
9621};
9622
9623static void __devinit niu_assign_netdev_ops(struct net_device *dev)
9624{
9625	dev->netdev_ops = &niu_netdev_ops;
9626	dev->ethtool_ops = &niu_ethtool_ops;
9627	dev->watchdog_timeo = NIU_TX_TIMEOUT;
9628}
9629
9630static void __devinit niu_device_announce(struct niu *np)
9631{
9632	struct net_device *dev = np->dev;
9633
9634	pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);
9635
9636	if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
9637		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
9638				dev->name,
9639				(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
9640				(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
9641				(np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
9642				(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
9643				 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
9644				np->vpd.phy_type);
9645	} else {
9646		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
9647				dev->name,
9648				(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
9649				(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
9650				(np->flags & NIU_FLAGS_FIBER ? "FIBER" :
9651				 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
9652				  "COPPER")),
9653				(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
9654				 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
9655				np->vpd.phy_type);
9656	}
9657}
9658
9659static void __devinit niu_set_basic_features(struct net_device *dev)
9660{
9661	dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM |
9662			  NETIF_F_GRO | NETIF_F_RXHASH);
9663}
9664
9665static int __devinit niu_pci_init_one(struct pci_dev *pdev,
9666				      const struct pci_device_id *ent)
9667{
9668	union niu_parent_id parent_id;
9669	struct net_device *dev;
9670	struct niu *np;
9671	int err, pos;
9672	u64 dma_mask;
9673	u16 val16;
9674
9675	niu_driver_version();
9676
9677	err = pci_enable_device(pdev);
9678	if (err) {
9679		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
9680		return err;
9681	}
9682
9683	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
9684	    !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9685		dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n");
9686		err = -ENODEV;
9687		goto err_out_disable_pdev;
9688	}
9689
9690	err = pci_request_regions(pdev, DRV_MODULE_NAME);
9691	if (err) {
9692		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
9693		goto err_out_disable_pdev;
9694	}
9695
9696	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9697	if (pos <= 0) {
9698		dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
		err = -ENODEV;
9699		goto err_out_free_res;
9700	}
9701
9702	dev = niu_alloc_and_init(&pdev->dev, pdev, NULL,
9703				 &niu_pci_ops, PCI_FUNC(pdev->devfn));
9704	if (!dev) {
9705		err = -ENOMEM;
9706		goto err_out_free_res;
9707	}
9708	np = netdev_priv(dev);
9709
9710	memset(&parent_id, 0, sizeof(parent_id));
9711	parent_id.pci.domain = pci_domain_nr(pdev->bus);
9712	parent_id.pci.bus = pdev->bus->number;
9713	parent_id.pci.device = PCI_SLOT(pdev->devfn);
9714
9715	np->parent = niu_get_parent(np, &parent_id,
9716				    PLAT_TYPE_ATLAS);
9717	if (!np->parent) {
9718		err = -ENOMEM;
9719		goto err_out_free_dev;
9720	}
9721
9722	pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
9723	val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
9724	val16 |= (PCI_EXP_DEVCTL_CERE |
9725		  PCI_EXP_DEVCTL_NFERE |
9726		  PCI_EXP_DEVCTL_FERE |
9727		  PCI_EXP_DEVCTL_URRE |
9728		  PCI_EXP_DEVCTL_RELAX_EN);
9729	pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
9730
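	/* Neptune can address 44 bits of DMA, so prefer that mask
	 * (enabling NETIF_F_HIGHDMA) and fall back to a 32-bit mask if
	 * the platform cannot honor it.
	 */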
9731	dma_mask = DMA_BIT_MASK(44);
9732	err = pci_set_dma_mask(pdev, dma_mask);
9733	if (!err) {
9734		dev->features |= NETIF_F_HIGHDMA;
9735		err = pci_set_consistent_dma_mask(pdev, dma_mask);
9736		if (err) {
9737			dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n");
9738			goto err_out_release_parent;
9739		}
9740	}
9741	if (err || dma_mask == DMA_BIT_MASK(32)) {
9742		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9743		if (err) {
9744			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
9745			goto err_out_release_parent;
9746		}
9747	}
9748
9749	niu_set_basic_features(dev);
9750
9751	np->regs = pci_ioremap_bar(pdev, 0);
9752	if (!np->regs) {
9753		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
9754		err = -ENOMEM;
9755		goto err_out_release_parent;
9756	}
9757
9758	pci_set_master(pdev);
9759	pci_save_state(pdev);
9760
9761	dev->irq = pdev->irq;
9762
9763	niu_assign_netdev_ops(dev);
9764
9765	err = niu_get_invariants(np);
9766	if (err) {
9767		if (err != -ENODEV)
9768			dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
9769		goto err_out_iounmap;
9770	}
9771
9772	err = register_netdev(dev);
9773	if (err) {
9774		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
9775		goto err_out_iounmap;
9776	}
9777
9778	pci_set_drvdata(pdev, dev);
9779
9780	niu_device_announce(np);
9781
9782	return 0;
9783
9784err_out_iounmap:
9785	if (np->regs) {
9786		iounmap(np->regs);
9787		np->regs = NULL;
9788	}
9789
9790err_out_release_parent:
9791	niu_put_parent(np);
9792
9793err_out_free_dev:
9794	free_netdev(dev);
9795
9796err_out_free_res:
9797	pci_release_regions(pdev);
9798
9799err_out_disable_pdev:
9800	pci_disable_device(pdev);
9801	pci_set_drvdata(pdev, NULL);
9802
9803	return err;
9804}
9805
9806static void __devexit niu_pci_remove_one(struct pci_dev *pdev)
9807{
9808	struct net_device *dev = pci_get_drvdata(pdev);
9809
9810	if (dev) {
9811		struct niu *np = netdev_priv(dev);
9812
9813		unregister_netdev(dev);
9814		if (np->regs) {
9815			iounmap(np->regs);
9816			np->regs = NULL;
9817		}
9818
9819		niu_ldg_free(np);
9820
9821		niu_put_parent(np);
9822
9823		free_netdev(dev);
9824		pci_release_regions(pdev);
9825		pci_disable_device(pdev);
9826		pci_set_drvdata(pdev, NULL);
9827	}
9828}
9829
9830static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
9831{
9832	struct net_device *dev = pci_get_drvdata(pdev);
9833	struct niu *np = netdev_priv(dev);
9834	unsigned long flags;
9835
9836	if (!netif_running(dev))
9837		return 0;
9838
9839	flush_scheduled_work();
9840	niu_netif_stop(np);
9841
9842	del_timer_sync(&np->timer);
9843
9844	spin_lock_irqsave(&np->lock, flags);
9845	niu_enable_interrupts(np, 0);
9846	spin_unlock_irqrestore(&np->lock, flags);
9847
9848	netif_device_detach(dev);
9849
9850	spin_lock_irqsave(&np->lock, flags);
9851	niu_stop_hw(np);
9852	spin_unlock_irqrestore(&np->lock, flags);
9853
9854	pci_save_state(pdev);
9855
9856	return 0;
9857}
9858
9859static int niu_resume(struct pci_dev *pdev)
9860{
9861	struct net_device *dev = pci_get_drvdata(pdev);
9862	struct niu *np = netdev_priv(dev);
9863	unsigned long flags;
9864	int err;
9865
9866	if (!netif_running(dev))
9867		return 0;
9868
9869	pci_restore_state(pdev);
9870
9871	netif_device_attach(dev);
9872
9873	spin_lock_irqsave(&np->lock, flags);
9874
9875	err = niu_init_hw(np);
9876	if (!err) {
9877		np->timer.expires = jiffies + HZ;
9878		add_timer(&np->timer);
9879		niu_netif_start(np);
9880	}
9881
9882	spin_unlock_irqrestore(&np->lock, flags);
9883
9884	return err;
9885}
9886
9887static struct pci_driver niu_pci_driver = {
9888	.name		= DRV_MODULE_NAME,
9889	.id_table	= niu_pci_tbl,
9890	.probe		= niu_pci_init_one,
9891	.remove		= __devexit_p(niu_pci_remove_one),
9892	.suspend	= niu_suspend,
9893	.resume		= niu_resume,
9894};
9895
9896#ifdef CONFIG_SPARC64
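/* On the on-chip N2 NIU the device sees raw physical addresses, so the
 * "phys" ops below translate pages and buffers with plain __pa()-style
 * identity mappings and allocate coherent memory straight from the
 * page allocator.
 */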
9897static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
9898				     u64 *dma_addr, gfp_t flag)
9899{
9900	unsigned long order = get_order(size);
9901	unsigned long page = __get_free_pages(flag, order);
9902
9903	if (page == 0UL)
9904		return NULL;
9905	memset((char *)page, 0, PAGE_SIZE << order);
9906	*dma_addr = __pa(page);
9907
9908	return (void *) page;
9909}
9910
9911static void niu_phys_free_coherent(struct device *dev, size_t size,
9912				   void *cpu_addr, u64 handle)
9913{
9914	unsigned long order = get_order(size);
9915
9916	free_pages((unsigned long) cpu_addr, order);
9917}
9918
9919static u64 niu_phys_map_page(struct device *dev, struct page *page,
9920			     unsigned long offset, size_t size,
9921			     enum dma_data_direction direction)
9922{
9923	return page_to_phys(page) + offset;
9924}
9925
9926static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
9927				size_t size, enum dma_data_direction direction)
9928{
9929	/* Nothing to do.  */
9930}
9931
9932static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
9933			       size_t size,
9934			       enum dma_data_direction direction)
9935{
9936	return __pa(cpu_addr);
9937}
9938
9939static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
9940				  size_t size,
9941				  enum dma_data_direction direction)
9942{
9943	/* Nothing to do.  */
9944}
9945
9946static const struct niu_ops niu_phys_ops = {
9947	.alloc_coherent	= niu_phys_alloc_coherent,
9948	.free_coherent	= niu_phys_free_coherent,
9949	.map_page	= niu_phys_map_page,
9950	.unmap_page	= niu_phys_unmap_page,
9951	.map_single	= niu_phys_map_single,
9952	.unmap_single	= niu_phys_unmap_single,
9953};
9954
9955static int __devinit niu_of_probe(struct platform_device *op,
9956				  const struct of_device_id *match)
9957{
9958	union niu_parent_id parent_id;
9959	struct net_device *dev;
9960	struct niu *np;
9961	const u32 *reg;
9962	int err;
9963
9964	niu_driver_version();
9965
9966	reg = of_get_property(op->dev.of_node, "reg", NULL);
9967	if (!reg) {
9968		dev_err(&op->dev, "%s: No 'reg' property, aborting\n",
9969			op->dev.of_node->full_name);
9970		return -ENODEV;
9971	}
9972
9973	dev = niu_alloc_and_init(&op->dev, NULL, op,
9974				 &niu_phys_ops, reg[0] & 0x1);
9975	if (!dev) {
9976		err = -ENOMEM;
9977		goto err_out;
9978	}
9979	np = netdev_priv(dev);
9980
9981	memset(&parent_id, 0, sizeof(parent_id));
9982	parent_id.of = of_get_parent(op->dev.of_node);
9983
9984	np->parent = niu_get_parent(np, &parent_id,
9985				    PLAT_TYPE_NIU);
9986	if (!np->parent) {
9987		err = -ENOMEM;
9988		goto err_out_free_dev;
9989	}
9990
9991	niu_set_basic_features(dev);
9992
9993	np->regs = of_ioremap(&op->resource[1], 0,
9994			      resource_size(&op->resource[1]),
9995			      "niu regs");
9996	if (!np->regs) {
9997		dev_err(&op->dev, "Cannot map device registers, aborting\n");
9998		err = -ENOMEM;
9999		goto err_out_release_parent;
10000	}
10001
10002	np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
10003				    resource_size(&op->resource[2]),
10004				    "niu vregs-1");
10005	if (!np->vir_regs_1) {
10006		dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n");
10007		err = -ENOMEM;
10008		goto err_out_iounmap;
10009	}
10010
10011	np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
10012				    resource_size(&op->resource[3]),
10013				    "niu vregs-2");
10014	if (!np->vir_regs_2) {
10015		dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n");
10016		err = -ENOMEM;
10017		goto err_out_iounmap;
10018	}
10019
10020	niu_assign_netdev_ops(dev);
10021
10022	err = niu_get_invariants(np);
10023	if (err) {
10024		if (err != -ENODEV)
10025			dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n");
10026		goto err_out_iounmap;
10027	}
10028
10029	err = register_netdev(dev);
10030	if (err) {
10031		dev_err(&op->dev, "Cannot register net device, aborting\n");
10032		goto err_out_iounmap;
10033	}
10034
10035	dev_set_drvdata(&op->dev, dev);
10036
10037	niu_device_announce(np);
10038
10039	return 0;
10040
10041err_out_iounmap:
10042	if (np->vir_regs_1) {
10043		of_iounmap(&op->resource[2], np->vir_regs_1,
10044			   resource_size(&op->resource[2]));
10045		np->vir_regs_1 = NULL;
10046	}
10047
10048	if (np->vir_regs_2) {
10049		of_iounmap(&op->resource[3], np->vir_regs_2,
10050			   resource_size(&op->resource[3]));
10051		np->vir_regs_2 = NULL;
10052	}
10053
10054	if (np->regs) {
10055		of_iounmap(&op->resource[1], np->regs,
10056			   resource_size(&op->resource[1]));
10057		np->regs = NULL;
10058	}
10059
10060err_out_release_parent:
10061	niu_put_parent(np);
10062
10063err_out_free_dev:
10064	free_netdev(dev);
10065
10066err_out:
10067	return err;
10068}
10069
10070static int __devexit niu_of_remove(struct platform_device *op)
10071{
10072	struct net_device *dev = dev_get_drvdata(&op->dev);
10073
10074	if (dev) {
10075		struct niu *np = netdev_priv(dev);
10076
10077		unregister_netdev(dev);
10078
10079		if (np->vir_regs_1) {
10080			of_iounmap(&op->resource[2], np->vir_regs_1,
10081				   resource_size(&op->resource[2]));
10082			np->vir_regs_1 = NULL;
10083		}
10084
10085		if (np->vir_regs_2) {
10086			of_iounmap(&op->resource[3], np->vir_regs_2,
10087				   resource_size(&op->resource[3]));
10088			np->vir_regs_2 = NULL;
10089		}
10090
10091		if (np->regs) {
10092			of_iounmap(&op->resource[1], np->regs,
10093				   resource_size(&op->resource[1]));
10094			np->regs = NULL;
10095		}
10096
10097		niu_ldg_free(np);
10098
10099		niu_put_parent(np);
10100
10101		free_netdev(dev);
10102		dev_set_drvdata(&op->dev, NULL);
10103	}
10104	return 0;
10105}
10106
10107static const struct of_device_id niu_match[] = {
10108	{
10109		.name = "network",
10110		.compatible = "SUNW,niusl",
10111	},
10112	{},
10113};
10114MODULE_DEVICE_TABLE(of, niu_match);
10115
10116static struct of_platform_driver niu_of_driver = {
10117	.driver = {
10118		.name = "niu",
10119		.owner = THIS_MODULE,
10120		.of_match_table = niu_match,
10121	},
10122	.probe		= niu_of_probe,
10123	.remove		= __devexit_p(niu_of_remove),
10124};
10125
10126#endif /* CONFIG_SPARC64 */
10127
10128static int __init niu_init(void)
10129{
10130	int err = 0;
10131
10132	BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);
10133
10134	niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
10135
10136#ifdef CONFIG_SPARC64
10137	err = of_register_platform_driver(&niu_of_driver);
10138#endif
10139
10140	if (!err) {
10141		err = pci_register_driver(&niu_pci_driver);
10142#ifdef CONFIG_SPARC64
10143		if (err)
10144			of_unregister_platform_driver(&niu_of_driver);
10145#endif
10146	}
10147
10148	return err;
10149}
10150
10151static void __exit niu_exit(void)
10152{
10153	pci_unregister_driver(&niu_pci_driver);
10154#ifdef CONFIG_SPARC64
10155	of_unregister_platform_driver(&niu_of_driver);
10156#endif
10157}
10158
10159module_init(niu_init);
10160module_exit(niu_exit);
10161