// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014-2024 Broadcom
 */

#define pr_fmt(fmt)				"bcmgenet: " fmt

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/clk.h>
#include <net/arp.h>

#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
#include <linux/platform_data/bcmgenet.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"

/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT	4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY	0

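/* Queue 16 (the default queue) owns whatever descriptors remain once the
 * priority queues have claimed theirs; e.g. assuming hw_params report
 * 4 TX queues of 32 BDs each out of a 256-entry descriptor pool, queue 16
 * would be left with 256 - 4 * 32 = 128 TX descriptors.
 */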
#define GENET_Q16_RX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
#define GENET_Q16_TX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)

#define RX_BUF_LENGTH		2048
#define SKB_ALIGNMENT		32

/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

/* Forward declarations */
static void bcmgenet_set_rx_mode(struct net_device *dev);

static inline void bcmgenet_writel(u32 value, void __iomem *offset)
{
	/* MIPS chips strapped for BE will automagically configure the
	 * peripheral registers for CPU-native byte order.
	 */
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		__raw_writel(value, offset);
	else
		writel_relaxed(value, offset);
}

static inline u32 bcmgenet_readl(void __iomem *offset)
{
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		return __raw_readl(offset);
	else
		return readl_relaxed(offset);
}

static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	bcmgenet_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet; save these expensive
	 * writes unless the platform is explicitly configured for
	 * 64-bit/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}

/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
			       void __iomem *d, dma_addr_t addr, u32 val)
{
	dmadesc_set_addr(priv, d, addr);
	dmadesc_set_length_status(priv, d, val);
}

#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)

static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}

static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}

/* These macros are defined to deal with the register map change
 * between GENET1.1 and GENET2. Only those currently used by the
 * driver are defined.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return bcmgenet_readl(priv->base +
				      priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		bcmgenet_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return bcmgenet_readl(priv->base +
				      priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		bcmgenet_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

/* RX/TX DMA register accessors */
enum dma_reg {
	DMA_RING_CFG = 0,
	DMA_CTRL,
	DMA_STATUS,
	DMA_SCB_BURST_SIZE,
	DMA_ARB_CTRL,
	DMA_PRIORITY_0,
	DMA_PRIORITY_1,
	DMA_PRIORITY_2,
	DMA_INDEX2RING_0,
	DMA_INDEX2RING_1,
	DMA_INDEX2RING_2,
	DMA_INDEX2RING_3,
	DMA_INDEX2RING_4,
	DMA_INDEX2RING_5,
	DMA_INDEX2RING_6,
	DMA_INDEX2RING_7,
	DMA_RING0_TIMEOUT,
	DMA_RING1_TIMEOUT,
	DMA_RING2_TIMEOUT,
	DMA_RING3_TIMEOUT,
	DMA_RING4_TIMEOUT,
	DMA_RING5_TIMEOUT,
	DMA_RING6_TIMEOUT,
	DMA_RING7_TIMEOUT,
	DMA_RING8_TIMEOUT,
	DMA_RING9_TIMEOUT,
	DMA_RING10_TIMEOUT,
	DMA_RING11_TIMEOUT,
	DMA_RING12_TIMEOUT,
	DMA_RING13_TIMEOUT,
	DMA_RING14_TIMEOUT,
	DMA_RING15_TIMEOUT,
	DMA_RING16_TIMEOUT,
};

static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY_0]	= 0x30,
	[DMA_PRIORITY_1]	= 0x34,
	[DMA_PRIORITY_2]	= 0x38,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
	[DMA_INDEX2RING_0]	= 0x70,
	[DMA_INDEX2RING_1]	= 0x74,
	[DMA_INDEX2RING_2]	= 0x78,
	[DMA_INDEX2RING_3]	= 0x7C,
	[DMA_INDEX2RING_4]	= 0x80,
	[DMA_INDEX2RING_5]	= 0x84,
	[DMA_INDEX2RING_6]	= 0x88,
	[DMA_INDEX2RING_7]	= 0x8C,
};

static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};

static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_CTRL]		= 0x00,
	[DMA_STATUS]		= 0x04,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};

/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;

static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}

static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
			      DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
			      DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

/* RDMA/TDMA ring registers and accessors.
 * We merge the common fields and just prefix with T/D the registers
 * whose meaning differs depending on the transfer direction.
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
};
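
/* Note the aliasing above: each TDMA register shares its enum slot with
 * the RDMA register found at the same offset (e.g. TDMA_READ_PTR and
 * RDMA_WRITE_PTR), so a single offset table can serve both directions.
 */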

/* GENET v4 supports 40-bit pointer addressing. For obvious reasons
 * the LO and HI word parts are contiguous, but this offsets the
 * other registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]			= 0x00,
	[TDMA_READ_PTR_HI]		= 0x04,
	[TDMA_CONS_INDEX]		= 0x08,
	[TDMA_PROD_INDEX]		= 0x0C,
	[DMA_RING_BUF_SIZE]		= 0x10,
	[DMA_START_ADDR]		= 0x14,
	[DMA_START_ADDR_HI]		= 0x18,
	[DMA_END_ADDR]			= 0x1C,
	[DMA_END_ADDR_HI]		= 0x20,
	[DMA_MBUF_DONE_THRESH]		= 0x24,
	[TDMA_FLOW_PERIOD]		= 0x28,
	[TDMA_WRITE_PTR]		= 0x2C,
	[TDMA_WRITE_PTR_HI]		= 0x30,
};

static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]			= 0x00,
	[TDMA_CONS_INDEX]		= 0x04,
	[TDMA_PROD_INDEX]		= 0x08,
	[DMA_RING_BUF_SIZE]		= 0x0C,
	[DMA_START_ADDR]		= 0x10,
	[DMA_END_ADDR]			= 0x14,
	[DMA_MBUF_DONE_THRESH]		= 0x18,
	[TDMA_FLOW_PERIOD]		= 0x1C,
	[TDMA_WRITE_PTR]		= 0x20,
};

/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;

static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
			      (DMA_RING_SIZE * ring) +
			      genet_dma_ring_regs[r]);
}

static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
			      (DMA_RING_SIZE * ring) +
			      genet_dma_ring_regs[r]);
}

static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}
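
/* For example, reading the RX producer index of ring 0 on any GENET
 * version reduces to
 *	bcmgenet_rdma_ring_readl(priv, 0, RDMA_PROD_INDEX);
 * with the version-specific register offset resolved through the
 * tables above.
 */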

static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
{
	u32 offset;
	u32 reg;

	offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
	reg = bcmgenet_hfb_reg_readl(priv, offset);
	reg |= (1 << (f_index % 32));
	bcmgenet_hfb_reg_writel(priv, reg, offset);
	reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
	reg |= RBUF_HFB_EN;
	bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
}

static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index)
{
	u32 offset, reg, reg1;

	offset = HFB_FLT_ENABLE_V3PLUS;
	reg = bcmgenet_hfb_reg_readl(priv, offset);
	reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
	if (f_index < 32) {
		reg1 &= ~(1 << (f_index % 32));
		bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
	} else {
		reg &= ~(1 << (f_index % 32));
		bcmgenet_hfb_reg_writel(priv, reg, offset);
	}
	if (!reg && !reg1) {
		reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
		reg &= ~RBUF_HFB_EN;
		bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
	}
}

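/* Each DMA_INDEX2RING_n register packs eight 4-bit queue numbers, so
 * filter f_index / 8 selects the register and f_index % 8 selects the
 * nibble within it.
 */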
static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
						     u32 f_index, u32 rx_queue)
{
	u32 offset;
	u32 reg;

	offset = f_index / 8;
	reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
	reg &= ~(0xF << (4 * (f_index % 8)));
	reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
	bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
}

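/* Filter lengths are packed four 8-bit values per HFB_FLT_LEN word and
 * are laid out from the highest-numbered filter down, hence the offset
 * computed from (hfb_filter_cnt - 1 - f_index).
 */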
static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
					   u32 f_index, u32 f_length)
{
	u32 offset;
	u32 reg;

	offset = HFB_FLT_LEN_V3PLUS +
		 ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
		 sizeof(u32);
	reg = bcmgenet_hfb_reg_readl(priv, offset);
	reg &= ~(0xFF << (8 * (f_index % 4)));
	reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
	bcmgenet_hfb_reg_writel(priv, reg, offset);
}

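/* The HFB matches at nibble granularity: a mask byte may select none,
 * the low nibble, the high nibble, or both (0x00, 0x0F, 0xF0, 0xFF).
 * Any other mask cannot be programmed and is rejected.
 */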
static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
{
	while (size) {
		switch (*(unsigned char *)mask++) {
		case 0x00:
		case 0x0f:
		case 0xf0:
		case 0xff:
			size--;
			continue;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

#define VALIDATE_MASK(x) \
	bcmgenet_hfb_validate_mask(&(x), sizeof(x))

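/* Each 32-bit word of filter RAM carries two bytes of match data plus
 * per-nibble enable bits above bit 15 (0x30000 selects the low byte's
 * nibbles, 0xC0000 the high byte's), which is what the constants below
 * encode.
 */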
static int bcmgenet_hfb_insert_data(struct bcmgenet_priv *priv, u32 f_index,
				    u32 offset, void *val, void *mask,
				    size_t size)
{
	u32 index, tmp;

	index = f_index * priv->hw_params->hfb_filter_size + offset / 2;
	tmp = bcmgenet_hfb_readl(priv, index * sizeof(u32));

	while (size--) {
		if (offset++ & 1) {
			tmp &= ~0x300FF;
			tmp |= (*(unsigned char *)val++);
			switch ((*(unsigned char *)mask++)) {
			case 0xFF:
				tmp |= 0x30000;
				break;
			case 0xF0:
				tmp |= 0x20000;
				break;
			case 0x0F:
				tmp |= 0x10000;
				break;
			}
			bcmgenet_hfb_writel(priv, tmp, index++ * sizeof(u32));
			if (size)
				tmp = bcmgenet_hfb_readl(priv,
							 index * sizeof(u32));
		} else {
			tmp &= ~0xCFF00;
			tmp |= (*(unsigned char *)val++) << 8;
			switch ((*(unsigned char *)mask++)) {
			case 0xFF:
				tmp |= 0xC0000;
				break;
			case 0xF0:
				tmp |= 0x80000;
				break;
			case 0x0F:
				tmp |= 0x40000;
				break;
			}
			if (!size)
				bcmgenet_hfb_writel(priv, tmp, index * sizeof(u32));
		}
	}

	return 0;
}

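/* Build an HFB filter from an ethtool flow spec. f_length accumulates
 * the match length in two-byte units, hence the final 2 * f_length
 * conversion when the filter length is programmed.
 */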
static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
					     struct bcmgenet_rxnfc_rule *rule)
{
	struct ethtool_rx_flow_spec *fs = &rule->fs;
	u32 offset = 0, f_length = 0, f;
	u8 val_8, mask_8;
	__be16 val_16;
	u16 mask_16;
	size_t size;

	f = fs->location;
	if (fs->flow_type & FLOW_MAC_EXT) {
		bcmgenet_hfb_insert_data(priv, f, 0,
					 &fs->h_ext.h_dest, &fs->m_ext.h_dest,
					 sizeof(fs->h_ext.h_dest));
	}

	if (fs->flow_type & FLOW_EXT) {
		if (fs->m_ext.vlan_etype ||
		    fs->m_ext.vlan_tci) {
			bcmgenet_hfb_insert_data(priv, f, 12,
						 &fs->h_ext.vlan_etype,
						 &fs->m_ext.vlan_etype,
						 sizeof(fs->h_ext.vlan_etype));
			bcmgenet_hfb_insert_data(priv, f, 14,
						 &fs->h_ext.vlan_tci,
						 &fs->m_ext.vlan_tci,
						 sizeof(fs->h_ext.vlan_tci));
			offset += VLAN_HLEN;
			f_length += DIV_ROUND_UP(VLAN_HLEN, 2);
		}
	}

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case ETHER_FLOW:
		f_length += DIV_ROUND_UP(ETH_HLEN, 2);
		bcmgenet_hfb_insert_data(priv, f, 0,
					 &fs->h_u.ether_spec.h_dest,
					 &fs->m_u.ether_spec.h_dest,
					 sizeof(fs->h_u.ether_spec.h_dest));
		bcmgenet_hfb_insert_data(priv, f, ETH_ALEN,
					 &fs->h_u.ether_spec.h_source,
					 &fs->m_u.ether_spec.h_source,
					 sizeof(fs->h_u.ether_spec.h_source));
		bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
					 &fs->h_u.ether_spec.h_proto,
					 &fs->m_u.ether_spec.h_proto,
					 sizeof(fs->h_u.ether_spec.h_proto));
		break;
	case IP_USER_FLOW:
		f_length += DIV_ROUND_UP(ETH_HLEN + 20, 2);
		/* Specify IP Ether Type */
		val_16 = htons(ETH_P_IP);
		mask_16 = 0xFFFF;
		bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
					 &val_16, &mask_16, sizeof(val_16));
		bcmgenet_hfb_insert_data(priv, f, 15 + offset,
					 &fs->h_u.usr_ip4_spec.tos,
					 &fs->m_u.usr_ip4_spec.tos,
					 sizeof(fs->h_u.usr_ip4_spec.tos));
		bcmgenet_hfb_insert_data(priv, f, 23 + offset,
					 &fs->h_u.usr_ip4_spec.proto,
					 &fs->m_u.usr_ip4_spec.proto,
					 sizeof(fs->h_u.usr_ip4_spec.proto));
		bcmgenet_hfb_insert_data(priv, f, 26 + offset,
					 &fs->h_u.usr_ip4_spec.ip4src,
					 &fs->m_u.usr_ip4_spec.ip4src,
					 sizeof(fs->h_u.usr_ip4_spec.ip4src));
		bcmgenet_hfb_insert_data(priv, f, 30 + offset,
					 &fs->h_u.usr_ip4_spec.ip4dst,
					 &fs->m_u.usr_ip4_spec.ip4dst,
					 sizeof(fs->h_u.usr_ip4_spec.ip4dst));
		if (!fs->m_u.usr_ip4_spec.l4_4_bytes)
			break;

		/* Only supports 20 byte IPv4 header */
		val_8 = 0x45;
		mask_8 = 0xFF;
		bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + offset,
					 &val_8, &mask_8,
					 sizeof(val_8));
		size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes);
		bcmgenet_hfb_insert_data(priv, f,
					 ETH_HLEN + 20 + offset,
					 &fs->h_u.usr_ip4_spec.l4_4_bytes,
					 &fs->m_u.usr_ip4_spec.l4_4_bytes,
					 size);
		f_length += DIV_ROUND_UP(size, 2);
		break;
	}

	bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length);
	if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {
		/* Ring 0 flows can be handled by the default Descriptor Ring
		 * We'll map them to ring 0, but don't enable the filter
		 */
		bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0);
		rule->state = BCMGENET_RXNFC_STATE_DISABLED;
	} else {
		/* Other Rx rings are direct mapped here */
		bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f,
							 fs->ring_cookie);
		bcmgenet_hfb_enable_filter(priv, f);
		rule->state = BCMGENET_RXNFC_STATE_ENABLED;
	}
}

/* Clear the filter RAM backing a single Hardware Filter Block filter */
static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index)
{
	u32 base, i;

	base = f_index * priv->hw_params->hfb_filter_size;
	for (i = 0; i < priv->hw_params->hfb_filter_size; i++)
		bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32));
}

/* bcmgenet_hfb_clear
 *
 * Clear the Hardware Filter Block and disable all filtering.
 */
static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
{
	u32 i;

	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
		return;

	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);

	for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
		bcmgenet_rdma_writel(priv, 0x0, i);

	for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
		bcmgenet_hfb_reg_writel(priv, 0x0,
					HFB_FLT_LEN_V3PLUS + i * sizeof(u32));

	for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++)
		bcmgenet_hfb_clear_filter(priv, i);
}

static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
{
	int i;

	INIT_LIST_HEAD(&priv->rxnfc_list);
	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
		return;

	for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
		INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
		priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
	}

	bcmgenet_hfb_clear(priv);
}

static int bcmgenet_begin(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Turn on the clock */
	return clk_prepare_enable(priv->clk);
}

static void bcmgenet_complete(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Turn off the clock */
	clk_disable_unprepare(priv->clk);
}

static int bcmgenet_get_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	if (!dev->phydev)
		return -ENODEV;

	phy_ethtool_ksettings_get(dev->phydev, cmd);

	return 0;
}

static int bcmgenet_set_link_ksettings(struct net_device *dev,
				       const struct ethtool_link_ksettings *cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	if (!dev->phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_set(dev->phydev, cmd);
}

static int bcmgenet_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 reg;
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* Make sure we reflect the value of CRC_CMD_FWD */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);

	clk_disable_unprepare(priv->clk);

	return ret;
}

static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}

static int bcmgenet_get_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec,
				 struct kernel_ethtool_coalesce *kernel_coal,
				 struct netlink_ext_ack *extack)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rx_ring *ring;
	unsigned int i;

	ec->tx_max_coalesced_frames =
		bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_max_coalesced_frames =
		bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_coalesce_usecs =
		bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;

	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		ring = &priv->rx_rings[i];
		ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
	}
	ring = &priv->rx_rings[DESC_INDEX];
	ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;

	return 0;
}

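/* One DMA timeout tick is the 125 MHz system clock divided by 1024,
 * i.e. roughly 8.192 us, so a requested microsecond value is converted
 * with DIV_ROUND_UP(usecs * 1000, 8192) before being written to the
 * ring timeout register.
 */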
static void bcmgenet_set_rx_coalesce(struct bcmgenet_rx_ring *ring,
				     u32 usecs, u32 pkts)
{
	struct bcmgenet_priv *priv = ring->priv;
	unsigned int i = ring->index;
	u32 reg;

	bcmgenet_rdma_ring_writel(priv, i, pkts, DMA_MBUF_DONE_THRESH);

	reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
	reg &= ~DMA_TIMEOUT_MASK;
	reg |= DIV_ROUND_UP(usecs * 1000, 8192);
	bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
}

static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring,
					  struct ethtool_coalesce *ec)
{
	struct dim_cq_moder moder;
	u32 usecs, pkts;

	ring->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	usecs = ring->rx_coalesce_usecs;
	pkts = ring->rx_max_coalesced_frames;

	if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) {
		moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	ring->dim.use_dim = ec->use_adaptive_rx_coalesce;
	bcmgenet_set_rx_coalesce(ring, usecs, pkts);
}

static int bcmgenet_set_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec,
				 struct kernel_ethtool_coalesce *kernel_coal,
				 struct netlink_ext_ack *extack)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i;

	/* The base system clock is 125 MHz and the DMA timeout is this
	 * reference clock divided by 1024, which yields a tick of roughly
	 * 8.192 us; our maximum value also has to fit in DMA_TIMEOUT_MASK
	 * (16 bits).
	 */
	if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->tx_max_coalesced_frames == 0 ||
	    ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
		return -EINVAL;

	/* GENET TDMA hardware does not support a configurable timeout, but will
	 * always generate an interrupt either after MBDONE packets have been
	 * transmitted, or when the ring is empty.
	 */

	/* Program all TX queues with the same values, as there is no
	 * ethtool knob to do coalescing on a per-queue basis
	 */
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		bcmgenet_tdma_ring_writel(priv, i,
					  ec->tx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);
	bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
				  ec->tx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	for (i = 0; i < priv->hw_params->rx_queues; i++)
		bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec);
	bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec);

	return 0;
}

static void bcmgenet_get_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *epause)
{
	struct bcmgenet_priv *priv;
	u32 umac_cmd;

	priv = netdev_priv(dev);

	epause->autoneg = priv->autoneg_pause;

	if (netif_carrier_ok(dev)) {
		/* report active state when link is up */
		umac_cmd = bcmgenet_umac_readl(priv, UMAC_CMD);
		epause->tx_pause = !(umac_cmd & CMD_TX_PAUSE_IGNORE);
		epause->rx_pause = !(umac_cmd & CMD_RX_PAUSE_IGNORE);
	} else {
		/* otherwise report stored settings */
		epause->tx_pause = priv->tx_pause;
		epause->rx_pause = priv->rx_pause;
	}
}

static int bcmgenet_set_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *epause)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!dev->phydev)
		return -ENODEV;

	if (!phy_validate_pause(dev->phydev, epause))
		return -EINVAL;

	priv->autoneg_pause = !!epause->autoneg;
	priv->tx_pause = !!epause->tx_pause;
	priv->rx_pause = !!epause->rx_pause;

	bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);

	return 0;
}

/* standard ethtool support functions. */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
	BCMGENET_STAT_SOFT,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_sizeof;
	int stat_offset;
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};

#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)

#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}

#define STAT_GENET_Q(num) \
	STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
			tx_rings[num].packets), \
	STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
			tx_rings[num].bytes), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
			rx_rings[num].bytes),	 \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
			rx_rings[num].packets), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
			rx_rings[num].errors), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
			rx_rings[num].dropped)

/* There is a 0xC gap between the end of the RX stats and the beginning
 * of the TX stats, and another one between the end of the TX stats and
 * the beginning of the RX RUNT stats.
 */
#define BCMGENET_STAT_OFFSET	0xc

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* Misc UniMAC counters */
	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
			UMAC_RBUF_OVFL_CNT_V1),
	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
			UMAC_RBUF_ERR_CNT_V1),
	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_realloc_tsb", mib.tx_realloc_tsb),
	STAT_GENET_SOFT_MIB("tx_realloc_tsb_failed",
			    mib.tx_realloc_tsb_failed),
	/* Per TX queues */
	STAT_GENET_Q(0),
	STAT_GENET_Q(1),
	STAT_GENET_Q(2),
	STAT_GENET_Q(3),
	STAT_GENET_Q(16),
};

#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)

static void bcmgenet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strscpy(info->driver, "bcmgenet", sizeof(info->driver));
}

static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCMGENET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
				 u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcmgenet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

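/* The misc counters moved after GENET v1: on v2 the overflow/error
 * counts live in the RBUF block at the *_V2 offsets, on v3 and later at
 * the *_V3PLUS offsets. A counter that reads back as all-ones has
 * overflowed and is reset by writing zero.
 */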
static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
{
	u16 new_offset;
	u32 val;

	switch (offset) {
	case UMAC_RBUF_OVFL_CNT_V1:
		if (GENET_IS_V2(priv))
			new_offset = RBUF_OVFL_CNT_V2;
		else
			new_offset = RBUF_OVFL_CNT_V3PLUS;

		val = bcmgenet_rbuf_readl(priv, new_offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_rbuf_writel(priv, 0, new_offset);
		break;
	case UMAC_RBUF_ERR_CNT_V1:
		if (GENET_IS_V2(priv))
			new_offset = RBUF_ERR_CNT_V2;
		else
			new_offset = RBUF_ERR_CNT_V3PLUS;

		val = bcmgenet_rbuf_readl(priv, new_offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_rbuf_writel(priv, 0, new_offset);
		break;
	default:
		val = bcmgenet_umac_readl(priv, offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_umac_writel(priv, 0, offset);
		break;
	}

	return val;
}

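/* Mirror the hardware MIB into the private structure. The running
 * offset j follows the hardware layout, with a BCMGENET_STAT_OFFSET
 * gap skipped before the TX block and another before the RUNT block
 * (see the fallthrough cases below).
 */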
static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		switch (s->type) {
		case BCMGENET_STAT_NETDEV:
		case BCMGENET_STAT_SOFT:
			continue;
		case BCMGENET_STAT_RUNT:
			offset += BCMGENET_STAT_OFFSET;
			fallthrough;
		case BCMGENET_STAT_MIB_TX:
			offset += BCMGENET_STAT_OFFSET;
			fallthrough;
		case BCMGENET_STAT_MIB_RX:
			val = bcmgenet_umac_readl(priv,
						  UMAC_MIB_START + j + offset);
			offset = 0;	/* Reset Offset */
			break;
		case BCMGENET_STAT_MISC:
			if (GENET_IS_V1(priv)) {
				val = bcmgenet_umac_readl(priv, s->reg_offset);
				/* clear if overflowed */
				if (val == ~0)
					bcmgenet_umac_writel(priv, 0,
							     s->reg_offset);
			} else {
				val = bcmgenet_update_stat_misc(priv,
								s->reg_offset);
			}
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}
}

static void bcmgenet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	dev->netdev_ops->ndo_get_stats(dev);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		if (sizeof(unsigned long) != sizeof(u32) &&
		    s->stat_sizeof == sizeof(unsigned long))
			data[i] = *(unsigned long *)p;
		else
			data[i] = *(u32 *)p;
	}
}

void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
			     bool tx_lpi_enabled)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
	u32 reg;

	if (enable && !priv->clk_eee_enabled) {
		clk_prepare_enable(priv->clk_eee);
		priv->clk_eee_enabled = true;
	}

	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
	if (enable)
		reg |= EEE_EN;
	else
		reg &= ~EEE_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);

	/* Enable EEE and switch to a 27 MHz clock automatically */
	reg = bcmgenet_readl(priv->base + off);
	if (tx_lpi_enabled)
		reg |= TBUF_EEE_EN | TBUF_PM_EN;
	else
		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
	bcmgenet_writel(reg, priv->base + off);

	/* Do the same thing for RBUF */
	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
	if (enable)
		reg |= RBUF_EEE_EN | RBUF_PM_EN;
	else
		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);

	if (!enable && priv->clk_eee_enabled) {
		clk_disable_unprepare(priv->clk_eee);
		priv->clk_eee_enabled = false;
	}

	priv->eee.eee_enabled = enable;
	priv->eee.tx_lpi_enabled = tx_lpi_enabled;
}

static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_keee *p = &priv->eee;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	if (!dev->phydev)
		return -ENODEV;

	e->tx_lpi_enabled = p->tx_lpi_enabled;
	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

	return phy_ethtool_get_eee(dev->phydev, e);
}

static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_keee *p = &priv->eee;
	bool active;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	if (!dev->phydev)
		return -ENODEV;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcmgenet_eee_enable_set(dev, false, false);
	} else {
		active = phy_init_eee(dev->phydev, false) >= 0;
		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
		bcmgenet_eee_enable_set(dev, active, e->tx_lpi_enabled);
	}

	return phy_ethtool_set_eee(dev->phydev, e);
}

static int bcmgenet_validate_flow(struct net_device *dev,
				  struct ethtool_rxnfc *cmd)
{
	struct ethtool_usrip4_spec *l4_mask;
	struct ethhdr *eth_mask;

	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES &&
	    cmd->fs.location != RX_CLS_LOC_ANY) {
		netdev_err(dev, "rxnfc: Invalid location (%d)\n",
			   cmd->fs.location);
		return -EINVAL;
	}

	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case IP_USER_FLOW:
		l4_mask = &cmd->fs.m_u.usr_ip4_spec;
		/* don't allow mask which isn't valid */
		if (VALIDATE_MASK(l4_mask->ip4src) ||
		    VALIDATE_MASK(l4_mask->ip4dst) ||
		    VALIDATE_MASK(l4_mask->l4_4_bytes) ||
		    VALIDATE_MASK(l4_mask->proto) ||
		    VALIDATE_MASK(l4_mask->ip_ver) ||
		    VALIDATE_MASK(l4_mask->tos)) {
			netdev_err(dev, "rxnfc: Unsupported mask\n");
			return -EINVAL;
		}
		break;
	case ETHER_FLOW:
		eth_mask = &cmd->fs.m_u.ether_spec;
		/* don't allow mask which isn't valid */
		if (VALIDATE_MASK(eth_mask->h_dest) ||
		    VALIDATE_MASK(eth_mask->h_source) ||
		    VALIDATE_MASK(eth_mask->h_proto)) {
			netdev_err(dev, "rxnfc: Unsupported mask\n");
			return -EINVAL;
		}
		break;
	default:
		netdev_err(dev, "rxnfc: Unsupported flow type (0x%x)\n",
			   cmd->fs.flow_type);
		return -EINVAL;
	}

	if ((cmd->fs.flow_type & FLOW_EXT)) {
		/* don't allow mask which isn't valid */
		if (VALIDATE_MASK(cmd->fs.m_ext.vlan_etype) ||
		    VALIDATE_MASK(cmd->fs.m_ext.vlan_tci)) {
			netdev_err(dev, "rxnfc: Unsupported mask\n");
			return -EINVAL;
		}
		if (cmd->fs.m_ext.data[0] || cmd->fs.m_ext.data[1]) {
			netdev_err(dev, "rxnfc: user-def not supported\n");
			return -EINVAL;
		}
	}

	if ((cmd->fs.flow_type & FLOW_MAC_EXT)) {
		/* don't allow mask which isn't valid */
		if (VALIDATE_MASK(cmd->fs.m_ext.h_dest)) {
			netdev_err(dev, "rxnfc: Unsupported mask\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int bcmgenet_insert_flow(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rxnfc_rule *loc_rule;
	int err, i;

	if (priv->hw_params->hfb_filter_size < 128) {
		netdev_err(dev, "rxnfc: Not supported by this device\n");
		return -EINVAL;
	}

	if (cmd->fs.ring_cookie > priv->hw_params->rx_queues &&
	    cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) {
		netdev_err(dev, "rxnfc: Unsupported action (%llu)\n",
			   cmd->fs.ring_cookie);
		return -EINVAL;
	}

	err = bcmgenet_validate_flow(dev, cmd);
	if (err)
		return err;

	if (cmd->fs.location == RX_CLS_LOC_ANY) {
		list_for_each_entry(loc_rule, &priv->rxnfc_list, list) {
			cmd->fs.location = loc_rule->fs.location;
			err = memcmp(&loc_rule->fs, &cmd->fs,
				     sizeof(struct ethtool_rx_flow_spec));
			if (!err)
				/* rule exists so return current location */
				return 0;
		}
		for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
			loc_rule = &priv->rxnfc_rules[i];
			if (loc_rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
				cmd->fs.location = i;
				break;
			}
		}
		if (i == MAX_NUM_OF_FS_RULES) {
			cmd->fs.location = RX_CLS_LOC_ANY;
			return -ENOSPC;
		}
	} else {
		loc_rule = &priv->rxnfc_rules[cmd->fs.location];
	}
	if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
		bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
	if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
		list_del(&loc_rule->list);
		bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
	}
	loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
	memcpy(&loc_rule->fs, &cmd->fs,
	       sizeof(struct ethtool_rx_flow_spec));

	bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule);

	list_add_tail(&loc_rule->list, &priv->rxnfc_list);

	return 0;
}

static int bcmgenet_delete_flow(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rxnfc_rule *rule;
	int err = 0;

	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;

	rule = &priv->rxnfc_rules[cmd->fs.location];
	if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
		err = -ENOENT;
		goto out;
	}

	if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
		bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
	if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
		list_del(&rule->list);
		bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
	}
	rule->state = BCMGENET_RXNFC_STATE_UNUSED;
	memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));

out:
	return err;
}

static int bcmgenet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int err = 0;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		err = bcmgenet_insert_flow(dev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = bcmgenet_delete_flow(dev, cmd);
		break;
	default:
		netdev_warn(priv->dev, "Unsupported ethtool command. (%d)\n",
			    cmd->cmd);
		return -EINVAL;
	}

	return err;
}

static int bcmgenet_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
			     int loc)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rxnfc_rule *rule;
	int err = 0;

	if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;

	rule = &priv->rxnfc_rules[loc];
	if (rule->state == BCMGENET_RXNFC_STATE_UNUSED)
		err = -ENOENT;
	else
		memcpy(&cmd->fs, &rule->fs,
		       sizeof(struct ethtool_rx_flow_spec));

	return err;
}

static int bcmgenet_get_num_flows(struct bcmgenet_priv *priv)
{
	struct list_head *pos;
	int res = 0;

	list_for_each(pos, &priv->rxnfc_list)
		res++;

	return res;
}

static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			      u32 *rule_locs)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rxnfc_rule *rule;
	int err = 0;
	int i = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = priv->hw_params->rx_queues ?: 1;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = bcmgenet_get_num_flows(priv);
		cmd->data = MAX_NUM_OF_FS_RULES | RX_CLS_LOC_SPECIAL;
		break;
	case ETHTOOL_GRXCLSRULE:
		err = bcmgenet_get_flow(dev, cmd, cmd->fs.location);
		break;
	case ETHTOOL_GRXCLSRLALL:
		list_for_each_entry(rule, &priv->rxnfc_list, list)
			if (i < cmd->rule_cnt)
				rule_locs[i++] = rule->fs.location;
		cmd->rule_cnt = i;
		cmd->data = MAX_NUM_OF_FS_RULES;
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.begin			= bcmgenet_begin,
	.complete		= bcmgenet_complete,
	.get_strings		= bcmgenet_get_strings,
	.get_sset_count		= bcmgenet_get_sset_count,
	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
	.get_drvinfo		= bcmgenet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= bcmgenet_get_msglevel,
	.set_msglevel		= bcmgenet_set_msglevel,
	.get_wol		= bcmgenet_get_wol,
	.set_wol		= bcmgenet_set_wol,
	.get_eee		= bcmgenet_get_eee,
	.set_eee		= bcmgenet_set_eee,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_coalesce		= bcmgenet_get_coalesce,
	.set_coalesce		= bcmgenet_set_coalesce,
	.get_link_ksettings	= bcmgenet_get_link_ksettings,
	.set_link_ksettings	= bcmgenet_set_link_ksettings,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_rxnfc		= bcmgenet_get_rxnfc,
	.set_rxnfc		= bcmgenet_set_rxnfc,
	.get_pauseparam		= bcmgenet_get_pauseparam,
	.set_pauseparam		= bcmgenet_set_pauseparam,
};

/* Power down the unimac, based on mode. */
static int bcmgenet_power_down(struct bcmgenet_priv *priv,
				enum bcmgenet_power_mode mode)
{
	int ret = 0;
	u32 reg;

	switch (mode) {
	case GENET_POWER_CABLE_SENSE:
		phy_detach(priv->dev->phydev);
		break;

	case GENET_POWER_WOL_MAGIC:
		ret = bcmgenet_wol_power_down_cfg(priv, mode);
		break;

	case GENET_POWER_PASSIVE:
		/* Power down LED */
		if (priv->hw_params->flags & GENET_HAS_EXT) {
			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
			if (GENET_IS_V5(priv) && !priv->ephy_16nm)
				reg |= EXT_PWR_DOWN_PHY_EN |
				       EXT_PWR_DOWN_PHY_RD |
				       EXT_PWR_DOWN_PHY_SD |
				       EXT_PWR_DOWN_PHY_RX |
				       EXT_PWR_DOWN_PHY_TX |
				       EXT_IDDQ_GLBL_PWR;
			else
				reg |= EXT_PWR_DOWN_PHY;

			reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

			bcmgenet_phy_power_set(priv->dev, false);
		}
		break;
	default:
		break;
	}

	return ret;
}

static void bcmgenet_power_up(struct bcmgenet_priv *priv,
			      enum bcmgenet_power_mode mode)
{
	u32 reg;

	if (!(priv->hw_params->flags & GENET_HAS_EXT))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

	switch (mode) {
	case GENET_POWER_PASSIVE:
		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS |
			 EXT_ENERGY_DET_MASK);
		if (GENET_IS_V5(priv) && !priv->ephy_16nm) {
			reg &= ~(EXT_PWR_DOWN_PHY_EN |
				 EXT_PWR_DOWN_PHY_RD |
				 EXT_PWR_DOWN_PHY_SD |
				 EXT_PWR_DOWN_PHY_RX |
				 EXT_PWR_DOWN_PHY_TX |
				 EXT_IDDQ_GLBL_PWR);
			reg |= EXT_PHY_RESET;
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
			mdelay(1);

			reg &= ~EXT_PHY_RESET;
		} else {
			reg &= ~EXT_PWR_DOWN_PHY;
			reg |= EXT_PWR_DN_EN_LD;
		}
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		bcmgenet_phy_power_set(priv->dev, true);
		break;

	case GENET_POWER_CABLE_SENSE:
		/* enable APD */
		if (!GENET_IS_V5(priv)) {
			reg |= EXT_PWR_DN_EN_LD;
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		}
		break;
	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_up_cfg(priv, mode);
		return;
	default:
		break;
	}
}

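/* Claim the next free control block and advance the local write
 * pointer; bcmgenet_put_txcb() below performs the inverse rewind,
 * presumably so a partially mapped frame can be unwound on a DMA
 * mapping failure.
 */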
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Advancing local write pointer */
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return tx_cb_ptr;
}

static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Rewinding local write pointer */
	if (ring->write_ptr == ring->cb_ptr)
		ring->write_ptr = ring->end_ptr;
	else
		ring->write_ptr--;

	return tx_cb_ptr;
}

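/* Ring 16 (the default ring) is signalled through the INTRL2_0
 * controller via the shared RXDMA/TXDMA_DONE bits, while each priority
 * ring owns one bit of its own in INTRL2_1, indexed by the ring number
 * (with the RX bits further shifted by UMAC_IRQ1_RX_INTR_SHIFT).
 */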
1762static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
1763{
1764	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
1765				 INTRL2_CPU_MASK_SET);
1766}
1767
1768static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
1769{
1770	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
1771				 INTRL2_CPU_MASK_CLEAR);
1772}
1773
1774static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
1775{
1776	bcmgenet_intrl2_1_writel(ring->priv,
1777				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
1778				 INTRL2_CPU_MASK_SET);
1779}
1780
1781static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
1782{
1783	bcmgenet_intrl2_1_writel(ring->priv,
1784				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
1785				 INTRL2_CPU_MASK_CLEAR);
1786}
1787
1788static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
1789{
1790	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
1791				 INTRL2_CPU_MASK_SET);
1792}
1793
1794static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
1795{
1796	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
1797				 INTRL2_CPU_MASK_CLEAR);
1798}
1799
1800static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
1801{
1802	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
1803				 INTRL2_CPU_MASK_CLEAR);
1804}
1805
1806static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
1807{
1808	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
1809				 INTRL2_CPU_MASK_SET);
1810}
1811
1812/* Simple helper to free a transmit control block's resources
1813 * Returns an skb when the last transmit control block associated with the
1814 * skb is freed.  The skb should be freed by the caller if necessary.
1815 */
1816static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev,
1817					   struct enet_cb *cb)
1818{
1819	struct sk_buff *skb;
1820
1821	skb = cb->skb;
1822
1823	if (skb) {
1824		cb->skb = NULL;
1825		if (cb == GENET_CB(skb)->first_cb)
1826			dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
1827					 dma_unmap_len(cb, dma_len),
1828					 DMA_TO_DEVICE);
1829		else
1830			dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr),
1831				       dma_unmap_len(cb, dma_len),
1832				       DMA_TO_DEVICE);
1833		dma_unmap_addr_set(cb, dma_addr, 0);
1834
1835		if (cb == GENET_CB(skb)->last_cb)
1836			return skb;
1837
1838	} else if (dma_unmap_addr(cb, dma_addr)) {
1839		dma_unmap_page(dev,
1840			       dma_unmap_addr(cb, dma_addr),
1841			       dma_unmap_len(cb, dma_len),
1842			       DMA_TO_DEVICE);
1843		dma_unmap_addr_set(cb, dma_addr, 0);
1844	}
1845
1846	return NULL;
1847}
1848
1849/* Simple helper to free a receive control block's resources */
1850static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
1851					   struct enet_cb *cb)
1852{
1853	struct sk_buff *skb;
1854
1855	skb = cb->skb;
1856	cb->skb = NULL;
1857
1858	if (dma_unmap_addr(cb, dma_addr)) {
1859		dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
1860				 dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE);
1861		dma_unmap_addr_set(cb, dma_addr, 0);
1862	}
1863
1864	return skb;
1865}
1866
/* Unlocked version of the reclaim routine; callers must hold ring->lock */
1868static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1869					  struct bcmgenet_tx_ring *ring)
1870{
1871	struct bcmgenet_priv *priv = netdev_priv(dev);
1872	unsigned int txbds_processed = 0;
1873	unsigned int bytes_compl = 0;
1874	unsigned int pkts_compl = 0;
1875	unsigned int txbds_ready;
1876	unsigned int c_index;
1877	struct sk_buff *skb;
1878
1879	/* Clear status before servicing to reduce spurious interrupts */
1880	if (ring->index == DESC_INDEX)
1881		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
1882					 INTRL2_CPU_CLEAR);
1883	else
1884		bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
1885					 INTRL2_CPU_CLEAR);
1886
	/* Compute how many buffers have been transmitted since the last
	 * reclaim; masking the subtraction handles c_index wraparound
	 */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
		& DMA_C_INDEX_MASK;
	txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;
1891
1892	netif_dbg(priv, tx_done, dev,
1893		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
1894		  __func__, ring->index, ring->c_index, c_index, txbds_ready);
1895
1896	/* Reclaim transmitted buffers */
1897	while (txbds_processed < txbds_ready) {
1898		skb = bcmgenet_free_tx_cb(&priv->pdev->dev,
1899					  &priv->tx_cbs[ring->clean_ptr]);
1900		if (skb) {
1901			pkts_compl++;
1902			bytes_compl += GENET_CB(skb)->bytes_sent;
1903			dev_consume_skb_any(skb);
1904		}
1905
1906		txbds_processed++;
1907		if (likely(ring->clean_ptr < ring->end_ptr))
1908			ring->clean_ptr++;
1909		else
1910			ring->clean_ptr = ring->cb_ptr;
1911	}
1912
1913	ring->free_bds += txbds_processed;
1914	ring->c_index = c_index;
1915
1916	ring->packets += pkts_compl;
1917	ring->bytes += bytes_compl;
1918
1919	netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
1920				  pkts_compl, bytes_compl);
1921
1922	return txbds_processed;
1923}
1924
1925static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
1926				struct bcmgenet_tx_ring *ring)
1927{
1928	unsigned int released;
1929
1930	spin_lock_bh(&ring->lock);
1931	released = __bcmgenet_tx_reclaim(dev, ring);
1932	spin_unlock_bh(&ring->lock);
1933
1934	return released;
1935}
1936
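/* Tx NAPI poll handler: reclaim completed descriptors, wake the netdev
 * queue once enough descriptors are free, and only report completion
 * (re-enabling the ring interrupt) when no work was left.
 */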
1937static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
1938{
1939	struct bcmgenet_tx_ring *ring =
1940		container_of(napi, struct bcmgenet_tx_ring, napi);
1941	unsigned int work_done = 0;
1942	struct netdev_queue *txq;
1943
1944	spin_lock(&ring->lock);
1945	work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
1946	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
1947		txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
1948		netif_tx_wake_queue(txq);
1949	}
1950	spin_unlock(&ring->lock);
1951
1952	if (work_done == 0) {
1953		napi_complete(napi);
1954		ring->int_enable(ring);
1955
1956		return 0;
1957	}
1958
1959	return budget;
1960}
1961
1962static void bcmgenet_tx_reclaim_all(struct net_device *dev)
1963{
1964	struct bcmgenet_priv *priv = netdev_priv(dev);
1965	int i;
1966
1967	if (netif_is_multiqueue(dev)) {
1968		for (i = 0; i < priv->hw_params->tx_queues; i++)
1969			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
1970	}
1971
1972	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
1973}
1974
/* Reallocate the SKB to put enough headroom in front of it and insert
 * the Transmit Status Block (TSB) carrying the checksum offsets
 */
1978static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
1979					struct sk_buff *skb)
1980{
1981	struct bcmgenet_priv *priv = netdev_priv(dev);
1982	struct status_64 *status = NULL;
1983	struct sk_buff *new_skb;
1984	u16 offset;
1985	u8 ip_proto;
1986	__be16 ip_ver;
1987	u32 tx_csum_info;
1988
1989	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
1990		/* If 64 byte status block enabled, must make sure skb has
1991		 * enough headroom for us to insert 64B status block.
1992		 */
1993		new_skb = skb_realloc_headroom(skb, sizeof(*status));
1994		if (!new_skb) {
1995			dev_kfree_skb_any(skb);
1996			priv->mib.tx_realloc_tsb_failed++;
1997			dev->stats.tx_dropped++;
1998			return NULL;
1999		}
2000		dev_consume_skb_any(skb);
2001		skb = new_skb;
2002		priv->mib.tx_realloc_tsb++;
2003	}
2004
2005	skb_push(skb, sizeof(*status));
2006	status = (struct status_64 *)skb->data;
2007
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2009		ip_ver = skb->protocol;
2010		switch (ip_ver) {
2011		case htons(ETH_P_IP):
2012			ip_proto = ip_hdr(skb)->protocol;
2013			break;
2014		case htons(ETH_P_IPV6):
2015			ip_proto = ipv6_hdr(skb)->nexthdr;
2016			break;
2017		default:
2018			/* don't use UDP flag */
2019			ip_proto = 0;
2020			break;
2021		}
2022
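		/* The TSB was just pushed in front of the data, so subtract
		 * its size to make the checksum start and offset fields
		 * relative to the frame that follows the status block.
		 */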
2023		offset = skb_checksum_start_offset(skb) - sizeof(*status);
2024		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
2025				(offset + skb->csum_offset) |
2026				STATUS_TX_CSUM_LV;
2027
2028		/* Set the special UDP flag for UDP */
2029		if (ip_proto == IPPROTO_UDP)
2030			tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
2031
2032		status->tx_csum_info = tx_csum_info;
2033	}
2034
2035	return skb;
2036}
2037
2038static void bcmgenet_hide_tsb(struct sk_buff *skb)
2039{
2040	__skb_pull(skb, sizeof(struct status_64));
2041}
2042
2043static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
2044{
2045	struct bcmgenet_priv *priv = netdev_priv(dev);
2046	struct device *kdev = &priv->pdev->dev;
2047	struct bcmgenet_tx_ring *ring = NULL;
2048	struct enet_cb *tx_cb_ptr;
2049	struct netdev_queue *txq;
2050	int nr_frags, index;
2051	dma_addr_t mapping;
2052	unsigned int size;
2053	skb_frag_t *frag;
2054	u32 len_stat;
2055	int ret;
2056	int i;
2057
	index = skb_get_queue_mapping(skb);
	/* Mapping strategy:
	 * queue_mapping = 0, unclassified, packet transmitted through ring 16
	 * queue_mapping = 1, goes to ring 0 (highest priority queue)
	 * queue_mapping = 2, goes to ring 1.
	 * queue_mapping = 3, goes to ring 2.
	 * queue_mapping = 4, goes to ring 3.
	 */
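	/* e.g. an skb with queue_mapping == 2 is transmitted on
	 * priv->tx_rings[1], which is backed by netdev Tx queue 2
	 */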
2066	if (index == 0)
2067		index = DESC_INDEX;
2068	else
2069		index -= 1;
2070
2071	ring = &priv->tx_rings[index];
2072	txq = netdev_get_tx_queue(dev, ring->queue);
2073
2074	nr_frags = skb_shinfo(skb)->nr_frags;
2075
2076	spin_lock(&ring->lock);
2077	if (ring->free_bds <= (nr_frags + 1)) {
2078		if (!netif_tx_queue_stopped(txq))
2079			netif_tx_stop_queue(txq);
2080		ret = NETDEV_TX_BUSY;
2081		goto out;
2082	}
2083
	/* Record how many bytes will actually be sent on the wire, i.e.
	 * without the TSB inserted for transmit checksum offload
	 */
2087	GENET_CB(skb)->bytes_sent = skb->len;
2088
2089	/* add the Transmit Status Block */
2090	skb = bcmgenet_add_tsb(dev, skb);
2091	if (!skb) {
2092		ret = NETDEV_TX_OK;
2093		goto out;
2094	}
2095
2096	for (i = 0; i <= nr_frags; i++) {
2097		tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
2098
2099		BUG_ON(!tx_cb_ptr);
2100
2101		if (!i) {
2102			/* Transmit single SKB or head of fragment list */
2103			GENET_CB(skb)->first_cb = tx_cb_ptr;
2104			size = skb_headlen(skb);
2105			mapping = dma_map_single(kdev, skb->data, size,
2106						 DMA_TO_DEVICE);
2107		} else {
2108			/* xmit fragment */
2109			frag = &skb_shinfo(skb)->frags[i - 1];
2110			size = skb_frag_size(frag);
2111			mapping = skb_frag_dma_map(kdev, frag, 0, size,
2112						   DMA_TO_DEVICE);
2113		}
2114
2115		ret = dma_mapping_error(kdev, mapping);
2116		if (ret) {
2117			priv->mib.tx_dma_failed++;
2118			netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
2119			ret = NETDEV_TX_OK;
2120			goto out_unmap_frags;
2121		}
2122		dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
2123		dma_unmap_len_set(tx_cb_ptr, dma_len, size);
2124
2125		tx_cb_ptr->skb = skb;
2126
2127		len_stat = (size << DMA_BUFLENGTH_SHIFT) |
2128			   (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
2129
2130		/* Note: if we ever change from DMA_TX_APPEND_CRC below we
2131		 * will need to restore software padding of "runt" packets
2132		 */
2133		len_stat |= DMA_TX_APPEND_CRC;
2134
2135		if (!i) {
2136			len_stat |= DMA_SOP;
2137			if (skb->ip_summed == CHECKSUM_PARTIAL)
2138				len_stat |= DMA_TX_DO_CSUM;
2139		}
2140		if (i == nr_frags)
2141			len_stat |= DMA_EOP;
2142
2143		dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);
2144	}
2145
2146	GENET_CB(skb)->last_cb = tx_cb_ptr;
2147
2148	bcmgenet_hide_tsb(skb);
2149	skb_tx_timestamp(skb);
2150
2151	/* Decrement total BD count and advance our write pointer */
2152	ring->free_bds -= nr_frags + 1;
2153	ring->prod_index += nr_frags + 1;
2154	ring->prod_index &= DMA_P_INDEX_MASK;
2155
2156	netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent);
2157
2158	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
2159		netif_tx_stop_queue(txq);
2160
2161	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
2162		/* Packets are ready, update producer index */
2163		bcmgenet_tdma_ring_writel(priv, ring->index,
2164					  ring->prod_index, TDMA_PROD_INDEX);
2165out:
2166	spin_unlock(&ring->lock);
2167
2168	return ret;
2169
2170out_unmap_frags:
2171	/* Back up for failed control block mapping */
2172	bcmgenet_put_txcb(priv, ring);
2173
2174	/* Unmap successfully mapped control blocks */
2175	while (i-- > 0) {
2176		tx_cb_ptr = bcmgenet_put_txcb(priv, ring);
2177		bcmgenet_free_tx_cb(kdev, tx_cb_ptr);
2178	}
2179
2180	dev_kfree_skb(skb);
2181	goto out;
2182}
2183
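/* Allocate and DMA-map a fresh Rx skb for the given control block and
 * return the skb previously attached to it (already unmapped).  On
 * allocation or mapping failure the old skb is left in place and NULL
 * is returned, so the ring never loses a buffer.
 */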
2184static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
2185					  struct enet_cb *cb)
2186{
2187	struct device *kdev = &priv->pdev->dev;
2188	struct sk_buff *skb;
2189	struct sk_buff *rx_skb;
2190	dma_addr_t mapping;
2191
2192	/* Allocate a new Rx skb */
2193	skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT,
2194				 GFP_ATOMIC | __GFP_NOWARN);
2195	if (!skb) {
2196		priv->mib.alloc_rx_buff_failed++;
2197		netif_err(priv, rx_err, priv->dev,
2198			  "%s: Rx skb allocation failed\n", __func__);
2199		return NULL;
2200	}
2201
2202	/* DMA-map the new Rx skb */
2203	mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
2204				 DMA_FROM_DEVICE);
2205	if (dma_mapping_error(kdev, mapping)) {
2206		priv->mib.rx_dma_failed++;
2207		dev_kfree_skb_any(skb);
2208		netif_err(priv, rx_err, priv->dev,
2209			  "%s: Rx skb DMA mapping failed\n", __func__);
2210		return NULL;
2211	}
2212
2213	/* Grab the current Rx skb from the ring and DMA-unmap it */
2214	rx_skb = bcmgenet_free_rx_cb(kdev, cb);
2215
2216	/* Put the new Rx skb on the ring */
2217	cb->skb = skb;
2218	dma_unmap_addr_set(cb, dma_addr, mapping);
2219	dma_unmap_len_set(cb, dma_len, priv->rx_buf_len);
2220	dmadesc_set_addr(priv, cb->bd_addr, mapping);
2221
2222	/* Return the current Rx skb to caller */
2223	return rx_skb;
2224}
2225
/* bcmgenet_desc_rx - descriptor-based Rx processing.
 * This may be called from a bottom half or from the NAPI polling method.
 */
2229static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
2230				     unsigned int budget)
2231{
2232	struct bcmgenet_priv *priv = ring->priv;
2233	struct net_device *dev = priv->dev;
2234	struct enet_cb *cb;
2235	struct sk_buff *skb;
2236	u32 dma_length_status;
2237	unsigned long dma_flag;
2238	int len;
2239	unsigned int rxpktprocessed = 0, rxpkttoprocess;
2240	unsigned int bytes_processed = 0;
2241	unsigned int p_index, mask;
2242	unsigned int discards;
2243
2244	/* Clear status before servicing to reduce spurious interrupts */
2245	if (ring->index == DESC_INDEX) {
2246		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
2247					 INTRL2_CPU_CLEAR);
2248	} else {
2249		mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
2250		bcmgenet_intrl2_1_writel(priv,
2251					 mask,
2252					 INTRL2_CPU_CLEAR);
2253	}
2254
2255	p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
2256
2257	discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
2258		   DMA_P_INDEX_DISCARD_CNT_MASK;
2259	if (discards > ring->old_discards) {
2260		discards = discards - ring->old_discards;
2261		ring->errors += discards;
2262		ring->old_discards += discards;
2263
2264		/* Clear HW register when we reach 75% of maximum 0xFFFF */
2265		if (ring->old_discards >= 0xC000) {
2266			ring->old_discards = 0;
2267			bcmgenet_rdma_ring_writel(priv, ring->index, 0,
2268						  RDMA_PROD_INDEX);
2269		}
2270	}
2271
2272	p_index &= DMA_P_INDEX_MASK;
2273	rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;
2274
2275	netif_dbg(priv, rx_status, dev,
2276		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
2277
2278	while ((rxpktprocessed < rxpkttoprocess) &&
2279	       (rxpktprocessed < budget)) {
2280		struct status_64 *status;
2281		__be16 rx_csum;
2282
2283		cb = &priv->rx_cbs[ring->read_ptr];
2284		skb = bcmgenet_rx_refill(priv, cb);
2285
2286		if (unlikely(!skb)) {
2287			ring->dropped++;
2288			goto next;
2289		}
2290
2291		status = (struct status_64 *)skb->data;
2292		dma_length_status = status->length_status;
2293		if (dev->features & NETIF_F_RXCSUM) {
2294			rx_csum = (__force __be16)(status->rx_csum & 0xffff);
2295			if (rx_csum) {
2296				skb->csum = (__force __wsum)ntohs(rx_csum);
2297				skb->ip_summed = CHECKSUM_COMPLETE;
2298			}
2299		}
2300
2301		/* DMA flags and length are still valid no matter how
2302		 * we got the Receive Status Vector (64B RSB or register)
2303		 */
2304		dma_flag = dma_length_status & 0xffff;
2305		len = dma_length_status >> DMA_BUFLENGTH_SHIFT;
2306
2307		netif_dbg(priv, rx_status, dev,
2308			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
2309			  __func__, p_index, ring->c_index,
2310			  ring->read_ptr, dma_length_status);
2311
2312		if (unlikely(len > RX_BUF_LENGTH)) {
2313			netif_err(priv, rx_status, dev, "oversized packet\n");
2314			dev->stats.rx_length_errors++;
2315			dev->stats.rx_errors++;
2316			dev_kfree_skb_any(skb);
2317			goto next;
2318		}
2319
2320		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
2321			netif_err(priv, rx_status, dev,
2322				  "dropping fragmented packet!\n");
2323			ring->errors++;
2324			dev_kfree_skb_any(skb);
2325			goto next;
2326		}
2327
2328		/* report errors */
2329		if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
2330						DMA_RX_OV |
2331						DMA_RX_NO |
2332						DMA_RX_LG |
2333						DMA_RX_RXER))) {
2334			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
2335				  (unsigned int)dma_flag);
2336			if (dma_flag & DMA_RX_CRC_ERROR)
2337				dev->stats.rx_crc_errors++;
2338			if (dma_flag & DMA_RX_OV)
2339				dev->stats.rx_over_errors++;
2340			if (dma_flag & DMA_RX_NO)
2341				dev->stats.rx_frame_errors++;
2342			if (dma_flag & DMA_RX_LG)
2343				dev->stats.rx_length_errors++;
2344			dev->stats.rx_errors++;
2345			dev_kfree_skb_any(skb);
2346			goto next;
2347		} /* error packet */
2348
2349		skb_put(skb, len);
2350
		/* Remove the 64B RSB and the 2 bytes the hardware prepends
		 * for IP alignment
		 */
		skb_pull(skb, 66);
		len -= 66;
2354
2355		if (priv->crc_fwd_en) {
2356			skb_trim(skb, len - ETH_FCS_LEN);
2357			len -= ETH_FCS_LEN;
2358		}
2359
2360		bytes_processed += len;
2361
		/* Finish setting up the received SKB and send it to the kernel */
2363		skb->protocol = eth_type_trans(skb, priv->dev);
2364		ring->packets++;
2365		ring->bytes += len;
2366		if (dma_flag & DMA_RX_MULT)
2367			dev->stats.multicast++;
2368
2369		/* Notify kernel */
2370		napi_gro_receive(&ring->napi, skb);
2371		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");
2372
2373next:
2374		rxpktprocessed++;
2375		if (likely(ring->read_ptr < ring->end_ptr))
2376			ring->read_ptr++;
2377		else
2378			ring->read_ptr = ring->cb_ptr;
2379
2380		ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
2381		bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
2382	}
2383
2384	ring->dim.bytes = bytes_processed;
2385	ring->dim.packets = rxpktprocessed;
2386
2387	return rxpktprocessed;
2388}
2389
2390/* Rx NAPI polling method */
2391static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
2392{
2393	struct bcmgenet_rx_ring *ring = container_of(napi,
2394			struct bcmgenet_rx_ring, napi);
2395	struct dim_sample dim_sample = {};
2396	unsigned int work_done;
2397
2398	work_done = bcmgenet_desc_rx(ring, budget);
2399
2400	if (work_done < budget) {
2401		napi_complete_done(napi, work_done);
2402		ring->int_enable(ring);
2403	}
2404
2405	if (ring->dim.use_dim) {
2406		dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
2407				  ring->dim.bytes, &dim_sample);
2408		net_dim(&ring->dim.dim, dim_sample);
2409	}
2410
2411	return work_done;
2412}
2413
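/* Deferred work for adaptive Rx interrupt coalescing: apply the
 * moderation profile chosen by the net_dim algorithm to the ring and
 * restart the measurement cycle.
 */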
2414static void bcmgenet_dim_work(struct work_struct *work)
2415{
2416	struct dim *dim = container_of(work, struct dim, work);
2417	struct bcmgenet_net_dim *ndim =
2418			container_of(dim, struct bcmgenet_net_dim, dim);
2419	struct bcmgenet_rx_ring *ring =
2420			container_of(ndim, struct bcmgenet_rx_ring, dim);
2421	struct dim_cq_moder cur_profile =
2422			net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
2423
2424	bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts);
2425	dim->state = DIM_START_MEASURE;
2426}
2427
/* Assign a new skb to every Rx DMA descriptor in the ring */
2429static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
2430				     struct bcmgenet_rx_ring *ring)
2431{
2432	struct enet_cb *cb;
2433	struct sk_buff *skb;
2434	int i;
2435
2436	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
2437
	/* loop here for each buffer needing assignment */
2439	for (i = 0; i < ring->size; i++) {
2440		cb = ring->cbs + i;
2441		skb = bcmgenet_rx_refill(priv, cb);
2442		if (skb)
2443			dev_consume_skb_any(skb);
2444		if (!cb->skb)
2445			return -ENOMEM;
2446	}
2447
2448	return 0;
2449}
2450
2451static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
2452{
2453	struct sk_buff *skb;
2454	struct enet_cb *cb;
2455	int i;
2456
2457	for (i = 0; i < priv->num_rx_bds; i++) {
2458		cb = &priv->rx_cbs[i];
2459
2460		skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
2461		if (skb)
2462			dev_consume_skb_any(skb);
2463	}
2464}
2465
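/* Set or clear the given bits in UMAC_CMD under reg_lock.  If a software
 * reset is in flight, leave the register untouched; when disabling, wait
 * long enough for an in-progress packet to drain.
 */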
2466static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
2467{
2468	u32 reg;
2469
2470	spin_lock_bh(&priv->reg_lock);
2471	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
2472	if (reg & CMD_SW_RESET) {
2473		spin_unlock_bh(&priv->reg_lock);
2474		return;
2475	}
2476	if (enable)
2477		reg |= mask;
2478	else
2479		reg &= ~mask;
2480	bcmgenet_umac_writel(priv, reg, UMAC_CMD);
2481	spin_unlock_bh(&priv->reg_lock);
2482
2483	/* UniMAC stops on a packet boundary, wait for a full-size packet
2484	 * to be processed
2485	 */
2486	if (enable == 0)
2487		usleep_range(1000, 2000);
2488}
2489
2490static void reset_umac(struct bcmgenet_priv *priv)
2491{
2492	/* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
2493	bcmgenet_rbuf_ctrl_set(priv, 0);
2494	udelay(10);
2495
2496	/* issue soft reset and disable MAC while updating its registers */
2497	spin_lock_bh(&priv->reg_lock);
2498	bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
2499	udelay(2);
2500	spin_unlock_bh(&priv->reg_lock);
2501}
2502
2503static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
2504{
	/* Mask all interrupt sources and clear any pending status */
2506	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
2507	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
2508	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
2509	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
2510}
2511
2512static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
2513{
2514	u32 int0_enable = 0;
2515
2516	/* Monitor cable plug/unplugged event for internal PHY, external PHY
2517	 * and MoCA PHY
2518	 */
2519	if (priv->internal_phy) {
2520		int0_enable |= UMAC_IRQ_LINK_EVENT;
2521		if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
2522			int0_enable |= UMAC_IRQ_PHY_DET_R;
2523	} else if (priv->ext_phy) {
2524		int0_enable |= UMAC_IRQ_LINK_EVENT;
2525	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
2526		if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
2527			int0_enable |= UMAC_IRQ_LINK_EVENT;
2528	}
2529	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
2530}
2531
2532static void init_umac(struct bcmgenet_priv *priv)
2533{
2534	struct device *kdev = &priv->pdev->dev;
2535	u32 reg;
2536	u32 int0_enable = 0;
2537
2538	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
2539
2540	reset_umac(priv);
2541
	/* clear tx/rx counters */
2543	bcmgenet_umac_writel(priv,
2544			     MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
2545			     UMAC_MIB_CTRL);
2546	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);
2547
2548	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
2549
2550	/* init tx registers, enable TSB */
2551	reg = bcmgenet_tbuf_ctrl_get(priv);
2552	reg |= TBUF_64B_EN;
2553	bcmgenet_tbuf_ctrl_set(priv, reg);
2554
2555	/* init rx registers, enable ip header optimization and RSB */
2556	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
2557	reg |= RBUF_ALIGN_2B | RBUF_64B_EN;
2558	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);
2559
2560	/* enable rx checksumming */
2561	reg = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
2562	reg |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS;
2563	/* If UniMAC forwards CRC, we need to skip over it to get
2564	 * a valid CHK bit to be set in the per-packet status word
2565	 */
2566	if (priv->crc_fwd_en)
2567		reg |= RBUF_SKIP_FCS;
2568	else
2569		reg &= ~RBUF_SKIP_FCS;
2570	bcmgenet_rbuf_writel(priv, reg, RBUF_CHK_CTRL);
2571
2572	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
2573		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
2574
2575	bcmgenet_intr_disable(priv);
2576
2577	/* Configure backpressure vectors for MoCA */
2578	if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
2579		reg = bcmgenet_bp_mc_get(priv);
2580		reg |= BIT(priv->hw_params->bp_in_en_shift);
2581
2582		/* bp_mask: back pressure mask */
2583		if (netif_is_multiqueue(priv->dev))
2584			reg |= priv->hw_params->bp_in_mask;
2585		else
2586			reg &= ~priv->hw_params->bp_in_mask;
2587		bcmgenet_bp_mc_set(priv, reg);
2588	}
2589
2590	/* Enable MDIO interrupts on GENET v3+ */
2591	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
2592		int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
2593
2594	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
2595
2596	dev_dbg(kdev, "done init umac\n");
2597}
2598
2599static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring,
2600			      void (*cb)(struct work_struct *work))
2601{
2602	struct bcmgenet_net_dim *dim = &ring->dim;
2603
2604	INIT_WORK(&dim->dim.work, cb);
2605	dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
2606	dim->event_ctr = 0;
2607	dim->packets = 0;
2608	dim->bytes = 0;
2609}
2610
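/* Program the ring's Rx coalescing parameters, falling back to the
 * DIM defaults when adaptive coalescing is in use
 */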
2611static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring)
2612{
2613	struct bcmgenet_net_dim *dim = &ring->dim;
2614	struct dim_cq_moder moder;
2615	u32 usecs, pkts;
2616
2617	usecs = ring->rx_coalesce_usecs;
2618	pkts = ring->rx_max_coalesced_frames;
2619
2620	/* If DIM was enabled, re-apply default parameters */
2621	if (dim->use_dim) {
2622		moder = net_dim_get_def_rx_moderation(dim->dim.mode);
2623		usecs = moder.usec;
2624		pkts = moder.pkts;
2625	}
2626
2627	bcmgenet_set_rx_coalesce(ring, usecs, pkts);
2628}
2629
2630/* Initialize a Tx ring along with corresponding hardware registers */
2631static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
2632				  unsigned int index, unsigned int size,
2633				  unsigned int start_ptr, unsigned int end_ptr)
2634{
2635	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
2636	u32 words_per_bd = WORDS_PER_BD(priv);
2637	u32 flow_period_val = 0;
2638
2639	spin_lock_init(&ring->lock);
2640	ring->priv = priv;
2641	ring->index = index;
2642	if (index == DESC_INDEX) {
2643		ring->queue = 0;
2644		ring->int_enable = bcmgenet_tx_ring16_int_enable;
2645		ring->int_disable = bcmgenet_tx_ring16_int_disable;
2646	} else {
2647		ring->queue = index + 1;
2648		ring->int_enable = bcmgenet_tx_ring_int_enable;
2649		ring->int_disable = bcmgenet_tx_ring_int_disable;
2650	}
2651	ring->cbs = priv->tx_cbs + start_ptr;
2652	ring->size = size;
2653	ring->clean_ptr = start_ptr;
2654	ring->c_index = 0;
2655	ring->free_bds = size;
2656	ring->write_ptr = start_ptr;
2657	ring->cb_ptr = start_ptr;
2658	ring->end_ptr = end_ptr - 1;
2659	ring->prod_index = 0;
2660
2661	/* Set flow period for ring != 16 */
2662	if (index != DESC_INDEX)
2663		flow_period_val = ENET_MAX_MTU_SIZE << 16;
2664
2665	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
2666	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
2667	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
2668	/* Disable rate control for now */
2669	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
2670				  TDMA_FLOW_PERIOD);
2671	bcmgenet_tdma_ring_writel(priv, index,
2672				  ((size << DMA_RING_SIZE_SHIFT) |
2673				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
2674
2675	/* Set start and end address, read and write pointers */
2676	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
2677				  DMA_START_ADDR);
2678	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
2679				  TDMA_READ_PTR);
2680	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
2681				  TDMA_WRITE_PTR);
2682	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
2683				  DMA_END_ADDR);
2684
2685	/* Initialize Tx NAPI */
2686	netif_napi_add_tx(priv->dev, &ring->napi, bcmgenet_tx_poll);
2687}
2688
2689/* Initialize a RDMA ring */
2690static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
2691				 unsigned int index, unsigned int size,
2692				 unsigned int start_ptr, unsigned int end_ptr)
2693{
2694	struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
2695	u32 words_per_bd = WORDS_PER_BD(priv);
2696	int ret;
2697
2698	ring->priv = priv;
2699	ring->index = index;
2700	if (index == DESC_INDEX) {
2701		ring->int_enable = bcmgenet_rx_ring16_int_enable;
2702		ring->int_disable = bcmgenet_rx_ring16_int_disable;
2703	} else {
2704		ring->int_enable = bcmgenet_rx_ring_int_enable;
2705		ring->int_disable = bcmgenet_rx_ring_int_disable;
2706	}
2707	ring->cbs = priv->rx_cbs + start_ptr;
2708	ring->size = size;
2709	ring->c_index = 0;
2710	ring->read_ptr = start_ptr;
2711	ring->cb_ptr = start_ptr;
2712	ring->end_ptr = end_ptr - 1;
2713
2714	ret = bcmgenet_alloc_rx_buffers(priv, ring);
2715	if (ret)
2716		return ret;
2717
2718	bcmgenet_init_dim(ring, bcmgenet_dim_work);
2719	bcmgenet_init_rx_coalesce(ring);
2720
2721	/* Initialize Rx NAPI */
2722	netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll);
2723
2724	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
2725	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
2726	bcmgenet_rdma_ring_writel(priv, index,
2727				  ((size << DMA_RING_SIZE_SHIFT) |
2728				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
2729	bcmgenet_rdma_ring_writel(priv, index,
2730				  (DMA_FC_THRESH_LO <<
2731				   DMA_XOFF_THRESHOLD_SHIFT) |
2732				   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
2733
2734	/* Set start and end address, read and write pointers */
2735	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
2736				  DMA_START_ADDR);
2737	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
2738				  RDMA_READ_PTR);
2739	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
2740				  RDMA_WRITE_PTR);
2741	bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
2742				  DMA_END_ADDR);
2743
2744	return ret;
2745}
2746
2747static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
2748{
2749	unsigned int i;
2750	struct bcmgenet_tx_ring *ring;
2751
2752	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2753		ring = &priv->tx_rings[i];
2754		napi_enable(&ring->napi);
2755		ring->int_enable(ring);
2756	}
2757
2758	ring = &priv->tx_rings[DESC_INDEX];
2759	napi_enable(&ring->napi);
2760	ring->int_enable(ring);
2761}
2762
2763static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
2764{
2765	unsigned int i;
2766	struct bcmgenet_tx_ring *ring;
2767
2768	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2769		ring = &priv->tx_rings[i];
2770		napi_disable(&ring->napi);
2771	}
2772
2773	ring = &priv->tx_rings[DESC_INDEX];
2774	napi_disable(&ring->napi);
2775}
2776
2777static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
2778{
2779	unsigned int i;
2780	struct bcmgenet_tx_ring *ring;
2781
2782	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2783		ring = &priv->tx_rings[i];
2784		netif_napi_del(&ring->napi);
2785	}
2786
2787	ring = &priv->tx_rings[DESC_INDEX];
2788	netif_napi_del(&ring->napi);
2789}
2790
2791/* Initialize Tx queues
2792 *
2793 * Queues 0-3 are priority-based, each one has 32 descriptors,
2794 * with queue 0 being the highest priority queue.
2795 *
2796 * Queue 16 is the default Tx queue with
2797 * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
2798 *
2799 * The transmit control block pool is then partitioned as follows:
2800 * - Tx queue 0 uses tx_cbs[0..31]
2801 * - Tx queue 1 uses tx_cbs[32..63]
2802 * - Tx queue 2 uses tx_cbs[64..95]
2803 * - Tx queue 3 uses tx_cbs[96..127]
2804 * - Tx queue 16 uses tx_cbs[128..255]
2805 */
2806static void bcmgenet_init_tx_queues(struct net_device *dev)
2807{
2808	struct bcmgenet_priv *priv = netdev_priv(dev);
2809	u32 i, dma_enable;
2810	u32 dma_ctrl, ring_cfg;
2811	u32 dma_priority[3] = {0, 0, 0};
2812
2813	dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
2814	dma_enable = dma_ctrl & DMA_EN;
2815	dma_ctrl &= ~DMA_EN;
2816	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
2817
2818	dma_ctrl = 0;
2819	ring_cfg = 0;
2820
2821	/* Enable strict priority arbiter mode */
2822	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
2823
2824	/* Initialize Tx priority queues */
2825	for (i = 0; i < priv->hw_params->tx_queues; i++) {
2826		bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
2827				      i * priv->hw_params->tx_bds_per_q,
2828				      (i + 1) * priv->hw_params->tx_bds_per_q);
2829		ring_cfg |= (1 << i);
2830		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2831		dma_priority[DMA_PRIO_REG_INDEX(i)] |=
2832			((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
2833	}
2834
2835	/* Initialize Tx default queue 16 */
2836	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
2837			      priv->hw_params->tx_queues *
2838			      priv->hw_params->tx_bds_per_q,
2839			      TOTAL_DESC);
2840	ring_cfg |= (1 << DESC_INDEX);
2841	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
2842	dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
2843		((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
2844		 DMA_PRIO_REG_SHIFT(DESC_INDEX));
2845
	/* Set Tx queue priorities; each DMA_PRIORITY_n register packs the
	 * priority fields of several queues, with DMA_PRIO_REG_INDEX() and
	 * DMA_PRIO_REG_SHIFT() selecting a queue's word and bit offset
	 */
2847	bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
2848	bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
2849	bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
2850
2851	/* Enable Tx queues */
2852	bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
2853
	/* Enable the Tx rings and re-enable Tx DMA if it was enabled on entry */
2855	if (dma_enable)
2856		dma_ctrl |= DMA_EN;
2857	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
2858}
2859
2860static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
2861{
2862	unsigned int i;
2863	struct bcmgenet_rx_ring *ring;
2864
2865	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2866		ring = &priv->rx_rings[i];
2867		napi_enable(&ring->napi);
2868		ring->int_enable(ring);
2869	}
2870
2871	ring = &priv->rx_rings[DESC_INDEX];
2872	napi_enable(&ring->napi);
2873	ring->int_enable(ring);
2874}
2875
2876static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
2877{
2878	unsigned int i;
2879	struct bcmgenet_rx_ring *ring;
2880
2881	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2882		ring = &priv->rx_rings[i];
2883		napi_disable(&ring->napi);
2884		cancel_work_sync(&ring->dim.dim.work);
2885	}
2886
2887	ring = &priv->rx_rings[DESC_INDEX];
2888	napi_disable(&ring->napi);
2889	cancel_work_sync(&ring->dim.dim.work);
2890}
2891
2892static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
2893{
2894	unsigned int i;
2895	struct bcmgenet_rx_ring *ring;
2896
2897	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2898		ring = &priv->rx_rings[i];
2899		netif_napi_del(&ring->napi);
2900	}
2901
2902	ring = &priv->rx_rings[DESC_INDEX];
2903	netif_napi_del(&ring->napi);
2904}
2905
2906/* Initialize Rx queues
2907 *
2908 * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
2909 * used to direct traffic to these queues.
2910 *
2911 * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
2912 */
2913static int bcmgenet_init_rx_queues(struct net_device *dev)
2914{
2915	struct bcmgenet_priv *priv = netdev_priv(dev);
2916	u32 i;
2917	u32 dma_enable;
2918	u32 dma_ctrl;
2919	u32 ring_cfg;
2920	int ret;
2921
2922	dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
2923	dma_enable = dma_ctrl & DMA_EN;
2924	dma_ctrl &= ~DMA_EN;
2925	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
2926
2927	dma_ctrl = 0;
2928	ring_cfg = 0;
2929
2930	/* Initialize Rx priority queues */
2931	for (i = 0; i < priv->hw_params->rx_queues; i++) {
2932		ret = bcmgenet_init_rx_ring(priv, i,
2933					    priv->hw_params->rx_bds_per_q,
2934					    i * priv->hw_params->rx_bds_per_q,
2935					    (i + 1) *
2936					    priv->hw_params->rx_bds_per_q);
2937		if (ret)
2938			return ret;
2939
2940		ring_cfg |= (1 << i);
2941		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2942	}
2943
2944	/* Initialize Rx default queue 16 */
2945	ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
2946				    priv->hw_params->rx_queues *
2947				    priv->hw_params->rx_bds_per_q,
2948				    TOTAL_DESC);
2949	if (ret)
2950		return ret;
2951
2952	ring_cfg |= (1 << DESC_INDEX);
2953	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
2954
2955	/* Enable rings */
2956	bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);
2957
	/* Enable the rings as descriptor rings and re-enable Rx DMA if it
	 * was enabled on entry
	 */
2959	if (dma_enable)
2960		dma_ctrl |= DMA_EN;
2961	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
2962
2963	return 0;
2964}
2965
2966static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
2967{
2968	int ret = 0;
2969	int timeout = 0;
2970	u32 reg;
2971	u32 dma_ctrl;
2972	int i;
2973
	/* Disable TDMA to stop adding more frames to the Tx DMA */
2975	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2976	reg &= ~DMA_EN;
2977	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2978
2979	/* Check TDMA status register to confirm TDMA is disabled */
2980	while (timeout++ < DMA_TIMEOUT_VAL) {
2981		reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
2982		if (reg & DMA_DISABLED)
2983			break;
2984
2985		udelay(1);
2986	}
2987
2988	if (timeout == DMA_TIMEOUT_VAL) {
2989		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
2990		ret = -ETIMEDOUT;
2991	}
2992
2993	/* Wait 10ms for packet drain in both tx and rx dma */
2994	usleep_range(10000, 20000);
2995
2996	/* Disable RDMA */
2997	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2998	reg &= ~DMA_EN;
2999	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
3000
3001	timeout = 0;
3002	/* Check RDMA status register to confirm RDMA is disabled */
3003	while (timeout++ < DMA_TIMEOUT_VAL) {
3004		reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
3005		if (reg & DMA_DISABLED)
3006			break;
3007
3008		udelay(1);
3009	}
3010
3011	if (timeout == DMA_TIMEOUT_VAL) {
3012		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
3013		ret = -ETIMEDOUT;
3014	}
3015
3016	dma_ctrl = 0;
3017	for (i = 0; i < priv->hw_params->rx_queues; i++)
3018		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
3019	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
3020	reg &= ~dma_ctrl;
3021	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
3022
3023	dma_ctrl = 0;
3024	for (i = 0; i < priv->hw_params->tx_queues; i++)
3025		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
3026	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
3027	reg &= ~dma_ctrl;
3028	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
3029
3030	return ret;
3031}
3032
3033static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
3034{
3035	struct netdev_queue *txq;
3036	int i;
3037
3038	bcmgenet_fini_rx_napi(priv);
3039	bcmgenet_fini_tx_napi(priv);
3040
3041	for (i = 0; i < priv->num_tx_bds; i++)
3042		dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev,
3043						  priv->tx_cbs + i));
3044
3045	for (i = 0; i < priv->hw_params->tx_queues; i++) {
3046		txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
3047		netdev_tx_reset_queue(txq);
3048	}
3049
3050	txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
3051	netdev_tx_reset_queue(txq);
3052
3053	bcmgenet_free_rx_buffers(priv);
3054	kfree(priv->rx_cbs);
3055	kfree(priv->tx_cbs);
3056}
3057
/* bcmgenet_init_dma: initialize the Tx/Rx DMA rings and their control state */
3059static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
3060{
3061	int ret;
3062	unsigned int i;
3063	struct enet_cb *cb;
3064
3065	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
3066
3067	/* Initialize common Rx ring structures */
3068	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
3069	priv->num_rx_bds = TOTAL_DESC;
3070	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
3071			       GFP_KERNEL);
3072	if (!priv->rx_cbs)
3073		return -ENOMEM;
3074
3075	for (i = 0; i < priv->num_rx_bds; i++) {
3076		cb = priv->rx_cbs + i;
3077		cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
3078	}
3079
3080	/* Initialize common TX ring structures */
3081	priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
3082	priv->num_tx_bds = TOTAL_DESC;
3083	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
3084			       GFP_KERNEL);
3085	if (!priv->tx_cbs) {
3086		kfree(priv->rx_cbs);
3087		return -ENOMEM;
3088	}
3089
3090	for (i = 0; i < priv->num_tx_bds; i++) {
3091		cb = priv->tx_cbs + i;
3092		cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
3093	}
3094
	/* Init RDMA */
3096	bcmgenet_rdma_writel(priv, priv->dma_max_burst_length,
3097			     DMA_SCB_BURST_SIZE);
3098
3099	/* Initialize Rx queues */
3100	ret = bcmgenet_init_rx_queues(priv->dev);
3101	if (ret) {
3102		netdev_err(priv->dev, "failed to initialize Rx queues\n");
3103		bcmgenet_free_rx_buffers(priv);
3104		kfree(priv->rx_cbs);
3105		kfree(priv->tx_cbs);
3106		return ret;
3107	}
3108
	/* Init TDMA */
3110	bcmgenet_tdma_writel(priv, priv->dma_max_burst_length,
3111			     DMA_SCB_BURST_SIZE);
3112
3113	/* Initialize Tx queues */
3114	bcmgenet_init_tx_queues(priv->dev);
3115
3116	return 0;
3117}
3118
3119/* Interrupt bottom half */
3120static void bcmgenet_irq_task(struct work_struct *work)
3121{
3122	unsigned int status;
3123	struct bcmgenet_priv *priv = container_of(
3124			work, struct bcmgenet_priv, bcmgenet_irq_work);
3125
3126	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
3127
3128	spin_lock_irq(&priv->lock);
3129	status = priv->irq0_stat;
3130	priv->irq0_stat = 0;
3131	spin_unlock_irq(&priv->lock);
3132
3133	if (status & UMAC_IRQ_PHY_DET_R &&
3134	    priv->dev->phydev->autoneg != AUTONEG_ENABLE) {
3135		phy_init_hw(priv->dev->phydev);
3136		genphy_config_aneg(priv->dev->phydev);
3137	}
3138
3139	/* Link UP/DOWN event */
3140	if (status & UMAC_IRQ_LINK_EVENT)
3141		phy_mac_interrupt(priv->dev->phydev);
}
3144
3145/* bcmgenet_isr1: handle Rx and Tx priority queues */
3146static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
3147{
3148	struct bcmgenet_priv *priv = dev_id;
3149	struct bcmgenet_rx_ring *rx_ring;
3150	struct bcmgenet_tx_ring *tx_ring;
3151	unsigned int index, status;
3152
	/* Read irq status and ignore sources that are masked off */
3154	status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
3155		~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
3156
3157	/* clear interrupts */
3158	bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);
3159
3160	netif_dbg(priv, intr, priv->dev,
3161		  "%s: IRQ=0x%x\n", __func__, status);
3162
3163	/* Check Rx priority queue interrupts */
3164	for (index = 0; index < priv->hw_params->rx_queues; index++) {
3165		if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
3166			continue;
3167
3168		rx_ring = &priv->rx_rings[index];
3169		rx_ring->dim.event_ctr++;
3170
3171		if (likely(napi_schedule_prep(&rx_ring->napi))) {
3172			rx_ring->int_disable(rx_ring);
3173			__napi_schedule_irqoff(&rx_ring->napi);
3174		}
3175	}
3176
3177	/* Check Tx priority queue interrupts */
3178	for (index = 0; index < priv->hw_params->tx_queues; index++) {
3179		if (!(status & BIT(index)))
3180			continue;
3181
3182		tx_ring = &priv->tx_rings[index];
3183
3184		if (likely(napi_schedule_prep(&tx_ring->napi))) {
3185			tx_ring->int_disable(tx_ring);
3186			__napi_schedule_irqoff(&tx_ring->napi);
3187		}
3188	}
3189
3190	return IRQ_HANDLED;
3191}
3192
/* bcmgenet_isr0: handle Rx and Tx default queues plus link, PHY detection
 * and MDIO events
 */
3194static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
3195{
3196	struct bcmgenet_priv *priv = dev_id;
3197	struct bcmgenet_rx_ring *rx_ring;
3198	struct bcmgenet_tx_ring *tx_ring;
3199	unsigned int status;
3200	unsigned long flags;
3201
	/* Read irq status and ignore sources that are masked off */
3203	status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
3204		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
3205
3206	/* clear interrupts */
3207	bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);
3208
3209	netif_dbg(priv, intr, priv->dev,
3210		  "IRQ=0x%x\n", status);
3211
3212	if (status & UMAC_IRQ_RXDMA_DONE) {
3213		rx_ring = &priv->rx_rings[DESC_INDEX];
3214		rx_ring->dim.event_ctr++;
3215
3216		if (likely(napi_schedule_prep(&rx_ring->napi))) {
3217			rx_ring->int_disable(rx_ring);
3218			__napi_schedule_irqoff(&rx_ring->napi);
3219		}
3220	}
3221
3222	if (status & UMAC_IRQ_TXDMA_DONE) {
3223		tx_ring = &priv->tx_rings[DESC_INDEX];
3224
3225		if (likely(napi_schedule_prep(&tx_ring->napi))) {
3226			tx_ring->int_disable(tx_ring);
3227			__napi_schedule_irqoff(&tx_ring->napi);
3228		}
3229	}
3230
3231	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
3232		status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
3233		wake_up(&priv->wq);
3234	}
3235
	/* All other interrupts of interest are handled in the bottom half */
3237	status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R);
3238	if (status) {
3239		/* Save irq status for bottom-half processing. */
3240		spin_lock_irqsave(&priv->lock, flags);
3241		priv->irq0_stat |= status;
3242		spin_unlock_irqrestore(&priv->lock, flags);
3243
3244		schedule_work(&priv->bcmgenet_irq_work);
3245	}
3246
3247	return IRQ_HANDLED;
3248}
3249
3250static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
3251{
3252	/* Acknowledge the interrupt */
3253	return IRQ_HANDLED;
3254}
3255
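/* Pulse bit 1 of the RBUF flush control register (presumably the
 * umac_sw_rst strap noted in reset_umac()) to reset the UniMAC before
 * it is reprogrammed.
 */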
3256static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
3257{
3258	u32 reg;
3259
3260	reg = bcmgenet_rbuf_ctrl_get(priv);
3261	reg |= BIT(1);
3262	bcmgenet_rbuf_ctrl_set(priv, reg);
3263	udelay(10);
3264
3265	reg &= ~BIT(1);
3266	bcmgenet_rbuf_ctrl_set(priv, reg);
3267	udelay(10);
3268}
3269
3270static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
3271				 const unsigned char *addr)
3272{
3273	bcmgenet_umac_writel(priv, get_unaligned_be32(&addr[0]), UMAC_MAC0);
3274	bcmgenet_umac_writel(priv, get_unaligned_be16(&addr[4]), UMAC_MAC1);
3275}
3276
3277static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
3278				 unsigned char *addr)
3279{
3280	u32 addr_tmp;
3281
3282	addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC0);
3283	put_unaligned_be32(addr_tmp, &addr[0]);
3284	addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC1);
3285	put_unaligned_be16(addr_tmp, &addr[4]);
3286}
3287
3288/* Returns a reusable dma control register value */
3289static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv, bool flush_rx)
3290{
3291	unsigned int i;
3292	u32 reg;
3293	u32 dma_ctrl;
3294
3295	/* disable DMA */
3296	dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
3297	for (i = 0; i < priv->hw_params->tx_queues; i++)
3298		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
3299	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
3300	reg &= ~dma_ctrl;
3301	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
3302
3303	dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
3304	for (i = 0; i < priv->hw_params->rx_queues; i++)
3305		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
3306	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
3307	reg &= ~dma_ctrl;
3308	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
3309
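	/* Flush the Tx FIFO by pulsing UMAC_TX_FLUSH */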
3310	bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
3311	udelay(10);
3312	bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
3313
3314	if (flush_rx) {
3315		reg = bcmgenet_rbuf_ctrl_get(priv);
3316		bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0));
3317		udelay(10);
3318		bcmgenet_rbuf_ctrl_set(priv, reg);
3319		udelay(10);
3320	}
3321
3322	return dma_ctrl;
3323}
3324
3325static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
3326{
3327	u32 reg;
3328
3329	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
3330	reg |= dma_ctrl;
3331	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
3332
3333	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
3334	reg |= dma_ctrl;
3335	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
3336}
3337
3338static void bcmgenet_netif_start(struct net_device *dev)
3339{
3340	struct bcmgenet_priv *priv = netdev_priv(dev);
3341
3342	/* Start the network engine */
3343	netif_addr_lock_bh(dev);
3344	bcmgenet_set_rx_mode(dev);
3345	netif_addr_unlock_bh(dev);
3346	bcmgenet_enable_rx_napi(priv);
3347
3348	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
3349
3350	bcmgenet_enable_tx_napi(priv);
3351
3352	/* Monitor link interrupts now */
3353	bcmgenet_link_intr_enable(priv);
3354
3355	phy_start(dev->phydev);
3356}
3357
3358static int bcmgenet_open(struct net_device *dev)
3359{
3360	struct bcmgenet_priv *priv = netdev_priv(dev);
3361	unsigned long dma_ctrl;
3362	int ret;
3363
3364	netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
3365
3366	/* Turn on the clock */
3367	clk_prepare_enable(priv->clk);
3368
	/* If this is an internal GPHY, power it back on now before UniMAC is
	 * brought out of reset, as absolutely no UniMAC activity is allowed
	 */
3372	if (priv->internal_phy)
3373		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
3374
3375	/* take MAC out of reset */
3376	bcmgenet_umac_reset(priv);
3377
3378	init_umac(priv);
3379
3380	/* Apply features again in case we changed them while interface was
3381	 * down
3382	 */
3383	bcmgenet_set_features(dev, dev->features);
3384
3385	bcmgenet_set_hw_addr(priv, dev->dev_addr);
3386
3387	/* Disable RX/TX DMA and flush TX and RX queues */
3388	dma_ctrl = bcmgenet_dma_disable(priv, true);
3389
3390	/* Reinitialize TDMA and RDMA and SW housekeeping */
3391	ret = bcmgenet_init_dma(priv);
3392	if (ret) {
3393		netdev_err(dev, "failed to initialize DMA\n");
3394		goto err_clk_disable;
3395	}
3396
3397	/* Always enable ring 16 - descriptor ring */
3398	bcmgenet_enable_dma(priv, dma_ctrl);
3399
3400	/* HFB init */
3401	bcmgenet_hfb_init(priv);
3402
3403	ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
3404			  dev->name, priv);
3405	if (ret < 0) {
3406		netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
3407		goto err_fini_dma;
3408	}
3409
3410	ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
3411			  dev->name, priv);
3412	if (ret < 0) {
3413		netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
3414		goto err_irq0;
3415	}
3416
3417	ret = bcmgenet_mii_probe(dev);
3418	if (ret) {
3419		netdev_err(dev, "failed to connect to PHY\n");
3420		goto err_irq1;
3421	}
3422
3423	bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);
3424
3425	bcmgenet_netif_start(dev);
3426
3427	netif_tx_start_all_queues(dev);
3428
3429	return 0;
3430
3431err_irq1:
3432	free_irq(priv->irq1, priv);
3433err_irq0:
3434	free_irq(priv->irq0, priv);
3435err_fini_dma:
3436	bcmgenet_dma_teardown(priv);
3437	bcmgenet_fini_dma(priv);
3438err_clk_disable:
3439	if (priv->internal_phy)
3440		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
3441	clk_disable_unprepare(priv->clk);
3442	return ret;
3443}
3444
3445static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy)
3446{
3447	struct bcmgenet_priv *priv = netdev_priv(dev);
3448
3449	bcmgenet_disable_tx_napi(priv);
3450	netif_tx_disable(dev);
3451
3452	/* Disable MAC receive */
3453	umac_enable_set(priv, CMD_RX_EN, false);
3454
3455	bcmgenet_dma_teardown(priv);
3456
	/* Disable MAC transmit; Tx DMA must already be disabled at this point */
3458	umac_enable_set(priv, CMD_TX_EN, false);
3459
3460	if (stop_phy)
3461		phy_stop(dev->phydev);
3462	bcmgenet_disable_rx_napi(priv);
3463	bcmgenet_intr_disable(priv);
3464
3465	/* Wait for pending work items to complete. Since interrupts are
3466	 * disabled no new work will be scheduled.
3467	 */
3468	cancel_work_sync(&priv->bcmgenet_irq_work);
3469
3470	/* tx reclaim */
3471	bcmgenet_tx_reclaim_all(dev);
3472	bcmgenet_fini_dma(priv);
3473}
3474
3475static int bcmgenet_close(struct net_device *dev)
3476{
3477	struct bcmgenet_priv *priv = netdev_priv(dev);
3478	int ret = 0;
3479
3480	netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
3481
3482	bcmgenet_netif_stop(dev, false);
3483
3484	/* Really kill the PHY state machine and disconnect from it */
3485	phy_disconnect(dev->phydev);
3486
3487	free_irq(priv->irq0, priv);
3488	free_irq(priv->irq1, priv);
3489
3490	if (priv->internal_phy)
3491		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
3492
3493	clk_disable_unprepare(priv->clk);
3494
3495	return ret;
3496}
3497
3498static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
3499{
3500	struct bcmgenet_priv *priv = ring->priv;
3501	u32 p_index, c_index, intsts, intmsk;
3502	struct netdev_queue *txq;
3503	unsigned int free_bds;
3504	bool txq_stopped;
3505
3506	if (!netif_msg_tx_err(priv))
3507		return;
3508
3509	txq = netdev_get_tx_queue(priv->dev, ring->queue);
3510
3511	spin_lock(&ring->lock);
3512	if (ring->index == DESC_INDEX) {
3513		intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
3514		intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
3515	} else {
3516		intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
3517		intmsk = 1 << ring->index;
3518	}
3519	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
3520	p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
3521	txq_stopped = netif_tx_queue_stopped(txq);
3522	free_bds = ring->free_bds;
3523	spin_unlock(&ring->lock);
3524
3525	netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
3526		  "TX queue status: %s, interrupts: %s\n"
3527		  "(sw)free_bds: %d (sw)size: %d\n"
3528		  "(sw)p_index: %d (hw)p_index: %d\n"
3529		  "(sw)c_index: %d (hw)c_index: %d\n"
3530		  "(sw)clean_p: %d (sw)write_p: %d\n"
3531		  "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
3532		  ring->index, ring->queue,
3533		  txq_stopped ? "stopped" : "active",
3534		  intsts & intmsk ? "enabled" : "disabled",
3535		  free_bds, ring->size,
3536		  ring->prod_index, p_index & DMA_P_INDEX_MASK,
3537		  ring->c_index, c_index & DMA_C_INDEX_MASK,
3538		  ring->clean_ptr, ring->write_ptr,
3539		  ring->cb_ptr, ring->end_ptr);
3540}
3541
3542static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
3543{
3544	struct bcmgenet_priv *priv = netdev_priv(dev);
3545	u32 int0_enable = 0;
3546	u32 int1_enable = 0;
3547	unsigned int q;
3548
3549	netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
3550
3551	for (q = 0; q < priv->hw_params->tx_queues; q++)
3552		bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
3553	bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
3554
3555	bcmgenet_tx_reclaim_all(dev);
3556
3557	for (q = 0; q < priv->hw_params->tx_queues; q++)
3558		int1_enable |= (1 << q);
3559
3560	int0_enable = UMAC_IRQ_TXDMA_DONE;
3561
3562	/* Re-enable TX interrupts if disabled */
3563	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
3564	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
3565
3566	netif_trans_update(dev);
3567
3568	dev->stats.tx_errors++;
3569
3570	netif_tx_wake_all_queues(dev);
3571}
3572
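/* The MDF block holds up to MAX_MDF_FILTER perfect-match destination
 * addresses; each address is programmed as two words of UMAC_MDF_ADDR
 * (see bcmgenet_set_mdf_addr() below).
 */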
3573#define MAX_MDF_FILTER	17
3574
3575static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
3576					 const unsigned char *addr,
3577					 int *i)
3578{
3579	bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
3580			     UMAC_MDF_ADDR + (*i * 4));
3581	bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
3582			     addr[4] << 8 | addr[5],
3583			     UMAC_MDF_ADDR + ((*i + 1) * 4));
3584	*i += 2;
3585}
3586
3587static void bcmgenet_set_rx_mode(struct net_device *dev)
3588{
3589	struct bcmgenet_priv *priv = netdev_priv(dev);
3590	struct netdev_hw_addr *ha;
3591	int i, nfilter;
3592	u32 reg;
3593
3594	netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
3595
3596	/* Number of filters needed */
3597	nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2;
3598
	/* Turn on promiscuous mode for three scenarios:
	 * 1. IFF_PROMISC flag is set
	 * 2. IFF_ALLMULTI flag is set
	 * 3. The number of filters needed exceeds the number of filters
	 *    supported by the hardware.
	 */
3606	spin_lock(&priv->reg_lock);
3607	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
3608	if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
3609	    (nfilter > MAX_MDF_FILTER)) {
3610		reg |= CMD_PROMISC;
3611		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
3612		spin_unlock(&priv->reg_lock);
3613		bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
3614		return;
3615	} else {
3616		reg &= ~CMD_PROMISC;
3617		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
3618		spin_unlock(&priv->reg_lock);
3619	}
3620
3621	/* update MDF filter */
3622	i = 0;
3623	/* Broadcast */
3624	bcmgenet_set_mdf_addr(priv, dev->broadcast, &i);
	/* my own address */
3626	bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i);
3627
3628	/* Unicast */
3629	netdev_for_each_uc_addr(ha, dev)
3630		bcmgenet_set_mdf_addr(priv, ha->addr, &i);
3631
3632	/* Multicast */
3633	netdev_for_each_mc_addr(ha, dev)
3634		bcmgenet_set_mdf_addr(priv, ha->addr, &i);
3635
	/* Enable the programmed filters; each address consumes one enable
	 * bit, allocated MSB-first, so set the top nfilter bits
	 */
3637	reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter);
3638	bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
3639}
3640
3641/* Set the hardware MAC address. */
3642static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
3643{
3644	struct sockaddr *addr = p;
3645
3646	/* Setting the MAC address at the hardware level is not possible
3647	 * without disabling the UniMAC RX/TX enable bits.
3648	 */
3649	if (netif_running(dev))
3650		return -EBUSY;
3651
3652	eth_hw_addr_set(dev, addr->sa_data);
3653
3654	return 0;
3655}
3656
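/* Fold the per-ring software counters into the aggregate netdev stats */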
3657static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
3658{
3659	struct bcmgenet_priv *priv = netdev_priv(dev);
3660	unsigned long tx_bytes = 0, tx_packets = 0;
3661	unsigned long rx_bytes = 0, rx_packets = 0;
3662	unsigned long rx_errors = 0, rx_dropped = 0;
3663	struct bcmgenet_tx_ring *tx_ring;
3664	struct bcmgenet_rx_ring *rx_ring;
3665	unsigned int q;
3666
3667	for (q = 0; q < priv->hw_params->tx_queues; q++) {
3668		tx_ring = &priv->tx_rings[q];
3669		tx_bytes += tx_ring->bytes;
3670		tx_packets += tx_ring->packets;
3671	}
3672	tx_ring = &priv->tx_rings[DESC_INDEX];
3673	tx_bytes += tx_ring->bytes;
3674	tx_packets += tx_ring->packets;
3675
3676	for (q = 0; q < priv->hw_params->rx_queues; q++) {
3677		rx_ring = &priv->rx_rings[q];
3678
3679		rx_bytes += rx_ring->bytes;
3680		rx_packets += rx_ring->packets;
3681		rx_errors += rx_ring->errors;
3682		rx_dropped += rx_ring->dropped;
3683	}
3684	rx_ring = &priv->rx_rings[DESC_INDEX];
3685	rx_bytes += rx_ring->bytes;
3686	rx_packets += rx_ring->packets;
3687	rx_errors += rx_ring->errors;
3688	rx_dropped += rx_ring->dropped;
3689
3690	dev->stats.tx_bytes = tx_bytes;
3691	dev->stats.tx_packets = tx_packets;
3692	dev->stats.rx_bytes = rx_bytes;
3693	dev->stats.rx_packets = rx_packets;
3694	dev->stats.rx_errors = rx_errors;
3695	dev->stats.rx_missed_errors = rx_errors;
3696	dev->stats.rx_dropped = rx_dropped;
3697	return &dev->stats;
3698}
3699
static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!dev->phydev || !phy_is_pseudo_fixed_link(dev->phydev) ||
	    priv->phy_interface != PHY_INTERFACE_MODE_MOCA)
		return -EOPNOTSUPP;

	if (new_carrier)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);

	return 0;
}

static const struct net_device_ops bcmgenet_netdev_ops = {
	.ndo_open		= bcmgenet_open,
	.ndo_stop		= bcmgenet_close,
	.ndo_start_xmit		= bcmgenet_xmit,
	.ndo_tx_timeout		= bcmgenet_timeout,
	.ndo_set_rx_mode	= bcmgenet_set_rx_mode,
	.ndo_set_mac_address	= bcmgenet_set_mac_addr,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_features	= bcmgenet_set_features,
	.ndo_get_stats		= bcmgenet_get_stats,
	.ndo_change_carrier	= bcmgenet_change_carrier,
};

/* Array of GENET hardware parameters/characteristics */
static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
	[GENET_V1] = {
		.tx_queues = 0,
		.tx_bds_per_q = 0,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.hfb_offset = 0x1000,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x3000,
		.words_per_bd = 2,
	},
	[GENET_V2] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x1000,
		.hfb_reg_offset = 0x2000,
		.rdma_offset = 0x3000,
		.tdma_offset = 0x4000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT,
	},
	[GENET_V3] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.hfb_filter_size = 128,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x10000,
		.tdma_offset = 0x11000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
			 GENET_HAS_MOCA_LINK_DET,
	},
	[GENET_V4] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.hfb_filter_size = 128,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x4000,
		.words_per_bd = 3,
		.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
			 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
	},
	[GENET_V5] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.hfb_filter_size = 128,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x4000,
		.words_per_bd = 3,
		.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
			 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
	},
};

/* Infer hardware parameters from the detected GENET version */
static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
{
	struct bcmgenet_hw_params *params;
	u32 reg;
	u8 major;
	u16 gphy_rev;

	if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v4;
	} else if (GENET_IS_V3(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
	} else if (GENET_IS_V2(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
	} else if (GENET_IS_V1(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
	}

	/* enum genet_version starts at 1 */
	priv->hw_params = &bcmgenet_hw_params[priv->version];
	params = priv->hw_params;

	/* Read GENET HW version */
	reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
	major = (reg >> 24 & 0x0f);
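	/* The major field does not map 1:1 onto enum genet_version: the
	 * hardware reports 6 for GENET V5, 5 for V4, and 0 for V1.
	 */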
	if (major == 6)
		major = 5;
	else if (major == 5)
		major = 4;
	else if (major == 0)
		major = 1;
	if (major != priv->version) {
		dev_err(&priv->pdev->dev,
			"GENET version mismatch, got: %d, configured for: %d\n",
			major, priv->version);
	}

	/* Print the GENET core version */
	dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
		 major, (reg >> 16) & 0x0f, reg & 0xffff);

	/* Store the integrated PHY revision for the MDIO probing function
	 * to pass this information to the PHY driver. The PHY driver expects
	 * to find the PHY major revision in bits 15:8 while the GENET register
	 * stores that information in bits 7:0, account for that.
	 *
	 * On newer chips, starting with PHY revision G0, a new scheme is
	 * deployed similar to the Starfighter 2 switch with GPHY major
	 * revision in bits 15:8 and patch level in bits 7:0. Major revision 0
	 * is reserved as well as special value 0x01ff, we have a small
	 * heuristic to check for the new GPHY revision and re-arrange things
	 * so the GPHY driver is happy.
	 */
	gphy_rev = reg & 0xffff;

	if (GENET_IS_V5(priv)) {
		/* The EPHY revision should come from the MDIO registers of
		 * the PHY not from GENET.
		 */
		if (gphy_rev != 0) {
			pr_warn("GENET is reporting EPHY revision: 0x%04x\n",
				gphy_rev);
		}
	/* This is reserved so should require special treatment */
	} else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
		return;
	/* This is the good old scheme, just GPHY major, no minor nor patch */
	} else if ((gphy_rev & 0xf0) != 0) {
		priv->gphy_rev = gphy_rev << 8;
	/* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
	} else if ((gphy_rev & 0xff00) != 0) {
		priv->gphy_rev = gphy_rev;
	}

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (!(params->flags & GENET_HAS_40BITS))
		pr_warn("GENET does not support 40-bits PA\n");
#endif

	pr_debug("Configuration for version: %d\n"
		"TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
		"BP << en: %2d, BP msk: 0x%05x\n"
		"HFB count: %2d, QTAG msk: 0x%05x\n"
		"TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
		"RDMA: 0x%05x, TDMA: 0x%05x\n"
		"Words/BD: %d\n",
		priv->version,
		params->tx_queues, params->tx_bds_per_q,
		params->rx_queues, params->rx_bds_per_q,
		params->bp_in_en_shift, params->bp_in_mask,
		params->hfb_filter_cnt, params->qtag_mask,
		params->tbuf_offset, params->hfb_offset,
		params->hfb_reg_offset,
		params->rdma_offset, params->tdma_offset,
		params->words_per_bd);
}

struct bcmgenet_plat_data {
	enum bcmgenet_version version;
	u32 dma_max_burst_length;
	bool ephy_16nm;
};

static const struct bcmgenet_plat_data v1_plat_data = {
	.version = GENET_V1,
	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
};

static const struct bcmgenet_plat_data v2_plat_data = {
	.version = GENET_V2,
	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
};

static const struct bcmgenet_plat_data v3_plat_data = {
	.version = GENET_V3,
	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
};

static const struct bcmgenet_plat_data v4_plat_data = {
	.version = GENET_V4,
	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
};

static const struct bcmgenet_plat_data v5_plat_data = {
	.version = GENET_V5,
	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
};

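/* BCM2711 (as found on the Raspberry Pi 4) uses a reduced DMA burst
 * length; the default burst is reported to be unstable on that SoC.
 */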
static const struct bcmgenet_plat_data bcm2711_plat_data = {
	.version = GENET_V5,
	.dma_max_burst_length = 0x08,
};

static const struct bcmgenet_plat_data bcm7712_plat_data = {
	.version = GENET_V5,
	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
	.ephy_16nm = true,
};

static const struct of_device_id bcmgenet_match[] = {
	{ .compatible = "brcm,genet-v1", .data = &v1_plat_data },
	{ .compatible = "brcm,genet-v2", .data = &v2_plat_data },
	{ .compatible = "brcm,genet-v3", .data = &v3_plat_data },
	{ .compatible = "brcm,genet-v4", .data = &v4_plat_data },
	{ .compatible = "brcm,genet-v5", .data = &v5_plat_data },
	{ .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data },
	{ .compatible = "brcm,bcm7712-genet-v5", .data = &bcm7712_plat_data },
	{ },
};
MODULE_DEVICE_TABLE(of, bcmgenet_match);

static int bcmgenet_probe(struct platform_device *pdev)
{
	struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
	const struct bcmgenet_plat_data *pdata;
	struct bcmgenet_priv *priv;
	struct net_device *dev;
	unsigned int i;
	int err = -EIO;

	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
				 GENET_MAX_MQ_CNT + 1);
	if (!dev) {
		dev_err(&pdev->dev, "can't allocate net device\n");
		return -ENOMEM;
	}

	priv = netdev_priv(dev);
	priv->irq0 = platform_get_irq(pdev, 0);
	if (priv->irq0 < 0) {
		err = priv->irq0;
		goto err;
	}
	priv->irq1 = platform_get_irq(pdev, 1);
	if (priv->irq1 < 0) {
		err = priv->irq1;
		goto err;
	}
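	/* The Wake-on-LAN interrupt is optional: only probe deferral is
	 * treated as fatal; otherwise the driver simply runs without WOL.
	 */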
	priv->wol_irq = platform_get_irq_optional(pdev, 2);
	if (priv->wol_irq == -EPROBE_DEFER) {
		err = priv->wol_irq;
		goto err;
	}

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		err = PTR_ERR(priv->base);
		goto err;
	}

	spin_lock_init(&priv->reg_lock);
	spin_lock_init(&priv->lock);

	/* Set default pause parameters */
	priv->autoneg_pause = 1;
	priv->tx_pause = 1;
	priv->rx_pause = 1;

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->watchdog_timeo = 2 * HZ;
	dev->ethtool_ops = &bcmgenet_ethtool_ops;
	dev->netdev_ops = &bcmgenet_netdev_ops;

	priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);

	/* Set default features */
	dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
			 NETIF_F_RXCSUM;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = true;
	if (priv->wol_irq > 0) {
		err = devm_request_irq(&pdev->dev, priv->wol_irq,
				       bcmgenet_wol_isr, 0, dev->name, priv);
		if (!err)
			device_set_wakeup_capable(&pdev->dev, 1);
	}

	/* Set the needed headroom to account for any possible
	 * features enabling/disabling at runtime
	 */
	dev->needed_headroom += 64;

	priv->dev = dev;
	priv->pdev = pdev;

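	/* Prefer match data from DT/ACPI; fall back to legacy board
	 * platform_data when no firmware match was found.
	 */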
	pdata = device_get_match_data(&pdev->dev);
	if (pdata) {
		priv->version = pdata->version;
		priv->dma_max_burst_length = pdata->dma_max_burst_length;
		priv->ephy_16nm = pdata->ephy_16nm;
	} else {
		priv->version = pd->genet_version;
		priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;
	}

	priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet");
	if (IS_ERR(priv->clk)) {
		dev_dbg(&priv->pdev->dev, "failed to get enet clock\n");
		err = PTR_ERR(priv->clk);
		goto err;
	}

	err = clk_prepare_enable(priv->clk);
	if (err)
		goto err;

	bcmgenet_set_hw_params(priv);

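	/* Request a 40-bit DMA mask only where the hardware supports it,
	 * falling back to a 32-bit mask otherwise.
	 */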
	err = -EIO;
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		goto err_clk_disable;

	/* MII wait queue */
	init_waitqueue_head(&priv->wq);
	/* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
	priv->rx_buf_len = RX_BUF_LENGTH;
	INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);

	priv->clk_wol = devm_clk_get_optional(&priv->pdev->dev, "enet-wol");
	if (IS_ERR(priv->clk_wol)) {
		dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n");
		err = PTR_ERR(priv->clk_wol);
		goto err_clk_disable;
	}

	priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee");
	if (IS_ERR(priv->clk_eee)) {
		dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n");
		err = PTR_ERR(priv->clk_eee);
		goto err_clk_disable;
	}

	/* If this is an internal GPHY, power it on now, before UniMAC is
	 * brought out of reset as absolutely no UniMAC activity is allowed
	 */
	if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL)
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

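	/* MAC address precedence: legacy platform_data first, then whatever
	 * DT/ACPI/nvmem provides via device_get_ethdev_address(), then (on
	 * ACPI systems) the address already programmed into the hardware,
	 * and finally a random address as the last resort.
	 */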
	if (pd && !IS_ERR_OR_NULL(pd->mac_address))
		eth_hw_addr_set(dev, pd->mac_address);
	else
		if (device_get_ethdev_address(&pdev->dev, dev))
			if (has_acpi_companion(&pdev->dev)) {
				u8 addr[ETH_ALEN];

				bcmgenet_get_hw_addr(priv, addr);
				eth_hw_addr_set(dev, addr);
			}

	if (!is_valid_ether_addr(dev->dev_addr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		eth_hw_addr_random(dev);
	}

	reset_umac(priv);

	err = bcmgenet_mii_init(dev);
	if (err)
		goto err_clk_disable;

	/* Set up the number of real queues + 1 (GENET_V1 has 0 hardware
	 * queues, just the ring 16 descriptor-based TX queue).
	 */
	netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
	netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);

	/* Set default coalescing parameters */
	for (i = 0; i < priv->hw_params->rx_queues; i++)
		priv->rx_rings[i].rx_max_coalesced_frames = 1;
	priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1;

	/* libphy will determine the link state */
	netif_carrier_off(dev);

	/* Turn off the main clock, WOL clock is handled separately */
	clk_disable_unprepare(priv->clk);

	err = register_netdev(dev);
	if (err) {
		bcmgenet_mii_exit(dev);
		goto err;
	}

	return err;

err_clk_disable:
	clk_disable_unprepare(priv->clk);
err:
	free_netdev(dev);
	return err;
}

static void bcmgenet_remove(struct platform_device *pdev)
{
	struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);

	dev_set_drvdata(&pdev->dev, NULL);
	unregister_netdev(priv->dev);
	bcmgenet_mii_exit(priv->dev);
	free_netdev(priv->dev);
}

static void bcmgenet_shutdown(struct platform_device *pdev)
{
	bcmgenet_remove(pdev);
}

#ifdef CONFIG_PM_SLEEP
static int bcmgenet_resume_noirq(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	/* Turn on the clock */
	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	if (device_may_wakeup(d) && priv->wolopts) {
		/* Account for Wake-on-LAN events and clear those events
		 * (Some devices need more time between enabling the clocks
		 *  and the interrupt register reflecting the wake event so
		 *  read the register twice)
		 */
		reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
		reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
		if (reg & UMAC_IRQ_WAKE_EVENT)
			pm_wakeup_event(&priv->pdev->dev, 0);
	}

	bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT, INTRL2_CPU_CLEAR);

	return 0;
}

static int bcmgenet_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rxnfc_rule *rule;
	unsigned long dma_ctrl;
	int ret;

	if (!netif_running(dev))
		return 0;

	/* From WOL-enabled suspend, switch to regular clock */
	if (device_may_wakeup(d) && priv->wolopts)
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);

	/* If this is an internal GPHY, power it back on now, before UniMAC is
	 * brought out of reset as absolutely no UniMAC activity is allowed
	 */
	if (priv->internal_phy)
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	bcmgenet_umac_reset(priv);

	init_umac(priv);

	phy_init_hw(dev->phydev);

	/* Speed settings must be restored */
	genphy_config_aneg(dev->phydev);
	bcmgenet_mii_config(priv->dev, false);

	/* Restore enabled features */
	bcmgenet_set_features(dev, dev->features);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	/* Restore hardware filters */
	bcmgenet_hfb_clear(priv);
	list_for_each_entry(rule, &priv->rxnfc_list, list)
		if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
			bcmgenet_hfb_create_rxnfc_filter(priv, rule);

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv, false);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto out_clk_disable;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	if (!device_may_wakeup(d))
		phy_resume(dev->phydev);

	bcmgenet_netif_start(dev);

	netif_device_attach(dev);

	return 0;

out_clk_disable:
	if (priv->internal_phy)
		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
	clk_disable_unprepare(priv->clk);
	return ret;
}

static int bcmgenet_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);

	bcmgenet_netif_stop(dev, true);

	if (!device_may_wakeup(d))
		phy_suspend(dev->phydev);

	/* Disable filtering */
	bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);

	return 0;
}

static int bcmgenet_suspend_noirq(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (!netif_running(dev))
		return 0;

	/* Prepare the device for Wake-on-LAN and switch to the slow clock */
	if (device_may_wakeup(d) && priv->wolopts)
		ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
	else if (priv->internal_phy)
		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);

	/* Let the framework handle resumption and leave the clocks on */
	if (ret)
		return ret;

	/* Turn off the clocks */
	clk_disable_unprepare(priv->clk);

	return 0;
}
#else
#define bcmgenet_suspend	NULL
#define bcmgenet_suspend_noirq	NULL
#define bcmgenet_resume		NULL
#define bcmgenet_resume_noirq	NULL
#endif /* CONFIG_PM_SLEEP */

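/* The noirq callbacks run after device interrupts have been disabled on
 * suspend and before they are re-enabled on resume, which is where the
 * clocks are gated and pending Wake-on-LAN events are collected.
 */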
static const struct dev_pm_ops bcmgenet_pm_ops = {
	.suspend	= bcmgenet_suspend,
	.suspend_noirq	= bcmgenet_suspend_noirq,
	.resume		= bcmgenet_resume,
	.resume_noirq	= bcmgenet_resume_noirq,
};

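/* "BCM6E4E" is the ACPI _HID advertised for the GENET block on BCM2711
 * systems booted via ACPI firmware (e.g. Raspberry Pi 4 UEFI).
 */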
static const struct acpi_device_id genet_acpi_match[] = {
	{ "BCM6E4E", (kernel_ulong_t)&bcm2711_plat_data },
	{ },
};
MODULE_DEVICE_TABLE(acpi, genet_acpi_match);

static struct platform_driver bcmgenet_driver = {
	.probe	= bcmgenet_probe,
	.remove_new = bcmgenet_remove,
	.shutdown = bcmgenet_shutdown,
	.driver	= {
		.name	= "bcmgenet",
		.of_match_table = bcmgenet_match,
		.pm	= &bcmgenet_pm_ops,
		.acpi_match_table = genet_acpi_match,
	},
};
module_platform_driver(bcmgenet_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
MODULE_ALIAS("platform:bcmgenet");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: mdio-bcm-unimac");