1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Keystone GBE and XGBE subsystem code
4 *
5 * Copyright (C) 2014 Texas Instruments Incorporated
6 * Authors:	Sandeep Nair <sandeep_n@ti.com>
7 *		Sandeep Paulraj <s-paulraj@ti.com>
8 *		Cyril Chemparathy <cyril@ti.com>
9 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
10 *		Wingman Kwok <w-kwok2@ti.com>
11 */
12
13#include <linux/io.h>
14#include <linux/module.h>
15#include <linux/of_mdio.h>
16#include <linux/of_net.h>
17#include <linux/of_address.h>
18#include <linux/if_vlan.h>
19#include <linux/ptp_classify.h>
20#include <linux/net_tstamp.h>
21#include <linux/ethtool.h>
22
23#include "cpsw.h"
24#include "cpsw_ale.h"
25#include "netcp.h"
26#include "cpts.h"
27
28#define NETCP_DRIVER_NAME		"TI KeyStone Ethernet Driver"
29#define NETCP_DRIVER_VERSION		"v1.0"
30
31#define GBE_IDENT(reg)			((reg >> 16) & 0xffff)
32#define GBE_MAJOR_VERSION(reg)		(reg >> 8 & 0x7)
33#define GBE_MINOR_VERSION(reg)		(reg & 0xff)
34#define GBE_RTL_VERSION(reg)		((reg >> 11) & 0x1f)
35
36/* 1G Ethernet SS defines */
37#define GBE_MODULE_NAME			"netcp-gbe"
38#define GBE_SS_VERSION_14		0x4ed2
39
40#define GBE_SS_REG_INDEX		0
41#define GBE_SGMII34_REG_INDEX		1
42#define GBE_SM_REG_INDEX		2
43/* offset relative to base of GBE_SS_REG_INDEX */
44#define GBE13_SGMII_MODULE_OFFSET	0x100
45/* offset relative to base of GBE_SM_REG_INDEX */
46#define GBE13_HOST_PORT_OFFSET		0x34
47#define GBE13_SLAVE_PORT_OFFSET		0x60
48#define GBE13_EMAC_OFFSET		0x100
49#define GBE13_SLAVE_PORT2_OFFSET	0x200
50#define GBE13_HW_STATS_OFFSET		0x300
51#define GBE13_CPTS_OFFSET		0x500
52#define GBE13_ALE_OFFSET		0x600
53#define GBE13_HOST_PORT_NUM		0
54
55/* 1G Ethernet NU SS defines */
56#define GBENU_MODULE_NAME		"netcp-gbenu"
57#define GBE_SS_ID_NU			0x4ee6
58#define GBE_SS_ID_2U			0x4ee8
59
60#define IS_SS_ID_MU(d) \
61	((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
62	 (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))
63
64#define IS_SS_ID_NU(d) \
65	(GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)
66
67#define IS_SS_ID_VER_14(d) \
68	(GBE_IDENT((d)->ss_version) == GBE_SS_VERSION_14)
69#define IS_SS_ID_2U(d) \
70	(GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U)
71
72#define GBENU_SS_REG_INDEX		0
73#define GBENU_SM_REG_INDEX		1
74#define GBENU_SGMII_MODULE_OFFSET	0x100
75#define GBENU_HOST_PORT_OFFSET		0x1000
76#define GBENU_SLAVE_PORT_OFFSET		0x2000
77#define GBENU_EMAC_OFFSET		0x2330
78#define GBENU_HW_STATS_OFFSET		0x1a000
79#define GBENU_CPTS_OFFSET		0x1d000
80#define GBENU_ALE_OFFSET		0x1e000
81#define GBENU_HOST_PORT_NUM		0
82#define GBENU_SGMII_MODULE_SIZE		0x100
83
84/* 10G Ethernet SS defines */
85#define XGBE_MODULE_NAME		"netcp-xgbe"
86#define XGBE_SS_VERSION_10		0x4ee4
87
88#define XGBE_SS_REG_INDEX		0
89#define XGBE_SM_REG_INDEX		1
90#define XGBE_SERDES_REG_INDEX		2
91
92/* offset relative to base of XGBE_SS_REG_INDEX */
93#define XGBE10_SGMII_MODULE_OFFSET	0x100
94#define IS_SS_ID_XGBE(d)		((d)->ss_version == XGBE_SS_VERSION_10)
95/* offset relative to base of XGBE_SM_REG_INDEX */
96#define XGBE10_HOST_PORT_OFFSET		0x34
97#define XGBE10_SLAVE_PORT_OFFSET	0x64
98#define XGBE10_EMAC_OFFSET		0x400
99#define XGBE10_CPTS_OFFSET		0x600
100#define XGBE10_ALE_OFFSET		0x700
101#define XGBE10_HW_STATS_OFFSET		0x800
102#define XGBE10_HOST_PORT_NUM		0
103
104#define	GBE_TIMER_INTERVAL			(HZ / 2)
105
106/* Soft reset register values */
107#define SOFT_RESET_MASK				BIT(0)
108#define SOFT_RESET				BIT(0)
109#define DEVICE_EMACSL_RESET_POLL_COUNT		100
110#define GMACSL_RET_WARN_RESET_INCOMPLETE	-2
111
112#define MACSL_RX_ENABLE_CSF			BIT(23)
113#define MACSL_ENABLE_EXT_CTL			BIT(18)
114#define MACSL_XGMII_ENABLE			BIT(13)
115#define MACSL_XGIG_MODE				BIT(8)
116#define MACSL_GIG_MODE				BIT(7)
117#define MACSL_GMII_ENABLE			BIT(5)
118#define MACSL_FULLDUPLEX			BIT(0)
119
120#define GBE_CTL_P0_ENABLE			BIT(2)
121#define ETH_SW_CTL_P0_TX_CRC_REMOVE		BIT(13)
122#define GBE13_REG_VAL_STAT_ENABLE_ALL		0xff
123#define XGBE_REG_VAL_STAT_ENABLE_ALL		0xf
124#define GBE_STATS_CD_SEL			BIT(28)
125
126#define GBE_PORT_MASK(x)			(BIT(x) - 1)
127#define GBE_MASK_NO_PORTS			0
128
129#define GBE_DEF_1G_MAC_CONTROL					\
130		(MACSL_GIG_MODE | MACSL_GMII_ENABLE |		\
131		 MACSL_ENABLE_EXT_CTL |	MACSL_RX_ENABLE_CSF)
132
133#define GBE_DEF_10G_MAC_CONTROL				\
134		(MACSL_XGIG_MODE | MACSL_XGMII_ENABLE |		\
135		 MACSL_ENABLE_EXT_CTL |	MACSL_RX_ENABLE_CSF)
136
137#define GBE_STATSA_MODULE			0
138#define GBE_STATSB_MODULE			1
139#define GBE_STATSC_MODULE			2
140#define GBE_STATSD_MODULE			3
141
142#define GBENU_STATS0_MODULE			0
143#define GBENU_STATS1_MODULE			1
144#define GBENU_STATS2_MODULE			2
145#define GBENU_STATS3_MODULE			3
146#define GBENU_STATS4_MODULE			4
147#define GBENU_STATS5_MODULE			5
148#define GBENU_STATS6_MODULE			6
149#define GBENU_STATS7_MODULE			7
150#define GBENU_STATS8_MODULE			8
151
152#define XGBE_STATS0_MODULE			0
153#define XGBE_STATS1_MODULE			1
154#define XGBE_STATS2_MODULE			2
155
156/* s: 0-based slave_port */
157#define SGMII_BASE(d, s) \
158	(((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)
159
160#define GBE_TX_QUEUE				648
161#define	GBE_TXHOOK_ORDER			0
162#define	GBE_RXHOOK_ORDER			0
163#define GBE_DEFAULT_ALE_AGEOUT			30
164#define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
165#define SLAVE_LINK_IS_RGMII(s) \
166	(((s)->link_interface >= RGMII_LINK_MAC_PHY) && \
167	 ((s)->link_interface <= RGMII_LINK_MAC_PHY_NO_MDIO))
168#define SLAVE_LINK_IS_SGMII(s) \
169	((s)->link_interface <= SGMII_LINK_MAC_PHY_NO_MDIO)
170#define NETCP_LINK_STATE_INVALID		-1
171
172#define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
173		offsetof(struct gbe##_##rb, rn)
174#define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
175		offsetof(struct gbenu##_##rb, rn)
176#define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
177		offsetof(struct xgbe##_##rb, rn)
178#define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
179
180#define HOST_TX_PRI_MAP_DEFAULT			0x00000000
181
182#if IS_ENABLED(CONFIG_TI_CPTS)
183/* Px_TS_CTL register fields */
184#define TS_RX_ANX_F_EN				BIT(0)
185#define TS_RX_VLAN_LT1_EN			BIT(1)
186#define TS_RX_VLAN_LT2_EN			BIT(2)
187#define TS_RX_ANX_D_EN				BIT(3)
188#define TS_TX_ANX_F_EN				BIT(4)
189#define TS_TX_VLAN_LT1_EN			BIT(5)
190#define TS_TX_VLAN_LT2_EN			BIT(6)
191#define TS_TX_ANX_D_EN				BIT(7)
192#define TS_LT2_EN				BIT(8)
193#define TS_RX_ANX_E_EN				BIT(9)
194#define TS_TX_ANX_E_EN				BIT(10)
195#define TS_MSG_TYPE_EN_SHIFT			16
196#define TS_MSG_TYPE_EN_MASK			0xffff
197
198/* Px_TS_SEQ_LTYPE register fields */
199#define TS_SEQ_ID_OFS_SHIFT			16
200#define TS_SEQ_ID_OFS_MASK			0x3f
201
202/* Px_TS_CTL_LTYPE2 register fields */
203#define TS_107					BIT(16)
204#define TS_129					BIT(17)
205#define TS_130					BIT(18)
206#define TS_131					BIT(19)
207#define TS_132					BIT(20)
208#define TS_319					BIT(21)
209#define TS_320					BIT(22)
210#define TS_TTL_NONZERO				BIT(23)
211#define TS_UNI_EN				BIT(24)
212#define TS_UNI_EN_SHIFT				24
213
214#define TS_TX_ANX_ALL_EN	 \
215	(TS_TX_ANX_D_EN	| TS_TX_ANX_E_EN | TS_TX_ANX_F_EN)
216
217#define TS_RX_ANX_ALL_EN	 \
218	(TS_RX_ANX_D_EN	| TS_RX_ANX_E_EN | TS_RX_ANX_F_EN)
219
220#define TS_CTL_DST_PORT				TS_319
221#define TS_CTL_DST_PORT_SHIFT			21
222
223#define TS_CTL_MADDR_ALL	\
224	(TS_107 | TS_129 | TS_130 | TS_131 | TS_132)
225
226#define TS_CTL_MADDR_SHIFT			16
227
228/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
229#define EVENT_MSG_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
230#endif /* CONFIG_TI_CPTS */
231
232struct xgbe_ss_regs {
233	u32	id_ver;
234	u32	synce_count;
235	u32	synce_mux;
236	u32	control;
237};
238
239struct xgbe_switch_regs {
240	u32	id_ver;
241	u32	control;
242	u32	emcontrol;
243	u32	stat_port_en;
244	u32	ptype;
245	u32	soft_idle;
246	u32	thru_rate;
247	u32	gap_thresh;
248	u32	tx_start_wds;
249	u32	flow_control;
250	u32	cppi_thresh;
251};
252
253struct xgbe_port_regs {
254	u32	blk_cnt;
255	u32	port_vlan;
256	u32	tx_pri_map;
257	u32	sa_lo;
258	u32	sa_hi;
259	u32	ts_ctl;
260	u32	ts_seq_ltype;
261	u32	ts_vlan;
262	u32	ts_ctl_ltype2;
263	u32	ts_ctl2;
264	u32	control;
265};
266
267struct xgbe_host_port_regs {
268	u32	blk_cnt;
269	u32	port_vlan;
270	u32	tx_pri_map;
271	u32	src_id;
272	u32	rx_pri_map;
273	u32	rx_maxlen;
274};
275
276struct xgbe_emac_regs {
277	u32	id_ver;
278	u32	mac_control;
279	u32	mac_status;
280	u32	soft_reset;
281	u32	rx_maxlen;
282	u32	__reserved_0;
283	u32	rx_pause;
284	u32	tx_pause;
285	u32	em_control;
286	u32	__reserved_1;
287	u32	tx_gap;
288	u32	rsvd[4];
289};
290
291struct xgbe_host_hw_stats {
292	u32	rx_good_frames;
293	u32	rx_broadcast_frames;
294	u32	rx_multicast_frames;
295	u32	__rsvd_0[3];
296	u32	rx_oversized_frames;
297	u32	__rsvd_1;
298	u32	rx_undersized_frames;
299	u32	__rsvd_2;
300	u32	overrun_type4;
301	u32	overrun_type5;
302	u32	rx_bytes;
303	u32	tx_good_frames;
304	u32	tx_broadcast_frames;
305	u32	tx_multicast_frames;
306	u32	__rsvd_3[9];
307	u32	tx_bytes;
308	u32	tx_64byte_frames;
309	u32	tx_65_to_127byte_frames;
310	u32	tx_128_to_255byte_frames;
311	u32	tx_256_to_511byte_frames;
312	u32	tx_512_to_1023byte_frames;
313	u32	tx_1024byte_frames;
314	u32	net_bytes;
315	u32	rx_sof_overruns;
316	u32	rx_mof_overruns;
317	u32	rx_dma_overruns;
318};
319
320struct xgbe_hw_stats {
321	u32	rx_good_frames;
322	u32	rx_broadcast_frames;
323	u32	rx_multicast_frames;
324	u32	rx_pause_frames;
325	u32	rx_crc_errors;
326	u32	rx_align_code_errors;
327	u32	rx_oversized_frames;
328	u32	rx_jabber_frames;
329	u32	rx_undersized_frames;
330	u32	rx_fragments;
331	u32	overrun_type4;
332	u32	overrun_type5;
333	u32	rx_bytes;
334	u32	tx_good_frames;
335	u32	tx_broadcast_frames;
336	u32	tx_multicast_frames;
337	u32	tx_pause_frames;
338	u32	tx_deferred_frames;
339	u32	tx_collision_frames;
340	u32	tx_single_coll_frames;
341	u32	tx_mult_coll_frames;
342	u32	tx_excessive_collisions;
343	u32	tx_late_collisions;
344	u32	tx_underrun;
345	u32	tx_carrier_sense_errors;
346	u32	tx_bytes;
347	u32	tx_64byte_frames;
348	u32	tx_65_to_127byte_frames;
349	u32	tx_128_to_255byte_frames;
350	u32	tx_256_to_511byte_frames;
351	u32	tx_512_to_1023byte_frames;
352	u32	tx_1024byte_frames;
353	u32	net_bytes;
354	u32	rx_sof_overruns;
355	u32	rx_mof_overruns;
356	u32	rx_dma_overruns;
357};
358
359struct gbenu_ss_regs {
360	u32	id_ver;
361	u32	synce_count;		/* NU */
362	u32	synce_mux;		/* NU */
363	u32	control;		/* 2U */
364	u32	__rsvd_0[2];		/* 2U */
365	u32	rgmii_status;		/* 2U */
366	u32	ss_status;		/* 2U */
367};
368
369struct gbenu_switch_regs {
370	u32	id_ver;
371	u32	control;
372	u32	__rsvd_0[2];
373	u32	emcontrol;
374	u32	stat_port_en;
375	u32	ptype;			/* NU */
376	u32	soft_idle;
377	u32	thru_rate;		/* NU */
378	u32	gap_thresh;		/* NU */
379	u32	tx_start_wds;		/* NU */
380	u32	eee_prescale;		/* 2U */
381	u32	tx_g_oflow_thresh_set;	/* NU */
382	u32	tx_g_oflow_thresh_clr;	/* NU */
383	u32	tx_g_buf_thresh_set_l;	/* NU */
384	u32	tx_g_buf_thresh_set_h;	/* NU */
385	u32	tx_g_buf_thresh_clr_l;	/* NU */
386	u32	tx_g_buf_thresh_clr_h;	/* NU */
387};
388
389struct gbenu_port_regs {
390	u32	__rsvd_0;
391	u32	control;
392	u32	max_blks;		/* 2U */
393	u32	mem_align1;
394	u32	blk_cnt;
395	u32	port_vlan;
396	u32	tx_pri_map;		/* NU */
397	u32	pri_ctl;		/* 2U */
398	u32	rx_pri_map;
399	u32	rx_maxlen;
400	u32	tx_blks_pri;		/* NU */
401	u32	__rsvd_1;
402	u32	idle2lpi;		/* 2U */
403	u32	lpi2idle;		/* 2U */
404	u32	eee_status;		/* 2U */
405	u32	__rsvd_2;
406	u32	__rsvd_3[176];		/* NU: more to add */
407	u32	__rsvd_4[2];
408	u32	sa_lo;
409	u32	sa_hi;
410	u32	ts_ctl;
411	u32	ts_seq_ltype;
412	u32	ts_vlan;
413	u32	ts_ctl_ltype2;
414	u32	ts_ctl2;
415};
416
417struct gbenu_host_port_regs {
418	u32	__rsvd_0;
419	u32	control;
420	u32	flow_id_offset;		/* 2U */
421	u32	__rsvd_1;
422	u32	blk_cnt;
423	u32	port_vlan;
424	u32	tx_pri_map;		/* NU */
425	u32	pri_ctl;
426	u32	rx_pri_map;
427	u32	rx_maxlen;
428	u32	tx_blks_pri;		/* NU */
429	u32	__rsvd_2;
430	u32	idle2lpi;		/* 2U */
431	u32	lpi2wake;		/* 2U */
432	u32	eee_status;		/* 2U */
433	u32	__rsvd_3;
434	u32	__rsvd_4[184];		/* NU */
435	u32	host_blks_pri;		/* NU */
436};
437
438struct gbenu_emac_regs {
439	u32	mac_control;
440	u32	mac_status;
441	u32	soft_reset;
442	u32	boff_test;
443	u32	rx_pause;
444	u32	__rsvd_0[11];		/* NU */
445	u32	tx_pause;
446	u32	__rsvd_1[11];		/* NU */
447	u32	em_control;
448	u32	tx_gap;
449};
450
451/* Some hw stat regs are applicable to slave port only.
452 * This is handled by gbenu_et_stats struct.  Also some
453 * are for SS version NU and some are for 2U.
454 */
455struct gbenu_hw_stats {
456	u32	rx_good_frames;
457	u32	rx_broadcast_frames;
458	u32	rx_multicast_frames;
459	u32	rx_pause_frames;		/* slave */
460	u32	rx_crc_errors;
461	u32	rx_align_code_errors;		/* slave */
462	u32	rx_oversized_frames;
463	u32	rx_jabber_frames;		/* slave */
464	u32	rx_undersized_frames;
465	u32	rx_fragments;			/* slave */
466	u32	ale_drop;
467	u32	ale_overrun_drop;
468	u32	rx_bytes;
469	u32	tx_good_frames;
470	u32	tx_broadcast_frames;
471	u32	tx_multicast_frames;
472	u32	tx_pause_frames;		/* slave */
473	u32	tx_deferred_frames;		/* slave */
474	u32	tx_collision_frames;		/* slave */
475	u32	tx_single_coll_frames;		/* slave */
476	u32	tx_mult_coll_frames;		/* slave */
477	u32	tx_excessive_collisions;	/* slave */
478	u32	tx_late_collisions;		/* slave */
479	u32	rx_ipg_error;			/* slave 10G only */
480	u32	tx_carrier_sense_errors;	/* slave */
481	u32	tx_bytes;
482	u32	tx_64B_frames;
483	u32	tx_65_to_127B_frames;
484	u32	tx_128_to_255B_frames;
485	u32	tx_256_to_511B_frames;
486	u32	tx_512_to_1023B_frames;
487	u32	tx_1024B_frames;
488	u32	net_bytes;
489	u32	rx_bottom_fifo_drop;
490	u32	rx_port_mask_drop;
491	u32	rx_top_fifo_drop;
492	u32	ale_rate_limit_drop;
493	u32	ale_vid_ingress_drop;
494	u32	ale_da_eq_sa_drop;
495	u32	__rsvd_0[3];
496	u32	ale_unknown_ucast;
497	u32	ale_unknown_ucast_bytes;
498	u32	ale_unknown_mcast;
499	u32	ale_unknown_mcast_bytes;
500	u32	ale_unknown_bcast;
501	u32	ale_unknown_bcast_bytes;
502	u32	ale_pol_match;
503	u32	ale_pol_match_red;		/* NU */
504	u32	ale_pol_match_yellow;		/* NU */
505	u32	__rsvd_1[44];
506	u32	tx_mem_protect_err;
507	/* following NU only */
508	u32	tx_pri0;
509	u32	tx_pri1;
510	u32	tx_pri2;
511	u32	tx_pri3;
512	u32	tx_pri4;
513	u32	tx_pri5;
514	u32	tx_pri6;
515	u32	tx_pri7;
516	u32	tx_pri0_bcnt;
517	u32	tx_pri1_bcnt;
518	u32	tx_pri2_bcnt;
519	u32	tx_pri3_bcnt;
520	u32	tx_pri4_bcnt;
521	u32	tx_pri5_bcnt;
522	u32	tx_pri6_bcnt;
523	u32	tx_pri7_bcnt;
524	u32	tx_pri0_drop;
525	u32	tx_pri1_drop;
526	u32	tx_pri2_drop;
527	u32	tx_pri3_drop;
528	u32	tx_pri4_drop;
529	u32	tx_pri5_drop;
530	u32	tx_pri6_drop;
531	u32	tx_pri7_drop;
532	u32	tx_pri0_drop_bcnt;
533	u32	tx_pri1_drop_bcnt;
534	u32	tx_pri2_drop_bcnt;
535	u32	tx_pri3_drop_bcnt;
536	u32	tx_pri4_drop_bcnt;
537	u32	tx_pri5_drop_bcnt;
538	u32	tx_pri6_drop_bcnt;
539	u32	tx_pri7_drop_bcnt;
540};
541
542#define GBENU_HW_STATS_REG_MAP_SZ	0x200
543
544struct gbe_ss_regs {
545	u32	id_ver;
546	u32	synce_count;
547	u32	synce_mux;
548};
549
550struct gbe_ss_regs_ofs {
551	u16	id_ver;
552	u16	control;
553	u16	rgmii_status; /* 2U */
554};
555
556struct gbe_switch_regs {
557	u32	id_ver;
558	u32	control;
559	u32	soft_reset;
560	u32	stat_port_en;
561	u32	ptype;
562	u32	soft_idle;
563	u32	thru_rate;
564	u32	gap_thresh;
565	u32	tx_start_wds;
566	u32	flow_control;
567};
568
569struct gbe_switch_regs_ofs {
570	u16	id_ver;
571	u16	control;
572	u16	soft_reset;
573	u16	emcontrol;
574	u16	stat_port_en;
575	u16	ptype;
576	u16	flow_control;
577};
578
579struct gbe_port_regs {
580	u32	max_blks;
581	u32	blk_cnt;
582	u32	port_vlan;
583	u32	tx_pri_map;
584	u32	sa_lo;
585	u32	sa_hi;
586	u32	ts_ctl;
587	u32	ts_seq_ltype;
588	u32	ts_vlan;
589	u32	ts_ctl_ltype2;
590	u32	ts_ctl2;
591};
592
593struct gbe_port_regs_ofs {
594	u16	port_vlan;
595	u16	tx_pri_map;
596	u16     rx_pri_map;
597	u16	sa_lo;
598	u16	sa_hi;
599	u16	ts_ctl;
600	u16	ts_seq_ltype;
601	u16	ts_vlan;
602	u16	ts_ctl_ltype2;
603	u16	ts_ctl2;
604	u16	rx_maxlen;	/* 2U, NU */
605};
606
607struct gbe_host_port_regs {
608	u32	src_id;
609	u32	port_vlan;
610	u32	rx_pri_map;
611	u32	rx_maxlen;
612};
613
614struct gbe_host_port_regs_ofs {
615	u16	port_vlan;
616	u16	tx_pri_map;
617	u16	rx_maxlen;
618};
619
620struct gbe_emac_regs {
621	u32	id_ver;
622	u32	mac_control;
623	u32	mac_status;
624	u32	soft_reset;
625	u32	rx_maxlen;
626	u32	__reserved_0;
627	u32	rx_pause;
628	u32	tx_pause;
629	u32	__reserved_1;
630	u32	rx_pri_map;
631	u32	rsvd[6];
632};
633
634struct gbe_emac_regs_ofs {
635	u16	mac_control;
636	u16	soft_reset;
637	u16	rx_maxlen;
638};
639
640struct gbe_hw_stats {
641	u32	rx_good_frames;
642	u32	rx_broadcast_frames;
643	u32	rx_multicast_frames;
644	u32	rx_pause_frames;
645	u32	rx_crc_errors;
646	u32	rx_align_code_errors;
647	u32	rx_oversized_frames;
648	u32	rx_jabber_frames;
649	u32	rx_undersized_frames;
650	u32	rx_fragments;
651	u32	__pad_0[2];
652	u32	rx_bytes;
653	u32	tx_good_frames;
654	u32	tx_broadcast_frames;
655	u32	tx_multicast_frames;
656	u32	tx_pause_frames;
657	u32	tx_deferred_frames;
658	u32	tx_collision_frames;
659	u32	tx_single_coll_frames;
660	u32	tx_mult_coll_frames;
661	u32	tx_excessive_collisions;
662	u32	tx_late_collisions;
663	u32	tx_underrun;
664	u32	tx_carrier_sense_errors;
665	u32	tx_bytes;
666	u32	tx_64byte_frames;
667	u32	tx_65_to_127byte_frames;
668	u32	tx_128_to_255byte_frames;
669	u32	tx_256_to_511byte_frames;
670	u32	tx_512_to_1023byte_frames;
671	u32	tx_1024byte_frames;
672	u32	net_bytes;
673	u32	rx_sof_overruns;
674	u32	rx_mof_overruns;
675	u32	rx_dma_overruns;
676};
677
678#define GBE_MAX_HW_STAT_MODS			9
679#define GBE_HW_STATS_REG_MAP_SZ			0x100
680
681struct ts_ctl {
682	int     uni;
683	u8      dst_port_map;
684	u8      maddr_map;
685	u8      ts_mcast_type;
686};
687
688struct gbe_slave {
689	void __iomem			*port_regs;
690	void __iomem			*emac_regs;
691	struct gbe_port_regs_ofs	port_regs_ofs;
692	struct gbe_emac_regs_ofs	emac_regs_ofs;
693	int				slave_num; /* 0 based logical number */
694	int				port_num;  /* actual port number */
695	atomic_t			link_state;
696	bool				open;
697	struct phy_device		*phy;
698	u32				link_interface;
699	u32				mac_control;
700	u8				phy_port_t;
701	struct device_node		*node;
702	struct device_node		*phy_node;
703	struct ts_ctl                   ts_ctl;
704	struct list_head		slave_list;
705};
706
707struct gbe_priv {
708	struct device			*dev;
709	struct netcp_device		*netcp_device;
710	struct timer_list		timer;
711	u32				num_slaves;
712	u32				ale_ports;
713	bool				enable_ale;
714	u8				max_num_slaves;
715	u8				max_num_ports; /* max_num_slaves + 1 */
716	u8				num_stats_mods;
717	struct netcp_tx_pipe		tx_pipe;
718
719	int				host_port;
720	u32				rx_packet_max;
721	u32				ss_version;
722	u32				stats_en_mask;
723
724	void __iomem			*ss_regs;
725	void __iomem			*switch_regs;
726	void __iomem			*host_port_regs;
727	void __iomem			*ale_reg;
728	void __iomem                    *cpts_reg;
729	void __iomem			*sgmii_port_regs;
730	void __iomem			*sgmii_port34_regs;
731	void __iomem			*xgbe_serdes_regs;
732	void __iomem			*hw_stats_regs[GBE_MAX_HW_STAT_MODS];
733
734	struct gbe_ss_regs_ofs		ss_regs_ofs;
735	struct gbe_switch_regs_ofs	switch_regs_ofs;
736	struct gbe_host_port_regs_ofs	host_port_regs_ofs;
737
738	struct cpsw_ale			*ale;
739	unsigned int			tx_queue_id;
740	const char			*dma_chan_name;
741
742	struct list_head		gbe_intf_head;
743	struct list_head		secondary_slaves;
744	struct net_device		*dummy_ndev;
745
746	u64				*hw_stats;
747	u32				*hw_stats_prev;
748	const struct netcp_ethtool_stat *et_stats;
749	int				num_et_stats;
750	/*  Lock for updating the hwstats */
751	spinlock_t			hw_stats_lock;
752
753	int                             cpts_registered;
754	struct cpts                     *cpts;
755	int				rx_ts_enabled;
756	int				tx_ts_enabled;
757};
758
759struct gbe_intf {
760	struct net_device	*ndev;
761	struct device		*dev;
762	struct gbe_priv		*gbe_dev;
763	struct netcp_tx_pipe	tx_pipe;
764	struct gbe_slave	*slave;
765	struct list_head	gbe_intf_list;
766	unsigned long		active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
767};
768
769static struct netcp_module gbe_module;
770static struct netcp_module xgbe_module;
771
772/* Statistic management */
773struct netcp_ethtool_stat {
774	char desc[ETH_GSTRING_LEN];
775	int type;
776	u32 size;
777	int offset;
778};
779
780#define GBE_STATSA_INFO(field)						\
781{									\
782	"GBE_A:"#field, GBE_STATSA_MODULE,				\
783	sizeof_field(struct gbe_hw_stats, field),			\
784	offsetof(struct gbe_hw_stats, field)				\
785}
786
787#define GBE_STATSB_INFO(field)						\
788{									\
789	"GBE_B:"#field, GBE_STATSB_MODULE,				\
790	sizeof_field(struct gbe_hw_stats, field),			\
791	offsetof(struct gbe_hw_stats, field)				\
792}
793
794#define GBE_STATSC_INFO(field)						\
795{									\
796	"GBE_C:"#field, GBE_STATSC_MODULE,				\
797	sizeof_field(struct gbe_hw_stats, field),			\
798	offsetof(struct gbe_hw_stats, field)				\
799}
800
801#define GBE_STATSD_INFO(field)						\
802{									\
803	"GBE_D:"#field, GBE_STATSD_MODULE,				\
804	sizeof_field(struct gbe_hw_stats, field),			\
805	offsetof(struct gbe_hw_stats, field)				\
806}
807
808static const struct netcp_ethtool_stat gbe13_et_stats[] = {
809	/* GBE module A */
810	GBE_STATSA_INFO(rx_good_frames),
811	GBE_STATSA_INFO(rx_broadcast_frames),
812	GBE_STATSA_INFO(rx_multicast_frames),
813	GBE_STATSA_INFO(rx_pause_frames),
814	GBE_STATSA_INFO(rx_crc_errors),
815	GBE_STATSA_INFO(rx_align_code_errors),
816	GBE_STATSA_INFO(rx_oversized_frames),
817	GBE_STATSA_INFO(rx_jabber_frames),
818	GBE_STATSA_INFO(rx_undersized_frames),
819	GBE_STATSA_INFO(rx_fragments),
820	GBE_STATSA_INFO(rx_bytes),
821	GBE_STATSA_INFO(tx_good_frames),
822	GBE_STATSA_INFO(tx_broadcast_frames),
823	GBE_STATSA_INFO(tx_multicast_frames),
824	GBE_STATSA_INFO(tx_pause_frames),
825	GBE_STATSA_INFO(tx_deferred_frames),
826	GBE_STATSA_INFO(tx_collision_frames),
827	GBE_STATSA_INFO(tx_single_coll_frames),
828	GBE_STATSA_INFO(tx_mult_coll_frames),
829	GBE_STATSA_INFO(tx_excessive_collisions),
830	GBE_STATSA_INFO(tx_late_collisions),
831	GBE_STATSA_INFO(tx_underrun),
832	GBE_STATSA_INFO(tx_carrier_sense_errors),
833	GBE_STATSA_INFO(tx_bytes),
834	GBE_STATSA_INFO(tx_64byte_frames),
835	GBE_STATSA_INFO(tx_65_to_127byte_frames),
836	GBE_STATSA_INFO(tx_128_to_255byte_frames),
837	GBE_STATSA_INFO(tx_256_to_511byte_frames),
838	GBE_STATSA_INFO(tx_512_to_1023byte_frames),
839	GBE_STATSA_INFO(tx_1024byte_frames),
840	GBE_STATSA_INFO(net_bytes),
841	GBE_STATSA_INFO(rx_sof_overruns),
842	GBE_STATSA_INFO(rx_mof_overruns),
843	GBE_STATSA_INFO(rx_dma_overruns),
844	/* GBE module B */
845	GBE_STATSB_INFO(rx_good_frames),
846	GBE_STATSB_INFO(rx_broadcast_frames),
847	GBE_STATSB_INFO(rx_multicast_frames),
848	GBE_STATSB_INFO(rx_pause_frames),
849	GBE_STATSB_INFO(rx_crc_errors),
850	GBE_STATSB_INFO(rx_align_code_errors),
851	GBE_STATSB_INFO(rx_oversized_frames),
852	GBE_STATSB_INFO(rx_jabber_frames),
853	GBE_STATSB_INFO(rx_undersized_frames),
854	GBE_STATSB_INFO(rx_fragments),
855	GBE_STATSB_INFO(rx_bytes),
856	GBE_STATSB_INFO(tx_good_frames),
857	GBE_STATSB_INFO(tx_broadcast_frames),
858	GBE_STATSB_INFO(tx_multicast_frames),
859	GBE_STATSB_INFO(tx_pause_frames),
860	GBE_STATSB_INFO(tx_deferred_frames),
861	GBE_STATSB_INFO(tx_collision_frames),
862	GBE_STATSB_INFO(tx_single_coll_frames),
863	GBE_STATSB_INFO(tx_mult_coll_frames),
864	GBE_STATSB_INFO(tx_excessive_collisions),
865	GBE_STATSB_INFO(tx_late_collisions),
866	GBE_STATSB_INFO(tx_underrun),
867	GBE_STATSB_INFO(tx_carrier_sense_errors),
868	GBE_STATSB_INFO(tx_bytes),
869	GBE_STATSB_INFO(tx_64byte_frames),
870	GBE_STATSB_INFO(tx_65_to_127byte_frames),
871	GBE_STATSB_INFO(tx_128_to_255byte_frames),
872	GBE_STATSB_INFO(tx_256_to_511byte_frames),
873	GBE_STATSB_INFO(tx_512_to_1023byte_frames),
874	GBE_STATSB_INFO(tx_1024byte_frames),
875	GBE_STATSB_INFO(net_bytes),
876	GBE_STATSB_INFO(rx_sof_overruns),
877	GBE_STATSB_INFO(rx_mof_overruns),
878	GBE_STATSB_INFO(rx_dma_overruns),
879	/* GBE module C */
880	GBE_STATSC_INFO(rx_good_frames),
881	GBE_STATSC_INFO(rx_broadcast_frames),
882	GBE_STATSC_INFO(rx_multicast_frames),
883	GBE_STATSC_INFO(rx_pause_frames),
884	GBE_STATSC_INFO(rx_crc_errors),
885	GBE_STATSC_INFO(rx_align_code_errors),
886	GBE_STATSC_INFO(rx_oversized_frames),
887	GBE_STATSC_INFO(rx_jabber_frames),
888	GBE_STATSC_INFO(rx_undersized_frames),
889	GBE_STATSC_INFO(rx_fragments),
890	GBE_STATSC_INFO(rx_bytes),
891	GBE_STATSC_INFO(tx_good_frames),
892	GBE_STATSC_INFO(tx_broadcast_frames),
893	GBE_STATSC_INFO(tx_multicast_frames),
894	GBE_STATSC_INFO(tx_pause_frames),
895	GBE_STATSC_INFO(tx_deferred_frames),
896	GBE_STATSC_INFO(tx_collision_frames),
897	GBE_STATSC_INFO(tx_single_coll_frames),
898	GBE_STATSC_INFO(tx_mult_coll_frames),
899	GBE_STATSC_INFO(tx_excessive_collisions),
900	GBE_STATSC_INFO(tx_late_collisions),
901	GBE_STATSC_INFO(tx_underrun),
902	GBE_STATSC_INFO(tx_carrier_sense_errors),
903	GBE_STATSC_INFO(tx_bytes),
904	GBE_STATSC_INFO(tx_64byte_frames),
905	GBE_STATSC_INFO(tx_65_to_127byte_frames),
906	GBE_STATSC_INFO(tx_128_to_255byte_frames),
907	GBE_STATSC_INFO(tx_256_to_511byte_frames),
908	GBE_STATSC_INFO(tx_512_to_1023byte_frames),
909	GBE_STATSC_INFO(tx_1024byte_frames),
910	GBE_STATSC_INFO(net_bytes),
911	GBE_STATSC_INFO(rx_sof_overruns),
912	GBE_STATSC_INFO(rx_mof_overruns),
913	GBE_STATSC_INFO(rx_dma_overruns),
914	/* GBE module D */
915	GBE_STATSD_INFO(rx_good_frames),
916	GBE_STATSD_INFO(rx_broadcast_frames),
917	GBE_STATSD_INFO(rx_multicast_frames),
918	GBE_STATSD_INFO(rx_pause_frames),
919	GBE_STATSD_INFO(rx_crc_errors),
920	GBE_STATSD_INFO(rx_align_code_errors),
921	GBE_STATSD_INFO(rx_oversized_frames),
922	GBE_STATSD_INFO(rx_jabber_frames),
923	GBE_STATSD_INFO(rx_undersized_frames),
924	GBE_STATSD_INFO(rx_fragments),
925	GBE_STATSD_INFO(rx_bytes),
926	GBE_STATSD_INFO(tx_good_frames),
927	GBE_STATSD_INFO(tx_broadcast_frames),
928	GBE_STATSD_INFO(tx_multicast_frames),
929	GBE_STATSD_INFO(tx_pause_frames),
930	GBE_STATSD_INFO(tx_deferred_frames),
931	GBE_STATSD_INFO(tx_collision_frames),
932	GBE_STATSD_INFO(tx_single_coll_frames),
933	GBE_STATSD_INFO(tx_mult_coll_frames),
934	GBE_STATSD_INFO(tx_excessive_collisions),
935	GBE_STATSD_INFO(tx_late_collisions),
936	GBE_STATSD_INFO(tx_underrun),
937	GBE_STATSD_INFO(tx_carrier_sense_errors),
938	GBE_STATSD_INFO(tx_bytes),
939	GBE_STATSD_INFO(tx_64byte_frames),
940	GBE_STATSD_INFO(tx_65_to_127byte_frames),
941	GBE_STATSD_INFO(tx_128_to_255byte_frames),
942	GBE_STATSD_INFO(tx_256_to_511byte_frames),
943	GBE_STATSD_INFO(tx_512_to_1023byte_frames),
944	GBE_STATSD_INFO(tx_1024byte_frames),
945	GBE_STATSD_INFO(net_bytes),
946	GBE_STATSD_INFO(rx_sof_overruns),
947	GBE_STATSD_INFO(rx_mof_overruns),
948	GBE_STATSD_INFO(rx_dma_overruns),
949};
950
951/* This is the size of entries in GBENU_STATS_HOST */
952#define GBENU_ET_STATS_HOST_SIZE	52
953
954#define GBENU_STATS_HOST(field)					\
955{								\
956	"GBE_HOST:"#field, GBENU_STATS0_MODULE,			\
957	sizeof_field(struct gbenu_hw_stats, field),		\
958	offsetof(struct gbenu_hw_stats, field)			\
959}
960
961/* This is the size of entries in GBENU_STATS_PORT */
962#define GBENU_ET_STATS_PORT_SIZE	65
963
964#define GBENU_STATS_P1(field)					\
965{								\
966	"GBE_P1:"#field, GBENU_STATS1_MODULE,			\
967	sizeof_field(struct gbenu_hw_stats, field),		\
968	offsetof(struct gbenu_hw_stats, field)			\
969}
970
971#define GBENU_STATS_P2(field)					\
972{								\
973	"GBE_P2:"#field, GBENU_STATS2_MODULE,			\
974	sizeof_field(struct gbenu_hw_stats, field),		\
975	offsetof(struct gbenu_hw_stats, field)			\
976}
977
978#define GBENU_STATS_P3(field)					\
979{								\
980	"GBE_P3:"#field, GBENU_STATS3_MODULE,			\
981	sizeof_field(struct gbenu_hw_stats, field),		\
982	offsetof(struct gbenu_hw_stats, field)			\
983}
984
985#define GBENU_STATS_P4(field)					\
986{								\
987	"GBE_P4:"#field, GBENU_STATS4_MODULE,			\
988	sizeof_field(struct gbenu_hw_stats, field),		\
989	offsetof(struct gbenu_hw_stats, field)			\
990}
991
992#define GBENU_STATS_P5(field)					\
993{								\
994	"GBE_P5:"#field, GBENU_STATS5_MODULE,			\
995	sizeof_field(struct gbenu_hw_stats, field),		\
996	offsetof(struct gbenu_hw_stats, field)			\
997}
998
999#define GBENU_STATS_P6(field)					\
1000{								\
1001	"GBE_P6:"#field, GBENU_STATS6_MODULE,			\
1002	sizeof_field(struct gbenu_hw_stats, field),		\
1003	offsetof(struct gbenu_hw_stats, field)			\
1004}
1005
1006#define GBENU_STATS_P7(field)					\
1007{								\
1008	"GBE_P7:"#field, GBENU_STATS7_MODULE,			\
1009	sizeof_field(struct gbenu_hw_stats, field),		\
1010	offsetof(struct gbenu_hw_stats, field)			\
1011}
1012
1013#define GBENU_STATS_P8(field)					\
1014{								\
1015	"GBE_P8:"#field, GBENU_STATS8_MODULE,			\
1016	sizeof_field(struct gbenu_hw_stats, field),		\
1017	offsetof(struct gbenu_hw_stats, field)			\
1018}
1019
1020static const struct netcp_ethtool_stat gbenu_et_stats[] = {
1021	/* GBENU Host Module */
1022	GBENU_STATS_HOST(rx_good_frames),
1023	GBENU_STATS_HOST(rx_broadcast_frames),
1024	GBENU_STATS_HOST(rx_multicast_frames),
1025	GBENU_STATS_HOST(rx_crc_errors),
1026	GBENU_STATS_HOST(rx_oversized_frames),
1027	GBENU_STATS_HOST(rx_undersized_frames),
1028	GBENU_STATS_HOST(ale_drop),
1029	GBENU_STATS_HOST(ale_overrun_drop),
1030	GBENU_STATS_HOST(rx_bytes),
1031	GBENU_STATS_HOST(tx_good_frames),
1032	GBENU_STATS_HOST(tx_broadcast_frames),
1033	GBENU_STATS_HOST(tx_multicast_frames),
1034	GBENU_STATS_HOST(tx_bytes),
1035	GBENU_STATS_HOST(tx_64B_frames),
1036	GBENU_STATS_HOST(tx_65_to_127B_frames),
1037	GBENU_STATS_HOST(tx_128_to_255B_frames),
1038	GBENU_STATS_HOST(tx_256_to_511B_frames),
1039	GBENU_STATS_HOST(tx_512_to_1023B_frames),
1040	GBENU_STATS_HOST(tx_1024B_frames),
1041	GBENU_STATS_HOST(net_bytes),
1042	GBENU_STATS_HOST(rx_bottom_fifo_drop),
1043	GBENU_STATS_HOST(rx_port_mask_drop),
1044	GBENU_STATS_HOST(rx_top_fifo_drop),
1045	GBENU_STATS_HOST(ale_rate_limit_drop),
1046	GBENU_STATS_HOST(ale_vid_ingress_drop),
1047	GBENU_STATS_HOST(ale_da_eq_sa_drop),
1048	GBENU_STATS_HOST(ale_unknown_ucast),
1049	GBENU_STATS_HOST(ale_unknown_ucast_bytes),
1050	GBENU_STATS_HOST(ale_unknown_mcast),
1051	GBENU_STATS_HOST(ale_unknown_mcast_bytes),
1052	GBENU_STATS_HOST(ale_unknown_bcast),
1053	GBENU_STATS_HOST(ale_unknown_bcast_bytes),
1054	GBENU_STATS_HOST(ale_pol_match),
1055	GBENU_STATS_HOST(ale_pol_match_red),
1056	GBENU_STATS_HOST(ale_pol_match_yellow),
1057	GBENU_STATS_HOST(tx_mem_protect_err),
1058	GBENU_STATS_HOST(tx_pri0_drop),
1059	GBENU_STATS_HOST(tx_pri1_drop),
1060	GBENU_STATS_HOST(tx_pri2_drop),
1061	GBENU_STATS_HOST(tx_pri3_drop),
1062	GBENU_STATS_HOST(tx_pri4_drop),
1063	GBENU_STATS_HOST(tx_pri5_drop),
1064	GBENU_STATS_HOST(tx_pri6_drop),
1065	GBENU_STATS_HOST(tx_pri7_drop),
1066	GBENU_STATS_HOST(tx_pri0_drop_bcnt),
1067	GBENU_STATS_HOST(tx_pri1_drop_bcnt),
1068	GBENU_STATS_HOST(tx_pri2_drop_bcnt),
1069	GBENU_STATS_HOST(tx_pri3_drop_bcnt),
1070	GBENU_STATS_HOST(tx_pri4_drop_bcnt),
1071	GBENU_STATS_HOST(tx_pri5_drop_bcnt),
1072	GBENU_STATS_HOST(tx_pri6_drop_bcnt),
1073	GBENU_STATS_HOST(tx_pri7_drop_bcnt),
1074	/* GBENU Module 1 */
1075	GBENU_STATS_P1(rx_good_frames),
1076	GBENU_STATS_P1(rx_broadcast_frames),
1077	GBENU_STATS_P1(rx_multicast_frames),
1078	GBENU_STATS_P1(rx_pause_frames),
1079	GBENU_STATS_P1(rx_crc_errors),
1080	GBENU_STATS_P1(rx_align_code_errors),
1081	GBENU_STATS_P1(rx_oversized_frames),
1082	GBENU_STATS_P1(rx_jabber_frames),
1083	GBENU_STATS_P1(rx_undersized_frames),
1084	GBENU_STATS_P1(rx_fragments),
1085	GBENU_STATS_P1(ale_drop),
1086	GBENU_STATS_P1(ale_overrun_drop),
1087	GBENU_STATS_P1(rx_bytes),
1088	GBENU_STATS_P1(tx_good_frames),
1089	GBENU_STATS_P1(tx_broadcast_frames),
1090	GBENU_STATS_P1(tx_multicast_frames),
1091	GBENU_STATS_P1(tx_pause_frames),
1092	GBENU_STATS_P1(tx_deferred_frames),
1093	GBENU_STATS_P1(tx_collision_frames),
1094	GBENU_STATS_P1(tx_single_coll_frames),
1095	GBENU_STATS_P1(tx_mult_coll_frames),
1096	GBENU_STATS_P1(tx_excessive_collisions),
1097	GBENU_STATS_P1(tx_late_collisions),
1098	GBENU_STATS_P1(rx_ipg_error),
1099	GBENU_STATS_P1(tx_carrier_sense_errors),
1100	GBENU_STATS_P1(tx_bytes),
1101	GBENU_STATS_P1(tx_64B_frames),
1102	GBENU_STATS_P1(tx_65_to_127B_frames),
1103	GBENU_STATS_P1(tx_128_to_255B_frames),
1104	GBENU_STATS_P1(tx_256_to_511B_frames),
1105	GBENU_STATS_P1(tx_512_to_1023B_frames),
1106	GBENU_STATS_P1(tx_1024B_frames),
1107	GBENU_STATS_P1(net_bytes),
1108	GBENU_STATS_P1(rx_bottom_fifo_drop),
1109	GBENU_STATS_P1(rx_port_mask_drop),
1110	GBENU_STATS_P1(rx_top_fifo_drop),
1111	GBENU_STATS_P1(ale_rate_limit_drop),
1112	GBENU_STATS_P1(ale_vid_ingress_drop),
1113	GBENU_STATS_P1(ale_da_eq_sa_drop),
1114	GBENU_STATS_P1(ale_unknown_ucast),
1115	GBENU_STATS_P1(ale_unknown_ucast_bytes),
1116	GBENU_STATS_P1(ale_unknown_mcast),
1117	GBENU_STATS_P1(ale_unknown_mcast_bytes),
1118	GBENU_STATS_P1(ale_unknown_bcast),
1119	GBENU_STATS_P1(ale_unknown_bcast_bytes),
1120	GBENU_STATS_P1(ale_pol_match),
1121	GBENU_STATS_P1(ale_pol_match_red),
1122	GBENU_STATS_P1(ale_pol_match_yellow),
1123	GBENU_STATS_P1(tx_mem_protect_err),
1124	GBENU_STATS_P1(tx_pri0_drop),
1125	GBENU_STATS_P1(tx_pri1_drop),
1126	GBENU_STATS_P1(tx_pri2_drop),
1127	GBENU_STATS_P1(tx_pri3_drop),
1128	GBENU_STATS_P1(tx_pri4_drop),
1129	GBENU_STATS_P1(tx_pri5_drop),
1130	GBENU_STATS_P1(tx_pri6_drop),
1131	GBENU_STATS_P1(tx_pri7_drop),
1132	GBENU_STATS_P1(tx_pri0_drop_bcnt),
1133	GBENU_STATS_P1(tx_pri1_drop_bcnt),
1134	GBENU_STATS_P1(tx_pri2_drop_bcnt),
1135	GBENU_STATS_P1(tx_pri3_drop_bcnt),
1136	GBENU_STATS_P1(tx_pri4_drop_bcnt),
1137	GBENU_STATS_P1(tx_pri5_drop_bcnt),
1138	GBENU_STATS_P1(tx_pri6_drop_bcnt),
1139	GBENU_STATS_P1(tx_pri7_drop_bcnt),
1140	/* GBENU Module 2 */
1141	GBENU_STATS_P2(rx_good_frames),
1142	GBENU_STATS_P2(rx_broadcast_frames),
1143	GBENU_STATS_P2(rx_multicast_frames),
1144	GBENU_STATS_P2(rx_pause_frames),
1145	GBENU_STATS_P2(rx_crc_errors),
1146	GBENU_STATS_P2(rx_align_code_errors),
1147	GBENU_STATS_P2(rx_oversized_frames),
1148	GBENU_STATS_P2(rx_jabber_frames),
1149	GBENU_STATS_P2(rx_undersized_frames),
1150	GBENU_STATS_P2(rx_fragments),
1151	GBENU_STATS_P2(ale_drop),
1152	GBENU_STATS_P2(ale_overrun_drop),
1153	GBENU_STATS_P2(rx_bytes),
1154	GBENU_STATS_P2(tx_good_frames),
1155	GBENU_STATS_P2(tx_broadcast_frames),
1156	GBENU_STATS_P2(tx_multicast_frames),
1157	GBENU_STATS_P2(tx_pause_frames),
1158	GBENU_STATS_P2(tx_deferred_frames),
1159	GBENU_STATS_P2(tx_collision_frames),
1160	GBENU_STATS_P2(tx_single_coll_frames),
1161	GBENU_STATS_P2(tx_mult_coll_frames),
1162	GBENU_STATS_P2(tx_excessive_collisions),
1163	GBENU_STATS_P2(tx_late_collisions),
1164	GBENU_STATS_P2(rx_ipg_error),
1165	GBENU_STATS_P2(tx_carrier_sense_errors),
1166	GBENU_STATS_P2(tx_bytes),
1167	GBENU_STATS_P2(tx_64B_frames),
1168	GBENU_STATS_P2(tx_65_to_127B_frames),
1169	GBENU_STATS_P2(tx_128_to_255B_frames),
1170	GBENU_STATS_P2(tx_256_to_511B_frames),
1171	GBENU_STATS_P2(tx_512_to_1023B_frames),
1172	GBENU_STATS_P2(tx_1024B_frames),
1173	GBENU_STATS_P2(net_bytes),
1174	GBENU_STATS_P2(rx_bottom_fifo_drop),
1175	GBENU_STATS_P2(rx_port_mask_drop),
1176	GBENU_STATS_P2(rx_top_fifo_drop),
1177	GBENU_STATS_P2(ale_rate_limit_drop),
1178	GBENU_STATS_P2(ale_vid_ingress_drop),
1179	GBENU_STATS_P2(ale_da_eq_sa_drop),
1180	GBENU_STATS_P2(ale_unknown_ucast),
1181	GBENU_STATS_P2(ale_unknown_ucast_bytes),
1182	GBENU_STATS_P2(ale_unknown_mcast),
1183	GBENU_STATS_P2(ale_unknown_mcast_bytes),
1184	GBENU_STATS_P2(ale_unknown_bcast),
1185	GBENU_STATS_P2(ale_unknown_bcast_bytes),
1186	GBENU_STATS_P2(ale_pol_match),
1187	GBENU_STATS_P2(ale_pol_match_red),
1188	GBENU_STATS_P2(ale_pol_match_yellow),
1189	GBENU_STATS_P2(tx_mem_protect_err),
1190	GBENU_STATS_P2(tx_pri0_drop),
1191	GBENU_STATS_P2(tx_pri1_drop),
1192	GBENU_STATS_P2(tx_pri2_drop),
1193	GBENU_STATS_P2(tx_pri3_drop),
1194	GBENU_STATS_P2(tx_pri4_drop),
1195	GBENU_STATS_P2(tx_pri5_drop),
1196	GBENU_STATS_P2(tx_pri6_drop),
1197	GBENU_STATS_P2(tx_pri7_drop),
1198	GBENU_STATS_P2(tx_pri0_drop_bcnt),
1199	GBENU_STATS_P2(tx_pri1_drop_bcnt),
1200	GBENU_STATS_P2(tx_pri2_drop_bcnt),
1201	GBENU_STATS_P2(tx_pri3_drop_bcnt),
1202	GBENU_STATS_P2(tx_pri4_drop_bcnt),
1203	GBENU_STATS_P2(tx_pri5_drop_bcnt),
1204	GBENU_STATS_P2(tx_pri6_drop_bcnt),
1205	GBENU_STATS_P2(tx_pri7_drop_bcnt),
1206	/* GBENU Module 3 */
1207	GBENU_STATS_P3(rx_good_frames),
1208	GBENU_STATS_P3(rx_broadcast_frames),
1209	GBENU_STATS_P3(rx_multicast_frames),
1210	GBENU_STATS_P3(rx_pause_frames),
1211	GBENU_STATS_P3(rx_crc_errors),
1212	GBENU_STATS_P3(rx_align_code_errors),
1213	GBENU_STATS_P3(rx_oversized_frames),
1214	GBENU_STATS_P3(rx_jabber_frames),
1215	GBENU_STATS_P3(rx_undersized_frames),
1216	GBENU_STATS_P3(rx_fragments),
1217	GBENU_STATS_P3(ale_drop),
1218	GBENU_STATS_P3(ale_overrun_drop),
1219	GBENU_STATS_P3(rx_bytes),
1220	GBENU_STATS_P3(tx_good_frames),
1221	GBENU_STATS_P3(tx_broadcast_frames),
1222	GBENU_STATS_P3(tx_multicast_frames),
1223	GBENU_STATS_P3(tx_pause_frames),
1224	GBENU_STATS_P3(tx_deferred_frames),
1225	GBENU_STATS_P3(tx_collision_frames),
1226	GBENU_STATS_P3(tx_single_coll_frames),
1227	GBENU_STATS_P3(tx_mult_coll_frames),
1228	GBENU_STATS_P3(tx_excessive_collisions),
1229	GBENU_STATS_P3(tx_late_collisions),
1230	GBENU_STATS_P3(rx_ipg_error),
1231	GBENU_STATS_P3(tx_carrier_sense_errors),
1232	GBENU_STATS_P3(tx_bytes),
1233	GBENU_STATS_P3(tx_64B_frames),
1234	GBENU_STATS_P3(tx_65_to_127B_frames),
1235	GBENU_STATS_P3(tx_128_to_255B_frames),
1236	GBENU_STATS_P3(tx_256_to_511B_frames),
1237	GBENU_STATS_P3(tx_512_to_1023B_frames),
1238	GBENU_STATS_P3(tx_1024B_frames),
1239	GBENU_STATS_P3(net_bytes),
1240	GBENU_STATS_P3(rx_bottom_fifo_drop),
1241	GBENU_STATS_P3(rx_port_mask_drop),
1242	GBENU_STATS_P3(rx_top_fifo_drop),
1243	GBENU_STATS_P3(ale_rate_limit_drop),
1244	GBENU_STATS_P3(ale_vid_ingress_drop),
1245	GBENU_STATS_P3(ale_da_eq_sa_drop),
1246	GBENU_STATS_P3(ale_unknown_ucast),
1247	GBENU_STATS_P3(ale_unknown_ucast_bytes),
1248	GBENU_STATS_P3(ale_unknown_mcast),
1249	GBENU_STATS_P3(ale_unknown_mcast_bytes),
1250	GBENU_STATS_P3(ale_unknown_bcast),
1251	GBENU_STATS_P3(ale_unknown_bcast_bytes),
1252	GBENU_STATS_P3(ale_pol_match),
1253	GBENU_STATS_P3(ale_pol_match_red),
1254	GBENU_STATS_P3(ale_pol_match_yellow),
1255	GBENU_STATS_P3(tx_mem_protect_err),
1256	GBENU_STATS_P3(tx_pri0_drop),
1257	GBENU_STATS_P3(tx_pri1_drop),
1258	GBENU_STATS_P3(tx_pri2_drop),
1259	GBENU_STATS_P3(tx_pri3_drop),
1260	GBENU_STATS_P3(tx_pri4_drop),
1261	GBENU_STATS_P3(tx_pri5_drop),
1262	GBENU_STATS_P3(tx_pri6_drop),
1263	GBENU_STATS_P3(tx_pri7_drop),
1264	GBENU_STATS_P3(tx_pri0_drop_bcnt),
1265	GBENU_STATS_P3(tx_pri1_drop_bcnt),
1266	GBENU_STATS_P3(tx_pri2_drop_bcnt),
1267	GBENU_STATS_P3(tx_pri3_drop_bcnt),
1268	GBENU_STATS_P3(tx_pri4_drop_bcnt),
1269	GBENU_STATS_P3(tx_pri5_drop_bcnt),
1270	GBENU_STATS_P3(tx_pri6_drop_bcnt),
1271	GBENU_STATS_P3(tx_pri7_drop_bcnt),
1272	/* GBENU Module 4 */
1273	GBENU_STATS_P4(rx_good_frames),
1274	GBENU_STATS_P4(rx_broadcast_frames),
1275	GBENU_STATS_P4(rx_multicast_frames),
1276	GBENU_STATS_P4(rx_pause_frames),
1277	GBENU_STATS_P4(rx_crc_errors),
1278	GBENU_STATS_P4(rx_align_code_errors),
1279	GBENU_STATS_P4(rx_oversized_frames),
1280	GBENU_STATS_P4(rx_jabber_frames),
1281	GBENU_STATS_P4(rx_undersized_frames),
1282	GBENU_STATS_P4(rx_fragments),
1283	GBENU_STATS_P4(ale_drop),
1284	GBENU_STATS_P4(ale_overrun_drop),
1285	GBENU_STATS_P4(rx_bytes),
1286	GBENU_STATS_P4(tx_good_frames),
1287	GBENU_STATS_P4(tx_broadcast_frames),
1288	GBENU_STATS_P4(tx_multicast_frames),
1289	GBENU_STATS_P4(tx_pause_frames),
1290	GBENU_STATS_P4(tx_deferred_frames),
1291	GBENU_STATS_P4(tx_collision_frames),
1292	GBENU_STATS_P4(tx_single_coll_frames),
1293	GBENU_STATS_P4(tx_mult_coll_frames),
1294	GBENU_STATS_P4(tx_excessive_collisions),
1295	GBENU_STATS_P4(tx_late_collisions),
1296	GBENU_STATS_P4(rx_ipg_error),
1297	GBENU_STATS_P4(tx_carrier_sense_errors),
1298	GBENU_STATS_P4(tx_bytes),
1299	GBENU_STATS_P4(tx_64B_frames),
1300	GBENU_STATS_P4(tx_65_to_127B_frames),
1301	GBENU_STATS_P4(tx_128_to_255B_frames),
1302	GBENU_STATS_P4(tx_256_to_511B_frames),
1303	GBENU_STATS_P4(tx_512_to_1023B_frames),
1304	GBENU_STATS_P4(tx_1024B_frames),
1305	GBENU_STATS_P4(net_bytes),
1306	GBENU_STATS_P4(rx_bottom_fifo_drop),
1307	GBENU_STATS_P4(rx_port_mask_drop),
1308	GBENU_STATS_P4(rx_top_fifo_drop),
1309	GBENU_STATS_P4(ale_rate_limit_drop),
1310	GBENU_STATS_P4(ale_vid_ingress_drop),
1311	GBENU_STATS_P4(ale_da_eq_sa_drop),
1312	GBENU_STATS_P4(ale_unknown_ucast),
1313	GBENU_STATS_P4(ale_unknown_ucast_bytes),
1314	GBENU_STATS_P4(ale_unknown_mcast),
1315	GBENU_STATS_P4(ale_unknown_mcast_bytes),
1316	GBENU_STATS_P4(ale_unknown_bcast),
1317	GBENU_STATS_P4(ale_unknown_bcast_bytes),
1318	GBENU_STATS_P4(ale_pol_match),
1319	GBENU_STATS_P4(ale_pol_match_red),
1320	GBENU_STATS_P4(ale_pol_match_yellow),
1321	GBENU_STATS_P4(tx_mem_protect_err),
1322	GBENU_STATS_P4(tx_pri0_drop),
1323	GBENU_STATS_P4(tx_pri1_drop),
1324	GBENU_STATS_P4(tx_pri2_drop),
1325	GBENU_STATS_P4(tx_pri3_drop),
1326	GBENU_STATS_P4(tx_pri4_drop),
1327	GBENU_STATS_P4(tx_pri5_drop),
1328	GBENU_STATS_P4(tx_pri6_drop),
1329	GBENU_STATS_P4(tx_pri7_drop),
1330	GBENU_STATS_P4(tx_pri0_drop_bcnt),
1331	GBENU_STATS_P4(tx_pri1_drop_bcnt),
1332	GBENU_STATS_P4(tx_pri2_drop_bcnt),
1333	GBENU_STATS_P4(tx_pri3_drop_bcnt),
1334	GBENU_STATS_P4(tx_pri4_drop_bcnt),
1335	GBENU_STATS_P4(tx_pri5_drop_bcnt),
1336	GBENU_STATS_P4(tx_pri6_drop_bcnt),
1337	GBENU_STATS_P4(tx_pri7_drop_bcnt),
1338	/* GBENU Module 5 */
1339	GBENU_STATS_P5(rx_good_frames),
1340	GBENU_STATS_P5(rx_broadcast_frames),
1341	GBENU_STATS_P5(rx_multicast_frames),
1342	GBENU_STATS_P5(rx_pause_frames),
1343	GBENU_STATS_P5(rx_crc_errors),
1344	GBENU_STATS_P5(rx_align_code_errors),
1345	GBENU_STATS_P5(rx_oversized_frames),
1346	GBENU_STATS_P5(rx_jabber_frames),
1347	GBENU_STATS_P5(rx_undersized_frames),
1348	GBENU_STATS_P5(rx_fragments),
1349	GBENU_STATS_P5(ale_drop),
1350	GBENU_STATS_P5(ale_overrun_drop),
1351	GBENU_STATS_P5(rx_bytes),
1352	GBENU_STATS_P5(tx_good_frames),
1353	GBENU_STATS_P5(tx_broadcast_frames),
1354	GBENU_STATS_P5(tx_multicast_frames),
1355	GBENU_STATS_P5(tx_pause_frames),
1356	GBENU_STATS_P5(tx_deferred_frames),
1357	GBENU_STATS_P5(tx_collision_frames),
1358	GBENU_STATS_P5(tx_single_coll_frames),
1359	GBENU_STATS_P5(tx_mult_coll_frames),
1360	GBENU_STATS_P5(tx_excessive_collisions),
1361	GBENU_STATS_P5(tx_late_collisions),
1362	GBENU_STATS_P5(rx_ipg_error),
1363	GBENU_STATS_P5(tx_carrier_sense_errors),
1364	GBENU_STATS_P5(tx_bytes),
1365	GBENU_STATS_P5(tx_64B_frames),
1366	GBENU_STATS_P5(tx_65_to_127B_frames),
1367	GBENU_STATS_P5(tx_128_to_255B_frames),
1368	GBENU_STATS_P5(tx_256_to_511B_frames),
1369	GBENU_STATS_P5(tx_512_to_1023B_frames),
1370	GBENU_STATS_P5(tx_1024B_frames),
1371	GBENU_STATS_P5(net_bytes),
1372	GBENU_STATS_P5(rx_bottom_fifo_drop),
1373	GBENU_STATS_P5(rx_port_mask_drop),
1374	GBENU_STATS_P5(rx_top_fifo_drop),
1375	GBENU_STATS_P5(ale_rate_limit_drop),
1376	GBENU_STATS_P5(ale_vid_ingress_drop),
1377	GBENU_STATS_P5(ale_da_eq_sa_drop),
1378	GBENU_STATS_P5(ale_unknown_ucast),
1379	GBENU_STATS_P5(ale_unknown_ucast_bytes),
1380	GBENU_STATS_P5(ale_unknown_mcast),
1381	GBENU_STATS_P5(ale_unknown_mcast_bytes),
1382	GBENU_STATS_P5(ale_unknown_bcast),
1383	GBENU_STATS_P5(ale_unknown_bcast_bytes),
1384	GBENU_STATS_P5(ale_pol_match),
1385	GBENU_STATS_P5(ale_pol_match_red),
1386	GBENU_STATS_P5(ale_pol_match_yellow),
1387	GBENU_STATS_P5(tx_mem_protect_err),
1388	GBENU_STATS_P5(tx_pri0_drop),
1389	GBENU_STATS_P5(tx_pri1_drop),
1390	GBENU_STATS_P5(tx_pri2_drop),
1391	GBENU_STATS_P5(tx_pri3_drop),
1392	GBENU_STATS_P5(tx_pri4_drop),
1393	GBENU_STATS_P5(tx_pri5_drop),
1394	GBENU_STATS_P5(tx_pri6_drop),
1395	GBENU_STATS_P5(tx_pri7_drop),
1396	GBENU_STATS_P5(tx_pri0_drop_bcnt),
1397	GBENU_STATS_P5(tx_pri1_drop_bcnt),
1398	GBENU_STATS_P5(tx_pri2_drop_bcnt),
1399	GBENU_STATS_P5(tx_pri3_drop_bcnt),
1400	GBENU_STATS_P5(tx_pri4_drop_bcnt),
1401	GBENU_STATS_P5(tx_pri5_drop_bcnt),
1402	GBENU_STATS_P5(tx_pri6_drop_bcnt),
1403	GBENU_STATS_P5(tx_pri7_drop_bcnt),
1404	/* GBENU Module 6 */
1405	GBENU_STATS_P6(rx_good_frames),
1406	GBENU_STATS_P6(rx_broadcast_frames),
1407	GBENU_STATS_P6(rx_multicast_frames),
1408	GBENU_STATS_P6(rx_pause_frames),
1409	GBENU_STATS_P6(rx_crc_errors),
1410	GBENU_STATS_P6(rx_align_code_errors),
1411	GBENU_STATS_P6(rx_oversized_frames),
1412	GBENU_STATS_P6(rx_jabber_frames),
1413	GBENU_STATS_P6(rx_undersized_frames),
1414	GBENU_STATS_P6(rx_fragments),
1415	GBENU_STATS_P6(ale_drop),
1416	GBENU_STATS_P6(ale_overrun_drop),
1417	GBENU_STATS_P6(rx_bytes),
1418	GBENU_STATS_P6(tx_good_frames),
1419	GBENU_STATS_P6(tx_broadcast_frames),
1420	GBENU_STATS_P6(tx_multicast_frames),
1421	GBENU_STATS_P6(tx_pause_frames),
1422	GBENU_STATS_P6(tx_deferred_frames),
1423	GBENU_STATS_P6(tx_collision_frames),
1424	GBENU_STATS_P6(tx_single_coll_frames),
1425	GBENU_STATS_P6(tx_mult_coll_frames),
1426	GBENU_STATS_P6(tx_excessive_collisions),
1427	GBENU_STATS_P6(tx_late_collisions),
1428	GBENU_STATS_P6(rx_ipg_error),
1429	GBENU_STATS_P6(tx_carrier_sense_errors),
1430	GBENU_STATS_P6(tx_bytes),
1431	GBENU_STATS_P6(tx_64B_frames),
1432	GBENU_STATS_P6(tx_65_to_127B_frames),
1433	GBENU_STATS_P6(tx_128_to_255B_frames),
1434	GBENU_STATS_P6(tx_256_to_511B_frames),
1435	GBENU_STATS_P6(tx_512_to_1023B_frames),
1436	GBENU_STATS_P6(tx_1024B_frames),
1437	GBENU_STATS_P6(net_bytes),
1438	GBENU_STATS_P6(rx_bottom_fifo_drop),
1439	GBENU_STATS_P6(rx_port_mask_drop),
1440	GBENU_STATS_P6(rx_top_fifo_drop),
1441	GBENU_STATS_P6(ale_rate_limit_drop),
1442	GBENU_STATS_P6(ale_vid_ingress_drop),
1443	GBENU_STATS_P6(ale_da_eq_sa_drop),
1444	GBENU_STATS_P6(ale_unknown_ucast),
1445	GBENU_STATS_P6(ale_unknown_ucast_bytes),
1446	GBENU_STATS_P6(ale_unknown_mcast),
1447	GBENU_STATS_P6(ale_unknown_mcast_bytes),
1448	GBENU_STATS_P6(ale_unknown_bcast),
1449	GBENU_STATS_P6(ale_unknown_bcast_bytes),
1450	GBENU_STATS_P6(ale_pol_match),
1451	GBENU_STATS_P6(ale_pol_match_red),
1452	GBENU_STATS_P6(ale_pol_match_yellow),
1453	GBENU_STATS_P6(tx_mem_protect_err),
1454	GBENU_STATS_P6(tx_pri0_drop),
1455	GBENU_STATS_P6(tx_pri1_drop),
1456	GBENU_STATS_P6(tx_pri2_drop),
1457	GBENU_STATS_P6(tx_pri3_drop),
1458	GBENU_STATS_P6(tx_pri4_drop),
1459	GBENU_STATS_P6(tx_pri5_drop),
1460	GBENU_STATS_P6(tx_pri6_drop),
1461	GBENU_STATS_P6(tx_pri7_drop),
1462	GBENU_STATS_P6(tx_pri0_drop_bcnt),
1463	GBENU_STATS_P6(tx_pri1_drop_bcnt),
1464	GBENU_STATS_P6(tx_pri2_drop_bcnt),
1465	GBENU_STATS_P6(tx_pri3_drop_bcnt),
1466	GBENU_STATS_P6(tx_pri4_drop_bcnt),
1467	GBENU_STATS_P6(tx_pri5_drop_bcnt),
1468	GBENU_STATS_P6(tx_pri6_drop_bcnt),
1469	GBENU_STATS_P6(tx_pri7_drop_bcnt),
1470	/* GBENU Module 7 */
1471	GBENU_STATS_P7(rx_good_frames),
1472	GBENU_STATS_P7(rx_broadcast_frames),
1473	GBENU_STATS_P7(rx_multicast_frames),
1474	GBENU_STATS_P7(rx_pause_frames),
1475	GBENU_STATS_P7(rx_crc_errors),
1476	GBENU_STATS_P7(rx_align_code_errors),
1477	GBENU_STATS_P7(rx_oversized_frames),
1478	GBENU_STATS_P7(rx_jabber_frames),
1479	GBENU_STATS_P7(rx_undersized_frames),
1480	GBENU_STATS_P7(rx_fragments),
1481	GBENU_STATS_P7(ale_drop),
1482	GBENU_STATS_P7(ale_overrun_drop),
1483	GBENU_STATS_P7(rx_bytes),
1484	GBENU_STATS_P7(tx_good_frames),
1485	GBENU_STATS_P7(tx_broadcast_frames),
1486	GBENU_STATS_P7(tx_multicast_frames),
1487	GBENU_STATS_P7(tx_pause_frames),
1488	GBENU_STATS_P7(tx_deferred_frames),
1489	GBENU_STATS_P7(tx_collision_frames),
1490	GBENU_STATS_P7(tx_single_coll_frames),
1491	GBENU_STATS_P7(tx_mult_coll_frames),
1492	GBENU_STATS_P7(tx_excessive_collisions),
1493	GBENU_STATS_P7(tx_late_collisions),
1494	GBENU_STATS_P7(rx_ipg_error),
1495	GBENU_STATS_P7(tx_carrier_sense_errors),
1496	GBENU_STATS_P7(tx_bytes),
1497	GBENU_STATS_P7(tx_64B_frames),
1498	GBENU_STATS_P7(tx_65_to_127B_frames),
1499	GBENU_STATS_P7(tx_128_to_255B_frames),
1500	GBENU_STATS_P7(tx_256_to_511B_frames),
1501	GBENU_STATS_P7(tx_512_to_1023B_frames),
1502	GBENU_STATS_P7(tx_1024B_frames),
1503	GBENU_STATS_P7(net_bytes),
1504	GBENU_STATS_P7(rx_bottom_fifo_drop),
1505	GBENU_STATS_P7(rx_port_mask_drop),
1506	GBENU_STATS_P7(rx_top_fifo_drop),
1507	GBENU_STATS_P7(ale_rate_limit_drop),
1508	GBENU_STATS_P7(ale_vid_ingress_drop),
1509	GBENU_STATS_P7(ale_da_eq_sa_drop),
1510	GBENU_STATS_P7(ale_unknown_ucast),
1511	GBENU_STATS_P7(ale_unknown_ucast_bytes),
1512	GBENU_STATS_P7(ale_unknown_mcast),
1513	GBENU_STATS_P7(ale_unknown_mcast_bytes),
1514	GBENU_STATS_P7(ale_unknown_bcast),
1515	GBENU_STATS_P7(ale_unknown_bcast_bytes),
1516	GBENU_STATS_P7(ale_pol_match),
1517	GBENU_STATS_P7(ale_pol_match_red),
1518	GBENU_STATS_P7(ale_pol_match_yellow),
1519	GBENU_STATS_P7(tx_mem_protect_err),
1520	GBENU_STATS_P7(tx_pri0_drop),
1521	GBENU_STATS_P7(tx_pri1_drop),
1522	GBENU_STATS_P7(tx_pri2_drop),
1523	GBENU_STATS_P7(tx_pri3_drop),
1524	GBENU_STATS_P7(tx_pri4_drop),
1525	GBENU_STATS_P7(tx_pri5_drop),
1526	GBENU_STATS_P7(tx_pri6_drop),
1527	GBENU_STATS_P7(tx_pri7_drop),
1528	GBENU_STATS_P7(tx_pri0_drop_bcnt),
1529	GBENU_STATS_P7(tx_pri1_drop_bcnt),
1530	GBENU_STATS_P7(tx_pri2_drop_bcnt),
1531	GBENU_STATS_P7(tx_pri3_drop_bcnt),
1532	GBENU_STATS_P7(tx_pri4_drop_bcnt),
1533	GBENU_STATS_P7(tx_pri5_drop_bcnt),
1534	GBENU_STATS_P7(tx_pri6_drop_bcnt),
1535	GBENU_STATS_P7(tx_pri7_drop_bcnt),
1536	/* GBENU Module 8 */
1537	GBENU_STATS_P8(rx_good_frames),
1538	GBENU_STATS_P8(rx_broadcast_frames),
1539	GBENU_STATS_P8(rx_multicast_frames),
1540	GBENU_STATS_P8(rx_pause_frames),
1541	GBENU_STATS_P8(rx_crc_errors),
1542	GBENU_STATS_P8(rx_align_code_errors),
1543	GBENU_STATS_P8(rx_oversized_frames),
1544	GBENU_STATS_P8(rx_jabber_frames),
1545	GBENU_STATS_P8(rx_undersized_frames),
1546	GBENU_STATS_P8(rx_fragments),
1547	GBENU_STATS_P8(ale_drop),
1548	GBENU_STATS_P8(ale_overrun_drop),
1549	GBENU_STATS_P8(rx_bytes),
1550	GBENU_STATS_P8(tx_good_frames),
1551	GBENU_STATS_P8(tx_broadcast_frames),
1552	GBENU_STATS_P8(tx_multicast_frames),
1553	GBENU_STATS_P8(tx_pause_frames),
1554	GBENU_STATS_P8(tx_deferred_frames),
1555	GBENU_STATS_P8(tx_collision_frames),
1556	GBENU_STATS_P8(tx_single_coll_frames),
1557	GBENU_STATS_P8(tx_mult_coll_frames),
1558	GBENU_STATS_P8(tx_excessive_collisions),
1559	GBENU_STATS_P8(tx_late_collisions),
1560	GBENU_STATS_P8(rx_ipg_error),
1561	GBENU_STATS_P8(tx_carrier_sense_errors),
1562	GBENU_STATS_P8(tx_bytes),
1563	GBENU_STATS_P8(tx_64B_frames),
1564	GBENU_STATS_P8(tx_65_to_127B_frames),
1565	GBENU_STATS_P8(tx_128_to_255B_frames),
1566	GBENU_STATS_P8(tx_256_to_511B_frames),
1567	GBENU_STATS_P8(tx_512_to_1023B_frames),
1568	GBENU_STATS_P8(tx_1024B_frames),
1569	GBENU_STATS_P8(net_bytes),
1570	GBENU_STATS_P8(rx_bottom_fifo_drop),
1571	GBENU_STATS_P8(rx_port_mask_drop),
1572	GBENU_STATS_P8(rx_top_fifo_drop),
1573	GBENU_STATS_P8(ale_rate_limit_drop),
1574	GBENU_STATS_P8(ale_vid_ingress_drop),
1575	GBENU_STATS_P8(ale_da_eq_sa_drop),
1576	GBENU_STATS_P8(ale_unknown_ucast),
1577	GBENU_STATS_P8(ale_unknown_ucast_bytes),
1578	GBENU_STATS_P8(ale_unknown_mcast),
1579	GBENU_STATS_P8(ale_unknown_mcast_bytes),
1580	GBENU_STATS_P8(ale_unknown_bcast),
1581	GBENU_STATS_P8(ale_unknown_bcast_bytes),
1582	GBENU_STATS_P8(ale_pol_match),
1583	GBENU_STATS_P8(ale_pol_match_red),
1584	GBENU_STATS_P8(ale_pol_match_yellow),
1585	GBENU_STATS_P8(tx_mem_protect_err),
1586	GBENU_STATS_P8(tx_pri0_drop),
1587	GBENU_STATS_P8(tx_pri1_drop),
1588	GBENU_STATS_P8(tx_pri2_drop),
1589	GBENU_STATS_P8(tx_pri3_drop),
1590	GBENU_STATS_P8(tx_pri4_drop),
1591	GBENU_STATS_P8(tx_pri5_drop),
1592	GBENU_STATS_P8(tx_pri6_drop),
1593	GBENU_STATS_P8(tx_pri7_drop),
1594	GBENU_STATS_P8(tx_pri0_drop_bcnt),
1595	GBENU_STATS_P8(tx_pri1_drop_bcnt),
1596	GBENU_STATS_P8(tx_pri2_drop_bcnt),
1597	GBENU_STATS_P8(tx_pri3_drop_bcnt),
1598	GBENU_STATS_P8(tx_pri4_drop_bcnt),
1599	GBENU_STATS_P8(tx_pri5_drop_bcnt),
1600	GBENU_STATS_P8(tx_pri6_drop_bcnt),
1601	GBENU_STATS_P8(tx_pri7_drop_bcnt),
1602};
1603
1604#define XGBE_STATS0_INFO(field)				\
1605{							\
1606	"GBE_0:"#field, XGBE_STATS0_MODULE,		\
1607	sizeof_field(struct xgbe_hw_stats, field),	\
1608	offsetof(struct xgbe_hw_stats, field)		\
1609}
1610
1611#define XGBE_STATS1_INFO(field)				\
1612{							\
1613	"GBE_1:"#field, XGBE_STATS1_MODULE,		\
1614	sizeof_field(struct xgbe_hw_stats, field),	\
1615	offsetof(struct xgbe_hw_stats, field)		\
1616}
1617
1618#define XGBE_STATS2_INFO(field)				\
1619{							\
1620	"GBE_2:"#field, XGBE_STATS2_MODULE,		\
1621	sizeof_field(struct xgbe_hw_stats, field),	\
1622	offsetof(struct xgbe_hw_stats, field)		\
1623}
1624
1625static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
1626	/* GBE module 0 */
1627	XGBE_STATS0_INFO(rx_good_frames),
1628	XGBE_STATS0_INFO(rx_broadcast_frames),
1629	XGBE_STATS0_INFO(rx_multicast_frames),
1630	XGBE_STATS0_INFO(rx_oversized_frames),
1631	XGBE_STATS0_INFO(rx_undersized_frames),
1632	XGBE_STATS0_INFO(overrun_type4),
1633	XGBE_STATS0_INFO(overrun_type5),
1634	XGBE_STATS0_INFO(rx_bytes),
1635	XGBE_STATS0_INFO(tx_good_frames),
1636	XGBE_STATS0_INFO(tx_broadcast_frames),
1637	XGBE_STATS0_INFO(tx_multicast_frames),
1638	XGBE_STATS0_INFO(tx_bytes),
1639	XGBE_STATS0_INFO(tx_64byte_frames),
1640	XGBE_STATS0_INFO(tx_65_to_127byte_frames),
1641	XGBE_STATS0_INFO(tx_128_to_255byte_frames),
1642	XGBE_STATS0_INFO(tx_256_to_511byte_frames),
1643	XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
1644	XGBE_STATS0_INFO(tx_1024byte_frames),
1645	XGBE_STATS0_INFO(net_bytes),
1646	XGBE_STATS0_INFO(rx_sof_overruns),
1647	XGBE_STATS0_INFO(rx_mof_overruns),
1648	XGBE_STATS0_INFO(rx_dma_overruns),
1649	/* XGBE module 1 */
1650	XGBE_STATS1_INFO(rx_good_frames),
1651	XGBE_STATS1_INFO(rx_broadcast_frames),
1652	XGBE_STATS1_INFO(rx_multicast_frames),
1653	XGBE_STATS1_INFO(rx_pause_frames),
1654	XGBE_STATS1_INFO(rx_crc_errors),
1655	XGBE_STATS1_INFO(rx_align_code_errors),
1656	XGBE_STATS1_INFO(rx_oversized_frames),
1657	XGBE_STATS1_INFO(rx_jabber_frames),
1658	XGBE_STATS1_INFO(rx_undersized_frames),
1659	XGBE_STATS1_INFO(rx_fragments),
1660	XGBE_STATS1_INFO(overrun_type4),
1661	XGBE_STATS1_INFO(overrun_type5),
1662	XGBE_STATS1_INFO(rx_bytes),
1663	XGBE_STATS1_INFO(tx_good_frames),
1664	XGBE_STATS1_INFO(tx_broadcast_frames),
1665	XGBE_STATS1_INFO(tx_multicast_frames),
1666	XGBE_STATS1_INFO(tx_pause_frames),
1667	XGBE_STATS1_INFO(tx_deferred_frames),
1668	XGBE_STATS1_INFO(tx_collision_frames),
1669	XGBE_STATS1_INFO(tx_single_coll_frames),
1670	XGBE_STATS1_INFO(tx_mult_coll_frames),
1671	XGBE_STATS1_INFO(tx_excessive_collisions),
1672	XGBE_STATS1_INFO(tx_late_collisions),
1673	XGBE_STATS1_INFO(tx_underrun),
1674	XGBE_STATS1_INFO(tx_carrier_sense_errors),
1675	XGBE_STATS1_INFO(tx_bytes),
1676	XGBE_STATS1_INFO(tx_64byte_frames),
1677	XGBE_STATS1_INFO(tx_65_to_127byte_frames),
1678	XGBE_STATS1_INFO(tx_128_to_255byte_frames),
1679	XGBE_STATS1_INFO(tx_256_to_511byte_frames),
1680	XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
1681	XGBE_STATS1_INFO(tx_1024byte_frames),
1682	XGBE_STATS1_INFO(net_bytes),
1683	XGBE_STATS1_INFO(rx_sof_overruns),
1684	XGBE_STATS1_INFO(rx_mof_overruns),
1685	XGBE_STATS1_INFO(rx_dma_overruns),
1686	/* XGBE module 2 */
1687	XGBE_STATS2_INFO(rx_good_frames),
1688	XGBE_STATS2_INFO(rx_broadcast_frames),
1689	XGBE_STATS2_INFO(rx_multicast_frames),
1690	XGBE_STATS2_INFO(rx_pause_frames),
1691	XGBE_STATS2_INFO(rx_crc_errors),
1692	XGBE_STATS2_INFO(rx_align_code_errors),
1693	XGBE_STATS2_INFO(rx_oversized_frames),
1694	XGBE_STATS2_INFO(rx_jabber_frames),
1695	XGBE_STATS2_INFO(rx_undersized_frames),
1696	XGBE_STATS2_INFO(rx_fragments),
1697	XGBE_STATS2_INFO(overrun_type4),
1698	XGBE_STATS2_INFO(overrun_type5),
1699	XGBE_STATS2_INFO(rx_bytes),
1700	XGBE_STATS2_INFO(tx_good_frames),
1701	XGBE_STATS2_INFO(tx_broadcast_frames),
1702	XGBE_STATS2_INFO(tx_multicast_frames),
1703	XGBE_STATS2_INFO(tx_pause_frames),
1704	XGBE_STATS2_INFO(tx_deferred_frames),
1705	XGBE_STATS2_INFO(tx_collision_frames),
1706	XGBE_STATS2_INFO(tx_single_coll_frames),
1707	XGBE_STATS2_INFO(tx_mult_coll_frames),
1708	XGBE_STATS2_INFO(tx_excessive_collisions),
1709	XGBE_STATS2_INFO(tx_late_collisions),
1710	XGBE_STATS2_INFO(tx_underrun),
1711	XGBE_STATS2_INFO(tx_carrier_sense_errors),
1712	XGBE_STATS2_INFO(tx_bytes),
1713	XGBE_STATS2_INFO(tx_64byte_frames),
1714	XGBE_STATS2_INFO(tx_65_to_127byte_frames),
1715	XGBE_STATS2_INFO(tx_128_to_255byte_frames),
1716	XGBE_STATS2_INFO(tx_256_to_511byte_frames),
1717	XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
1718	XGBE_STATS2_INFO(tx_1024byte_frames),
1719	XGBE_STATS2_INFO(net_bytes),
1720	XGBE_STATS2_INFO(rx_sof_overruns),
1721	XGBE_STATS2_INFO(rx_mof_overruns),
1722	XGBE_STATS2_INFO(rx_dma_overruns),
1723};
1724
1725#define for_each_intf(i, priv) \
1726	list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)
1727
1728#define for_each_sec_slave(slave, priv) \
1729	list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)
1730
1731#define first_sec_slave(priv)					\
1732	list_first_entry(&priv->secondary_slaves, \
1733			struct gbe_slave, slave_list)
1734
1735static void keystone_get_drvinfo(struct net_device *ndev,
1736				 struct ethtool_drvinfo *info)
1737{
1738	strscpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
1739	strscpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
1740}
1741
1742static u32 keystone_get_msglevel(struct net_device *ndev)
1743{
1744	struct netcp_intf *netcp = netdev_priv(ndev);
1745
1746	return netcp->msg_enable;
1747}
1748
1749static void keystone_set_msglevel(struct net_device *ndev, u32 value)
1750{
1751	struct netcp_intf *netcp = netdev_priv(ndev);
1752
1753	netcp->msg_enable = value;
1754}
1755
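/* The ethtool ops are shared between the 1G and 10G modules; look up
 * the interface data from whichever module owns this netdev.
 */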
1756static struct gbe_intf *keystone_get_intf_data(struct netcp_intf *netcp)
1757{
1758	struct gbe_intf *gbe_intf;
1759
1760	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1761	if (!gbe_intf)
1762		gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
1763
1764	return gbe_intf;
1765}
1766
1767static void keystone_get_stat_strings(struct net_device *ndev,
1768				      uint32_t stringset, uint8_t *data)
1769{
1770	struct netcp_intf *netcp = netdev_priv(ndev);
1771	struct gbe_intf *gbe_intf;
1772	struct gbe_priv *gbe_dev;
1773	int i;
1774
1775	gbe_intf = keystone_get_intf_data(netcp);
1776	if (!gbe_intf)
1777		return;
1778	gbe_dev = gbe_intf->gbe_dev;
1779
1780	switch (stringset) {
1781	case ETH_SS_STATS:
1782		for (i = 0; i < gbe_dev->num_et_stats; i++) {
1783			memcpy(data, gbe_dev->et_stats[i].desc,
1784			       ETH_GSTRING_LEN);
1785			data += ETH_GSTRING_LEN;
1786		}
1787		break;
1788	case ETH_SS_TEST:
1789		break;
1790	}
1791}
1792
1793static int keystone_get_sset_count(struct net_device *ndev, int stringset)
1794{
1795	struct netcp_intf *netcp = netdev_priv(ndev);
1796	struct gbe_intf *gbe_intf;
1797	struct gbe_priv *gbe_dev;
1798
1799	gbe_intf = keystone_get_intf_data(netcp);
1800	if (!gbe_intf)
1801		return -EINVAL;
1802	gbe_dev = gbe_intf->gbe_dev;
1803
1804	switch (stringset) {
1805	case ETH_SS_TEST:
1806		return 0;
1807	case ETH_SS_STATS:
1808		return gbe_dev->num_et_stats;
1809	default:
1810		return -EINVAL;
1811	}
1812}
1813
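/* Reset the software view of one stats module: zero the accumulated
 * counters and latch the current hardware values as the new baseline.
 */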
1814static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
1815{
1816	void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
1817	u32  __iomem *p_stats_entry;
1818	int i;
1819
1820	for (i = 0; i < gbe_dev->num_et_stats; i++) {
1821		if (gbe_dev->et_stats[i].type == stats_mod) {
1822			p_stats_entry = base + gbe_dev->et_stats[i].offset;
1823			gbe_dev->hw_stats[i] = 0;
1824			gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
1825		}
1826	}
1827}
1828
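/* Fold the change since the last read into the 64-bit software
 * counter; the 32-bit hardware counters may wrap between reads.
 */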
1829static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
1830					     int et_stats_entry)
1831{
1832	void __iomem *base = NULL;
1833	u32  __iomem *p_stats_entry;
1834	u32 curr, delta;
1835
1836	/* The hw_stats_regs pointers are already set up to
1837	 * point at the correct base for this stats module.
1838	 */
1839	base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
1840	p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
1841	curr = readl(p_stats_entry);
1842	delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
1843	gbe_dev->hw_stats_prev[et_stats_entry] = curr;
1844	gbe_dev->hw_stats[et_stats_entry] += delta;
1845}
1846
1847static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
1848{
1849	int i;
1850
1851	for (i = 0; i < gbe_dev->num_et_stats; i++) {
1852		gbe_update_hw_stats_entry(gbe_dev, i);
1853
1854		if (data)
1855			data[i] = gbe_dev->hw_stats[i];
1856	}
1857}
1858
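/* Version 1.4 hardware exposes only two stats modules at a time;
 * select which pair (A/B or C/D) is currently mapped in.
 */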
1859static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
1860					       int stats_mod)
1861{
1862	u32 val;
1863
1864	val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1865
1866	switch (stats_mod) {
1867	case GBE_STATSA_MODULE:
1868	case GBE_STATSB_MODULE:
1869		val &= ~GBE_STATS_CD_SEL;
1870		break;
1871	case GBE_STATSC_MODULE:
1872	case GBE_STATSD_MODULE:
1873		val |= GBE_STATS_CD_SEL;
1874		break;
1875	default:
1876		return;
1877	}
1878
1879	/* make the stat module visible */
1880	writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1881}
1882
1883static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
1884{
1885	gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
1886	gbe_reset_mod_stats(gbe_dev, stats_mod);
1887}
1888
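/* Update the counters in two passes: the first half of the et_stats
 * table with modules A/B visible, the second half with C/D visible.
 */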
1889static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
1890{
1891	u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
1892	int et_entry, j, pair;
1893
1894	for (pair = 0; pair < 2; pair++) {
1895		gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
1896						      GBE_STATSC_MODULE :
1897						      GBE_STATSA_MODULE));
1898
1899		for (j = 0; j < half_num_et_stats; j++) {
1900			et_entry = pair * half_num_et_stats + j;
1901			gbe_update_hw_stats_entry(gbe_dev, et_entry);
1902
1903			if (data)
1904				data[et_entry] = gbe_dev->hw_stats[et_entry];
1905		}
1906	}
1907}
1908
1909static void keystone_get_ethtool_stats(struct net_device *ndev,
1910				       struct ethtool_stats *stats,
1911				       uint64_t *data)
1912{
1913	struct netcp_intf *netcp = netdev_priv(ndev);
1914	struct gbe_intf *gbe_intf;
1915	struct gbe_priv *gbe_dev;
1916
1917	gbe_intf = keystone_get_intf_data(netcp);
1918	if (!gbe_intf)
1919		return;
1920
1921	gbe_dev = gbe_intf->gbe_dev;
1922	spin_lock_bh(&gbe_dev->hw_stats_lock);
1923	if (IS_SS_ID_VER_14(gbe_dev))
1924		gbe_update_stats_ver14(gbe_dev, data);
1925	else
1926		gbe_update_stats(gbe_dev, data);
1927	spin_unlock_bh(&gbe_dev->hw_stats_lock);
1928}
1929
1930static int keystone_get_link_ksettings(struct net_device *ndev,
1931				       struct ethtool_link_ksettings *cmd)
1932{
1933	struct netcp_intf *netcp = netdev_priv(ndev);
1934	struct phy_device *phy = ndev->phydev;
1935	struct gbe_intf *gbe_intf;
1936
1937	if (!phy)
1938		return -EINVAL;
1939
1940	gbe_intf = keystone_get_intf_data(netcp);
1941	if (!gbe_intf)
1942		return -EINVAL;
1943
1944	if (!gbe_intf->slave)
1945		return -EINVAL;
1946
1947	phy_ethtool_ksettings_get(phy, cmd);
1948	cmd->base.port = gbe_intf->slave->phy_port_t;
1949
1950	return 0;
1951}
1952
1953static int keystone_set_link_ksettings(struct net_device *ndev,
1954				       const struct ethtool_link_ksettings *cmd)
1955{
1956	struct netcp_intf *netcp = netdev_priv(ndev);
1957	struct phy_device *phy = ndev->phydev;
1958	struct gbe_intf *gbe_intf;
1959	u8 port = cmd->base.port;
1960	u32 advertising, supported;
1961	u32 features;
1962
1963	ethtool_convert_link_mode_to_legacy_u32(&advertising,
1964						cmd->link_modes.advertising);
1965	ethtool_convert_link_mode_to_legacy_u32(&supported,
1966						cmd->link_modes.supported);
1967	features = advertising & supported;
1968
1969	if (!phy)
1970		return -EINVAL;
1971
1972	gbe_intf = keystone_get_intf_data(netcp);
1973	if (!gbe_intf)
1974		return -EINVAL;
1975
1976	if (!gbe_intf->slave)
1977		return -EINVAL;
1978
1979	if (port != gbe_intf->slave->phy_port_t) {
1980		if ((port == PORT_TP) && !(features & ADVERTISED_TP))
1981			return -EINVAL;
1982
1983		if ((port == PORT_AUI) && !(features & ADVERTISED_AUI))
1984			return -EINVAL;
1985
1986		if ((port == PORT_BNC) && !(features & ADVERTISED_BNC))
1987			return -EINVAL;
1988
1989		if ((port == PORT_MII) && !(features & ADVERTISED_MII))
1990			return -EINVAL;
1991
1992		if ((port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
1993			return -EINVAL;
1994	}
1995
1996	gbe_intf->slave->phy_port_t = port;
1997	return phy_ethtool_ksettings_set(phy, cmd);
1998}
1999
2000#if IS_ENABLED(CONFIG_TI_CPTS)
2001static int keystone_get_ts_info(struct net_device *ndev,
2002				struct ethtool_ts_info *info)
2003{
2004	struct netcp_intf *netcp = netdev_priv(ndev);
2005	struct gbe_intf *gbe_intf;
2006
2007	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
2008	if (!gbe_intf || !gbe_intf->gbe_dev->cpts)
2009		return -EINVAL;
2010
2011	info->so_timestamping =
2012		SOF_TIMESTAMPING_TX_HARDWARE |
2013		SOF_TIMESTAMPING_TX_SOFTWARE |
2014		SOF_TIMESTAMPING_RX_HARDWARE |
2015		SOF_TIMESTAMPING_RX_SOFTWARE |
2016		SOF_TIMESTAMPING_SOFTWARE |
2017		SOF_TIMESTAMPING_RAW_HARDWARE;
2018	info->phc_index = gbe_intf->gbe_dev->cpts->phc_index;
2019	info->tx_types =
2020		(1 << HWTSTAMP_TX_OFF) |
2021		(1 << HWTSTAMP_TX_ON);
2022	info->rx_filters =
2023		(1 << HWTSTAMP_FILTER_NONE) |
2024		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
2025		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2026	return 0;
2027}
2028#else
2029static int keystone_get_ts_info(struct net_device *ndev,
2030				struct ethtool_ts_info *info)
2031{
2032	info->so_timestamping =
2033		SOF_TIMESTAMPING_TX_SOFTWARE |
2034		SOF_TIMESTAMPING_RX_SOFTWARE |
2035		SOF_TIMESTAMPING_SOFTWARE;
2036	info->phc_index = -1;
2037	info->tx_types = 0;
2038	info->rx_filters = 0;
2039	return 0;
2040}
2041#endif /* CONFIG_TI_CPTS */
2042
2043static const struct ethtool_ops keystone_ethtool_ops = {
2044	.get_drvinfo		= keystone_get_drvinfo,
2045	.get_link		= ethtool_op_get_link,
2046	.get_msglevel		= keystone_get_msglevel,
2047	.set_msglevel		= keystone_set_msglevel,
2048	.get_strings		= keystone_get_stat_strings,
2049	.get_sset_count		= keystone_get_sset_count,
2050	.get_ethtool_stats	= keystone_get_ethtool_stats,
2051	.get_link_ksettings	= keystone_get_link_ksettings,
2052	.set_link_ksettings	= keystone_set_link_ksettings,
2053	.get_ts_info		= keystone_get_ts_info,
2054};
2055
2056static void gbe_set_slave_mac(struct gbe_slave *slave,
2057			      struct gbe_intf *gbe_intf)
2058{
2059	struct net_device *ndev = gbe_intf->ndev;
2060
2061	writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
2062	writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
2063}
2064
2065static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
2066{
2067	if (priv->host_port == 0)
2068		return slave_num + 1;
2069
2070	return slave_num;
2071}
2072
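/* Apply a link up/down transition: program mac_control for the PHY
 * speed, set the ALE port state and, for links without a MAC-PHY
 * attachment, update the netdev carrier directly.
 */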
2073static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
2074					  struct net_device *ndev,
2075					  struct gbe_slave *slave,
2076					  int up)
2077{
2078	struct phy_device *phy = slave->phy;
2079	u32 mac_control = 0;
2080
2081	if (up) {
2082		mac_control = slave->mac_control;
2083		if (phy && (phy->speed == SPEED_1000)) {
2084			mac_control |= MACSL_GIG_MODE;
2085			mac_control &= ~MACSL_XGIG_MODE;
2086		} else if (phy && (phy->speed == SPEED_10000)) {
2087			mac_control |= MACSL_XGIG_MODE;
2088			mac_control &= ~MACSL_GIG_MODE;
2089		}
2090
2091		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
2092						 mac_control));
2093
2094		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2095				     ALE_PORT_STATE,
2096				     ALE_PORT_STATE_FORWARD);
2097
2098		if (ndev && slave->open &&
2099		    ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
2100		    (slave->link_interface != RGMII_LINK_MAC_PHY) &&
2101		    (slave->link_interface != XGMII_LINK_MAC_PHY)))
2102			netif_carrier_on(ndev);
2103	} else {
2104		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
2105						 mac_control));
2106		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2107				     ALE_PORT_STATE,
2108				     ALE_PORT_STATE_DISABLE);
2109		if (ndev &&
2110		    ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
2111		    (slave->link_interface != RGMII_LINK_MAC_PHY) &&
2112		    (slave->link_interface != XGMII_LINK_MAC_PHY)))
2113			netif_carrier_off(ndev);
2114	}
2115
2116	if (phy)
2117		phy_print_status(phy);
2118}
2119
2120static bool gbe_phy_link_status(struct gbe_slave *slave)
2121{
2122	return !slave->phy || slave->phy->link;
2123}
2124
2125#define RGMII_REG_STATUS_LINK	BIT(0)
2126
2127static void netcp_2u_rgmii_get_port_link(struct gbe_priv *gbe_dev, bool *status)
2128{
2129	u32 val = 0;
2130
2131	val = readl(GBE_REG_ADDR(gbe_dev, ss_regs, rgmii_status));
2132	*status = !!(val & RGMII_REG_STATUS_LINK);
2133}
2134
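/* Combine the switch-level (SGMII/RGMII status) link state with the
 * PHY link state and act only when the combined state changes.
 */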
2135static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
2136					  struct gbe_slave *slave,
2137					  struct net_device *ndev)
2138{
2139	bool sw_link_state = true, phy_link_state;
2140	int sp = slave->slave_num, link_state;
2141
2142	if (!slave->open)
2143		return;
2144
2145	if (SLAVE_LINK_IS_RGMII(slave))
2146		netcp_2u_rgmii_get_port_link(gbe_dev,
2147					     &sw_link_state);
2148	if (SLAVE_LINK_IS_SGMII(slave))
2149		sw_link_state =
2150		netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
2151
2152	phy_link_state = gbe_phy_link_status(slave);
2153	link_state = phy_link_state & sw_link_state;
2154
2155	if (atomic_xchg(&slave->link_state, link_state) != link_state)
2156		netcp_ethss_link_state_action(gbe_dev, ndev, slave,
2157					      link_state);
2158}
2159
2160static void xgbe_adjust_link(struct net_device *ndev)
2161{
2162	struct netcp_intf *netcp = netdev_priv(ndev);
2163	struct gbe_intf *gbe_intf;
2164
2165	gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
2166	if (!gbe_intf)
2167		return;
2168
2169	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2170				      ndev);
2171}
2172
2173static void gbe_adjust_link(struct net_device *ndev)
2174{
2175	struct netcp_intf *netcp = netdev_priv(ndev);
2176	struct gbe_intf *gbe_intf;
2177
2178	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
2179	if (!gbe_intf)
2180		return;
2181
2182	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2183				      ndev);
2184}
2185
2186static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
2187{
2188	struct gbe_priv *gbe_dev = netdev_priv(ndev);
2189	struct gbe_slave *slave;
2190
2191	for_each_sec_slave(slave, gbe_dev)
2192		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2193}
2194
2195/* Reset EMAC
2196 * Soft reset is set and polled until clear, or until a timeout occurs
2197 */
2198static int gbe_port_reset(struct gbe_slave *slave)
2199{
2200	u32 i, v;
2201
2202	/* Set the soft reset bit */
2203	writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));
2204
2205	/* Wait for the bit to clear */
2206	for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
2207		v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
2208		if ((v & SOFT_RESET_MASK) != SOFT_RESET)
2209			return 0;
2210	}
2211
2212	/* Timeout on the reset */
2213	return GMACSL_RET_WARN_RESET_INCOMPLETE;
2214}
2215
2216/* Configure EMAC */
2217static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2218			    int max_rx_len)
2219{
2220	void __iomem *rx_maxlen_reg;
2221	u32 xgmii_mode;
2222
2223	if (max_rx_len > NETCP_MAX_FRAME_SIZE)
2224		max_rx_len = NETCP_MAX_FRAME_SIZE;
2225
2226	/* Enable correct MII mode at SS level */
2227	if (IS_SS_ID_XGBE(gbe_dev) &&
2228	    (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
2229		xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
2230		xgmii_mode |= (1 << slave->slave_num);
2231		writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
2232	}
2233
2234	if (IS_SS_ID_MU(gbe_dev))
2235		rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
2236	else
2237		rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);
2238
2239	writel(max_rx_len, rx_maxlen_reg);
2240	writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
2241}
2242
2243static void gbe_sgmii_rtreset(struct gbe_priv *priv,
2244			      struct gbe_slave *slave, bool set)
2245{
2246	if (SLAVE_LINK_IS_XGMII(slave))
2247		return;
2248
2249	netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num),
2250			    slave->slave_num, set);
2251}
2252
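/* Quiesce a slave port: hold it in reset, disable ALE forwarding,
 * remove the broadcast entry and release the attached PHY, if any.
 */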
2253static void gbe_slave_stop(struct gbe_intf *intf)
2254{
2255	struct gbe_priv *gbe_dev = intf->gbe_dev;
2256	struct gbe_slave *slave = intf->slave;
2257
2258	if (!IS_SS_ID_2U(gbe_dev))
2259		gbe_sgmii_rtreset(gbe_dev, slave, true);
2260	gbe_port_reset(slave);
2261	/* Disable forwarding */
2262	cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2263			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
2264	cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
2265			   1 << slave->port_num, 0, 0);
2266
2267	if (!slave->phy)
2268		return;
2269
2270	phy_stop(slave->phy);
2271	phy_disconnect(slave->phy);
2272	slave->phy = NULL;
2273}
2274
2275static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
2276{
2277	if (SLAVE_LINK_IS_XGMII(slave))
2278		return;
2279
2280	netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num);
2281	netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num,
2282			   slave->link_interface);
2283}
2284
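/* Bring up a slave port: reset and configure the MAC, program the
 * port MAC address, enable ALE forwarding and, for MAC-PHY link
 * interfaces, connect and start the PHY.
 */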
2285static int gbe_slave_open(struct gbe_intf *gbe_intf)
2286{
2287	struct gbe_priv *priv = gbe_intf->gbe_dev;
2288	struct gbe_slave *slave = gbe_intf->slave;
2289	phy_interface_t phy_mode;
2290	bool has_phy = false;
2291	int err;
2292
2293	void (*hndlr)(struct net_device *) = gbe_adjust_link;
2294
2295	if (!IS_SS_ID_2U(priv))
2296		gbe_sgmii_config(priv, slave);
2297	gbe_port_reset(slave);
2298	if (!IS_SS_ID_2U(priv))
2299		gbe_sgmii_rtreset(priv, slave, false);
2300	gbe_port_config(priv, slave, priv->rx_packet_max);
2301	gbe_set_slave_mac(slave, gbe_intf);
2302	/* For the NU & 2U switches, map all VLAN priorities to zero,
2303	 * since only priority 0 is configured for use.
2304	 */
2305	if (IS_SS_ID_MU(priv))
2306		writel(HOST_TX_PRI_MAP_DEFAULT,
2307		       GBE_REG_ADDR(slave, port_regs, rx_pri_map));
2308
2309	/* enable forwarding */
2310	cpsw_ale_control_set(priv->ale, slave->port_num,
2311			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2312	cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
2313			   1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);
2314
2315	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
2316		has_phy = true;
2317		phy_mode = PHY_INTERFACE_MODE_SGMII;
2318		slave->phy_port_t = PORT_MII;
2319	} else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
2320		has_phy = true;
2321		err = of_get_phy_mode(slave->node, &phy_mode);
2322		/* if phy-mode is not present, default to
2323		 * PHY_INTERFACE_MODE_RGMII
2324		 */
2325		if (err)
2326			phy_mode = PHY_INTERFACE_MODE_RGMII;
2327
2328		if (!phy_interface_mode_is_rgmii(phy_mode)) {
2329			dev_err(priv->dev,
2330				"Unsupported phy mode %d\n", phy_mode);
2331			return -EINVAL;
2332		}
2333		slave->phy_port_t = PORT_MII;
2334	} else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
2335		has_phy = true;
2336		phy_mode = PHY_INTERFACE_MODE_NA;
2337		slave->phy_port_t = PORT_FIBRE;
2338	}
2339
2340	if (has_phy) {
2341		if (IS_SS_ID_XGBE(priv))
2342			hndlr = xgbe_adjust_link;
2343
2344		slave->phy = of_phy_connect(gbe_intf->ndev,
2345					    slave->phy_node,
2346					    hndlr, 0,
2347					    phy_mode);
2348		if (!slave->phy) {
2349			dev_err(priv->dev, "phy not found on slave %d\n",
2350				slave->slave_num);
2351			return -ENODEV;
2352		}
2353		dev_dbg(priv->dev, "phy found: id is: 0x%s\n",
2354			phydev_name(slave->phy));
2355		phy_start(slave->phy);
2356	}
2357	return 0;
2358}
2359
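/* One-time host port and ALE setup: host TX priority map, RX max
 * length, ALE bypass mode and the default VLAN/multicast flood masks.
 */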
2360static void gbe_init_host_port(struct gbe_priv *priv)
2361{
2362	int bypass_en = 1;
2363
2364	/* Host Tx Pri */
2365	if (IS_SS_ID_NU(priv) || IS_SS_ID_XGBE(priv))
2366		writel(HOST_TX_PRI_MAP_DEFAULT,
2367		       GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));
2368
2369	/* Max length register */
2370	writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
2371						  rx_maxlen));
2372
2373	cpsw_ale_start(priv->ale);
2374
2375	if (priv->enable_ale)
2376		bypass_en = 0;
2377
2378	cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);
2379
2380	cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);
2381
2382	cpsw_ale_control_set(priv->ale, priv->host_port,
2383			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2384
2385	cpsw_ale_control_set(priv->ale, 0,
2386			     ALE_PORT_UNKNOWN_VLAN_MEMBER,
2387			     GBE_PORT_MASK(priv->ale_ports));
2388
2389	cpsw_ale_control_set(priv->ale, 0,
2390			     ALE_PORT_UNKNOWN_MCAST_FLOOD,
2391			     GBE_PORT_MASK(priv->ale_ports - 1));
2392
2393	cpsw_ale_control_set(priv->ale, 0,
2394			     ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
2395			     GBE_PORT_MASK(priv->ale_ports));
2396
2397	cpsw_ale_control_set(priv->ale, 0,
2398			     ALE_PORT_UNTAGGED_EGRESS,
2399			     GBE_PORT_MASK(priv->ale_ports));
2400}
2401
2402static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2403{
2404	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2405	u16 vlan_id;
2406
2407	cpsw_ale_add_mcast(gbe_dev->ale, addr,
2408			   GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
2409			   ALE_MCAST_FWD_2);
2410	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2411		cpsw_ale_add_mcast(gbe_dev->ale, addr,
2412				   GBE_PORT_MASK(gbe_dev->ale_ports),
2413				   ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
2414	}
2415}
2416
2417static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2418{
2419	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2420	u16 vlan_id;
2421
2422	cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2423
2424	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
2425		cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2426				   ALE_VLAN, vlan_id);
2427}
2428
2429static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2430{
2431	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2432	u16 vlan_id;
2433
2434	cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);
2435
2436	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2437		cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
2438	}
2439}
2440
2441static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2442{
2443	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2444	u16 vlan_id;
2445
2446	cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2447
2448	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2449		cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2450				   ALE_VLAN, vlan_id);
2451	}
2452}
2453
2454static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
2455{
2456	struct gbe_intf *gbe_intf = intf_priv;
2457	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2458
2459	dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
2460		naddr->addr, naddr->type);
2461
2462	switch (naddr->type) {
2463	case ADDR_MCAST:
2464	case ADDR_BCAST:
2465		gbe_add_mcast_addr(gbe_intf, naddr->addr);
2466		break;
2467	case ADDR_UCAST:
2468	case ADDR_DEV:
2469		gbe_add_ucast_addr(gbe_intf, naddr->addr);
2470		break;
2471	case ADDR_ANY:
2472		/* nothing to do for promiscuous */
2473	default:
2474		break;
2475	}
2476
2477	return 0;
2478}
2479
2480static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
2481{
2482	struct gbe_intf *gbe_intf = intf_priv;
2483	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2484
2485	dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
2486		naddr->addr, naddr->type);
2487
2488	switch (naddr->type) {
2489	case ADDR_MCAST:
2490	case ADDR_BCAST:
2491		gbe_del_mcast_addr(gbe_intf, naddr->addr);
2492		break;
2493	case ADDR_UCAST:
2494	case ADDR_DEV:
2495		gbe_del_ucast_addr(gbe_intf, naddr->addr);
2496		break;
2497	case ADDR_ANY:
2498		/* nothing to do for promiscuous */
2499	default:
2500		break;
2501	}
2502
2503	return 0;
2504}
2505
2506static int gbe_add_vid(void *intf_priv, int vid)
2507{
2508	struct gbe_intf *gbe_intf = intf_priv;
2509	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2510
2511	set_bit(vid, gbe_intf->active_vlans);
2512
2513	cpsw_ale_add_vlan(gbe_dev->ale, vid,
2514			  GBE_PORT_MASK(gbe_dev->ale_ports),
2515			  GBE_MASK_NO_PORTS,
2516			  GBE_PORT_MASK(gbe_dev->ale_ports),
2517			  GBE_PORT_MASK(gbe_dev->ale_ports - 1));
2518
2519	return 0;
2520}
2521
2522static int gbe_del_vid(void *intf_priv, int vid)
2523{
2524	struct gbe_intf *gbe_intf = intf_priv;
2525	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2526
2527	cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
2528	clear_bit(vid, gbe_intf->active_vlans);
2529	return 0;
2530}
2531
2532#if IS_ENABLED(CONFIG_TI_CPTS)
2533
2534static void gbe_txtstamp(void *context, struct sk_buff *skb)
2535{
2536	struct gbe_intf *gbe_intf = context;
2537	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2538
2539	cpts_tx_timestamp(gbe_dev->cpts, skb);
2540}
2541
2542static bool gbe_need_txtstamp(struct gbe_intf *gbe_intf,
2543			      const struct netcp_packet *p_info)
2544{
2545	struct sk_buff *skb = p_info->skb;
2546
2547	return cpts_can_timestamp(gbe_intf->gbe_dev->cpts, skb);
2548}
2549
2550static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
2551				 struct netcp_packet *p_info)
2552{
2553	struct phy_device *phydev = p_info->skb->dev->phydev;
2554	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2555
2556	if (!(skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) ||
2557	    !gbe_dev->tx_ts_enabled)
2558		return 0;
2559
2560	/* If the PHY provides the txtstamp API, assume it will handle
2561	 * timestamping.  Mark the skb here because skb_tx_timestamp()
2562	 * is called only after all the txhooks have run.
2563	 */
2564	if (phy_has_txtstamp(phydev)) {
2565		skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
2566		return 0;
2567	}
2568
2569	if (gbe_need_txtstamp(gbe_intf, p_info)) {
2570		p_info->txtstamp = gbe_txtstamp;
2571		p_info->ts_context = (void *)gbe_intf;
2572		skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
2573	}
2574
2575	return 0;
2576}
2577
2578static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
2579{
2580	struct phy_device *phydev = p_info->skb->dev->phydev;
2581	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2582
2583	if (p_info->rxtstamp_complete)
2584		return 0;
2585
2586	if (phy_has_rxtstamp(phydev)) {
2587		p_info->rxtstamp_complete = true;
2588		return 0;
2589	}
2590
2591	if (gbe_dev->rx_ts_enabled)
2592		cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
2593
2594	p_info->rxtstamp_complete = true;
2595
2596	return 0;
2597}
2598
2599static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr)
2600{
2601	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2602	struct cpts *cpts = gbe_dev->cpts;
2603	struct hwtstamp_config cfg;
2604
2605	if (!cpts)
2606		return -EOPNOTSUPP;
2607
2608	cfg.flags = 0;
2609	cfg.tx_type = gbe_dev->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
2610	cfg.rx_filter = gbe_dev->rx_ts_enabled;
2611
2612	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2613}
2614
2615static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
2616{
2617	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2618	struct gbe_slave *slave = gbe_intf->slave;
2619	u32 ts_en, seq_id, ctl;
2620
2621	if (!gbe_dev->rx_ts_enabled &&
2622	    !gbe_dev->tx_ts_enabled) {
2623		writel(0, GBE_REG_ADDR(slave, port_regs, ts_ctl));
2624		return;
2625	}
2626
2627	seq_id = (30 << TS_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
2628	ts_en = EVENT_MSG_BITS << TS_MSG_TYPE_EN_SHIFT;
2629	ctl = ETH_P_1588 | TS_TTL_NONZERO |
2630		(slave->ts_ctl.dst_port_map << TS_CTL_DST_PORT_SHIFT) |
2631		(slave->ts_ctl.uni ?  TS_UNI_EN :
2632			slave->ts_ctl.maddr_map << TS_CTL_MADDR_SHIFT);
2633
2634	if (gbe_dev->tx_ts_enabled)
2635		ts_en |= (TS_TX_ANX_ALL_EN | TS_TX_VLAN_LT1_EN);
2636
2637	if (gbe_dev->rx_ts_enabled)
2638		ts_en |= (TS_RX_ANX_ALL_EN | TS_RX_VLAN_LT1_EN);
2639
2640	writel(ts_en,  GBE_REG_ADDR(slave, port_regs, ts_ctl));
2641	writel(seq_id, GBE_REG_ADDR(slave, port_regs, ts_seq_ltype));
2642	writel(ctl,    GBE_REG_ADDR(slave, port_regs, ts_ctl_ltype2));
2643}
2644
2645static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
2646{
2647	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2648	struct cpts *cpts = gbe_dev->cpts;
2649	struct hwtstamp_config cfg;
2650
2651	if (!cpts)
2652		return -EOPNOTSUPP;
2653
2654	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
2655		return -EFAULT;
2656
2657	switch (cfg.tx_type) {
2658	case HWTSTAMP_TX_OFF:
2659		gbe_dev->tx_ts_enabled = 0;
2660		break;
2661	case HWTSTAMP_TX_ON:
2662		gbe_dev->tx_ts_enabled = 1;
2663		break;
2664	default:
2665		return -ERANGE;
2666	}
2667
2668	switch (cfg.rx_filter) {
2669	case HWTSTAMP_FILTER_NONE:
2670		gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_NONE;
2671		break;
2672	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2673	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2674	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2675		gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
2676		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
2677		break;
2678	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2679	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2680	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2681	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2682	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2683	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2684	case HWTSTAMP_FILTER_PTP_V2_EVENT:
2685	case HWTSTAMP_FILTER_PTP_V2_SYNC:
2686	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2687		gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
2688		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
2689		break;
2690	default:
2691		return -ERANGE;
2692	}
2693
2694	gbe_hwtstamp(gbe_intf);
2695
2696	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2697}
2698
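/* CPTS registration is reference counted: the clock is registered when
 * the first interface is opened and unregistered when the last one is
 * closed.
 */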
2699static void gbe_register_cpts(struct gbe_priv *gbe_dev)
2700{
2701	if (!gbe_dev->cpts)
2702		return;
2703
2704	if (gbe_dev->cpts_registered > 0)
2705		goto done;
2706
2707	if (cpts_register(gbe_dev->cpts)) {
2708		dev_err(gbe_dev->dev, "error registering cpts device\n");
2709		return;
2710	}
2711
2712done:
2713	++gbe_dev->cpts_registered;
2714}
2715
2716static void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
2717{
2718	if (!gbe_dev->cpts || (gbe_dev->cpts_registered <= 0))
2719		return;
2720
2721	if (--gbe_dev->cpts_registered)
2722		return;
2723
2724	cpts_unregister(gbe_dev->cpts);
2725}
2726#else
2727static inline int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
2728					struct netcp_packet *p_info)
2729{
2730	return 0;
2731}
2732
2733static inline int gbe_rxtstamp(struct gbe_intf *gbe_intf,
2734			       struct netcp_packet *p_info)
2735{
2736	return 0;
2737}
2738
2739static inline int gbe_hwtstamp(struct gbe_intf *gbe_intf,
2740			       struct ifreq *ifr, int cmd)
2741{
2742	return -EOPNOTSUPP;
2743}
2744
2745static inline void gbe_register_cpts(struct gbe_priv *gbe_dev)
2746{
2747}
2748
2749static inline void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
2750{
2751}
2752
2753static inline int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *req)
2754{
2755	return -EOPNOTSUPP;
2756}
2757
2758static inline int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *req)
2759{
2760	return -EOPNOTSUPP;
2761}
2762#endif /* CONFIG_TI_CPTS */
2763
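/* Toggle promiscuous operation: with promisc on, address learning is
 * disabled on all ports, the ALE table is aged out, multicast entries
 * are flushed and all unicast traffic is flooded to the host port.
 */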
2764static int gbe_set_rx_mode(void *intf_priv, bool promisc)
2765{
2766	struct gbe_intf *gbe_intf = intf_priv;
2767	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2768	struct cpsw_ale *ale = gbe_dev->ale;
2769	unsigned long timeout;
2770	int i, ret = -ETIMEDOUT;
2771
2772	/* Disable (promisc = 1) or enable (promisc = 0) learning on all
2773	 * ports (the host is port 0 and the slaves are ports 1 and up).
2774	 */
2775	for (i = 0; i <= gbe_dev->num_slaves; i++) {
2776		cpsw_ale_control_set(ale, i,
2777				     ALE_PORT_NOLEARN, !!promisc);
2778		cpsw_ale_control_set(ale, i,
2779				     ALE_PORT_NO_SA_UPDATE, !!promisc);
2780	}
2781
2782	if (!promisc) {
2783		/* Don't Flood All Unicast Packets to Host port */
2784		cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
2785		dev_vdbg(gbe_dev->dev, "promiscuous mode disabled\n");
2786		return 0;
2787	}
2788
2789	timeout = jiffies + HZ;
2790
2791	/* Clear All Untouched entries */
2792	cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
2793	do {
2794		cpu_relax();
2795		if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT)) {
2796			ret = 0;
2797			break;
2798		}
2799
2800	} while (time_after(timeout, jiffies));
2801
2802	/* Make sure it is not a false timeout */
2803	if (ret && !cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
2804		return ret;
2805
2806	cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
2807
2808	/* Clear all mcast from ALE */
2809	cpsw_ale_flush_multicast(ale,
2810				 GBE_PORT_MASK(gbe_dev->ale_ports),
2811				 -1);
2812
2813	/* Flood All Unicast Packets to Host port */
2814	cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
2815	dev_vdbg(gbe_dev->dev, "promiscuous mode enabled\n");
2816	return ret;
2817}
2818
2819static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
2820{
2821	struct gbe_intf *gbe_intf = intf_priv;
2822	struct phy_device *phy = gbe_intf->slave->phy;
2823
2824	if (!phy_has_hwtstamp(phy)) {
2825		switch (cmd) {
2826		case SIOCGHWTSTAMP:
2827			return gbe_hwtstamp_get(gbe_intf, req);
2828		case SIOCSHWTSTAMP:
2829			return gbe_hwtstamp_set(gbe_intf, req);
2830		}
2831	}
2832
2833	if (phy)
2834		return phy_mii_ioctl(phy, req, cmd);
2835
2836	return -EOPNOTSUPP;
2837}
2838
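/* Periodic housekeeping: poll the link state of all open interfaces
 * and secondary ports, and accumulate the hardware statistics before
 * the 32-bit counters can wrap.
 */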
2839static void netcp_ethss_timer(struct timer_list *t)
2840{
2841	struct gbe_priv *gbe_dev = from_timer(gbe_dev, t, timer);
2842	struct gbe_intf *gbe_intf;
2843	struct gbe_slave *slave;
2844
2845	/* Check & update SGMII link state of interfaces */
2846	for_each_intf(gbe_intf, gbe_dev) {
2847		if (!gbe_intf->slave->open)
2848			continue;
2849		netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
2850					      gbe_intf->ndev);
2851	}
2852
2853	/* Check & update SGMII link state of secondary ports */
2854	for_each_sec_slave(slave, gbe_dev) {
2855		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2856	}
2857
2858	/* The timer runs in BH context, so there is no need to disable BHs around the lock */
2859	spin_lock(&gbe_dev->hw_stats_lock);
2860
2861	if (IS_SS_ID_VER_14(gbe_dev))
2862		gbe_update_stats_ver14(gbe_dev, NULL);
2863	else
2864		gbe_update_stats(gbe_dev, NULL);
2865
2866	spin_unlock(&gbe_dev->hw_stats_lock);
2867
2868	gbe_dev->timer.expires	= jiffies + GBE_TIMER_INTERVAL;
2869	add_timer(&gbe_dev->timer);
2870}
2871
2872static int gbe_txhook(int order, void *data, struct netcp_packet *p_info)
2873{
2874	struct gbe_intf *gbe_intf = data;
2875
2876	p_info->tx_pipe = &gbe_intf->tx_pipe;
2877
2878	return gbe_txtstamp_mark_pkt(gbe_intf, p_info);
2879}
2880
2881static int gbe_rxhook(int order, void *data, struct netcp_packet *p_info)
2882{
2883	struct gbe_intf *gbe_intf = data;
2884
2885	return gbe_rxtstamp(gbe_intf, p_info);
2886}
2887
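/* netcp interface open callback: program the switch-wide control and
 * statistics registers, bring up the slave port and register the TX/RX
 * hooks used for timestamping and directed-to-port transmission.
 */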
2888static int gbe_open(void *intf_priv, struct net_device *ndev)
2889{
2890	struct gbe_intf *gbe_intf = intf_priv;
2891	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2892	struct netcp_intf *netcp = netdev_priv(ndev);
2893	struct gbe_slave *slave = gbe_intf->slave;
2894	int port_num = slave->port_num;
2895	u32 reg, val;
2896	int ret;
2897
2898	reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
2899	dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
2900		GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
2901		GBE_RTL_VERSION(reg), GBE_IDENT(reg));
2902
2903	/* For 10G and for NetCP 1.5, send packets directed to a port via the tag info */
2904	if (IS_SS_ID_XGBE(gbe_dev) || IS_SS_ID_MU(gbe_dev))
2905		gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;
2906
2907	if (gbe_dev->enable_ale)
2908		gbe_intf->tx_pipe.switch_to_port = 0;
2909	else
2910		gbe_intf->tx_pipe.switch_to_port = port_num;
2911
2912	dev_dbg(gbe_dev->dev,
2913		"opened TX channel %s: %p with to port %d, flags %d\n",
2914		gbe_intf->tx_pipe.dma_chan_name,
2915		gbe_intf->tx_pipe.dma_channel,
2916		gbe_intf->tx_pipe.switch_to_port,
2917		gbe_intf->tx_pipe.flags);
2918
2919	gbe_slave_stop(gbe_intf);
2920
2921	/* disable priority elevation and enable statistics on all ports */
2922	writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
2923
2924	/* Control register */
2925	val = GBE_CTL_P0_ENABLE;
2926	if (IS_SS_ID_MU(gbe_dev)) {
2927		val |= ETH_SW_CTL_P0_TX_CRC_REMOVE;
2928		netcp->hw_cap = ETH_SW_CAN_REMOVE_ETH_FCS;
2929	}
2930	writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, control));
2931
2932	/* All statistics enabled and STAT AB visible by default */
2933	writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
2934						    stat_port_en));
2935
2936	ret = gbe_slave_open(gbe_intf);
2937	if (ret)
2938		goto fail;
2939
2940	netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
2941	netcp_register_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
2942
2943	slave->open = true;
2944	netcp_ethss_update_link_state(gbe_dev, slave, ndev);
2945
2946	gbe_register_cpts(gbe_dev);
2947
2948	return 0;
2949
2950fail:
2951	gbe_slave_stop(gbe_intf);
2952	return ret;
2953}
2954
2955static int gbe_close(void *intf_priv, struct net_device *ndev)
2956{
2957	struct gbe_intf *gbe_intf = intf_priv;
2958	struct netcp_intf *netcp = netdev_priv(ndev);
2959	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2960
2961	gbe_unregister_cpts(gbe_dev);
2962
2963	gbe_slave_stop(gbe_intf);
2964
2965	netcp_unregister_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
2966	netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
2967
2968	gbe_intf->slave->open = false;
2969	atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
2970	return 0;
2971}
2972
2973#if IS_ENABLED(CONFIG_TI_CPTS)
2974static void init_slave_ts_ctl(struct gbe_slave *slave)
2975{
2976	slave->ts_ctl.uni = 1;
2977	slave->ts_ctl.dst_port_map =
2978		(TS_CTL_DST_PORT >> TS_CTL_DST_PORT_SHIFT) & 0x3;
2979	slave->ts_ctl.maddr_map =
2980		(TS_CTL_MADDR_ALL >> TS_CTL_MADDR_SHIFT) & 0x1f;
2981}
2982
2983#else
2984static void init_slave_ts_ctl(struct gbe_slave *slave)
2985{
2986}
2987#endif /* CONFIG_TI_CPTS */
2988
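/* Parse one slave port node from the device tree and derive the
 * register block pointers and offsets for the detected subsystem.
 */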
2989static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2990		      struct device_node *node)
2991{
2992	int port_reg_num;
2993	u32 port_reg_ofs, emac_reg_ofs;
2994	u32 port_reg_blk_sz, emac_reg_blk_sz;
2995
2996	if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
2997		dev_err(gbe_dev->dev, "missing slave-port parameter\n");
2998		return -EINVAL;
2999	}
3000
3001	if (of_property_read_u32(node, "link-interface",
3002				 &slave->link_interface)) {
3003		dev_warn(gbe_dev->dev,
3004			 "missing link-interface value defaulting to 1G mac-phy link\n");
3005		slave->link_interface = SGMII_LINK_MAC_PHY;
3006	}
3007
3008	slave->node = node;
3009	slave->open = false;
3010	if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
3011	    (slave->link_interface == RGMII_LINK_MAC_PHY) ||
3012	    (slave->link_interface == XGMII_LINK_MAC_PHY))
3013		slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
3014	slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
3015
3016	if (slave->link_interface >= XGMII_LINK_MAC_PHY)
3017		slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
3018	else
3019		slave->mac_control = GBE_DEF_1G_MAC_CONTROL;
3020
3021	/* The EMAC register blocks are memory-mapped contiguously, but the port register blocks are not */
3022	port_reg_num = slave->slave_num;
3023	if (IS_SS_ID_VER_14(gbe_dev)) {
3024		if (slave->slave_num > 1) {
3025			port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
3026			port_reg_num -= 2;
3027		} else {
3028			port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
3029		}
3030		emac_reg_ofs = GBE13_EMAC_OFFSET;
3031		port_reg_blk_sz = 0x30;
3032		emac_reg_blk_sz = 0x40;
3033	} else if (IS_SS_ID_MU(gbe_dev)) {
3034		port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
3035		emac_reg_ofs = GBENU_EMAC_OFFSET;
3036		port_reg_blk_sz = 0x1000;
3037		emac_reg_blk_sz = 0x1000;
3038	} else if (IS_SS_ID_XGBE(gbe_dev)) {
3039		port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
3040		emac_reg_ofs = XGBE10_EMAC_OFFSET;
3041		port_reg_blk_sz = 0x30;
3042		emac_reg_blk_sz = 0x40;
3043	} else {
3044		dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
3045			gbe_dev->ss_version);
3046		return -EINVAL;
3047	}
3048
3049	slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
3050				(port_reg_blk_sz * port_reg_num);
3051	slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
3052				(emac_reg_blk_sz * slave->slave_num);
3053
3054	if (IS_SS_ID_VER_14(gbe_dev)) {
3055		/* Initialize slave port register offsets */
3056		GBE_SET_REG_OFS(slave, port_regs, port_vlan);
3057		GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
3058		GBE_SET_REG_OFS(slave, port_regs, sa_lo);
3059		GBE_SET_REG_OFS(slave, port_regs, sa_hi);
3060		GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
3061		GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3062		GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
3063		GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3064		GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
3065
3066		/* Initialize EMAC register offsets */
3067		GBE_SET_REG_OFS(slave, emac_regs, mac_control);
3068		GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
3069		GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
3070
3071	} else if (IS_SS_ID_MU(gbe_dev)) {
3072		/* Initialize slave port register offsets */
3073		GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
3074		GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
3075		GBENU_SET_REG_OFS(slave, port_regs, rx_pri_map);
3076		GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
3077		GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
3078		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
3079		GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3080		GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
3081		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3082		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
3083		GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);
3084
3085		/* Initialize EMAC register offsets */
3086		GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
3087		GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);
3088
3089	} else if (IS_SS_ID_XGBE(gbe_dev)) {
3090		/* Initialize slave port register offsets */
3091		XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
3092		XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
3093		XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
3094		XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
3095		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
3096		XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3097		XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
3098		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3099		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
3100
3101		/* Initialize EMAC register offsets */
3102		XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
3103		XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
3104		XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
3105	}
3106
3107	atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
3108
3109	init_slave_ts_ctl(slave);
3110	return 0;
3111}
3112
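/* Secondary slave ports are switch ports with no network interface of
 * their own; configure them once here and, for MAC-PHY links, attach
 * their PHYs through a dummy netdev.
 */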
3113static void init_secondary_ports(struct gbe_priv *gbe_dev,
3114				 struct device_node *node)
3115{
3116	struct device *dev = gbe_dev->dev;
3117	phy_interface_t phy_mode;
3118	struct gbe_priv **priv;
3119	struct device_node *port;
3120	struct gbe_slave *slave;
3121	bool mac_phy_link = false;
3122
3123	for_each_child_of_node(node, port) {
3124		slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
3125		if (!slave) {
3126			dev_err(dev, "memory alloc failed for secondary port(%pOFn), skipping...\n",
3127				port);
3128			continue;
3129		}
3130
3131		if (init_slave(gbe_dev, slave, port)) {
3132			dev_err(dev,
3133				"Failed to initialize secondary port(%pOFn), skipping...\n",
3134				port);
3135			devm_kfree(dev, slave);
3136			continue;
3137		}
3138
3139		if (!IS_SS_ID_2U(gbe_dev))
3140			gbe_sgmii_config(gbe_dev, slave);
3141		gbe_port_reset(slave);
3142		gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
3143		list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
3144		gbe_dev->num_slaves++;
3145		if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
3146		    (slave->link_interface == XGMII_LINK_MAC_PHY))
3147			mac_phy_link = true;
3148
3149		slave->open = true;
3150		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
3151			of_node_put(port);
3152			break;
3153		}
3154	}
3155
3156	/* of_phy_connect() is needed only for MAC-PHY interface */
3157	if (!mac_phy_link)
3158		return;
3159
3160	/* Allocate a dummy netdev used only for attaching to the PHY devices */
3161	gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
3162					NET_NAME_UNKNOWN, ether_setup);
3163	if (!gbe_dev->dummy_ndev) {
3164		dev_err(dev,
3165			"Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
3166		return;
3167	}
3168	priv = netdev_priv(gbe_dev->dummy_ndev);
3169	*priv = gbe_dev;
3170
3171	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
3172		phy_mode = PHY_INTERFACE_MODE_SGMII;
3173		slave->phy_port_t = PORT_MII;
3174	} else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
3175		phy_mode = PHY_INTERFACE_MODE_RGMII;
3176		slave->phy_port_t = PORT_MII;
3177	} else {
3178		phy_mode = PHY_INTERFACE_MODE_NA;
3179		slave->phy_port_t = PORT_FIBRE;
3180	}
3181
3182	for_each_sec_slave(slave, gbe_dev) {
3183		if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
3184		    (slave->link_interface != RGMII_LINK_MAC_PHY) &&
3185		    (slave->link_interface != XGMII_LINK_MAC_PHY))
3186			continue;
3187		slave->phy =
3188			of_phy_connect(gbe_dev->dummy_ndev,
3189				       slave->phy_node,
3190				       gbe_adjust_link_sec_slaves,
3191				       0, phy_mode);
3192		if (!slave->phy) {
3193			dev_err(dev, "phy not found for slave %d\n",
3194				slave->slave_num);
3195		} else {
3196			dev_dbg(dev, "phy found: id is: 0x%s\n",
3197				phydev_name(slave->phy));
3198			phy_start(slave->phy);
3199		}
3200	}
3201}
3202
3203static void free_secondary_ports(struct gbe_priv *gbe_dev)
3204{
3205	struct gbe_slave *slave;
3206
3207	while (!list_empty(&gbe_dev->secondary_slaves)) {
3208		slave = first_sec_slave(gbe_dev);
3209
3210		if (slave->phy)
3211			phy_disconnect(slave->phy);
3212		list_del(&slave->slave_list);
3213	}
3214	if (gbe_dev->dummy_ndev)
3215		free_netdev(gbe_dev->dummy_ndev);
3216}
3217
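/* Map the 10G (XGBE) subsystem, switch module and SerDes register
 * regions and fill in the version-specific offsets and stats tables.
 */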
3218static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
3219				 struct device_node *node)
3220{
3221	struct resource res;
3222	void __iomem *regs;
3223	int ret, i;
3224
3225	ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
3226	if (ret) {
3227		dev_err(gbe_dev->dev,
3228			"Can't xlate xgbe of node(%pOFn) ss address at %d\n",
3229			node, XGBE_SS_REG_INDEX);
3230		return ret;
3231	}
3232
3233	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3234	if (IS_ERR(regs)) {
3235		dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
3236		return PTR_ERR(regs);
3237	}
3238	gbe_dev->ss_regs = regs;
3239
3240	ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
3241	if (ret) {
3242		dev_err(gbe_dev->dev,
3243			"Can't xlate xgbe of node(%pOFn) sm address at %d\n",
3244			node, XGBE_SM_REG_INDEX);
3245		return ret;
3246	}
3247
3248	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3249	if (IS_ERR(regs)) {
3250		dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
3251		return PTR_ERR(regs);
3252	}
3253	gbe_dev->switch_regs = regs;
3254
3255	ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
3256	if (ret) {
3257		dev_err(gbe_dev->dev,
3258			"Can't xlate xgbe serdes of node(%pOFn) address at %d\n",
3259			node, XGBE_SERDES_REG_INDEX);
3260		return ret;
3261	}
3262
3263	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3264	if (IS_ERR(regs)) {
3265		dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
3266		return PTR_ERR(regs);
3267	}
3268	gbe_dev->xgbe_serdes_regs = regs;
3269
3270	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
3271	gbe_dev->et_stats = xgbe10_et_stats;
3272	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
3273
3274	gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
3275					 gbe_dev->num_et_stats, sizeof(u64),
3276					 GFP_KERNEL);
3277	if (!gbe_dev->hw_stats) {
3278		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3279		return -ENOMEM;
3280	}
3281
3282	gbe_dev->hw_stats_prev =
3283		devm_kcalloc(gbe_dev->dev,
3284			     gbe_dev->num_et_stats, sizeof(u32),
3285			     GFP_KERNEL);
3286	if (!gbe_dev->hw_stats_prev) {
3287		dev_err(gbe_dev->dev,
3288			"hw_stats_prev memory allocation failed\n");
3289		return -ENOMEM;
3290	}
3291
3292	gbe_dev->ss_version = XGBE_SS_VERSION_10;
3293	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
3294					XGBE10_SGMII_MODULE_OFFSET;
3295	gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;
3296
3297	for (i = 0; i < gbe_dev->max_num_ports; i++)
3298		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
3299			XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);
3300
3301	gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
3302	gbe_dev->cpts_reg = gbe_dev->switch_regs + XGBE10_CPTS_OFFSET;
3303	gbe_dev->ale_ports = gbe_dev->max_num_ports;
3304	gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
3305	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
3306
3307	/* Subsystem registers */
3308	XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3309	XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);
3310
3311	/* Switch module registers */
3312	XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3313	XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
3314	XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3315	XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3316	XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
3317
3318	/* Host port registers */
3319	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3320	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
3321	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3322	return 0;
3323}
3324
3325static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
3326				    struct device_node *node)
3327{
3328	struct resource res;
3329	void __iomem *regs;
3330	int ret;
3331
3332	ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
3333	if (ret) {
3334		dev_err(gbe_dev->dev,
3335			"Can't translate of node(%pOFn) of gbe ss address at %d\n",
3336			node, GBE_SS_REG_INDEX);
3337		return ret;
3338	}
3339
3340	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3341	if (IS_ERR(regs)) {
3342		dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
3343		return PTR_ERR(regs);
3344	}
3345	gbe_dev->ss_regs = regs;
3346	gbe_dev->ss_version = readl(gbe_dev->ss_regs);
3347	return 0;
3348}
3349
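/* Map the version 1.4 (K2HK) switch module and SGMII port 3/4 register
 * regions and fill in the version-specific offsets and stats tables.
 */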
3350static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
3351				struct device_node *node)
3352{
3353	struct resource res;
3354	void __iomem *regs;
3355	int i, ret;
3356
3357	ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
3358	if (ret) {
3359		dev_err(gbe_dev->dev,
3360			"Can't translate of gbe node(%pOFn) address at index %d\n",
3361			node, GBE_SGMII34_REG_INDEX);
3362		return ret;
3363	}
3364
3365	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3366	if (IS_ERR(regs)) {
3367		dev_err(gbe_dev->dev,
3368			"Failed to map gbe sgmii port34 register base\n");
3369		return PTR_ERR(regs);
3370	}
3371	gbe_dev->sgmii_port34_regs = regs;
3372
3373	ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
3374	if (ret) {
3375		dev_err(gbe_dev->dev,
3376			"Can't translate of gbe node(%pOFn) address at index %d\n",
3377			node, GBE_SM_REG_INDEX);
3378		return ret;
3379	}
3380
3381	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3382	if (IS_ERR(regs)) {
3383		dev_err(gbe_dev->dev,
3384			"Failed to map gbe switch module register base\n");
3385		return PTR_ERR(regs);
3386	}
3387	gbe_dev->switch_regs = regs;
3388
3389	gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
3390	gbe_dev->et_stats = gbe13_et_stats;
3391	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
3392
3393	gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
3394					 gbe_dev->num_et_stats, sizeof(u64),
3395					 GFP_KERNEL);
3396	if (!gbe_dev->hw_stats) {
3397		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3398		return -ENOMEM;
3399	}
3400
3401	gbe_dev->hw_stats_prev =
3402		devm_kcalloc(gbe_dev->dev,
3403			     gbe_dev->num_et_stats, sizeof(u32),
3404			     GFP_KERNEL);
3405	if (!gbe_dev->hw_stats_prev) {
3406		dev_err(gbe_dev->dev,
3407			"hw_stats_prev memory allocation failed\n");
3408		return -ENOMEM;
3409	}
3410
3411	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
3412	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
3413
3414	/* K2HK has only 2 hw stats modules visible at a time, so
3415	 * modules 0 & 2 point to one base and
3416	 * modules 1 & 3 point to the other base.
3417	 */
3418	for (i = 0; i < gbe_dev->max_num_slaves; i++) {
3419		gbe_dev->hw_stats_regs[i] =
3420			gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
3421			(GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
3422	}
3423
3424	gbe_dev->cpts_reg = gbe_dev->switch_regs + GBE13_CPTS_OFFSET;
3425	gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
3426	gbe_dev->ale_ports = gbe_dev->max_num_ports;
3427	gbe_dev->host_port = GBE13_HOST_PORT_NUM;
3428	gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
3429
3430	/* Subsystem registers */
3431	GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3432
3433	/* Switch module registers */
3434	GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3435	GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
3436	GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
3437	GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3438	GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3439	GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
3440
3441	/* Host port registers */
3442	GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3443	GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3444	return 0;
3445}
3446
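/* Map the NU/2U (GBENU) switch module registers and fill in the
 * version-specific offsets and stats tables.
 */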
3447static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
3448				struct device_node *node)
3449{
3450	struct resource res;
3451	void __iomem *regs;
3452	int i, ret;
3453
3454	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
3455	gbe_dev->et_stats = gbenu_et_stats;
3456
3457	if (IS_SS_ID_MU(gbe_dev))
3458		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
3459			(gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
3460	else
3461		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
3462					GBENU_ET_STATS_PORT_SIZE;
3463
3464	gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
3465					 gbe_dev->num_et_stats, sizeof(u64),
3466					 GFP_KERNEL);
3467	if (!gbe_dev->hw_stats) {
3468		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3469		return -ENOMEM;
3470	}
3471
3472	gbe_dev->hw_stats_prev =
3473		devm_kcalloc(gbe_dev->dev,
3474			     gbe_dev->num_et_stats, sizeof(u32),
3475			     GFP_KERNEL);
3476	if (!gbe_dev->hw_stats_prev) {
3477		dev_err(gbe_dev->dev,
3478			"hw_stats_prev memory allocation failed\n");
3479		return -ENOMEM;
3480	}
3481
3482	ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
3483	if (ret) {
3484		dev_err(gbe_dev->dev,
3485			"Can't translate of gbenu node(%pOFn) addr at index %d\n",
3486			node, GBENU_SM_REG_INDEX);
3487		return ret;
3488	}
3489
3490	regs = devm_ioremap_resource(gbe_dev->dev, &res);
3491	if (IS_ERR(regs)) {
3492		dev_err(gbe_dev->dev,
3493			"Failed to map gbenu switch module register base\n");
3494		return PTR_ERR(regs);
3495	}
3496	gbe_dev->switch_regs = regs;
3497
3498	if (!IS_SS_ID_2U(gbe_dev))
3499		gbe_dev->sgmii_port_regs =
3500		       gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
3501
3502	/* Although the SGMII modules are memory-mapped as one contiguous
3503	 * region on GBENU devices, setting sgmii_port34_regs keeps the
3504	 * SGMII API accesses consistent across subsystem types.
3505	 */
3506	gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
3507				     (2 * GBENU_SGMII_MODULE_SIZE);
3508
3509	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
3510
3511	for (i = 0; i < (gbe_dev->max_num_ports); i++)
3512		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
3513			GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);
3514
3515	gbe_dev->cpts_reg = gbe_dev->switch_regs + GBENU_CPTS_OFFSET;
3516	gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
3517	gbe_dev->ale_ports = gbe_dev->max_num_ports;
3518	gbe_dev->host_port = GBENU_HOST_PORT_NUM;
3519	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
3520
3521	/* Subsystem registers */
3522	GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3523	/* OK to set for MU, but only 2U actually uses it */
3524	GBENU_SET_REG_OFS(gbe_dev, ss_regs, rgmii_status);
3525
3526	/* Switch module registers */
3527	GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3528	GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
3529	GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3530	GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3531
3532	/* Host port registers */
3533	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3534	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3535
3536	/* For NU only; 2U does not need tx_pri_map.
3537	 * The NU CPPI port 0 TX packet streaming interface has (n-1)*8
3538	 * egress threads, while 2U has only one such thread.
3539	 */
3540	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
3541	return 0;
3542}
3543
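/* Probe: determine the subsystem flavour from the compatible string
 * and subsystem version, map its registers, set up the TX pipe and
 * walk the device tree interface nodes.
 */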
static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
		     struct device_node *node, void **inst_priv)
{
	struct device_node *interfaces, *interface, *cpts_node;
	struct device_node *secondary_ports;
	struct cpsw_ale_params ale_params;
	struct gbe_priv *gbe_dev;
	u32 slave_num;
	int i, ret = 0;

	if (!node) {
		dev_err(dev, "device tree info unavailable\n");
		return -ENODEV;
	}

	gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
	if (!gbe_dev)
		return -ENOMEM;

	if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
	    of_device_is_compatible(node, "ti,netcp-gbe")) {
		gbe_dev->max_num_slaves = 4;
	} else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
		gbe_dev->max_num_slaves = 8;
	} else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
		gbe_dev->max_num_slaves = 1;
		gbe_module.set_rx_mode = gbe_set_rx_mode;
	} else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
		gbe_dev->max_num_slaves = 2;
	} else {
		dev_err(dev, "device tree node for unknown device\n");
		return -EINVAL;
	}
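	/* The switch exposes one host port in addition to the slave ports,
	 * hence the "+ 1" below.
	 */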
	gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1;

	gbe_dev->dev = dev;
	gbe_dev->netcp_device = netcp_device;
	gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;

	/* init the hw stats lock */
	spin_lock_init(&gbe_dev->hw_stats_lock);

	gbe_dev->enable_ale = of_property_read_bool(node, "enable-ale");
	if (gbe_dev->enable_ale)
		dev_info(dev, "ALE enabled\n");
	else
		dev_dbg(dev, "ALE bypass enabled\n");

	ret = of_property_read_u32(node, "tx-queue",
				   &gbe_dev->tx_queue_id);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-queue\" parameter, using default\n");
		gbe_dev->tx_queue_id = GBE_TX_QUEUE;
	}

	ret = of_property_read_string(node, "tx-channel",
				      &gbe_dev->dma_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-channel\" parameter\n");
		return -EINVAL;
	}

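	/* Select the register layout and private setup routine based on the
	 * node name and the subsystem version read from the hardware.
	 */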
	if (of_node_name_eq(node, "gbe")) {
		ret = get_gbe_resource_version(gbe_dev, node);
		if (ret)
			return ret;

		dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);

		if (IS_SS_ID_VER_14(gbe_dev))
			ret = set_gbe_ethss14_priv(gbe_dev, node);
		else if (IS_SS_ID_MU(gbe_dev))
			ret = set_gbenu_ethss_priv(gbe_dev, node);
		else
			ret = -ENODEV;

	} else if (of_node_name_eq(node, "xgbe")) {
		ret = set_xgbe_ethss10_priv(gbe_dev, node);
		if (ret)
			return ret;
		ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
					     gbe_dev->ss_regs);
	} else {
		dev_err(dev, "unknown GBE node(%pOFn)\n", node);
		ret = -ENODEV;
	}

	if (ret)
		return ret;

	interfaces = of_get_child_by_name(node, "interfaces");
	if (!interfaces)
		dev_err(dev, "could not find interfaces\n");

	ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
				gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
	if (ret) {
		of_node_put(interfaces);
		return ret;
	}

	ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
	if (ret) {
		of_node_put(interfaces);
		return ret;
	}

	/* Create network interfaces */
	INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
	for_each_child_of_node(interfaces, interface) {
		ret = of_property_read_u32(interface, "slave-port", &slave_num);
		if (ret) {
			dev_err(dev, "missing slave-port parameter, skipping interface configuration for %pOFn\n",
				interface);
			continue;
		}
		gbe_dev->num_slaves++;
		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
			of_node_put(interface);
			break;
		}
	}
	of_node_put(interfaces);

	if (!gbe_dev->num_slaves)
		dev_warn(dev, "No network interface configured\n");

	/* Initialize Secondary slave ports */
	secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
	INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
	if (secondary_ports && (gbe_dev->num_slaves < gbe_dev->max_num_slaves))
		init_secondary_ports(gbe_dev, secondary_ports);
	of_node_put(secondary_ports);

	if (!gbe_dev->num_slaves) {
		dev_err(dev,
			"No network interface or secondary ports configured\n");
		ret = -ENODEV;
		goto free_sec_ports;
	}

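	/* Configure and create the ALE (address lookup engine) instance for
	 * the detected switch variant.
	 */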
	memset(&ale_params, 0, sizeof(ale_params));
	ale_params.dev		= gbe_dev->dev;
	ale_params.ale_regs	= gbe_dev->ale_reg;
	ale_params.ale_ageout	= GBE_DEFAULT_ALE_AGEOUT;
	ale_params.ale_ports	= gbe_dev->ale_ports;
	ale_params.dev_id	= "cpsw";
	if (IS_SS_ID_NU(gbe_dev))
		ale_params.dev_id = "66ak2el";
	else if (IS_SS_ID_2U(gbe_dev))
		ale_params.dev_id = "66ak2g";
	else if (IS_SS_ID_XGBE(gbe_dev))
		ale_params.dev_id = "66ak2h-xgbe";

	gbe_dev->ale = cpsw_ale_create(&ale_params);
	if (IS_ERR(gbe_dev->ale)) {
		dev_err(gbe_dev->dev, "error initializing ale engine\n");
		ret = PTR_ERR(gbe_dev->ale);
		goto free_sec_ports;
	} else {
		dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
	}

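	/* Create the CPTS (time sync) instance; if no dedicated "cpts" child
	 * node exists, fall back to the GBE node itself.
	 */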
	cpts_node = of_get_child_by_name(node, "cpts");
	if (!cpts_node)
		cpts_node = of_node_get(node);

	gbe_dev->cpts = cpts_create(gbe_dev->dev, gbe_dev->cpts_reg,
				    cpts_node, 0);
	of_node_put(cpts_node);
	if (IS_ENABLED(CONFIG_TI_CPTS) && IS_ERR(gbe_dev->cpts)) {
		ret = PTR_ERR(gbe_dev->cpts);
		goto free_sec_ports;
	}

	/* initialize host port */
	gbe_init_host_port(gbe_dev);

	spin_lock_bh(&gbe_dev->hw_stats_lock);
	for (i = 0; i < gbe_dev->num_stats_mods; i++) {
		if (IS_SS_ID_VER_14(gbe_dev))
			gbe_reset_mod_stats_ver14(gbe_dev, i);
		else
			gbe_reset_mod_stats(gbe_dev, i);
	}
	spin_unlock_bh(&gbe_dev->hw_stats_lock);

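	/* Arm the periodic timer that drives statistics collection and link
	 * state monitoring.
	 */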
	timer_setup(&gbe_dev->timer, netcp_ethss_timer, 0);
	gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
	add_timer(&gbe_dev->timer);
	*inst_priv = gbe_dev;
	return 0;

free_sec_ports:
	free_secondary_ports(gbe_dev);
	return ret;
}

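/* Attach a network interface to the GBE instance: allocate the per-interface
 * and per-slave state, parse the slave DT node and hook up the ethtool ops.
 */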
static int gbe_attach(void *inst_priv, struct net_device *ndev,
		      struct device_node *node, void **intf_priv)
{
	struct gbe_priv *gbe_dev = inst_priv;
	struct gbe_intf *gbe_intf;
	int ret;

	if (!node) {
		dev_err(gbe_dev->dev, "interface node not available\n");
		return -ENODEV;
	}

	gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
	if (!gbe_intf)
		return -ENOMEM;

	gbe_intf->ndev = ndev;
	gbe_intf->dev = gbe_dev->dev;
	gbe_intf->gbe_dev = gbe_dev;

	gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
					sizeof(*gbe_intf->slave),
					GFP_KERNEL);
	if (!gbe_intf->slave) {
		ret = -ENOMEM;
		goto fail;
	}

	if (init_slave(gbe_dev, gbe_intf->slave, node)) {
		ret = -ENODEV;
		goto fail;
	}

	gbe_intf->tx_pipe = gbe_dev->tx_pipe;
	ndev->ethtool_ops = &keystone_ethtool_ops;
	list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
	*intf_priv = gbe_intf;
	return 0;

fail:
	if (gbe_intf->slave)
		devm_kfree(gbe_dev->dev, gbe_intf->slave);
	if (gbe_intf)
		devm_kfree(gbe_dev->dev, gbe_intf);
	return ret;
}

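/* Undo gbe_attach() for one interface: drop the ethtool ops, unlink it from
 * the instance list and free the per-interface state.
 */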
static int gbe_release(void *intf_priv)
{
	struct gbe_intf *gbe_intf = intf_priv;

	gbe_intf->ndev->ethtool_ops = NULL;
	list_del(&gbe_intf->gbe_intf_list);
	devm_kfree(gbe_intf->dev, gbe_intf->slave);
	devm_kfree(gbe_intf->dev, gbe_intf);
	return 0;
}

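/* Tear down a GBE instance: stop the timer, release CPTS and the ALE, close
 * the TX pipe and free the secondary slave ports.
 */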
static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
{
	struct gbe_priv *gbe_dev = inst_priv;

	del_timer_sync(&gbe_dev->timer);
	cpts_release(gbe_dev->cpts);
	cpsw_ale_stop(gbe_dev->ale);
	netcp_txpipe_close(&gbe_dev->tx_pipe);
	free_secondary_ports(gbe_dev);

	if (!list_empty(&gbe_dev->gbe_intf_head))
		dev_alert(gbe_dev->dev,
			  "unreleased ethss interfaces present\n");

	return 0;
}

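/* netcp module descriptors for the 1G (GBE) and 10G (XGBE) subsystems; both
 * share the same set of callbacks and differ only in the module name.
 */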
static struct netcp_module gbe_module = {
	.name		= GBE_MODULE_NAME,
	.owner		= THIS_MODULE,
	.primary	= true,
	.probe		= gbe_probe,
	.open		= gbe_open,
	.close		= gbe_close,
	.remove		= gbe_remove,
	.attach		= gbe_attach,
	.release	= gbe_release,
	.add_addr	= gbe_add_addr,
	.del_addr	= gbe_del_addr,
	.add_vid	= gbe_add_vid,
	.del_vid	= gbe_del_vid,
	.ioctl		= gbe_ioctl,
};

static struct netcp_module xgbe_module = {
	.name		= XGBE_MODULE_NAME,
	.owner		= THIS_MODULE,
	.primary	= true,
	.probe		= gbe_probe,
	.open		= gbe_open,
	.close		= gbe_close,
	.remove		= gbe_remove,
	.attach		= gbe_attach,
	.release	= gbe_release,
	.add_addr	= gbe_add_addr,
	.del_addr	= gbe_del_addr,
	.add_vid	= gbe_add_vid,
	.del_vid	= gbe_del_vid,
	.ioctl		= gbe_ioctl,
};

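/* Register both the GBE and XGBE modules with the netcp core at module init. */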
static int __init keystone_gbe_init(void)
{
	int ret;

	ret = netcp_register_module(&gbe_module);
	if (ret)
		return ret;

	ret = netcp_register_module(&xgbe_module);
	if (ret) {
		/* don't leave the gbe module registered on failure */
		netcp_unregister_module(&gbe_module);
		return ret;
	}

	return 0;
}
module_init(keystone_gbe_init);

static void __exit keystone_gbe_exit(void)
{
	netcp_unregister_module(&gbe_module);
	netcp_unregister_module(&xgbe_module);
}
module_exit(keystone_gbe_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SoCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
