1/*
2 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
3 *
4 * Copyright (C) 2014 Marvell
5 *
6 * Marcin Wojtas <mw@semihalf.com>
7 *
8 * U-Boot version:
9 * Copyright (C) 2016-2017 Stefan Roese <sr@denx.de>
10 *
11 * This file is licensed under the terms of the GNU General Public
12 * License version 2. This program is licensed "as is" without any
13 * warranty of any kind, whether express or implied.
14 */
15
16#include <cpu_func.h>
17#include <dm.h>
18#include <asm/cache.h>
19#include <asm/global_data.h>
20#include <dm/device-internal.h>
21#include <dm/device_compat.h>
22#include <dm/devres.h>
23#include <dm/lists.h>
24#include <net.h>
25#include <netdev.h>
26#include <config.h>
27#include <malloc.h>
28#include <asm/io.h>
29#include <linux/bitops.h>
30#include <linux/bug.h>
31#include <linux/delay.h>
32#include <linux/err.h>
33#include <linux/errno.h>
34#include <phy.h>
35#include <miiphy.h>
36#include <watchdog.h>
37#include <asm/arch/cpu.h>
38#include <asm/arch/soc.h>
39#include <linux/compat.h>
40#include <linux/libfdt.h>
41#include <linux/mbus.h>
42#include <asm-generic/gpio.h>
43#include <fdt_support.h>
44#include <linux/mdio.h>
45
46DECLARE_GLOBAL_DATA_PTR;
47
48#define __verify_pcpu_ptr(ptr)						\
49do {									\
50	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;	\
51	(void)__vpp_verify;						\
52} while (0)
53
54#define VERIFY_PERCPU_PTR(__p)						\
55({									\
56	__verify_pcpu_ptr(__p);						\
57	(typeof(*(__p)) __kernel __force *)(__p);			\
58})
59
60#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
61#define smp_processor_id()	0
62#define num_present_cpus()	1
63#define for_each_present_cpu(cpu)			\
64	for ((cpu) = 0; (cpu) < 1; (cpu)++)
65
66#define NET_SKB_PAD	max(32, MVPP2_CPU_D_CACHE_LINE_SIZE)
67
68/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
69#define WRAP			(2 + ETH_HLEN + 4 + 32)
70#define MTU			1500
71#define RX_BUFFER_SIZE		(ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))
72
73/* RX Fifo Registers */
74#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
75#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
76#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
77#define MVPP2_RX_FIFO_INIT_REG			0x64
78
79/* RX DMA Top Registers */
80#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
81#define     MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
82#define     MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
83#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
84#define     MVPP2_POOL_BUF_SIZE_OFFSET		5
85#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
86#define     MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
87#define     MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
88#define     MVPP2_RXQ_POOL_SHORT_OFFS		20
89#define     MVPP21_RXQ_POOL_SHORT_MASK		0x700000
90#define     MVPP22_RXQ_POOL_SHORT_MASK		0xf00000
91#define     MVPP2_RXQ_POOL_LONG_OFFS		24
92#define     MVPP21_RXQ_POOL_LONG_MASK		0x7000000
93#define     MVPP22_RXQ_POOL_LONG_MASK		0xf000000
94#define     MVPP2_RXQ_PACKET_OFFSET_OFFS	28
95#define     MVPP2_RXQ_PACKET_OFFSET_MASK	0x70000000
96#define     MVPP2_RXQ_DISABLE_MASK		BIT(31)
97
98/* Parser Registers */
99#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
100#define     MVPP2_PRS_PORT_LU_MAX		0xf
101#define     MVPP2_PRS_PORT_LU_MASK(port)	(0xff << ((port) * 4))
102#define     MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
103#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
104#define     MVPP2_PRS_INIT_OFF_MASK(port)	(0x3f << (((port) % 4) * 8))
105#define     MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
106#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
107#define     MVPP2_PRS_MAX_LOOP_MASK(port)	(0xff << (((port) % 4) * 8))
108#define     MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
109#define MVPP2_PRS_TCAM_IDX_REG			0x1100
110#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
111#define     MVPP2_PRS_TCAM_INV_MASK		BIT(31)
112#define MVPP2_PRS_SRAM_IDX_REG			0x1200
113#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
114#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
115#define     MVPP2_PRS_TCAM_EN_MASK		BIT(0)
116
117/* Classifier Registers */
118#define MVPP2_CLS_MODE_REG			0x1800
119#define     MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
120#define MVPP2_CLS_PORT_WAY_REG			0x1810
121#define     MVPP2_CLS_PORT_WAY_MASK(port)	(1 << (port))
122#define MVPP2_CLS_LKP_INDEX_REG			0x1814
123#define     MVPP2_CLS_LKP_INDEX_WAY_OFFS	6
124#define MVPP2_CLS_LKP_TBL_REG			0x1818
125#define     MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
126#define     MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
127#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
128#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
129#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
130#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
131#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
132#define     MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS	3
133#define     MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK	0x7
134#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
135#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
136#define     MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))
137
138/* Descriptor Manager Top Registers */
139#define MVPP2_RXQ_NUM_REG			0x2040
140#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
141#define     MVPP22_DESC_ADDR_OFFS		8
142#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
143#define     MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
144#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
145#define     MVPP2_RXQ_NUM_PROCESSED_OFFSET	0
146#define     MVPP2_RXQ_NUM_NEW_OFFSET		16
147#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
148#define     MVPP2_RXQ_OCCUPIED_MASK		0x3fff
149#define     MVPP2_RXQ_NON_OCCUPIED_OFFSET	16
150#define     MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
151#define MVPP2_RXQ_THRESH_REG			0x204c
152#define     MVPP2_OCCUPIED_THRESH_OFFSET	0
153#define     MVPP2_OCCUPIED_THRESH_MASK		0x3fff
154#define MVPP2_RXQ_INDEX_REG			0x2050
155#define MVPP2_TXQ_NUM_REG			0x2080
156#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
157#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
158#define     MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
159#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
160#define MVPP2_TXQ_THRESH_REG			0x2094
161#define     MVPP2_TRANSMITTED_THRESH_OFFSET	16
162#define     MVPP2_TRANSMITTED_THRESH_MASK	0x3fff0000
163#define MVPP2_TXQ_INDEX_REG			0x2098
164#define MVPP2_TXQ_PREF_BUF_REG			0x209c
165#define     MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
166#define     MVPP2_PREF_BUF_SIZE_4		(BIT(12) | BIT(13))
167#define     MVPP2_PREF_BUF_SIZE_16		(BIT(12) | BIT(14))
168#define     MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
169#define     MVPP2_TXQ_DRAIN_EN_MASK		BIT(31)
170#define MVPP2_TXQ_PENDING_REG			0x20a0
171#define     MVPP2_TXQ_PENDING_MASK		0x3fff
172#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
173#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
174#define     MVPP2_TRANSMITTED_COUNT_OFFSET	16
175#define     MVPP2_TRANSMITTED_COUNT_MASK	0x3fff0000
176#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
177#define     MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
178#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
179#define     MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
180#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
181#define     MVPP2_TXQ_RSVD_CLR_OFFSET		16
182#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
183#define     MVPP22_AGGR_TXQ_DESC_ADDR_OFFS	8
184#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
185#define     MVPP2_AGGR_TXQ_DESC_SIZE_MASK	0x3ff0
186#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
187#define     MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
188#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))
189
190/* MBUS bridge registers */
191#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
192#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
193#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
194#define MVPP2_BASE_ADDR_ENABLE			0x4060
195
196/* AXI Bridge Registers */
197#define MVPP22_AXI_BM_WR_ATTR_REG		0x4100
198#define MVPP22_AXI_BM_RD_ATTR_REG		0x4104
199#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG	0x4110
200#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG	0x4114
201#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG	0x4118
202#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG	0x411c
203#define MVPP22_AXI_RX_DATA_WR_ATTR_REG		0x4120
204#define MVPP22_AXI_TX_DATA_RD_ATTR_REG		0x4130
205#define MVPP22_AXI_RD_NORMAL_CODE_REG		0x4150
206#define MVPP22_AXI_RD_SNOOP_CODE_REG		0x4154
207#define MVPP22_AXI_WR_NORMAL_CODE_REG		0x4160
208#define MVPP22_AXI_WR_SNOOP_CODE_REG		0x4164
209
210/* Values for AXI Bridge registers */
211#define MVPP22_AXI_ATTR_CACHE_OFFS		0
212#define MVPP22_AXI_ATTR_DOMAIN_OFFS		12
213
214#define MVPP22_AXI_CODE_CACHE_OFFS		0
215#define MVPP22_AXI_CODE_DOMAIN_OFFS		4
216
217#define MVPP22_AXI_CODE_CACHE_NON_CACHE		0x3
218#define MVPP22_AXI_CODE_CACHE_WR_CACHE		0x7
219#define MVPP22_AXI_CODE_CACHE_RD_CACHE		0xb
220
221#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM	2
222#define MVPP22_AXI_CODE_DOMAIN_SYSTEM		3
223
224/* Interrupt Cause and Mask registers */
225#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
226#define MVPP21_ISR_RXQ_GROUP_REG(rxq)		(0x5400 + 4 * (rxq))
227
228#define MVPP22_ISR_RXQ_GROUP_INDEX_REG          0x5400
229#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
230#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK   0x380
231#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7
232
233#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
234#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK   0x380
235
236#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG     0x5404
237#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK    0x1f
238#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK      0xf00
239#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET    8
240
241#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
242#define     MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
243#define     MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
244#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
245#define     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
246#define     MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
247#define     MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
248#define     MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
249#define     MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
250#define     MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
251#define     MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
252#define     MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
253#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
254#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
255#define     MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
256#define     MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
257#define     MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
258#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0
259
260/* Buffer Manager registers */
261#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
262#define     MVPP2_BM_POOL_BASE_ADDR_MASK	0xfffff80
263#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
264#define     MVPP2_BM_POOL_SIZE_MASK		0xfff0
265#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
266#define     MVPP2_BM_POOL_GET_READ_PTR_MASK	0xfff0
267#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
268#define     MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
269#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
270#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
271#define     MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
272#define     MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
273#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
274#define     MVPP2_BM_START_MASK			BIT(0)
275#define     MVPP2_BM_STOP_MASK			BIT(1)
276#define     MVPP2_BM_STATE_MASK			BIT(4)
277#define     MVPP2_BM_LOW_THRESH_OFFS		8
278#define     MVPP2_BM_LOW_THRESH_MASK		0x7f00
279#define     MVPP2_BM_LOW_THRESH_VALUE(val)	((val) << \
280						MVPP2_BM_LOW_THRESH_OFFS)
281#define     MVPP2_BM_HIGH_THRESH_OFFS		16
282#define     MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
283#define     MVPP2_BM_HIGH_THRESH_VALUE(val)	((val) << \
284						MVPP2_BM_HIGH_THRESH_OFFS)
285#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
286#define     MVPP2_BM_RELEASED_DELAY_MASK	BIT(0)
287#define     MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
288#define     MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
289#define     MVPP2_BM_BPPE_FULL_MASK		BIT(3)
290#define     MVPP2_BM_AVAILABLE_BP_LOW_MASK	BIT(4)
291#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
292#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
293#define     MVPP2_BM_PHY_ALLOC_GRNTD_MASK	BIT(0)
294#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
295#define MVPP2_BM_ADDR_HIGH_ALLOC		0x6444
296#define     MVPP2_BM_ADDR_HIGH_PHYS_MASK	0xff
297#define     MVPP2_BM_ADDR_HIGH_VIRT_MASK	0xff00
298#define     MVPP2_BM_ADDR_HIGH_VIRT_SHIFT	8
299#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
300#define     MVPP2_BM_PHY_RLS_MC_BUFF_MASK	BIT(0)
301#define     MVPP2_BM_PHY_RLS_PRIO_EN_MASK	BIT(1)
302#define     MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
303#define MVPP2_BM_VIRT_RLS_REG			0x64c0
304#define MVPP21_BM_MC_RLS_REG			0x64c4
305#define     MVPP2_BM_MC_ID_MASK			0xfff
306#define     MVPP2_BM_FORCE_RELEASE_MASK		BIT(12)
307#define MVPP22_BM_ADDR_HIGH_RLS_REG		0x64c4
308#define     MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK	0xff
309#define	    MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK	0xff00
310#define     MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT	8
311#define MVPP22_BM_MC_RLS_REG			0x64d4
312#define MVPP22_BM_POOL_BASE_HIGH_REG		0x6310
313#define MVPP22_BM_POOL_BASE_HIGH_MASK		0xff
314
315/* TX Scheduler registers */
316#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
317#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
318#define     MVPP2_TXP_SCHED_ENQ_MASK		0xff
319#define     MVPP2_TXP_SCHED_DISQ_OFFSET		8
320#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
321#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
322#define MVPP2_TXP_SCHED_MTU_REG			0x801c
323#define     MVPP2_TXP_MTU_MAX			0x7FFFF
324#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
325#define     MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
326#define     MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
327#define     MVPP2_TXP_REFILL_PERIOD_MASK(v)	((v) << 20)
328#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
329#define     MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
330#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
331#define     MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
332#define     MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
333#define     MVPP2_TXQ_REFILL_PERIOD_MASK(v)	((v) << 20)
334#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
335#define     MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
336#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
337#define     MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff
338
339/* TX general registers */
340#define MVPP2_TX_SNOOP_REG			0x8800
341#define MVPP2_TX_PORT_FLUSH_REG			0x8810
342#define     MVPP2_TX_PORT_FLUSH_MASK(port)	(1 << (port))
343
344/* LMS registers */
345#define MVPP2_SRC_ADDR_MIDDLE			0x24
346#define MVPP2_SRC_ADDR_HIGH			0x28
347#define MVPP2_PHY_AN_CFG0_REG			0x34
348#define     MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
349#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
350#define     MVPP2_EXT_GLOBAL_CTRL_DEFAULT	0x27
351
352/* Per-port registers */
353#define MVPP2_GMAC_CTRL_0_REG			0x0
354#define      MVPP2_GMAC_PORT_EN_MASK		BIT(0)
355#define      MVPP2_GMAC_PORT_TYPE_MASK		BIT(1)
356#define      MVPP2_GMAC_MAX_RX_SIZE_OFFS	2
357#define      MVPP2_GMAC_MAX_RX_SIZE_MASK	0x7ffc
358#define      MVPP2_GMAC_MIB_CNTR_EN_MASK	BIT(15)
359#define MVPP2_GMAC_CTRL_1_REG			0x4
360#define      MVPP2_GMAC_PERIODIC_XON_EN_MASK	BIT(1)
361#define      MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
362#define      MVPP2_GMAC_PCS_LB_EN_BIT		6
363#define      MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
364#define      MVPP2_GMAC_SA_LOW_OFFS		7
365#define MVPP2_GMAC_CTRL_2_REG			0x8
366#define      MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
367#define      MVPP2_GMAC_SGMII_MODE_MASK		BIT(0)
368#define      MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
369#define      MVPP2_GMAC_PORT_RGMII_MASK		BIT(4)
370#define      MVPP2_GMAC_PORT_DIS_PADING_MASK	BIT(5)
371#define      MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
372#define      MVPP2_GMAC_CLK_125_BYPS_EN_MASK	BIT(9)
373#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
374#define      MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
375#define      MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
376#define      MVPP2_GMAC_EN_PCS_AN		BIT(2)
377#define      MVPP2_GMAC_AN_BYPASS_EN		BIT(3)
378#define      MVPP2_GMAC_CONFIG_MII_SPEED	BIT(5)
379#define      MVPP2_GMAC_CONFIG_GMII_SPEED	BIT(6)
380#define      MVPP2_GMAC_AN_SPEED_EN		BIT(7)
381#define      MVPP2_GMAC_FC_ADV_EN		BIT(9)
382#define      MVPP2_GMAC_EN_FC_AN		BIT(11)
383#define      MVPP2_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
384#define      MVPP2_GMAC_AN_DUPLEX_EN		BIT(13)
385#define      MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG	BIT(15)
386#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
387#define      MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS	6
388#define      MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
389#define      MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
390					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
391#define MVPP2_GMAC_CTRL_4_REG			0x90
392#define      MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK	BIT(0)
393#define      MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK	BIT(5)
394#define      MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK	BIT(6)
395#define      MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK	BIT(7)
396
397/*
398 * Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
399 * relative to port->base.
400 */
401
402/* Port Mac Control0 */
403#define MVPP22_XLG_CTRL0_REG			0x100
404#define      MVPP22_XLG_PORT_EN			BIT(0)
405#define      MVPP22_XLG_MAC_RESETN		BIT(1)
406#define      MVPP22_XLG_RX_FC_EN		BIT(7)
407#define      MVPP22_XLG_MIBCNT_DIS		BIT(13)
408/* Port Mac Control1 */
409#define MVPP22_XLG_CTRL1_REG			0x104
410#define      MVPP22_XLG_MAX_RX_SIZE_OFFS	0
411#define      MVPP22_XLG_MAX_RX_SIZE_MASK	0x1fff
412/* Port Interrupt Mask */
413#define MVPP22_XLG_INTERRUPT_MASK_REG		0x118
414#define      MVPP22_XLG_INTERRUPT_LINK_CHANGE	BIT(1)
415/* Port Mac Control3 */
416#define MVPP22_XLG_CTRL3_REG			0x11c
417#define      MVPP22_XLG_CTRL3_MACMODESELECT_MASK	(7 << 13)
418#define      MVPP22_XLG_CTRL3_MACMODESELECT_GMAC	(0 << 13)
419#define      MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC	(1 << 13)
420/* Port Mac Control4 */
421#define MVPP22_XLG_CTRL4_REG			0x184
422#define      MVPP22_XLG_FORWARD_802_3X_FC_EN	BIT(5)
423#define      MVPP22_XLG_FORWARD_PFC_EN		BIT(6)
424#define      MVPP22_XLG_MODE_DMA_1G		BIT(12)
425#define      MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK	BIT(14)
426
427/* XPCS registers */
428
429/* Global Configuration 0 */
430#define MVPP22_XPCS_GLOBAL_CFG_0_REG		0x0
431#define      MVPP22_XPCS_PCSRESET		BIT(0)
432#define      MVPP22_XPCS_PCSMODE_OFFS		3
433#define      MVPP22_XPCS_PCSMODE_MASK		(0x3 << \
434						 MVPP22_XPCS_PCSMODE_OFFS)
435#define      MVPP22_XPCS_LANEACTIVE_OFFS	5
436#define      MVPP22_XPCS_LANEACTIVE_MASK	(0x3 << \
437						 MVPP22_XPCS_LANEACTIVE_OFFS)
438
439/* MPCS registers */
440
441#define PCS40G_COMMON_CONTROL			0x14
442#define      FORWARD_ERROR_CORRECTION_MASK	BIT(10)
443
444#define PCS_CLOCK_RESET				0x14c
445#define      TX_SD_CLK_RESET_MASK		BIT(0)
446#define      RX_SD_CLK_RESET_MASK		BIT(1)
447#define      MAC_CLK_RESET_MASK			BIT(2)
448#define      CLK_DIVISION_RATIO_OFFS		4
449#define      CLK_DIVISION_RATIO_MASK		(0x7 << CLK_DIVISION_RATIO_OFFS)
450#define      CLK_DIV_PHASE_SET_MASK		BIT(11)
451
452/* System Soft Reset 1 */
453#define GOP_SOFT_RESET_1_REG			0x108
454#define     NETC_GOP_SOFT_RESET_OFFS		6
455#define     NETC_GOP_SOFT_RESET_MASK		(0x1 << \
456						 NETC_GOP_SOFT_RESET_OFFS)
457
458/* Ports Control 0 */
459#define NETCOMP_PORTS_CONTROL_0_REG		0x110
460#define     NETC_BUS_WIDTH_SELECT_OFFS		1
461#define     NETC_BUS_WIDTH_SELECT_MASK		(0x1 << \
462						 NETC_BUS_WIDTH_SELECT_OFFS)
463#define     NETC_GIG_RX_DATA_SAMPLE_OFFS	29
464#define     NETC_GIG_RX_DATA_SAMPLE_MASK	(0x1 << \
465						 NETC_GIG_RX_DATA_SAMPLE_OFFS)
466#define     NETC_CLK_DIV_PHASE_OFFS		31
467#define     NETC_CLK_DIV_PHASE_MASK		(0x1 << NETC_CLK_DIV_PHASE_OFFS)
468/* Ports Control 1 */
469#define NETCOMP_PORTS_CONTROL_1_REG		0x114
470#define     NETC_PORTS_ACTIVE_OFFSET(p)		(0 + p)
471#define     NETC_PORTS_ACTIVE_MASK(p)		(0x1 << \
472						 NETC_PORTS_ACTIVE_OFFSET(p))
473#define     NETC_PORT_GIG_RF_RESET_OFFS(p)	(28 + p)
474#define     NETC_PORT_GIG_RF_RESET_MASK(p)	(0x1 << \
475						 NETC_PORT_GIG_RF_RESET_OFFS(p))
476#define NETCOMP_CONTROL_0_REG			0x120
477#define     NETC_GBE_PORT0_SGMII_MODE_OFFS	0
478#define     NETC_GBE_PORT0_SGMII_MODE_MASK	(0x1 << \
479						 NETC_GBE_PORT0_SGMII_MODE_OFFS)
480#define     NETC_GBE_PORT1_SGMII_MODE_OFFS	1
481#define     NETC_GBE_PORT1_SGMII_MODE_MASK	(0x1 << \
482						 NETC_GBE_PORT1_SGMII_MODE_OFFS)
483#define     NETC_GBE_PORT1_MII_MODE_OFFS	2
484#define     NETC_GBE_PORT1_MII_MODE_MASK	(0x1 << \
485						 NETC_GBE_PORT1_MII_MODE_OFFS)
486
487#define MVPP22_SMI_MISC_CFG_REG			(MVPP22_SMI + 0x04)
488#define      MVPP22_SMI_POLLING_EN		BIT(10)
489
490#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff
491
492/* Descriptor ring Macros */
493#define MVPP2_QUEUE_NEXT_DESC(q, index) \
494	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
495
496/* PP2.2: SMI: 0x12a200 -> offset 0x1200 to iface_base */
497#define MVPP22_SMI				0x1200
498
499/* Additional PPv2.2 offsets */
500#define MVPP22_MPCS				0x007000
501#define MVPP22_XPCS				0x007400
502#define MVPP22_PORT_BASE			0x007e00
503#define MVPP22_PORT_OFFSET			0x001000
504#define MVPP22_RFU1				0x318000
505
506/* Maximum number of ports */
507#define MVPP22_GOP_MAC_NUM			4
508
509/* Sets the field located at the specified in data */
510#define MVPP2_RGMII_TX_FIFO_MIN_TH		0x41
511#define MVPP2_SGMII_TX_FIFO_MIN_TH		0x5
512#define MVPP2_SGMII2_5_TX_FIFO_MIN_TH		0xb
513
514/* Net Complex */
515enum mv_netc_topology {
516	MV_NETC_GE_MAC2_SGMII		=	BIT(0),
517	MV_NETC_GE_MAC2_RGMII		=	BIT(1),
518	MV_NETC_GE_MAC3_SGMII		=	BIT(2),
519	MV_NETC_GE_MAC3_RGMII		=	BIT(3),
520};
521
522enum mv_netc_phase {
523	MV_NETC_FIRST_PHASE,
524	MV_NETC_SECOND_PHASE,
525};
526
527enum mv_netc_sgmii_xmi_mode {
528	MV_NETC_GBE_SGMII,
529	MV_NETC_GBE_XMII,
530};
531
532enum mv_netc_mii_mode {
533	MV_NETC_GBE_RGMII,
534	MV_NETC_GBE_MII,
535};
536
537enum mv_netc_lanes {
538	MV_NETC_LANE_23,
539	MV_NETC_LANE_45,
540};
541
542/* Various constants */
543
544/* Coalescing */
545#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
546#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
547#define MVPP2_RX_COAL_PKTS		32
548#define MVPP2_RX_COAL_USEC		100
549
550/* The two bytes Marvell header. Either contains a special value used
551 * by Marvell switches when a specific hardware mode is enabled (not
552 * supported by this driver) or is filled automatically by zeroes on
553 * the RX side. Those two bytes being at the front of the Ethernet
554 * header, they allow to have the IP header aligned on a 4 bytes
555 * boundary automatically: the hardware skips those two bytes on its
556 * own.
557 */
558#define MVPP2_MH_SIZE			2
559#define MVPP2_ETH_TYPE_LEN		2
560#define MVPP2_PPPOE_HDR_SIZE		8
561#define MVPP2_VLAN_TAG_LEN		4
562
563/* Lbtd 802.3 type */
564#define MVPP2_IP_LBDT_TYPE		0xfffa
565
566#define MVPP2_CPU_D_CACHE_LINE_SIZE	32
567#define MVPP2_TX_CSUM_MAX_SIZE		9800
568
569/* Timeout constants */
570#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
571#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000
572
573#define MVPP2_TX_MTU_MAX		0x7ffff
574
575/* Maximum number of T-CONTs of PON port */
576#define MVPP2_MAX_TCONT			16
577
578/* Maximum number of supported ports */
579#define MVPP2_MAX_PORTS			4
580
581/* Maximum number of TXQs used by single port */
582#define MVPP2_MAX_TXQ			8
583
584/* Default number of TXQs in use */
585#define MVPP2_DEFAULT_TXQ		1
586
587/* Default number of RXQs in use */
588#define MVPP2_DEFAULT_RXQ		1
589#define CFG_MV_ETH_RXQ		8	/* increment by 8 */
590
591/* Max number of Rx descriptors */
592#define MVPP2_MAX_RXD			16
593
594/* Max number of Tx descriptors */
595#define MVPP2_MAX_TXD			16
596
597/* Amount of Tx descriptors that can be reserved at once by CPU */
598#define MVPP2_CPU_DESC_CHUNK		16
599
600/* Max number of Tx descriptors in each aggregated queue */
601#define MVPP2_AGGR_TXQ_SIZE		16
602
603/* Descriptor aligned size */
604#define MVPP2_DESC_ALIGNED_SIZE		32
605
606/* Descriptor alignment mask */
607#define MVPP2_TX_DESC_ALIGN		(MVPP2_DESC_ALIGNED_SIZE - 1)
608
609/* RX FIFO constants */
610#define MVPP21_RX_FIFO_PORT_DATA_SIZE		0x2000
611#define MVPP21_RX_FIFO_PORT_ATTR_SIZE		0x80
612#define MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE	0x8000
613#define MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE	0x2000
614#define MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE	0x1000
615#define MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE	0x200
616#define MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE	0x80
617#define MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE	0x40
618#define MVPP2_RX_FIFO_PORT_MIN_PKT		0x80
619
620/* TX general registers */
621#define MVPP22_TX_FIFO_SIZE_REG(eth_tx_port)	(0x8860 + ((eth_tx_port) << 2))
622#define MVPP22_TX_FIFO_SIZE_MASK		0xf
623
624/* TX FIFO constants */
625#define MVPP2_TX_FIFO_DATA_SIZE_10KB		0xa
626#define MVPP2_TX_FIFO_DATA_SIZE_3KB		0x3
627
628/* RX buffer constants */
629#define MVPP2_SKB_SHINFO_SIZE \
630	0
631
632#define MVPP2_RX_PKT_SIZE(mtu) \
633	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
634	      ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)
635
636#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
637#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
638#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
639	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
640
641#define MVPP2_BIT_TO_BYTE(bit)		((bit) / 8)
642
643/* IPv6 max L3 address size */
644#define MVPP2_MAX_L3_ADDR_SIZE		16
645
646/* Port flags */
647#define MVPP2_F_LOOPBACK		BIT(0)
648
649/* Marvell tag types */
650enum mvpp2_tag_type {
651	MVPP2_TAG_TYPE_NONE = 0,
652	MVPP2_TAG_TYPE_MH   = 1,
653	MVPP2_TAG_TYPE_DSA  = 2,
654	MVPP2_TAG_TYPE_EDSA = 3,
655	MVPP2_TAG_TYPE_VLAN = 4,
656	MVPP2_TAG_TYPE_LAST = 5
657};
658
659/* Parser constants */
660#define MVPP2_PRS_TCAM_SRAM_SIZE	256
661#define MVPP2_PRS_TCAM_WORDS		6
662#define MVPP2_PRS_SRAM_WORDS		4
663#define MVPP2_PRS_FLOW_ID_SIZE		64
664#define MVPP2_PRS_FLOW_ID_MASK		0x3f
665#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
666#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
667#define MVPP2_PRS_IPV4_HEAD		0x40
668#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
669#define MVPP2_PRS_IPV4_MC		0xe0
670#define MVPP2_PRS_IPV4_MC_MASK		0xf0
671#define MVPP2_PRS_IPV4_BC_MASK		0xff
672#define MVPP2_PRS_IPV4_IHL		0x5
673#define MVPP2_PRS_IPV4_IHL_MASK		0xf
674#define MVPP2_PRS_IPV6_MC		0xff
675#define MVPP2_PRS_IPV6_MC_MASK		0xff
676#define MVPP2_PRS_IPV6_HOP_MASK		0xff
677#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
678#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
679#define MVPP2_PRS_DBL_VLANS_MAX		100
680
681/* Tcam structure:
682 * - lookup ID - 4 bits
683 * - port ID - 1 byte
684 * - additional information - 1 byte
685 * - header data - 8 bytes
686 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
687 */
688#define MVPP2_PRS_AI_BITS			8
689#define MVPP2_PRS_PORT_MASK			0xff
690#define MVPP2_PRS_LU_MASK			0xf
691#define MVPP2_PRS_TCAM_DATA_BYTE(offs)		\
692				    (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
693#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)	\
694					      (((offs) * 2) - ((offs) % 2)  + 2)
695#define MVPP2_PRS_TCAM_AI_BYTE			16
696#define MVPP2_PRS_TCAM_PORT_BYTE		17
697#define MVPP2_PRS_TCAM_LU_BYTE			20
698#define MVPP2_PRS_TCAM_EN_OFFS(offs)		((offs) + 2)
699#define MVPP2_PRS_TCAM_INV_WORD			5
700/* Tcam entries ID */
701#define MVPP2_PE_DROP_ALL		0
702#define MVPP2_PE_FIRST_FREE_TID		1
703#define MVPP2_PE_LAST_FREE_TID		(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
704#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
705#define MVPP2_PE_MAC_MC_IP6		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
706#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
707#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
708#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
709#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
710#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
711#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
712#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
713#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
714#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
715#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
716#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
717#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
718#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
719#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
720#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
721#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
722#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
723#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
724#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
725#define MVPP2_PE_MAC_MC_ALL		(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
726#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
727#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)
728
729/* Sram structure
730 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
731 */
732#define MVPP2_PRS_SRAM_RI_OFFS			0
733#define MVPP2_PRS_SRAM_RI_WORD			0
734#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
735#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
736#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
737#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
738#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
739#define MVPP2_PRS_SRAM_UDF_OFFS			73
740#define MVPP2_PRS_SRAM_UDF_BITS			8
741#define MVPP2_PRS_SRAM_UDF_MASK			0xff
742#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
743#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
744#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
745#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
746#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
747#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
748#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
749#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
750#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
751#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
752#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
753#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
754#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
755#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
756#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
757#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
758#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
759#define MVPP2_PRS_SRAM_AI_OFFS			90
760#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
761#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
762#define MVPP2_PRS_SRAM_AI_MASK			0xff
763#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
764#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
765#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
766#define MVPP2_PRS_SRAM_LU_GEN_BIT		111
767
768/* Sram result info bits assignment */
769#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
770#define MVPP2_PRS_RI_DSA_MASK			0x2
771#define MVPP2_PRS_RI_VLAN_MASK			(BIT(2) | BIT(3))
772#define MVPP2_PRS_RI_VLAN_NONE			0x0
773#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
774#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
775#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
776#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
777#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
778#define MVPP2_PRS_RI_L2_CAST_MASK		(BIT(9) | BIT(10))
779#define MVPP2_PRS_RI_L2_UCAST			0x0
780#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
781#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
782#define MVPP2_PRS_RI_PPPOE_MASK			0x800
783#define MVPP2_PRS_RI_L3_PROTO_MASK		(BIT(12) | BIT(13) | BIT(14))
784#define MVPP2_PRS_RI_L3_UN			0x0
785#define MVPP2_PRS_RI_L3_IP4			BIT(12)
786#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
787#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
788#define MVPP2_PRS_RI_L3_IP6			BIT(14)
789#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
790#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
791#define MVPP2_PRS_RI_L3_ADDR_MASK		(BIT(15) | BIT(16))
792#define MVPP2_PRS_RI_L3_UCAST			0x0
793#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
794#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
795#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
796#define MVPP2_PRS_RI_UDF3_MASK			0x300000
797#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
798#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
799#define MVPP2_PRS_RI_L4_TCP			BIT(22)
800#define MVPP2_PRS_RI_L4_UDP			BIT(23)
801#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
802#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
803#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
804#define MVPP2_PRS_RI_DROP_MASK			0x80000000
805
806/* Sram additional info bits assignment */
807#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
808#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
809#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
810#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
811#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
812#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
813#define MVPP2_PRS_SINGLE_VLAN_AI		0
814#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)
815
816/* DSA/EDSA type */
817#define MVPP2_PRS_TAGGED		true
818#define MVPP2_PRS_UNTAGGED		false
819#define MVPP2_PRS_EDSA			true
820#define MVPP2_PRS_DSA			false
821
822/* MAC entries, shadow udf */
823enum mvpp2_prs_udf {
824	MVPP2_PRS_UDF_MAC_DEF,
825	MVPP2_PRS_UDF_MAC_RANGE,
826	MVPP2_PRS_UDF_L2_DEF,
827	MVPP2_PRS_UDF_L2_DEF_COPY,
828	MVPP2_PRS_UDF_L2_USER,
829};
830
831/* Lookup ID */
832enum mvpp2_prs_lookup {
833	MVPP2_PRS_LU_MH,
834	MVPP2_PRS_LU_MAC,
835	MVPP2_PRS_LU_DSA,
836	MVPP2_PRS_LU_VLAN,
837	MVPP2_PRS_LU_L2,
838	MVPP2_PRS_LU_PPPOE,
839	MVPP2_PRS_LU_IP4,
840	MVPP2_PRS_LU_IP6,
841	MVPP2_PRS_LU_FLOWS,
842	MVPP2_PRS_LU_LAST,
843};
844
845/* L3 cast enum */
846enum mvpp2_prs_l3_cast {
847	MVPP2_PRS_L3_UNI_CAST,
848	MVPP2_PRS_L3_MULTI_CAST,
849	MVPP2_PRS_L3_BROAD_CAST
850};
851
852/* Classifier constants */
853#define MVPP2_CLS_FLOWS_TBL_SIZE	512
854#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
855#define MVPP2_CLS_LKP_TBL_SIZE		64
856
857/* BM constants */
858#define MVPP2_BM_POOLS_NUM		1
859#define MVPP2_BM_LONG_BUF_NUM		16
860#define MVPP2_BM_SHORT_BUF_NUM		16
861#define MVPP2_BM_POOL_SIZE_MAX		(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
862#define MVPP2_BM_POOL_PTR_ALIGN		128
863#define MVPP2_BM_SWF_LONG_POOL(port)	0
864
865/* BM cookie (32 bits) definition */
866#define MVPP2_BM_COOKIE_POOL_OFFS	8
867#define MVPP2_BM_COOKIE_CPU_OFFS	24
868
869/* BM short pool packet size
870 * These value assure that for SWF the total number
871 * of bytes allocated for each buffer will be 512
872 */
873#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)
874
875enum mvpp2_bm_type {
876	MVPP2_BM_FREE,
877	MVPP2_BM_SWF_LONG,
878	MVPP2_BM_SWF_SHORT
879};
880
881/* Definitions */
882
883/* Shared Packet Processor resources */
884struct mvpp2 {
885	/* Shared registers' base addresses */
886	void __iomem *base;
887	void __iomem *lms_base;
888	void __iomem *iface_base;
889
890	void __iomem *mpcs_base;
891	void __iomem *xpcs_base;
892	void __iomem *rfu1_base;
893
894	u32 netc_config;
895
896	/* List of pointers to port structures */
897	struct mvpp2_port **port_list;
898
899	/* Aggregated TXQs */
900	struct mvpp2_tx_queue *aggr_txqs;
901
902	/* BM pools */
903	struct mvpp2_bm_pool *bm_pools;
904
905	/* PRS shadow table */
906	struct mvpp2_prs_shadow *prs_shadow;
907	/* PRS auxiliary table for double vlan entries control */
908	bool *prs_double_vlans;
909
910	/* Tclk value */
911	u32 tclk;
912
913	/* HW version */
914	enum { MVPP21, MVPP22 } hw_version;
915
916	/* Maximum number of RXQs per port */
917	unsigned int max_port_rxqs;
918
919	int probe_done;
920	u8 num_ports;
921};
922
923struct mvpp2_pcpu_stats {
924	u64	rx_packets;
925	u64	rx_bytes;
926	u64	tx_packets;
927	u64	tx_bytes;
928};
929
930struct mvpp2_port {
931	u8 id;
932
933	/* Index of the port from the "group of ports" complex point
934	 * of view
935	 */
936	int gop_id;
937
938	int irq;
939
940	struct mvpp2 *priv;
941
942	/* Per-port registers' base address */
943	void __iomem *base;
944
945	struct mvpp2_rx_queue **rxqs;
946	struct mvpp2_tx_queue **txqs;
947
948	int pkt_size;
949
950	u32 pending_cause_rx;
951
952	/* Per-CPU port control */
953	struct mvpp2_port_pcpu __percpu *pcpu;
954
955	/* Flags */
956	unsigned long flags;
957
958	u16 tx_ring_size;
959	u16 rx_ring_size;
960	struct mvpp2_pcpu_stats __percpu *stats;
961
962	struct phy_device *phy_dev;
963	phy_interface_t phy_interface;
964	int phyaddr;
965	struct udevice *mdio_dev;
966	struct mii_dev *bus;
967#if CONFIG_IS_ENABLED(DM_GPIO)
968	struct gpio_desc phy_reset_gpio;
969	struct gpio_desc phy_tx_disable_gpio;
970#endif
971	int init;
972	unsigned int link;
973	unsigned int duplex;
974	unsigned int speed;
975
976	struct mvpp2_bm_pool *pool_long;
977	struct mvpp2_bm_pool *pool_short;
978
979	/* Index of first port's physical RXQ */
980	u8 first_rxq;
981
982	u8 dev_addr[ETH_ALEN];
983};
984
985/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
986 * layout of the transmit and reception DMA descriptors, and their
987 * layout is therefore defined by the hardware design
988 */
989
990#define MVPP2_TXD_L3_OFF_SHIFT		0
991#define MVPP2_TXD_IP_HLEN_SHIFT		8
992#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
993#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
994#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
995#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
996#define MVPP2_TXD_L4_UDP		BIT(24)
997#define MVPP2_TXD_L3_IP6		BIT(26)
998#define MVPP2_TXD_L_DESC		BIT(28)
999#define MVPP2_TXD_F_DESC		BIT(29)
1000
1001#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
1002#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
1003#define MVPP2_RXD_ERR_CRC		0x0
1004#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
1005#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
1006#define MVPP2_RXD_BM_POOL_ID_OFFS	16
1007#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
1008#define MVPP2_RXD_HWF_SYNC		BIT(21)
1009#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
1010#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
1011#define MVPP2_RXD_L4_TCP		BIT(25)
1012#define MVPP2_RXD_L4_UDP		BIT(26)
1013#define MVPP2_RXD_L3_IP4		BIT(28)
1014#define MVPP2_RXD_L3_IP6		BIT(30)
1015#define MVPP2_RXD_BUF_HDR		BIT(31)
1016
1017/* HW TX descriptor for PPv2.1 */
1018struct mvpp21_tx_desc {
1019	u32 command;		/* Options used by HW for packet transmitting.*/
1020	u8  packet_offset;	/* the offset from the buffer beginning	*/
1021	u8  phys_txq;		/* destination queue ID			*/
1022	u16 data_size;		/* data size of transmitted packet in bytes */
1023	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
1024	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
1025	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
1026	u32 reserved2;		/* reserved (for future use)		*/
1027};
1028
1029/* HW RX descriptor for PPv2.1 */
1030struct mvpp21_rx_desc {
1031	u32 status;		/* info about received packet		*/
1032	u16 reserved1;		/* parser_info (for future use, PnC)	*/
1033	u16 data_size;		/* size of received packet in bytes	*/
1034	u32 buf_dma_addr;	/* physical address of the buffer	*/
1035	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
1036	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
1037	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
1038	u8  reserved4;		/* bm_qset (for future use, BM)		*/
1039	u8  reserved5;
1040	u16 reserved6;		/* classify_info (for future use, PnC)	*/
1041	u32 reserved7;		/* flow_id (for future use, PnC) */
1042	u32 reserved8;
1043};
1044
1045/* HW TX descriptor for PPv2.2 */
1046struct mvpp22_tx_desc {
1047	u32 command;
1048	u8  packet_offset;
1049	u8  phys_txq;
1050	u16 data_size;
1051	u64 reserved1;
1052	u64 buf_dma_addr_ptp;
1053	u64 buf_cookie_misc;
1054};
1055
1056/* HW RX descriptor for PPv2.2 */
1057struct mvpp22_rx_desc {
1058	u32 status;
1059	u16 reserved1;
1060	u16 data_size;
1061	u32 reserved2;
1062	u32 reserved3;
1063	u64 buf_dma_addr_key_hash;
1064	u64 buf_cookie_misc;
1065};
1066
1067/* Opaque type used by the driver to manipulate the HW TX and RX
1068 * descriptors
1069 */
1070struct mvpp2_tx_desc {
1071	union {
1072		struct mvpp21_tx_desc pp21;
1073		struct mvpp22_tx_desc pp22;
1074	};
1075};
1076
1077struct mvpp2_rx_desc {
1078	union {
1079		struct mvpp21_rx_desc pp21;
1080		struct mvpp22_rx_desc pp22;
1081	};
1082};
1083
1084/* Per-CPU Tx queue control */
1085struct mvpp2_txq_pcpu {
1086	int cpu;
1087
1088	/* Number of Tx DMA descriptors in the descriptor ring */
1089	int size;
1090
1091	/* Number of currently used Tx DMA descriptor in the
1092	 * descriptor ring
1093	 */
1094	int count;
1095
1096	/* Number of Tx DMA descriptors reserved for each CPU */
1097	int reserved_num;
1098
1099	/* Index of last TX DMA descriptor that was inserted */
1100	int txq_put_index;
1101
1102	/* Index of the TX DMA descriptor to be cleaned up */
1103	int txq_get_index;
1104};
1105
1106struct mvpp2_tx_queue {
1107	/* Physical number of this Tx queue */
1108	u8 id;
1109
1110	/* Logical number of this Tx queue */
1111	u8 log_id;
1112
1113	/* Number of Tx DMA descriptors in the descriptor ring */
1114	int size;
1115
1116	/* Number of currently used Tx DMA descriptor in the descriptor ring */
1117	int count;
1118
1119	/* Per-CPU control of physical Tx queues */
1120	struct mvpp2_txq_pcpu __percpu *pcpu;
1121
1122	u32 done_pkts_coal;
1123
1124	/* Virtual address of thex Tx DMA descriptors array */
1125	struct mvpp2_tx_desc *descs;
1126
1127	/* DMA address of the Tx DMA descriptors array */
1128	dma_addr_t descs_dma;
1129
1130	/* Index of the last Tx DMA descriptor */
1131	int last_desc;
1132
1133	/* Index of the next Tx DMA descriptor to process */
1134	int next_desc_to_proc;
1135};
1136
1137struct mvpp2_rx_queue {
1138	/* RX queue number, in the range 0-31 for physical RXQs */
1139	u8 id;
1140
1141	/* Num of rx descriptors in the rx descriptor ring */
1142	int size;
1143
1144	u32 pkts_coal;
1145	u32 time_coal;
1146
1147	/* Virtual address of the RX DMA descriptors array */
1148	struct mvpp2_rx_desc *descs;
1149
1150	/* DMA address of the RX DMA descriptors array */
1151	dma_addr_t descs_dma;
1152
1153	/* Index of the last RX DMA descriptor */
1154	int last_desc;
1155
1156	/* Index of the next RX DMA descriptor to process */
1157	int next_desc_to_proc;
1158
1159	/* ID of port to which physical RXQ is mapped */
1160	int port;
1161
1162	/* Port's logic RXQ number to which physical RXQ is mapped */
1163	int logic_rxq;
1164};
1165
1166union mvpp2_prs_tcam_entry {
1167	u32 word[MVPP2_PRS_TCAM_WORDS];
1168	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
1169};
1170
1171union mvpp2_prs_sram_entry {
1172	u32 word[MVPP2_PRS_SRAM_WORDS];
1173	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
1174};
1175
1176struct mvpp2_prs_entry {
1177	u32 index;
1178	union mvpp2_prs_tcam_entry tcam;
1179	union mvpp2_prs_sram_entry sram;
1180};
1181
1182struct mvpp2_prs_shadow {
1183	bool valid;
1184	bool finish;
1185
1186	/* Lookup ID */
1187	int lu;
1188
1189	/* User defined offset */
1190	int udf;
1191
1192	/* Result info */
1193	u32 ri;
1194	u32 ri_mask;
1195};
1196
1197struct mvpp2_cls_flow_entry {
1198	u32 index;
1199	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
1200};
1201
1202struct mvpp2_cls_lookup_entry {
1203	u32 lkpid;
1204	u32 way;
1205	u32 data;
1206};
1207
1208struct mvpp2_bm_pool {
1209	/* Pool number in the range 0-7 */
1210	int id;
1211	enum mvpp2_bm_type type;
1212
1213	/* Buffer Pointers Pool External (BPPE) size */
1214	int size;
1215	/* Number of buffers for this pool */
1216	int buf_num;
1217	/* Pool buffer size */
1218	int buf_size;
1219	/* Packet size */
1220	int pkt_size;
1221
1222	/* BPPE virtual base address */
1223	unsigned long *virt_addr;
1224	/* BPPE DMA base address */
1225	dma_addr_t dma_addr;
1226
1227	/* Ports using BM pool */
1228	u32 port_map;
1229};
1230
1231/* Static declaractions */
1232
1233/* Number of RXQs used by single port */
1234static int rxq_number = MVPP2_DEFAULT_RXQ;
1235/* Number of TXQs used by single port */
1236static int txq_number = MVPP2_DEFAULT_TXQ;
1237
1238static int base_id;
1239
1240#define MVPP2_DRIVER_NAME "mvpp2"
1241#define MVPP2_DRIVER_VERSION "1.0"
1242
1243/*
1244 * U-Boot internal data, mostly uncached buffers for descriptors and data
1245 */
1246struct buffer_location {
1247	struct mvpp2_tx_desc *aggr_tx_descs;
1248	struct mvpp2_tx_desc *tx_descs;
1249	struct mvpp2_rx_desc *rx_descs;
1250	unsigned long *bm_pool[MVPP2_BM_POOLS_NUM];
1251	unsigned long *rx_buffer[MVPP2_BM_LONG_BUF_NUM];
1252	int first_rxq;
1253};
1254
1255/*
1256 * All 4 interfaces use the same global buffer, since only one interface
1257 * can be enabled at once
1258 */
1259static struct buffer_location buffer_loc;
1260static int buffer_loc_init;
1261
1262/*
1263 * Page table entries are set to 1MB, or multiples of 1MB
1264 * (not < 1MB). driver uses less bd's so use 1MB bdspace.
1265 */
1266#define BD_SPACE	(1 << 20)
1267
1268/* Utility/helper methods */
1269
1270static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
1271{
1272	writel(data, priv->base + offset);
1273}
1274
1275static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
1276{
1277	return readl(priv->base + offset);
1278}
1279
1280static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
1281				      struct mvpp2_tx_desc *tx_desc,
1282				      dma_addr_t dma_addr)
1283{
1284	if (port->priv->hw_version == MVPP21) {
1285		tx_desc->pp21.buf_dma_addr = dma_addr;
1286	} else {
1287		u64 val = (u64)dma_addr;
1288
1289		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
1290		tx_desc->pp22.buf_dma_addr_ptp |= val;
1291	}
1292}
1293
1294static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
1295				  struct mvpp2_tx_desc *tx_desc,
1296				  size_t size)
1297{
1298	if (port->priv->hw_version == MVPP21)
1299		tx_desc->pp21.data_size = size;
1300	else
1301		tx_desc->pp22.data_size = size;
1302}
1303
1304static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
1305				 struct mvpp2_tx_desc *tx_desc,
1306				 unsigned int txq)
1307{
1308	if (port->priv->hw_version == MVPP21)
1309		tx_desc->pp21.phys_txq = txq;
1310	else
1311		tx_desc->pp22.phys_txq = txq;
1312}
1313
1314static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
1315				 struct mvpp2_tx_desc *tx_desc,
1316				 unsigned int command)
1317{
1318	if (port->priv->hw_version == MVPP21)
1319		tx_desc->pp21.command = command;
1320	else
1321		tx_desc->pp22.command = command;
1322}
1323
1324static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
1325				    struct mvpp2_tx_desc *tx_desc,
1326				    unsigned int offset)
1327{
1328	if (port->priv->hw_version == MVPP21)
1329		tx_desc->pp21.packet_offset = offset;
1330	else
1331		tx_desc->pp22.packet_offset = offset;
1332}
1333
1334static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
1335					    struct mvpp2_rx_desc *rx_desc)
1336{
1337	if (port->priv->hw_version == MVPP21)
1338		return rx_desc->pp21.buf_dma_addr;
1339	else
1340		return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
1341}
1342
1343static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
1344					     struct mvpp2_rx_desc *rx_desc)
1345{
1346	if (port->priv->hw_version == MVPP21)
1347		return rx_desc->pp21.buf_cookie;
1348	else
1349		return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
1350}
1351
1352static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
1353				    struct mvpp2_rx_desc *rx_desc)
1354{
1355	if (port->priv->hw_version == MVPP21)
1356		return rx_desc->pp21.data_size;
1357	else
1358		return rx_desc->pp22.data_size;
1359}
1360
1361static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
1362				   struct mvpp2_rx_desc *rx_desc)
1363{
1364	if (port->priv->hw_version == MVPP21)
1365		return rx_desc->pp21.status;
1366	else
1367		return rx_desc->pp22.status;
1368}
1369
1370static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
1371{
1372	txq_pcpu->txq_get_index++;
1373	if (txq_pcpu->txq_get_index == txq_pcpu->size)
1374		txq_pcpu->txq_get_index = 0;
1375}
1376
1377/* Get number of physical egress port */
1378static inline int mvpp2_egress_port(struct mvpp2_port *port)
1379{
1380	return MVPP2_MAX_TCONT + port->id;
1381}
1382
1383/* Get number of physical TXQ */
1384static inline int mvpp2_txq_phys(int port, int txq)
1385{
1386	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
1387}
1388
1389/* Parser configuration routines */
1390
1391/* Update parser tcam and sram hw entries */
1392static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1393{
1394	int i;
1395
1396	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1397		return -EINVAL;
1398
1399	/* Clear entry invalidation bit */
1400	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
1401
1402	/* Write tcam index - indirect access */
1403	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1404	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1405		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
1406
1407	/* Write sram index - indirect access */
1408	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1409	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1410		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
1411
1412	return 0;
1413}
1414
1415/* Read tcam entry from hw */
1416static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1417{
1418	int i;
1419
1420	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1421		return -EINVAL;
1422
1423	/* Write tcam index - indirect access */
1424	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1425
1426	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
1427			      MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
1428	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
1429		return MVPP2_PRS_TCAM_ENTRY_INVALID;
1430
1431	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1432		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
1433
1434	/* Write sram index - indirect access */
1435	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1436	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1437		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
1438
1439	return 0;
1440}
1441
1442/* Invalidate tcam hw entry */
1443static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
1444{
1445	/* Write index - indirect access */
1446	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
1447	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
1448		    MVPP2_PRS_TCAM_INV_MASK);
1449}
1450
1451/* Enable shadow table entry and set its lookup ID */
1452static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
1453{
1454	priv->prs_shadow[index].valid = true;
1455	priv->prs_shadow[index].lu = lu;
1456}
1457
1458/* Update ri fields in shadow table entry */
1459static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
1460				    unsigned int ri, unsigned int ri_mask)
1461{
1462	priv->prs_shadow[index].ri_mask = ri_mask;
1463	priv->prs_shadow[index].ri = ri;
1464}
1465
1466/* Update lookup field in tcam sw entry */
1467static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
1468{
1469	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
1470
1471	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
1472	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
1473}
1474
1475/* Update mask for single port in tcam sw entry */
1476static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
1477				    unsigned int port, bool add)
1478{
1479	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1480
1481	if (add)
1482		pe->tcam.byte[enable_off] &= ~(1 << port);
1483	else
1484		pe->tcam.byte[enable_off] |= 1 << port;
1485}
1486
1487/* Update port map in tcam sw entry */
1488static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
1489					unsigned int ports)
1490{
1491	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
1492	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1493
1494	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
1495	pe->tcam.byte[enable_off] &= ~port_mask;
1496	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
1497}
1498
1499/* Obtain port map from tcam sw entry */
1500static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
1501{
1502	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1503
1504	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
1505}
1506
1507/* Set byte of data and its enable bits in tcam sw entry */
1508static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
1509					 unsigned int offs, unsigned char byte,
1510					 unsigned char enable)
1511{
1512	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
1513	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
1514}
1515
1516/* Get byte of data and its enable bits from tcam sw entry */
1517static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
1518					 unsigned int offs, unsigned char *byte,
1519					 unsigned char *enable)
1520{
1521	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
1522	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
1523}
1524
1525/* Set ethertype in tcam sw entry */
1526static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
1527				  unsigned short ethertype)
1528{
1529	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
1530	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
1531}
1532
1533/* Set bits in sram sw entry */
1534static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
1535				    int val)
1536{
1537	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
1538}
1539
1540/* Clear bits in sram sw entry */
1541static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
1542				      int val)
1543{
1544	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
1545}
1546
1547/* Update ri bits in sram sw entry */
1548static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
1549				     unsigned int bits, unsigned int mask)
1550{
1551	unsigned int i;
1552
1553	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
1554		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
1555
1556		if (!(mask & BIT(i)))
1557			continue;
1558
1559		if (bits & BIT(i))
1560			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
1561		else
1562			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
1563
1564		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
1565	}
1566}
1567
1568/* Update ai bits in sram sw entry */
1569static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
1570				     unsigned int bits, unsigned int mask)
1571{
1572	unsigned int i;
1573	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
1574
1575	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
1576
1577		if (!(mask & BIT(i)))
1578			continue;
1579
1580		if (bits & BIT(i))
1581			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
1582		else
1583			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
1584
1585		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
1586	}
1587}
1588
1589/* Read ai bits from sram sw entry */
1590static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
1591{
1592	u8 bits;
1593	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
1594	int ai_en_off = ai_off + 1;
1595	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
1596
1597	bits = (pe->sram.byte[ai_off] >> ai_shift) |
1598	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));
1599
1600	return bits;
1601}
1602
1603/* In sram sw entry set lookup ID field of the tcam key to be used in the next
1604 * lookup interation
1605 */
1606static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
1607				       unsigned int lu)
1608{
1609	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
1610
1611	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
1612				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
1613	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
1614}
1615
1616/* In the sram sw entry set sign and value of the next lookup offset
1617 * and the offset value generated to the classifier
1618 */
1619static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
1620				     unsigned int op)
1621{
1622	/* Set sign */
1623	if (shift < 0) {
1624		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1625		shift = 0 - shift;
1626	} else {
1627		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1628	}
1629
1630	/* Set value */
1631	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
1632							   (unsigned char)shift;
1633
1634	/* Reset and set operation */
1635	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
1636				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
1637	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
1638
1639	/* Set base offset as current */
1640	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1641}
1642
1643/* In the sram sw entry set sign and value of the user defined offset
1644 * generated to the classifier
1645 */
1646static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
1647				      unsigned int type, int offset,
1648				      unsigned int op)
1649{
1650	/* Set sign */
1651	if (offset < 0) {
1652		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1653		offset = 0 - offset;
1654	} else {
1655		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1656	}
1657
1658	/* Set value */
1659	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
1660				  MVPP2_PRS_SRAM_UDF_MASK);
1661	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
1662	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1663					MVPP2_PRS_SRAM_UDF_BITS)] &=
1664	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1665	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1666					MVPP2_PRS_SRAM_UDF_BITS)] |=
1667				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1668
1669	/* Set offset type */
1670	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
1671				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
1672	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
1673
1674	/* Set offset operation */
1675	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
1676				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
1677	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
1678
1679	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1680					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
1681					     ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
1682				    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1683
1684	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1685					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
1686			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1687
1688	/* Set base offset as current */
1689	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1690}
1691
1692/* Find parser flow entry */
1693static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
1694{
1695	struct mvpp2_prs_entry *pe;
1696	int tid;
1697
1698	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1699	if (!pe)
1700		return NULL;
1701	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
1702
1703	/* Go through the all entires with MVPP2_PRS_LU_FLOWS */
1704	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
1705		u8 bits;
1706
1707		if (!priv->prs_shadow[tid].valid ||
1708		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
1709			continue;
1710
1711		pe->index = tid;
1712		mvpp2_prs_hw_read(priv, pe);
1713		bits = mvpp2_prs_sram_ai_get(pe);
1714
1715		/* Sram store classification lookup ID in AI bits [5:0] */
1716		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
1717			return pe;
1718	}
1719	kfree(pe);
1720
1721	return NULL;
1722}
1723
1724/* Return first free tcam index, seeking from start to end */
1725static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
1726				     unsigned char end)
1727{
1728	int tid;
1729
1730	if (start > end)
1731		swap(start, end);
1732
1733	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
1734		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
1735
1736	for (tid = start; tid <= end; tid++) {
1737		if (!priv->prs_shadow[tid].valid)
1738			return tid;
1739	}
1740
1741	return -EINVAL;
1742}
1743
1744/* Enable/disable dropping all mac da's */
1745static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
1746{
1747	struct mvpp2_prs_entry pe;
1748
1749	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
1750		/* Entry exist - update port only */
1751		pe.index = MVPP2_PE_DROP_ALL;
1752		mvpp2_prs_hw_read(priv, &pe);
1753	} else {
1754		/* Entry doesn't exist - create new */
1755		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1756		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1757		pe.index = MVPP2_PE_DROP_ALL;
1758
1759		/* Non-promiscuous mode for all ports - DROP unknown packets */
1760		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1761					 MVPP2_PRS_RI_DROP_MASK);
1762
1763		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1764		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1765
1766		/* Update shadow table */
1767		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1768
1769		/* Mask all ports */
1770		mvpp2_prs_tcam_port_map_set(&pe, 0);
1771	}
1772
1773	/* Update port mask */
1774	mvpp2_prs_tcam_port_set(&pe, port, add);
1775
1776	mvpp2_prs_hw_write(priv, &pe);
1777}
1778
1779/* Set port to promiscuous mode */
1780static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
1781{
1782	struct mvpp2_prs_entry pe;
1783
1784	/* Promiscuous mode - Accept unknown packets */
1785
1786	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
1787		/* Entry exist - update port only */
1788		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1789		mvpp2_prs_hw_read(priv, &pe);
1790	} else {
1791		/* Entry doesn't exist - create new */
1792		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1793		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1794		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1795
1796		/* Continue - set next lookup */
1797		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1798
1799		/* Set result info bits */
1800		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
1801					 MVPP2_PRS_RI_L2_CAST_MASK);
1802
1803		/* Shift to ethertype */
1804		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1805					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1806
1807		/* Mask all ports */
1808		mvpp2_prs_tcam_port_map_set(&pe, 0);
1809
1810		/* Update shadow table */
1811		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1812	}
1813
1814	/* Update port mask */
1815	mvpp2_prs_tcam_port_set(&pe, port, add);
1816
1817	mvpp2_prs_hw_write(priv, &pe);
1818}
1819
1820/* Accept multicast */
1821static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
1822				    bool add)
1823{
1824	struct mvpp2_prs_entry pe;
1825	unsigned char da_mc;
1826
1827	/* Ethernet multicast address first byte is
1828	 * 0x01 for IPv4 and 0x33 for IPv6
1829	 */
1830	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
1831
1832	if (priv->prs_shadow[index].valid) {
1833		/* Entry exist - update port only */
1834		pe.index = index;
1835		mvpp2_prs_hw_read(priv, &pe);
1836	} else {
1837		/* Entry doesn't exist - create new */
1838		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1839		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1840		pe.index = index;
1841
1842		/* Continue - set next lookup */
1843		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1844
1845		/* Set result info bits */
1846		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
1847					 MVPP2_PRS_RI_L2_CAST_MASK);
1848
1849		/* Update tcam entry data first byte */
1850		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
1851
1852		/* Shift to ethertype */
1853		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1854					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1855
1856		/* Mask all ports */
1857		mvpp2_prs_tcam_port_map_set(&pe, 0);
1858
1859		/* Update shadow table */
1860		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1861	}
1862
1863	/* Update port mask */
1864	mvpp2_prs_tcam_port_set(&pe, port, add);
1865
1866	mvpp2_prs_hw_write(priv, &pe);
1867}
1868
1869/* Parser per-port initialization */
1870static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
1871				   int lu_max, int offset)
1872{
1873	u32 val;
1874
1875	/* Set lookup ID */
1876	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
1877	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
1878	val |=  MVPP2_PRS_PORT_LU_VAL(port, lu_first);
1879	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
1880
1881	/* Set maximum number of loops for packet received from port */
1882	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
1883	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
1884	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
1885	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
1886
1887	/* Set initial offset for packet header extraction for the first
1888	 * searching loop
1889	 */
1890	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
1891	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
1892	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
1893	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
1894}
1895
1896/* Default flow entries initialization for all ports */
1897static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
1898{
1899	struct mvpp2_prs_entry pe;
1900	int port;
1901
1902	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
1903		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1904		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1905		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
1906
1907		/* Mask all ports */
1908		mvpp2_prs_tcam_port_map_set(&pe, 0);
1909
1910		/* Set flow ID*/
1911		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
1912		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
1913
1914		/* Update shadow table and hw entry */
1915		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
1916		mvpp2_prs_hw_write(priv, &pe);
1917	}
1918}
1919
1920/* Set default entry for Marvell Header field */
1921static void mvpp2_prs_mh_init(struct mvpp2 *priv)
1922{
1923	struct mvpp2_prs_entry pe;
1924
1925	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1926
1927	pe.index = MVPP2_PE_MH_DEFAULT;
1928	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
1929	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
1930				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1931	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
1932
1933	/* Unmask all ports */
1934	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1935
1936	/* Update shadow table and hw entry */
1937	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
1938	mvpp2_prs_hw_write(priv, &pe);
1939}
1940
1941/* Set default entires (place holder) for promiscuous, non-promiscuous and
1942 * multicast MAC addresses
1943 */
1944static void mvpp2_prs_mac_init(struct mvpp2 *priv)
1945{
1946	struct mvpp2_prs_entry pe;
1947
1948	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1949
1950	/* Non-promiscuous mode for all ports - DROP unknown packets */
1951	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
1952	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1953
1954	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1955				 MVPP2_PRS_RI_DROP_MASK);
1956	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1957	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1958
1959	/* Unmask all ports */
1960	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1961
1962	/* Update shadow table and hw entry */
1963	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1964	mvpp2_prs_hw_write(priv, &pe);
1965
1966	/* place holders only - no ports */
1967	mvpp2_prs_mac_drop_all_set(priv, 0, false);
1968	mvpp2_prs_mac_promisc_set(priv, 0, false);
1969	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
1970	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
1971}
1972
1973/* Match basic ethertypes */
1974static int mvpp2_prs_etype_init(struct mvpp2 *priv)
1975{
1976	struct mvpp2_prs_entry pe;
1977	int tid;
1978
1979	/* Ethertype: PPPoE */
1980	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1981					MVPP2_PE_LAST_FREE_TID);
1982	if (tid < 0)
1983		return tid;
1984
1985	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1986	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1987	pe.index = tid;
1988
1989	mvpp2_prs_match_etype(&pe, 0, PROT_PPP_SES);
1990
1991	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
1992				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1993	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
1994	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
1995				 MVPP2_PRS_RI_PPPOE_MASK);
1996
1997	/* Update shadow table and hw entry */
1998	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1999	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2000	priv->prs_shadow[pe.index].finish = false;
2001	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
2002				MVPP2_PRS_RI_PPPOE_MASK);
2003	mvpp2_prs_hw_write(priv, &pe);
2004
2005	/* Ethertype: ARP */
2006	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2007					MVPP2_PE_LAST_FREE_TID);
2008	if (tid < 0)
2009		return tid;
2010
2011	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2012	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2013	pe.index = tid;
2014
2015	mvpp2_prs_match_etype(&pe, 0, PROT_ARP);
2016
2017	/* Generate flow in the next iteration*/
2018	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2019	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2020	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2021				 MVPP2_PRS_RI_L3_PROTO_MASK);
2022	/* Set L3 offset */
2023	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2024				  MVPP2_ETH_TYPE_LEN,
2025				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2026
2027	/* Update shadow table and hw entry */
2028	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2029	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2030	priv->prs_shadow[pe.index].finish = true;
2031	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2032				MVPP2_PRS_RI_L3_PROTO_MASK);
2033	mvpp2_prs_hw_write(priv, &pe);
2034
2035	/* Ethertype: LBTD */
2036	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2037					MVPP2_PE_LAST_FREE_TID);
2038	if (tid < 0)
2039		return tid;
2040
2041	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2042	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2043	pe.index = tid;
2044
2045	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
2046
2047	/* Generate flow in the next iteration*/
2048	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2049	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2050	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2051				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2052				 MVPP2_PRS_RI_CPU_CODE_MASK |
2053				 MVPP2_PRS_RI_UDF3_MASK);
2054	/* Set L3 offset */
2055	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2056				  MVPP2_ETH_TYPE_LEN,
2057				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2058
2059	/* Update shadow table and hw entry */
2060	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2061	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2062	priv->prs_shadow[pe.index].finish = true;
2063	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2064				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2065				MVPP2_PRS_RI_CPU_CODE_MASK |
2066				MVPP2_PRS_RI_UDF3_MASK);
2067	mvpp2_prs_hw_write(priv, &pe);
2068
2069	/* Ethertype: IPv4 without options */
2070	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2071					MVPP2_PE_LAST_FREE_TID);
2072	if (tid < 0)
2073		return tid;
2074
2075	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2076	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2077	pe.index = tid;
2078
2079	mvpp2_prs_match_etype(&pe, 0, PROT_IP);
2080	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2081				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2082				     MVPP2_PRS_IPV4_HEAD_MASK |
2083				     MVPP2_PRS_IPV4_IHL_MASK);
2084
2085	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2086	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2087				 MVPP2_PRS_RI_L3_PROTO_MASK);
2088	/* Skip eth_type + 4 bytes of IP header */
2089	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2090				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2091	/* Set L3 offset */
2092	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2093				  MVPP2_ETH_TYPE_LEN,
2094				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2095
2096	/* Update shadow table and hw entry */
2097	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2098	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2099	priv->prs_shadow[pe.index].finish = false;
2100	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2101				MVPP2_PRS_RI_L3_PROTO_MASK);
2102	mvpp2_prs_hw_write(priv, &pe);
2103
2104	/* Ethertype: IPv4 with options */
2105	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2106					MVPP2_PE_LAST_FREE_TID);
2107	if (tid < 0)
2108		return tid;
2109
2110	pe.index = tid;
2111
2112	/* Clear tcam data before updating */
2113	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
2114	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
2115
2116	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2117				     MVPP2_PRS_IPV4_HEAD,
2118				     MVPP2_PRS_IPV4_HEAD_MASK);
2119
2120	/* Clear ri before updating */
2121	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2122	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2123	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2124				 MVPP2_PRS_RI_L3_PROTO_MASK);
2125
2126	/* Update shadow table and hw entry */
2127	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2128	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2129	priv->prs_shadow[pe.index].finish = false;
2130	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
2131				MVPP2_PRS_RI_L3_PROTO_MASK);
2132	mvpp2_prs_hw_write(priv, &pe);
2133
2134	/* Ethertype: IPv6 without options */
2135	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2136					MVPP2_PE_LAST_FREE_TID);
2137	if (tid < 0)
2138		return tid;
2139
2140	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2141	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2142	pe.index = tid;
2143
2144	mvpp2_prs_match_etype(&pe, 0, PROT_IPV6);
2145
2146	/* Skip DIP of IPV6 header */
2147	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
2148				 MVPP2_MAX_L3_ADDR_SIZE,
2149				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2150	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2151	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2152				 MVPP2_PRS_RI_L3_PROTO_MASK);
2153	/* Set L3 offset */
2154	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2155				  MVPP2_ETH_TYPE_LEN,
2156				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2157
2158	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2159	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2160	priv->prs_shadow[pe.index].finish = false;
2161	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
2162				MVPP2_PRS_RI_L3_PROTO_MASK);
2163	mvpp2_prs_hw_write(priv, &pe);
2164
2165	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
2166	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2167	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2168	pe.index = MVPP2_PE_ETH_TYPE_UN;
2169
2170	/* Unmask all ports */
2171	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2172
2173	/* Generate flow in the next iteration*/
2174	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2175	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2176	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2177				 MVPP2_PRS_RI_L3_PROTO_MASK);
2178	/* Set L3 offset even it's unknown L3 */
2179	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2180				  MVPP2_ETH_TYPE_LEN,
2181				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2182
2183	/* Update shadow table and hw entry */
2184	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2185	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2186	priv->prs_shadow[pe.index].finish = true;
2187	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
2188				MVPP2_PRS_RI_L3_PROTO_MASK);
2189	mvpp2_prs_hw_write(priv, &pe);
2190
2191	return 0;
2192}
2193
2194/* Parser default initialization */
2195static int mvpp2_prs_default_init(struct udevice *dev,
2196				  struct mvpp2 *priv)
2197{
2198	int err, index, i;
2199
2200	/* Enable tcam table */
2201	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
2202
2203	/* Clear all tcam and sram entries */
2204	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
2205		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
2206		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
2207			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
2208
2209		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
2210		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
2211			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
2212	}
2213
2214	/* Invalidate all tcam entries */
2215	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
2216		mvpp2_prs_hw_inv(priv, index);
2217
2218	priv->prs_shadow = devm_kcalloc(dev, MVPP2_PRS_TCAM_SRAM_SIZE,
2219					sizeof(struct mvpp2_prs_shadow),
2220					GFP_KERNEL);
2221	if (!priv->prs_shadow)
2222		return -ENOMEM;
2223
2224	/* Always start from lookup = 0 */
2225	for (index = 0; index < MVPP2_MAX_PORTS; index++)
2226		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
2227				       MVPP2_PRS_PORT_LU_MAX, 0);
2228
2229	mvpp2_prs_def_flow_init(priv);
2230
2231	mvpp2_prs_mh_init(priv);
2232
2233	mvpp2_prs_mac_init(priv);
2234
2235	err = mvpp2_prs_etype_init(priv);
2236	if (err)
2237		return err;
2238
2239	return 0;
2240}
2241
2242/* Compare MAC DA with tcam entry data */
2243static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2244				       const u8 *da, unsigned char *mask)
2245{
2246	unsigned char tcam_byte, tcam_mask;
2247	int index;
2248
2249	for (index = 0; index < ETH_ALEN; index++) {
2250		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
2251		if (tcam_mask != mask[index])
2252			return false;
2253
2254		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
2255			return false;
2256	}
2257
2258	return true;
2259}
2260
2261/* Find tcam entry with matched pair <MAC DA, port> */
2262static struct mvpp2_prs_entry *
2263mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
2264			    unsigned char *mask, int udf_type)
2265{
2266	struct mvpp2_prs_entry *pe;
2267	int tid;
2268
2269	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2270	if (!pe)
2271		return NULL;
2272	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
2273
2274	/* Go through the all entires with MVPP2_PRS_LU_MAC */
2275	for (tid = MVPP2_PE_FIRST_FREE_TID;
2276	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2277		unsigned int entry_pmap;
2278
2279		if (!priv->prs_shadow[tid].valid ||
2280		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
2281		    (priv->prs_shadow[tid].udf != udf_type))
2282			continue;
2283
2284		pe->index = tid;
2285		mvpp2_prs_hw_read(priv, pe);
2286		entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
2287
2288		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
2289		    entry_pmap == pmap)
2290			return pe;
2291	}
2292	kfree(pe);
2293
2294	return NULL;
2295}
2296
2297/* Update parser's mac da entry */
2298static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
2299				   const u8 *da, bool add)
2300{
2301	struct mvpp2_prs_entry *pe;
2302	unsigned int pmap, len, ri;
2303	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
2304	int tid;
2305
2306	/* Scan TCAM and see if entry with this <MAC DA, port> already exist */
2307	pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
2308					 MVPP2_PRS_UDF_MAC_DEF);
2309
2310	/* No such entry */
2311	if (!pe) {
2312		if (!add)
2313			return 0;
2314
2315		/* Create new TCAM entry */
2316		/* Find first range mac entry*/
2317		for (tid = MVPP2_PE_FIRST_FREE_TID;
2318		     tid <= MVPP2_PE_LAST_FREE_TID; tid++)
2319			if (priv->prs_shadow[tid].valid &&
2320			    (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
2321			    (priv->prs_shadow[tid].udf ==
2322						       MVPP2_PRS_UDF_MAC_RANGE))
2323				break;
2324
2325		/* Go through the all entries from first to last */
2326		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2327						tid - 1);
2328		if (tid < 0)
2329			return tid;
2330
2331		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2332		if (!pe)
2333			return -1;
2334		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
2335		pe->index = tid;
2336
2337		/* Mask all ports */
2338		mvpp2_prs_tcam_port_map_set(pe, 0);
2339	}
2340
2341	/* Update port mask */
2342	mvpp2_prs_tcam_port_set(pe, port, add);
2343
2344	/* Invalidate the entry if no ports are left enabled */
2345	pmap = mvpp2_prs_tcam_port_map_get(pe);
2346	if (pmap == 0) {
2347		if (add) {
2348			kfree(pe);
2349			return -1;
2350		}
2351		mvpp2_prs_hw_inv(priv, pe->index);
2352		priv->prs_shadow[pe->index].valid = false;
2353		kfree(pe);
2354		return 0;
2355	}
2356
2357	/* Continue - set next lookup */
2358	mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
2359
2360	/* Set match on DA */
2361	len = ETH_ALEN;
2362	while (len--)
2363		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
2364
2365	/* Set result info bits */
2366	ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
2367
2368	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2369				 MVPP2_PRS_RI_MAC_ME_MASK);
2370	mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2371				MVPP2_PRS_RI_MAC_ME_MASK);
2372
2373	/* Shift to ethertype */
2374	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
2375				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2376
2377	/* Update shadow table and hw entry */
2378	priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
2379	mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
2380	mvpp2_prs_hw_write(priv, pe);
2381
2382	kfree(pe);
2383
2384	return 0;
2385}
2386
2387static int mvpp2_prs_update_mac_da(struct mvpp2_port *port, const u8 *da)
2388{
2389	int err;
2390
2391	/* Remove old parser entry */
2392	err = mvpp2_prs_mac_da_accept(port->priv, port->id, port->dev_addr,
2393				      false);
2394	if (err)
2395		return err;
2396
2397	/* Add new parser entry */
2398	err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
2399	if (err)
2400		return err;
2401
2402	/* Set addr in the device */
2403	memcpy(port->dev_addr, da, ETH_ALEN);
2404
2405	return 0;
2406}
2407
2408/* Set prs flow for the port */
2409static int mvpp2_prs_def_flow(struct mvpp2_port *port)
2410{
2411	struct mvpp2_prs_entry *pe;
2412	int tid;
2413
2414	pe = mvpp2_prs_flow_find(port->priv, port->id);
2415
2416	/* Such entry not exist */
2417	if (!pe) {
2418		/* Go through the all entires from last to first */
2419		tid = mvpp2_prs_tcam_first_free(port->priv,
2420						MVPP2_PE_LAST_FREE_TID,
2421					       MVPP2_PE_FIRST_FREE_TID);
2422		if (tid < 0)
2423			return tid;
2424
2425		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2426		if (!pe)
2427			return -ENOMEM;
2428
2429		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
2430		pe->index = tid;
2431
2432		/* Set flow ID*/
2433		mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
2434		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2435
2436		/* Update shadow table */
2437		mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
2438	}
2439
2440	mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
2441	mvpp2_prs_hw_write(port->priv, pe);
2442	kfree(pe);
2443
2444	return 0;
2445}
2446
2447/* Classifier configuration routines */
2448
2449/* Update classification flow table registers */
2450static void mvpp2_cls_flow_write(struct mvpp2 *priv,
2451				 struct mvpp2_cls_flow_entry *fe)
2452{
2453	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
2454	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG,  fe->data[0]);
2455	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG,  fe->data[1]);
2456	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG,  fe->data[2]);
2457}
2458
2459/* Update classification lookup table register */
2460static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
2461				   struct mvpp2_cls_lookup_entry *le)
2462{
2463	u32 val;
2464
2465	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
2466	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
2467	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
2468}
2469
2470/* Classifier default initialization */
2471static void mvpp2_cls_init(struct mvpp2 *priv)
2472{
2473	struct mvpp2_cls_lookup_entry le;
2474	struct mvpp2_cls_flow_entry fe;
2475	int index;
2476
2477	/* Enable classifier */
2478	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
2479
2480	/* Clear classifier flow table */
2481	memset(&fe.data, 0, MVPP2_CLS_FLOWS_TBL_DATA_WORDS);
2482	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
2483		fe.index = index;
2484		mvpp2_cls_flow_write(priv, &fe);
2485	}
2486
2487	/* Clear classifier lookup table */
2488	le.data = 0;
2489	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
2490		le.lkpid = index;
2491		le.way = 0;
2492		mvpp2_cls_lookup_write(priv, &le);
2493
2494		le.way = 1;
2495		mvpp2_cls_lookup_write(priv, &le);
2496	}
2497}
2498
2499static void mvpp2_cls_port_config(struct mvpp2_port *port)
2500{
2501	struct mvpp2_cls_lookup_entry le;
2502	u32 val;
2503
2504	/* Set way for the port */
2505	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
2506	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
2507	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
2508
2509	/* Pick the entry to be accessed in lookup ID decoding table
2510	 * according to the way and lkpid.
2511	 */
2512	le.lkpid = port->id;
2513	le.way = 0;
2514	le.data = 0;
2515
2516	/* Set initial CPU queue for receiving packets */
2517	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
2518	le.data |= port->first_rxq;
2519
2520	/* Disable classification engines */
2521	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
2522
2523	/* Update lookup ID table entry */
2524	mvpp2_cls_lookup_write(port->priv, &le);
2525}
2526
2527/* Set CPU queue number for oversize packets */
2528static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
2529{
2530	u32 val;
2531
2532	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
2533		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
2534
2535	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
2536		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
2537
2538	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
2539	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
2540	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
2541}
2542
2543/* Buffer Manager configuration routines */
2544
2545/* Create pool */
2546static int mvpp2_bm_pool_create(struct udevice *dev,
2547				struct mvpp2 *priv,
2548				struct mvpp2_bm_pool *bm_pool, int size)
2549{
2550	u32 val;
2551
2552	/* Number of buffer pointers must be a multiple of 16, as per
2553	 * hardware constraints
2554	 */
2555	if (!IS_ALIGNED(size, 16))
2556		return -EINVAL;
2557
2558	bm_pool->virt_addr = buffer_loc.bm_pool[bm_pool->id];
2559	bm_pool->dma_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id];
2560	if (!bm_pool->virt_addr)
2561		return -ENOMEM;
2562
2563	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
2564			MVPP2_BM_POOL_PTR_ALIGN)) {
2565		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
2566			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
2567		return -ENOMEM;
2568	}
2569
2570	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
2571		    lower_32_bits(bm_pool->dma_addr));
2572	if (priv->hw_version == MVPP22)
2573		mvpp2_write(priv, MVPP22_BM_POOL_BASE_HIGH_REG,
2574			    (upper_32_bits(bm_pool->dma_addr) &
2575			    MVPP22_BM_POOL_BASE_HIGH_MASK));
2576	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
2577
2578	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
2579	val |= MVPP2_BM_START_MASK;
2580	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
2581
2582	bm_pool->type = MVPP2_BM_FREE;
2583	bm_pool->size = size;
2584	bm_pool->pkt_size = 0;
2585	bm_pool->buf_num = 0;
2586
2587	return 0;
2588}
2589
2590/* Set pool buffer size */
2591static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
2592				      struct mvpp2_bm_pool *bm_pool,
2593				      int buf_size)
2594{
2595	u32 val;
2596
2597	bm_pool->buf_size = buf_size;
2598
2599	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
2600	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
2601}
2602
2603/* Free all buffers from the pool */
2604static void mvpp2_bm_bufs_free(struct udevice *dev, struct mvpp2 *priv,
2605			       struct mvpp2_bm_pool *bm_pool)
2606{
2607	int i;
2608
2609	for (i = 0; i < bm_pool->buf_num; i++) {
2610		/* Allocate buffer back from the buffer manager */
2611		mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
2612	}
2613
2614	bm_pool->buf_num = 0;
2615}
2616
2617/* Cleanup pool */
2618static int mvpp2_bm_pool_destroy(struct udevice *dev,
2619				 struct mvpp2 *priv,
2620				 struct mvpp2_bm_pool *bm_pool)
2621{
2622	u32 val;
2623
2624	mvpp2_bm_bufs_free(dev, priv, bm_pool);
2625	if (bm_pool->buf_num) {
2626		dev_err(dev, "cannot free all buffers in pool %d\n", bm_pool->id);
2627		return 0;
2628	}
2629
2630	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
2631	val |= MVPP2_BM_STOP_MASK;
2632	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
2633
2634	return 0;
2635}
2636
2637static int mvpp2_bm_pools_init(struct udevice *dev,
2638			       struct mvpp2 *priv)
2639{
2640	int i, err, size;
2641	struct mvpp2_bm_pool *bm_pool;
2642
2643	/* Create all pools with maximum size */
2644	size = MVPP2_BM_POOL_SIZE_MAX;
2645	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
2646		bm_pool = &priv->bm_pools[i];
2647		bm_pool->id = i;
2648		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
2649		if (err)
2650			goto err_unroll_pools;
2651		mvpp2_bm_pool_bufsize_set(priv, bm_pool, RX_BUFFER_SIZE);
2652	}
2653	return 0;
2654
2655err_unroll_pools:
2656	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
2657	for (i = i - 1; i >= 0; i--)
2658		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
2659	return err;
2660}
2661
2662static int mvpp2_bm_init(struct udevice *dev, struct mvpp2 *priv)
2663{
2664	int i, err;
2665
2666	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
2667		/* Mask BM all interrupts */
2668		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
2669		/* Clear BM cause register */
2670		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
2671	}
2672
2673	/* Allocate and initialize BM pools */
2674	priv->bm_pools = devm_kcalloc(dev, MVPP2_BM_POOLS_NUM,
2675				     sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
2676	if (!priv->bm_pools)
2677		return -ENOMEM;
2678
2679	err = mvpp2_bm_pools_init(dev, priv);
2680	if (err < 0)
2681		return err;
2682	return 0;
2683}
2684
2685/* Attach long pool to rxq */
2686static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
2687				    int lrxq, int long_pool)
2688{
2689	u32 val, mask;
2690	int prxq;
2691
2692	/* Get queue physical ID */
2693	prxq = port->rxqs[lrxq]->id;
2694
2695	if (port->priv->hw_version == MVPP21)
2696		mask = MVPP21_RXQ_POOL_LONG_MASK;
2697	else
2698		mask = MVPP22_RXQ_POOL_LONG_MASK;
2699
2700	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2701	val &= ~mask;
2702	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
2703	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
2704}
2705
2706/* Set pool number in a BM cookie */
2707static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
2708{
2709	u32 bm;
2710
2711	bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
2712	bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
2713
2714	return bm;
2715}
2716
2717/* Get pool number from a BM cookie */
2718static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
2719{
2720	return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
2721}
2722
2723/* Release buffer to BM */
2724static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
2725				     dma_addr_t buf_dma_addr,
2726				     unsigned long buf_phys_addr)
2727{
2728	if (port->priv->hw_version == MVPP22) {
2729		u32 val = 0;
2730
2731		if (sizeof(dma_addr_t) == 8)
2732			val |= upper_32_bits(buf_dma_addr) &
2733				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
2734
2735		if (sizeof(phys_addr_t) == 8)
2736			val |= (upper_32_bits(buf_phys_addr)
2737				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
2738				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
2739
2740		mvpp2_write(port->priv, MVPP22_BM_ADDR_HIGH_RLS_REG, val);
2741	}
2742
2743	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
2744	 * returned in the "cookie" field of the RX
2745	 * descriptor. Instead of storing the virtual address, we
2746	 * store the physical address
2747	 */
2748	mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
2749	mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
2750}
2751
2752/* Refill BM pool */
2753static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
2754			      dma_addr_t dma_addr,
2755			      phys_addr_t phys_addr)
2756{
2757	int pool = mvpp2_bm_cookie_pool_get(bm);
2758
2759	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
2760}
2761
2762/* Allocate buffers for the pool */
2763static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
2764			     struct mvpp2_bm_pool *bm_pool, int buf_num)
2765{
2766	int i;
2767
2768	if (buf_num < 0 ||
2769	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
2770		dev_err(port->phy_dev->dev,
2771			"cannot allocate %d buffers for pool %d\n", buf_num,
2772			bm_pool->id);
2773		return 0;
2774	}
2775
2776	for (i = 0; i < buf_num; i++) {
2777		mvpp2_bm_pool_put(port, bm_pool->id,
2778				  (dma_addr_t)buffer_loc.rx_buffer[i],
2779				  (unsigned long)buffer_loc.rx_buffer[i]);
2780
2781	}
2782
2783	/* Update BM driver with number of buffers added to pool */
2784	bm_pool->buf_num += i;
2785
2786	return i;
2787}
2788
2789/* Notify the driver that BM pool is being used as specific type and return the
2790 * pool pointer on success
2791 */
2792static struct mvpp2_bm_pool *
2793mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
2794		  int pkt_size)
2795{
2796	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
2797	int num;
2798
2799	if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
2800		dev_err(port->phy_dev->dev, "mixing pool types is forbidden\n");
2801		return NULL;
2802	}
2803
2804	if (new_pool->type == MVPP2_BM_FREE)
2805		new_pool->type = type;
2806
2807	/* Allocate buffers in case BM pool is used as long pool, but packet
2808	 * size doesn't match MTU or BM pool hasn't being used yet
2809	 */
2810	if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
2811	    (new_pool->pkt_size == 0)) {
2812		int pkts_num;
2813
2814		/* Set default buffer number or free all the buffers in case
2815		 * the pool is not empty
2816		 */
2817		pkts_num = new_pool->buf_num;
2818		if (pkts_num == 0)
2819			pkts_num = type == MVPP2_BM_SWF_LONG ?
2820				   MVPP2_BM_LONG_BUF_NUM :
2821				   MVPP2_BM_SHORT_BUF_NUM;
2822		else
2823			mvpp2_bm_bufs_free(NULL,
2824					   port->priv, new_pool);
2825
2826		new_pool->pkt_size = pkt_size;
2827
2828		/* Allocate buffers for this pool */
2829		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
2830		if (num != pkts_num) {
2831			dev_err(port->phy_dev->dev,
2832				"pool %d: %d of %d allocated\n", new_pool->id,
2833				num, pkts_num);
2834			return NULL;
2835		}
2836	}
2837
2838	return new_pool;
2839}
2840
2841/* Initialize pools for swf */
2842static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
2843{
2844	int rxq;
2845
2846	if (!port->pool_long) {
2847		port->pool_long =
2848		       mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
2849					 MVPP2_BM_SWF_LONG,
2850					 port->pkt_size);
2851		if (!port->pool_long)
2852			return -ENOMEM;
2853
2854		port->pool_long->port_map |= (1 << port->id);
2855
2856		for (rxq = 0; rxq < rxq_number; rxq++)
2857			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
2858	}
2859
2860	return 0;
2861}
2862
2863/* Port configuration routines */
2864
2865static void mvpp2_port_mii_set(struct mvpp2_port *port)
2866{
2867	u32 val;
2868
2869	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
2870
2871	switch (port->phy_interface) {
2872	case PHY_INTERFACE_MODE_SGMII:
2873		val |= MVPP2_GMAC_INBAND_AN_MASK;
2874		break;
2875	case PHY_INTERFACE_MODE_1000BASEX:
2876	case PHY_INTERFACE_MODE_2500BASEX:
2877		val &= ~MVPP2_GMAC_INBAND_AN_MASK;
2878		break;
2879	case PHY_INTERFACE_MODE_RGMII:
2880	case PHY_INTERFACE_MODE_RGMII_ID:
2881		val |= MVPP2_GMAC_PORT_RGMII_MASK;
2882	default:
2883		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
2884	}
2885
2886	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2887}
2888
2889static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
2890{
2891	u32 val;
2892
2893	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
2894	val |= MVPP2_GMAC_FC_ADV_EN;
2895	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
2896}
2897
2898static void mvpp2_port_enable(struct mvpp2_port *port)
2899{
2900	u32 val;
2901
2902	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2903	val |= MVPP2_GMAC_PORT_EN_MASK;
2904	val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
2905	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2906}
2907
2908static void mvpp2_port_disable(struct mvpp2_port *port)
2909{
2910	u32 val;
2911
2912	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2913	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
2914	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2915}
2916
2917/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
2918static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
2919{
2920	u32 val;
2921
2922	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
2923		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
2924	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
2925}
2926
2927/* Configure loopback port */
2928static void mvpp2_port_loopback_set(struct mvpp2_port *port)
2929{
2930	u32 val;
2931
2932	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
2933
2934	if (port->speed == 1000)
2935		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
2936	else
2937		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
2938
2939	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
2940	    port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
2941	    port->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
2942		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
2943	else
2944		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
2945
2946	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
2947}
2948
2949static void mvpp2_port_reset(struct mvpp2_port *port)
2950{
2951	u32 val;
2952
2953	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2954		    ~MVPP2_GMAC_PORT_RESET_MASK;
2955	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2956
2957	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2958	       MVPP2_GMAC_PORT_RESET_MASK)
2959		continue;
2960}
2961
2962/* Change maximum receive size of the port */
2963static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
2964{
2965	u32 val;
2966
2967	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2968	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
2969	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
2970		    MVPP2_GMAC_MAX_RX_SIZE_OFFS);
2971	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2972}
2973
2974/* PPv2.2 GoP/GMAC config */
2975
2976/* Set the MAC to reset or exit from reset */
2977static int gop_gmac_reset(struct mvpp2_port *port, int reset)
2978{
2979	u32 val;
2980
2981	/* read - modify - write */
2982	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
2983	if (reset)
2984		val |= MVPP2_GMAC_PORT_RESET_MASK;
2985	else
2986		val &= ~MVPP2_GMAC_PORT_RESET_MASK;
2987	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2988
2989	return 0;
2990}
2991
2992/*
2993 * gop_gpcs_mode_cfg
2994 *
2995 * Configure port to working with Gig PCS or don't.
2996 */
2997static int gop_gpcs_mode_cfg(struct mvpp2_port *port, int en)
2998{
2999	u32 val;
3000
3001	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
3002	if (en)
3003		val |= MVPP2_GMAC_PCS_ENABLE_MASK;
3004	else
3005		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
3006	/* enable / disable PCS on this port */
3007	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3008
3009	return 0;
3010}
3011
3012static int gop_bypass_clk_cfg(struct mvpp2_port *port, int en)
3013{
3014	u32 val;
3015
3016	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
3017	if (en)
3018		val |= MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
3019	else
3020		val &= ~MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
3021	/* enable / disable PCS on this port */
3022	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3023
3024	return 0;
3025}
3026
3027static void gop_gmac_sgmii_cfg(struct mvpp2_port *port)
3028{
3029	u32 val, thresh;
3030
3031	/*
3032	 * Configure minimal level of the Tx FIFO before the lower part
3033	 * starts to read a packet
3034	 */
3035	thresh = MVPP2_SGMII_TX_FIFO_MIN_TH;
3036	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3037	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3038	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
3039	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3040
3041	/* Disable bypass of sync module */
3042	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
3043	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
3044	/* configure DP clock select according to mode */
3045	val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
3046	/* configure QSGMII bypass according to mode */
3047	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
3048	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);
3049
3050	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3051	/* configure GIG MAC to SGMII mode */
3052	val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
3053	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3054
3055	/* configure AN */
3056	val = MVPP2_GMAC_EN_PCS_AN |
3057		MVPP2_GMAC_AN_BYPASS_EN |
3058		MVPP2_GMAC_AN_SPEED_EN  |
3059		MVPP2_GMAC_EN_FC_AN     |
3060		MVPP2_GMAC_AN_DUPLEX_EN |
3061		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
3062	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3063}
3064
3065static void gop_gmac_2500basex_cfg(struct mvpp2_port *port)
3066{
3067	u32 val, thresh;
3068
3069	/*
3070	 * Configure minimal level of the Tx FIFO before the lower part
3071	 * starts to read a packet
3072	 */
3073	thresh = MVPP2_SGMII2_5_TX_FIFO_MIN_TH;
3074	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3075	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3076	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
3077	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3078
3079	/* Disable bypass of sync module */
3080	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
3081	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
3082	/* configure DP clock select according to mode */
3083	val |= MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
3084	/* configure QSGMII bypass according to mode */
3085	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
3086	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);
3087
3088	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3089	/*
3090	 * Configure GIG MAC to 2500Base-X mode connected to a fiber
3091	 * transceiver
3092	 */
3093	val |= MVPP2_GMAC_PORT_TYPE_MASK;
3094	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3095
3096	/* In 2500BaseX mode, we can't negotiate speed
3097	 * and we do not want InBand autoneg
3098	 * bypass enabled (link interrupt storm risk
3099	 * otherwise).
3100	 */
3101	val = MVPP2_GMAC_AN_BYPASS_EN |
3102		MVPP2_GMAC_EN_PCS_AN |
3103		MVPP2_GMAC_CONFIG_GMII_SPEED  |
3104		MVPP2_GMAC_CONFIG_FULL_DUPLEX |
3105		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
3106	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3107}
3108
3109static void gop_gmac_1000basex_cfg(struct mvpp2_port *port)
3110{
3111	u32 val, thresh;
3112
3113	/*
3114	 * Configure minimal level of the Tx FIFO before the lower part
3115	 * starts to read a packet
3116	 */
3117	thresh = MVPP2_SGMII_TX_FIFO_MIN_TH;
3118	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3119	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3120	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
3121	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3122
3123	/* Disable bypass of sync module */
3124	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
3125	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
3126	/* configure DP clock select according to mode */
3127	val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
3128	/* configure QSGMII bypass according to mode */
3129	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
3130	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);
3131
3132	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3133	/* configure GIG MAC to 1000BASEX mode */
3134	val |= MVPP2_GMAC_PORT_TYPE_MASK;
3135	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3136
3137	/* In 1000BaseX mode, we can't negotiate speed (it's
3138	 * only 1000), and we do not want InBand autoneg
3139	 * bypass enabled (link interrupt storm risk
3140	 * otherwise).
3141	 */
3142	val = MVPP2_GMAC_AN_BYPASS_EN |
3143		MVPP2_GMAC_EN_PCS_AN |
3144		MVPP2_GMAC_CONFIG_GMII_SPEED  |
3145		MVPP2_GMAC_CONFIG_FULL_DUPLEX |
3146		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
3147	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3148}
3149
3150static void gop_gmac_rgmii_cfg(struct mvpp2_port *port)
3151{
3152	u32 val, thresh;
3153
3154	/*
3155	 * Configure minimal level of the Tx FIFO before the lower part
3156	 * starts to read a packet
3157	 */
3158	thresh = MVPP2_RGMII_TX_FIFO_MIN_TH;
3159	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3160	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3161	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
3162	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3163
3164	/* Disable bypass of sync module */
3165	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
3166	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
3167	/* configure DP clock select according to mode */
3168	val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
3169	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
3170	val |= MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK;
3171	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);
3172
3173	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3174	/* configure GIG MAC to SGMII mode */
3175	val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
3176	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3177
3178	/* configure AN 0xb8e8 */
3179	val = MVPP2_GMAC_AN_BYPASS_EN |
3180		MVPP2_GMAC_AN_SPEED_EN   |
3181		MVPP2_GMAC_EN_FC_AN      |
3182		MVPP2_GMAC_AN_DUPLEX_EN  |
3183		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
3184	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3185}
3186
3187/* Set the internal mux's to the required MAC in the GOP */
3188static int gop_gmac_mode_cfg(struct mvpp2_port *port)
3189{
3190	u32 val;
3191
3192	/* Set TX FIFO thresholds */
3193	switch (port->phy_interface) {
3194	case PHY_INTERFACE_MODE_SGMII:
3195		gop_gmac_sgmii_cfg(port);
3196		break;
3197	case PHY_INTERFACE_MODE_1000BASEX:
3198		gop_gmac_1000basex_cfg(port);
3199		break;
3200
3201	case PHY_INTERFACE_MODE_2500BASEX:
3202		gop_gmac_2500basex_cfg(port);
3203		break;
3204
3205	case PHY_INTERFACE_MODE_RGMII:
3206	case PHY_INTERFACE_MODE_RGMII_ID:
3207		gop_gmac_rgmii_cfg(port);
3208		break;
3209
3210	default:
3211		return -1;
3212	}
3213
3214	/* Jumbo frame support - 0x1400*2= 0x2800 bytes */
3215	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3216	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
3217	val |= 0x1400 << MVPP2_GMAC_MAX_RX_SIZE_OFFS;
3218	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3219
3220	/* PeriodicXonEn disable */
3221	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
3222	val &= ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
3223	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
3224
3225	return 0;
3226}
3227
3228static void gop_xlg_2_gig_mac_cfg(struct mvpp2_port *port)
3229{
3230	u32 val;
3231
3232	/* relevant only for MAC0 (XLG0 and GMAC0) */
3233	if (port->gop_id > 0)
3234		return;
3235
3236	/* configure 1Gig MAC mode */
3237	val = readl(port->base + MVPP22_XLG_CTRL3_REG);
3238	val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
3239	val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
3240	writel(val, port->base + MVPP22_XLG_CTRL3_REG);
3241}
3242
3243static int gop_gpcs_reset(struct mvpp2_port *port, int reset)
3244{
3245	u32 val;
3246
3247	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
3248	if (reset)
3249		val &= ~MVPP2_GMAC_SGMII_MODE_MASK;
3250	else
3251		val |= MVPP2_GMAC_SGMII_MODE_MASK;
3252	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3253
3254	return 0;
3255}
3256
3257static int gop_mpcs_mode(struct mvpp2_port *port)
3258{
3259	u32 val;
3260
3261	/* configure PCS40G COMMON CONTROL */
3262	val = readl(port->priv->mpcs_base + port->gop_id * MVPP22_PORT_OFFSET +
3263		    PCS40G_COMMON_CONTROL);
3264	val &= ~FORWARD_ERROR_CORRECTION_MASK;
3265	writel(val, port->priv->mpcs_base + port->gop_id * MVPP22_PORT_OFFSET +
3266	       PCS40G_COMMON_CONTROL);
3267
3268	/* configure PCS CLOCK RESET */
3269	val = readl(port->priv->mpcs_base + port->gop_id * MVPP22_PORT_OFFSET +
3270		    PCS_CLOCK_RESET);
3271	val &= ~CLK_DIVISION_RATIO_MASK;
3272	val |= 1 << CLK_DIVISION_RATIO_OFFS;
3273	writel(val, port->priv->mpcs_base + port->gop_id * MVPP22_PORT_OFFSET +
3274	       PCS_CLOCK_RESET);
3275
3276	val &= ~CLK_DIV_PHASE_SET_MASK;
3277	val |= MAC_CLK_RESET_MASK;
3278	val |= RX_SD_CLK_RESET_MASK;
3279	val |= TX_SD_CLK_RESET_MASK;
3280	writel(val, port->priv->mpcs_base + port->gop_id * MVPP22_PORT_OFFSET +
3281	       PCS_CLOCK_RESET);
3282
3283	return 0;
3284}
3285
3286/* Set the internal mux's to the required MAC in the GOP */
3287static int gop_xlg_mac_mode_cfg(struct mvpp2_port *port, int num_of_act_lanes)
3288{
3289	u32 val;
3290
3291	/* configure 10G MAC mode */
3292	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
3293	val |= MVPP22_XLG_RX_FC_EN;
3294	writel(val, port->base + MVPP22_XLG_CTRL0_REG);
3295
3296	val = readl(port->base + MVPP22_XLG_CTRL3_REG);
3297	val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
3298	val |= MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC;
3299	writel(val, port->base + MVPP22_XLG_CTRL3_REG);
3300
3301	/* read - modify - write */
3302	val = readl(port->base + MVPP22_XLG_CTRL4_REG);
3303	val &= ~MVPP22_XLG_MODE_DMA_1G;
3304	val |= MVPP22_XLG_FORWARD_PFC_EN;
3305	val |= MVPP22_XLG_FORWARD_802_3X_FC_EN;
3306	val &= ~MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK;
3307	writel(val, port->base + MVPP22_XLG_CTRL4_REG);
3308
3309	/* Jumbo frame support: 0x1400 * 2 = 0x2800 bytes */
3310	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
3311	val &= ~MVPP22_XLG_MAX_RX_SIZE_MASK;
3312	val |= 0x1400 << MVPP22_XLG_MAX_RX_SIZE_OFFS;
3313	writel(val, port->base + MVPP22_XLG_CTRL1_REG);
3314
3315	/* unmask link change interrupt */
3316	val = readl(port->base + MVPP22_XLG_INTERRUPT_MASK_REG);
3317	val |= MVPP22_XLG_INTERRUPT_LINK_CHANGE;
3318	val |= 1; /* unmask summary bit */
3319	writel(val, port->base + MVPP22_XLG_INTERRUPT_MASK_REG);
3320
3321	return 0;
3322}
3323
3324/* Set the MAC to reset or exit from reset */
3325static int gop_xlg_mac_reset(struct mvpp2_port *port, int reset)
3326{
3327	u32 val;
3328
3329	/* read - modify - write */
3330	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
3331	if (reset)
3332		val &= ~MVPP22_XLG_MAC_RESETN;
3333	else
3334		val |= MVPP22_XLG_MAC_RESETN;
3335	writel(val, port->base + MVPP22_XLG_CTRL0_REG);
3336
3337	return 0;
3338}
3339
3340/*
3341 * gop_port_init
3342 *
3343 * Init physical port. Configures the port mode and all it's elements
3344 * accordingly.
3345 * Does not verify that the selected mode/port number is valid at the
3346 * core level.
3347 */
3348static int gop_port_init(struct mvpp2_port *port)
3349{
3350	int mac_num = port->gop_id;
3351	int num_of_act_lanes;
3352
3353	if (mac_num >= MVPP22_GOP_MAC_NUM) {
3354		log_err("illegal port number %d", mac_num);
3355		return -1;
3356	}
3357
3358	switch (port->phy_interface) {
3359	case PHY_INTERFACE_MODE_RGMII:
3360	case PHY_INTERFACE_MODE_RGMII_ID:
3361		gop_gmac_reset(port, 1);
3362
3363		/* configure PCS */
3364		gop_gpcs_mode_cfg(port, 0);
3365		gop_bypass_clk_cfg(port, 1);
3366
3367		/* configure MAC */
3368		gop_gmac_mode_cfg(port);
3369		/* pcs unreset */
3370		gop_gpcs_reset(port, 0);
3371
3372		/* mac unreset */
3373		gop_gmac_reset(port, 0);
3374		break;
3375
3376	case PHY_INTERFACE_MODE_SGMII:
3377	case PHY_INTERFACE_MODE_1000BASEX:
3378	case PHY_INTERFACE_MODE_2500BASEX:
3379		/* configure PCS */
3380		gop_gpcs_mode_cfg(port, 1);
3381
3382		/* configure MAC */
3383		gop_gmac_mode_cfg(port);
3384		/* select proper Mac mode */
3385		gop_xlg_2_gig_mac_cfg(port);
3386
3387		/* pcs unreset */
3388		gop_gpcs_reset(port, 0);
3389		/* mac unreset */
3390		gop_gmac_reset(port, 0);
3391		break;
3392
3393	case PHY_INTERFACE_MODE_10GBASER:
3394	case PHY_INTERFACE_MODE_5GBASER:
3395	case PHY_INTERFACE_MODE_XAUI:
3396		num_of_act_lanes = 2;
3397		mac_num = 0;
3398		/* configure PCS */
3399		gop_mpcs_mode(port);
3400		/* configure MAC */
3401		gop_xlg_mac_mode_cfg(port, num_of_act_lanes);
3402
3403		/* mac unreset */
3404		gop_xlg_mac_reset(port, 0);
3405		break;
3406
3407	default:
3408		log_err("Requested port mode (%d) not supported\n",
3409			port->phy_interface);
3410		return -1;
3411	}
3412
3413	return 0;
3414}
3415
3416static void gop_xlg_mac_port_enable(struct mvpp2_port *port, int enable)
3417{
3418	u32 val;
3419
3420	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
3421	if (enable) {
3422		/* Enable port and MIB counters update */
3423		val |= MVPP22_XLG_PORT_EN;
3424		val &= ~MVPP22_XLG_MIBCNT_DIS;
3425	} else {
3426		/* Disable port */
3427		val &= ~MVPP22_XLG_PORT_EN;
3428	}
3429	writel(val, port->base + MVPP22_XLG_CTRL0_REG);
3430}
3431
3432static void gop_port_enable(struct mvpp2_port *port, int enable)
3433{
3434	switch (port->phy_interface) {
3435	case PHY_INTERFACE_MODE_RGMII:
3436	case PHY_INTERFACE_MODE_RGMII_ID:
3437	case PHY_INTERFACE_MODE_SGMII:
3438	case PHY_INTERFACE_MODE_1000BASEX:
3439	case PHY_INTERFACE_MODE_2500BASEX:
3440		if (enable)
3441			mvpp2_port_enable(port);
3442		else
3443			mvpp2_port_disable(port);
3444		break;
3445
3446	case PHY_INTERFACE_MODE_10GBASER:
3447	case PHY_INTERFACE_MODE_5GBASER:
3448	case PHY_INTERFACE_MODE_XAUI:
3449		gop_xlg_mac_port_enable(port, enable);
3450
3451		break;
3452	default:
3453		log_err("%s: Wrong port mode (%d)\n", __func__,
3454			port->phy_interface);
3455		return;
3456	}
3457}
3458
3459/* RFU1 functions */
3460static inline u32 gop_rfu1_read(struct mvpp2 *priv, u32 offset)
3461{
3462	return readl(priv->rfu1_base + offset);
3463}
3464
3465static inline void gop_rfu1_write(struct mvpp2 *priv, u32 offset, u32 data)
3466{
3467	writel(data, priv->rfu1_base + offset);
3468}
3469
3470static u32 mvpp2_netc_cfg_create(int gop_id, phy_interface_t phy_type)
3471{
3472	u32 val = 0;
3473
3474	if (gop_id == 2) {
3475		if (phy_type == PHY_INTERFACE_MODE_SGMII ||
3476		    phy_type == PHY_INTERFACE_MODE_1000BASEX ||
3477		    phy_type == PHY_INTERFACE_MODE_2500BASEX)
3478			val |= MV_NETC_GE_MAC2_SGMII;
3479		else if (phy_type == PHY_INTERFACE_MODE_RGMII ||
3480			 phy_type == PHY_INTERFACE_MODE_RGMII_ID)
3481			val |= MV_NETC_GE_MAC2_RGMII;
3482	}
3483
3484	if (gop_id == 3) {
3485		if (phy_type == PHY_INTERFACE_MODE_SGMII ||
3486		    phy_type == PHY_INTERFACE_MODE_1000BASEX ||
3487		    phy_type == PHY_INTERFACE_MODE_2500BASEX)
3488			val |= MV_NETC_GE_MAC3_SGMII;
3489		else if (phy_type == PHY_INTERFACE_MODE_RGMII ||
3490			 phy_type == PHY_INTERFACE_MODE_RGMII_ID)
3491			val |= MV_NETC_GE_MAC3_RGMII;
3492	}
3493
3494	return val;
3495}
3496
3497static void gop_netc_active_port(struct mvpp2 *priv, int gop_id, u32 val)
3498{
3499	u32 reg;
3500
3501	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG);
3502	reg &= ~(NETC_PORTS_ACTIVE_MASK(gop_id));
3503
3504	val <<= NETC_PORTS_ACTIVE_OFFSET(gop_id);
3505	val &= NETC_PORTS_ACTIVE_MASK(gop_id);
3506
3507	reg |= val;
3508
3509	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg);
3510}
3511
3512static void gop_netc_mii_mode(struct mvpp2 *priv, int gop_id, u32 val)
3513{
3514	u32 reg;
3515
3516	reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG);
3517	reg &= ~NETC_GBE_PORT1_MII_MODE_MASK;
3518
3519	val <<= NETC_GBE_PORT1_MII_MODE_OFFS;
3520	val &= NETC_GBE_PORT1_MII_MODE_MASK;
3521
3522	reg |= val;
3523
3524	gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg);
3525}
3526
3527static void gop_netc_gop_reset(struct mvpp2 *priv, u32 val)
3528{
3529	u32 reg;
3530
3531	reg = gop_rfu1_read(priv, GOP_SOFT_RESET_1_REG);
3532	reg &= ~NETC_GOP_SOFT_RESET_MASK;
3533
3534	val <<= NETC_GOP_SOFT_RESET_OFFS;
3535	val &= NETC_GOP_SOFT_RESET_MASK;
3536
3537	reg |= val;
3538
3539	gop_rfu1_write(priv, GOP_SOFT_RESET_1_REG, reg);
3540}
3541
3542static void gop_netc_gop_clock_logic_set(struct mvpp2 *priv, u32 val)
3543{
3544	u32 reg;
3545
3546	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
3547	reg &= ~NETC_CLK_DIV_PHASE_MASK;
3548
3549	val <<= NETC_CLK_DIV_PHASE_OFFS;
3550	val &= NETC_CLK_DIV_PHASE_MASK;
3551
3552	reg |= val;
3553
3554	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
3555}
3556
3557static void gop_netc_port_rf_reset(struct mvpp2 *priv, int gop_id, u32 val)
3558{
3559	u32 reg;
3560
3561	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG);
3562	reg &= ~(NETC_PORT_GIG_RF_RESET_MASK(gop_id));
3563
3564	val <<= NETC_PORT_GIG_RF_RESET_OFFS(gop_id);
3565	val &= NETC_PORT_GIG_RF_RESET_MASK(gop_id);
3566
3567	reg |= val;
3568
3569	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg);
3570}
3571
3572static void gop_netc_gbe_sgmii_mode_select(struct mvpp2 *priv, int gop_id,
3573					   u32 val)
3574{
3575	u32 reg, mask, offset;
3576
3577	if (gop_id == 2) {
3578		mask = NETC_GBE_PORT0_SGMII_MODE_MASK;
3579		offset = NETC_GBE_PORT0_SGMII_MODE_OFFS;
3580	} else {
3581		mask = NETC_GBE_PORT1_SGMII_MODE_MASK;
3582		offset = NETC_GBE_PORT1_SGMII_MODE_OFFS;
3583	}
3584	reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG);
3585	reg &= ~mask;
3586
3587	val <<= offset;
3588	val &= mask;
3589
3590	reg |= val;
3591
3592	gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg);
3593}
3594
3595static void gop_netc_bus_width_select(struct mvpp2 *priv, u32 val)
3596{
3597	u32 reg;
3598
3599	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
3600	reg &= ~NETC_BUS_WIDTH_SELECT_MASK;
3601
3602	val <<= NETC_BUS_WIDTH_SELECT_OFFS;
3603	val &= NETC_BUS_WIDTH_SELECT_MASK;
3604
3605	reg |= val;
3606
3607	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
3608}
3609
3610static void gop_netc_sample_stages_timing(struct mvpp2 *priv, u32 val)
3611{
3612	u32 reg;
3613
3614	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
3615	reg &= ~NETC_GIG_RX_DATA_SAMPLE_MASK;
3616
3617	val <<= NETC_GIG_RX_DATA_SAMPLE_OFFS;
3618	val &= NETC_GIG_RX_DATA_SAMPLE_MASK;
3619
3620	reg |= val;
3621
3622	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
3623}
3624
3625static void gop_netc_mac_to_xgmii(struct mvpp2 *priv, int gop_id,
3626				  enum mv_netc_phase phase)
3627{
3628	switch (phase) {
3629	case MV_NETC_FIRST_PHASE:
3630		/* Set Bus Width to HB mode = 1 */
3631		gop_netc_bus_width_select(priv, 1);
3632		/* Select RGMII mode */
3633		gop_netc_gbe_sgmii_mode_select(priv, gop_id, MV_NETC_GBE_XMII);
3634		break;
3635
3636	case MV_NETC_SECOND_PHASE:
3637		/* De-assert the relevant port HB reset */
3638		gop_netc_port_rf_reset(priv, gop_id, 1);
3639		break;
3640	}
3641}
3642
3643static void gop_netc_mac_to_sgmii(struct mvpp2 *priv, int gop_id,
3644				  enum mv_netc_phase phase)
3645{
3646	switch (phase) {
3647	case MV_NETC_FIRST_PHASE:
3648		/* Set Bus Width to HB mode = 1 */
3649		gop_netc_bus_width_select(priv, 1);
3650		/* Select SGMII mode */
3651		if (gop_id >= 1) {
3652			gop_netc_gbe_sgmii_mode_select(priv, gop_id,
3653						       MV_NETC_GBE_SGMII);
3654		}
3655
3656		/* Configure the sample stages */
3657		gop_netc_sample_stages_timing(priv, 0);
3658		/* Configure the ComPhy Selector */
3659		/* gop_netc_com_phy_selector_config(netComplex); */
3660		break;
3661
3662	case MV_NETC_SECOND_PHASE:
3663		/* De-assert the relevant port HB reset */
3664		gop_netc_port_rf_reset(priv, gop_id, 1);
3665		break;
3666	}
3667}
3668
3669static int gop_netc_init(struct mvpp2 *priv, enum mv_netc_phase phase)
3670{
3671	u32 c = priv->netc_config;
3672
3673	if (c & MV_NETC_GE_MAC2_SGMII)
3674		gop_netc_mac_to_sgmii(priv, 2, phase);
3675	else if (c & MV_NETC_GE_MAC2_RGMII)
3676		gop_netc_mac_to_xgmii(priv, 2, phase);
3677
3678	if (c & MV_NETC_GE_MAC3_SGMII) {
3679		gop_netc_mac_to_sgmii(priv, 3, phase);
3680	} else {
3681		gop_netc_mac_to_xgmii(priv, 3, phase);
3682		if (c & MV_NETC_GE_MAC3_RGMII)
3683			gop_netc_mii_mode(priv, 3, MV_NETC_GBE_RGMII);
3684		else
3685			gop_netc_mii_mode(priv, 3, MV_NETC_GBE_MII);
3686	}
3687
3688	/* Activate gop ports 0, 2, 3 */
3689	gop_netc_active_port(priv, 0, 1);
3690	gop_netc_active_port(priv, 2, 1);
3691	gop_netc_active_port(priv, 3, 1);
3692
3693	if (phase == MV_NETC_SECOND_PHASE) {
3694		/* Enable the GOP internal clock logic */
3695		gop_netc_gop_clock_logic_set(priv, 1);
3696		/* De-assert GOP unit reset */
3697		gop_netc_gop_reset(priv, 1);
3698	}
3699
3700	return 0;
3701}
3702
3703/* Set defaults to the MVPP2 port */
3704static void mvpp2_defaults_set(struct mvpp2_port *port)
3705{
3706	int tx_port_num, val, queue, ptxq, lrxq;
3707
3708	if (port->priv->hw_version == MVPP21) {
3709		/* Configure port to loopback if needed */
3710		if (port->flags & MVPP2_F_LOOPBACK)
3711			mvpp2_port_loopback_set(port);
3712
3713		/* Update TX FIFO MIN Threshold */
3714		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3715		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3716		/* Min. TX threshold must be less than minimal packet length */
3717		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
3718		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3719	}
3720
3721	/* Disable Legacy WRR, Disable EJP, Release from reset */
3722	tx_port_num = mvpp2_egress_port(port);
3723	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
3724		    tx_port_num);
3725	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
3726
3727	/* Close bandwidth for all queues */
3728	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
3729		ptxq = mvpp2_txq_phys(port->id, queue);
3730		mvpp2_write(port->priv,
3731			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
3732	}
3733
3734	/* Set refill period to 1 usec, refill tokens
3735	 * and bucket size to maximum
3736	 */
3737	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, 0xc8);
3738	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
3739	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
3740	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
3741	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
3742	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
3743	val = MVPP2_TXP_TOKEN_SIZE_MAX;
3744	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
3745
3746	/* Set MaximumLowLatencyPacketSize value to 256 */
3747	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
3748		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
3749		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
3750
3751	/* Enable Rx cache snoop */
3752	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
3753		queue = port->rxqs[lrxq]->id;
3754		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
3755		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
3756			   MVPP2_SNOOP_BUF_HDR_MASK;
3757		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
3758	}
3759}
3760
3761/* Enable/disable receiving packets */
3762static void mvpp2_ingress_enable(struct mvpp2_port *port)
3763{
3764	u32 val;
3765	int lrxq, queue;
3766
3767	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
3768		queue = port->rxqs[lrxq]->id;
3769		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
3770		val &= ~MVPP2_RXQ_DISABLE_MASK;
3771		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
3772	}
3773}
3774
3775static void mvpp2_ingress_disable(struct mvpp2_port *port)
3776{
3777	u32 val;
3778	int lrxq, queue;
3779
3780	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
3781		queue = port->rxqs[lrxq]->id;
3782		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
3783		val |= MVPP2_RXQ_DISABLE_MASK;
3784		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
3785	}
3786}
3787
3788/* Enable transmit via physical egress queue
3789 * - HW starts take descriptors from DRAM
3790 */
3791static void mvpp2_egress_enable(struct mvpp2_port *port)
3792{
3793	u32 qmap;
3794	int queue;
3795	int tx_port_num = mvpp2_egress_port(port);
3796
3797	/* Enable all initialized TXs. */
3798	qmap = 0;
3799	for (queue = 0; queue < txq_number; queue++) {
3800		struct mvpp2_tx_queue *txq = port->txqs[queue];
3801
3802		if (txq->descs != NULL)
3803			qmap |= (1 << queue);
3804	}
3805
3806	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
3807	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
3808}
3809
3810/* Disable transmit via physical egress queue
3811 * - HW doesn't take descriptors from DRAM
3812 */
3813static void mvpp2_egress_disable(struct mvpp2_port *port)
3814{
3815	u32 reg_data;
3816	int delay;
3817	int tx_port_num = mvpp2_egress_port(port);
3818
3819	/* Issue stop command for active channels only */
3820	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
3821	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
3822		    MVPP2_TXP_SCHED_ENQ_MASK;
3823	if (reg_data != 0)
3824		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
3825			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
3826
3827	/* Wait for all Tx activity to terminate. */
3828	delay = 0;
3829	do {
3830		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
3831			dev_warn(port->phy_dev->dev,
3832				 "Tx stop timed out, status=0x%08x\n",
3833				 reg_data);
3834			break;
3835		}
3836		mdelay(1);
3837		delay++;
3838
3839		/* Check port TX Command register that all
3840		 * Tx queues are stopped
3841		 */
3842		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
3843	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
3844}
3845
3846/* Rx descriptors helper methods */
3847
3848/* Get number of Rx descriptors occupied by received packets */
3849static inline int
3850mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
3851{
3852	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
3853
3854	return val & MVPP2_RXQ_OCCUPIED_MASK;
3855}
3856
3857/* Update Rx queue status with the number of occupied and available
3858 * Rx descriptor slots.
3859 */
3860static inline void
3861mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
3862			int used_count, int free_count)
3863{
3864	/* Decrement the number of used descriptors and increment count
3865	 * increment the number of free descriptors.
3866	 */
3867	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
3868
3869	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
3870}
3871
3872/* Get pointer to next RX descriptor to be processed by SW */
3873static inline struct mvpp2_rx_desc *
3874mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
3875{
3876	int rx_desc = rxq->next_desc_to_proc;
3877
3878	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
3879	prefetch(rxq->descs + rxq->next_desc_to_proc);
3880	return rxq->descs + rx_desc;
3881}
3882
3883/* Set rx queue offset */
3884static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
3885				 int prxq, int offset)
3886{
3887	u32 val;
3888
3889	/* Convert offset from bytes to units of 32 bytes */
3890	offset = offset >> 5;
3891
3892	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3893	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
3894
3895	/* Offset is in */
3896	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
3897		    MVPP2_RXQ_PACKET_OFFSET_MASK);
3898
3899	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3900}
3901
3902/* Obtain BM cookie information from descriptor */
3903static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
3904				 struct mvpp2_rx_desc *rx_desc)
3905{
3906	int cpu = smp_processor_id();
3907	int pool;
3908
3909	pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
3910		MVPP2_RXD_BM_POOL_ID_MASK) >>
3911		MVPP2_RXD_BM_POOL_ID_OFFS;
3912
3913	return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
3914	       ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
3915}
3916
3917/* Tx descriptors helper methods */
3918
3919/* Get number of Tx descriptors waiting to be transmitted by HW */
3920static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
3921				       struct mvpp2_tx_queue *txq)
3922{
3923	u32 val;
3924
3925	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3926	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
3927
3928	return val & MVPP2_TXQ_PENDING_MASK;
3929}
3930
3931/* Get pointer to next Tx descriptor to be processed (send) by HW */
3932static struct mvpp2_tx_desc *
3933mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
3934{
3935	int tx_desc = txq->next_desc_to_proc;
3936
3937	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
3938	return txq->descs + tx_desc;
3939}
3940
3941/* Update HW with number of aggregated Tx descriptors to be sent */
3942static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
3943{
3944	/* aggregated access - relevant TXQ number is written in TX desc */
3945	mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
3946}
3947
3948/* Get number of sent descriptors and decrement counter.
3949 * The number of sent descriptors is returned.
3950 * Per-CPU access
3951 */
3952static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
3953					   struct mvpp2_tx_queue *txq)
3954{
3955	u32 val;
3956
3957	/* Reading status reg resets transmitted descriptor counter */
3958	val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
3959
3960	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
3961		MVPP2_TRANSMITTED_COUNT_OFFSET;
3962}
3963
3964static void mvpp2_txq_sent_counter_clear(void *arg)
3965{
3966	struct mvpp2_port *port = arg;
3967	int queue;
3968
3969	for (queue = 0; queue < txq_number; queue++) {
3970		int id = port->txqs[queue]->id;
3971
3972		mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
3973	}
3974}
3975
3976/* Set max sizes for Tx queues */
3977static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
3978{
3979	u32	val, size, mtu;
3980	int	txq, tx_port_num;
3981
3982	mtu = port->pkt_size * 8;
3983	if (mtu > MVPP2_TXP_MTU_MAX)
3984		mtu = MVPP2_TXP_MTU_MAX;
3985
3986	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
3987	mtu = 3 * mtu;
3988
3989	/* Indirect access to registers */
3990	tx_port_num = mvpp2_egress_port(port);
3991	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
3992
3993	/* Set MTU */
3994	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
3995	val &= ~MVPP2_TXP_MTU_MAX;
3996	val |= mtu;
3997	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
3998
3999	/* TXP token size and all TXQs token size must be larger that MTU */
4000	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
4001	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
4002	if (size < mtu) {
4003		size = mtu;
4004		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
4005		val |= size;
4006		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
4007	}
4008
4009	for (txq = 0; txq < txq_number; txq++) {
4010		val = mvpp2_read(port->priv,
4011				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
4012		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
4013
4014		if (size < mtu) {
4015			size = mtu;
4016			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
4017			val |= size;
4018			mvpp2_write(port->priv,
4019				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
4020				    val);
4021		}
4022	}
4023}
4024
4025/* Free Tx queue skbuffs */
4026static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4027				struct mvpp2_tx_queue *txq,
4028				struct mvpp2_txq_pcpu *txq_pcpu, int num)
4029{
4030	int i;
4031
4032	for (i = 0; i < num; i++)
4033		mvpp2_txq_inc_get(txq_pcpu);
4034}
4035
4036static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
4037							u32 cause)
4038{
4039	int queue = fls(cause) - 1;
4040
4041	return port->rxqs[queue];
4042}
4043
4044static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
4045							u32 cause)
4046{
4047	int queue = fls(cause) - 1;
4048
4049	return port->txqs[queue];
4050}
4051
4052/* Rx/Tx queue initialization/cleanup methods */
4053
4054/* Allocate and initialize descriptors for aggr TXQ */
4055static int mvpp2_aggr_txq_init(struct udevice *dev,
4056			       struct mvpp2_tx_queue *aggr_txq,
4057			       int desc_num, int cpu,
4058			       struct mvpp2 *priv)
4059{
4060	u32 txq_dma;
4061
4062	/* Allocate memory for TX descriptors */
4063	aggr_txq->descs = buffer_loc.aggr_tx_descs;
4064	aggr_txq->descs_dma = (dma_addr_t)buffer_loc.aggr_tx_descs;
4065	if (!aggr_txq->descs)
4066		return -ENOMEM;
4067
4068	/* Make sure descriptor address is cache line size aligned  */
4069	BUG_ON(aggr_txq->descs !=
4070	       PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4071
4072	aggr_txq->last_desc = aggr_txq->size - 1;
4073
4074	/* Aggr TXQ no reset WA */
4075	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
4076						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
4077
4078	/* Set Tx descriptors queue starting address indirect
4079	 * access
4080	 */
4081	if (priv->hw_version == MVPP21)
4082		txq_dma = aggr_txq->descs_dma;
4083	else
4084		txq_dma = aggr_txq->descs_dma >>
4085			MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
4086
4087	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
4088	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
4089
4090	return 0;
4091}
4092
4093/* Create a specified Rx queue */
4094static int mvpp2_rxq_init(struct mvpp2_port *port,
4095			  struct mvpp2_rx_queue *rxq)
4096
4097{
4098	u32 rxq_dma;
4099
4100	rxq->size = port->rx_ring_size;
4101
4102	/* Allocate memory for RX descriptors */
4103	rxq->descs = buffer_loc.rx_descs;
4104	rxq->descs_dma = (dma_addr_t)buffer_loc.rx_descs;
4105	if (!rxq->descs)
4106		return -ENOMEM;
4107
4108	BUG_ON(rxq->descs !=
4109	       PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4110
4111	rxq->last_desc = rxq->size - 1;
4112
4113	/* Zero occupied and non-occupied counters - direct access */
4114	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4115
4116	/* Set Rx descriptors queue starting address - indirect access */
4117	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4118	if (port->priv->hw_version == MVPP21)
4119		rxq_dma = rxq->descs_dma;
4120	else
4121		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
4122	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
4123	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
4124	mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
4125
4126	/* Set Offset */
4127	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
4128
4129	/* Add number of descriptors ready for receiving packets */
4130	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
4131
4132	return 0;
4133}
4134
4135/* Push packets received by the RXQ to BM pool */
4136static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
4137				struct mvpp2_rx_queue *rxq)
4138{
4139	int rx_received, i;
4140
4141	rx_received = mvpp2_rxq_received(port, rxq->id);
4142	if (!rx_received)
4143		return;
4144
4145	for (i = 0; i < rx_received; i++) {
4146		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
4147		u32 bm = mvpp2_bm_cookie_build(port, rx_desc);
4148
4149		mvpp2_pool_refill(port, bm,
4150				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
4151				  mvpp2_rxdesc_cookie_get(port, rx_desc));
4152	}
4153	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
4154}
4155
4156/* Cleanup Rx queue */
4157static void mvpp2_rxq_deinit(struct mvpp2_port *port,
4158			     struct mvpp2_rx_queue *rxq)
4159{
4160	mvpp2_rxq_drop_pkts(port, rxq);
4161
4162	rxq->descs             = NULL;
4163	rxq->last_desc         = 0;
4164	rxq->next_desc_to_proc = 0;
4165	rxq->descs_dma         = 0;
4166
4167	/* Clear Rx descriptors queue starting address and size;
4168	 * free descriptor number
4169	 */
4170	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4171	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4172	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
4173	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
4174}
4175
4176/* Create and initialize a Tx queue */
4177static int mvpp2_txq_init(struct mvpp2_port *port,
4178			  struct mvpp2_tx_queue *txq)
4179{
4180	u32 val;
4181	int cpu, desc, desc_per_txq, tx_port_num;
4182	struct mvpp2_txq_pcpu *txq_pcpu;
4183
4184	txq->size = port->tx_ring_size;
4185
4186	/* Allocate memory for Tx descriptors */
4187	txq->descs = buffer_loc.tx_descs;
4188	txq->descs_dma = (dma_addr_t)buffer_loc.tx_descs;
4189	if (!txq->descs)
4190		return -ENOMEM;
4191
4192	/* Make sure descriptor address is cache line size aligned  */
4193	BUG_ON(txq->descs !=
4194	       PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
4195
4196	txq->last_desc = txq->size - 1;
4197
4198	/* Set Tx descriptors queue starting address - indirect access */
4199	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4200	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma);
4201	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
4202					     MVPP2_TXQ_DESC_SIZE_MASK);
4203	mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
4204	mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
4205		    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
4206	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
4207	val &= ~MVPP2_TXQ_PENDING_MASK;
4208	mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
4209
4210	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
4211	 * for each existing TXQ.
4212	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
4213	 * GBE ports assumed to be continious from 0 to MVPP2_MAX_PORTS
4214	 */
4215	desc_per_txq = 16;
4216	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
4217	       (txq->log_id * desc_per_txq);
4218
4219	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
4220		    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
4221		    MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
4222
4223	/* WRR / EJP configuration - indirect access */
4224	tx_port_num = mvpp2_egress_port(port);
4225	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4226
4227	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
4228	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
4229	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
4230	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
4231	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
4232
4233	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
4234	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
4235		    val);
4236
4237	for_each_present_cpu(cpu) {
4238		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4239		txq_pcpu->size = txq->size;
4240	}
4241
4242	return 0;
4243}
4244
4245/* Free allocated TXQ resources */
4246static void mvpp2_txq_deinit(struct mvpp2_port *port,
4247			     struct mvpp2_tx_queue *txq)
4248{
4249	txq->descs             = NULL;
4250	txq->last_desc         = 0;
4251	txq->next_desc_to_proc = 0;
4252	txq->descs_dma         = 0;
4253
4254	/* Set minimum bandwidth for disabled TXQs */
4255	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
4256
4257	/* Set Tx descriptors queue starting address and size */
4258	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4259	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
4260	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
4261}
4262
4263/* Cleanup Tx ports */
4264static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
4265{
4266	struct mvpp2_txq_pcpu *txq_pcpu;
4267	int delay, pending, cpu;
4268	u32 val;
4269
4270	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4271	val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
4272	val |= MVPP2_TXQ_DRAIN_EN_MASK;
4273	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4274
4275	/* The napi queue has been stopped so wait for all packets
4276	 * to be transmitted.
4277	 */
4278	delay = 0;
4279	do {
4280		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
4281			dev_warn(port->phy_dev->dev,
4282				 "port %d: cleaning queue %d timed out\n",
4283				 port->id, txq->log_id);
4284			break;
4285		}
4286		mdelay(1);
4287		delay++;
4288
4289		pending = mvpp2_txq_pend_desc_num_get(port, txq);
4290	} while (pending);
4291
4292	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
4293	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4294
4295	for_each_present_cpu(cpu) {
4296		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4297
4298		/* Release all packets */
4299		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
4300
4301		/* Reset queue */
4302		txq_pcpu->count = 0;
4303		txq_pcpu->txq_put_index = 0;
4304		txq_pcpu->txq_get_index = 0;
4305	}
4306}
4307
4308/* Cleanup all Tx queues */
4309static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
4310{
4311	struct mvpp2_tx_queue *txq;
4312	int queue;
4313	u32 val;
4314
4315	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
4316
4317	/* Reset Tx ports and delete Tx queues */
4318	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
4319	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
4320
4321	for (queue = 0; queue < txq_number; queue++) {
4322		txq = port->txqs[queue];
4323		mvpp2_txq_clean(port, txq);
4324		mvpp2_txq_deinit(port, txq);
4325	}
4326
4327	mvpp2_txq_sent_counter_clear(port);
4328
4329	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
4330	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
4331}
4332
4333/* Cleanup all Rx queues */
4334static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
4335{
4336	int queue;
4337
4338	for (queue = 0; queue < rxq_number; queue++)
4339		mvpp2_rxq_deinit(port, port->rxqs[queue]);
4340}
4341
4342/* Init all Rx queues for port */
4343static int mvpp2_setup_rxqs(struct mvpp2_port *port)
4344{
4345	int queue, err;
4346
4347	for (queue = 0; queue < rxq_number; queue++) {
4348		err = mvpp2_rxq_init(port, port->rxqs[queue]);
4349		if (err)
4350			goto err_cleanup;
4351	}
4352	return 0;
4353
4354err_cleanup:
4355	mvpp2_cleanup_rxqs(port);
4356	return err;
4357}
4358
4359/* Init all tx queues for port */
4360static int mvpp2_setup_txqs(struct mvpp2_port *port)
4361{
4362	struct mvpp2_tx_queue *txq;
4363	int queue, err;
4364
4365	for (queue = 0; queue < txq_number; queue++) {
4366		txq = port->txqs[queue];
4367		err = mvpp2_txq_init(port, txq);
4368		if (err)
4369			goto err_cleanup;
4370	}
4371
4372	mvpp2_txq_sent_counter_clear(port);
4373	return 0;
4374
4375err_cleanup:
4376	mvpp2_cleanup_txqs(port);
4377	return err;
4378}
4379
4380/* Adjust link */
4381static void mvpp2_link_event(struct mvpp2_port *port)
4382{
4383	struct phy_device *phydev = port->phy_dev;
4384	int status_change = 0;
4385	u32 val;
4386
4387	if (phydev->link) {
4388		if ((port->speed != phydev->speed) ||
4389		    (port->duplex != phydev->duplex)) {
4390			u32 val;
4391
4392			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4393			val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
4394				 MVPP2_GMAC_CONFIG_GMII_SPEED |
4395				 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
4396				 MVPP2_GMAC_AN_SPEED_EN |
4397				 MVPP2_GMAC_AN_DUPLEX_EN);
4398
4399			if (phydev->duplex)
4400				val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
4401
4402			if (phydev->speed == SPEED_1000 ||
4403			    phydev->speed == 2500)
4404				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
4405			else if (phydev->speed == SPEED_100)
4406				val |= MVPP2_GMAC_CONFIG_MII_SPEED;
4407
4408			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4409
4410			port->duplex = phydev->duplex;
4411			port->speed  = phydev->speed;
4412		}
4413	}
4414
4415	if (phydev->link != port->link) {
4416		if (!phydev->link) {
4417			port->duplex = -1;
4418			port->speed = 0;
4419		}
4420
4421		port->link = phydev->link;
4422		status_change = 1;
4423	}
4424
4425	if (status_change) {
4426		if (phydev->link) {
4427			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4428			val |= (MVPP2_GMAC_FORCE_LINK_PASS |
4429				MVPP2_GMAC_FORCE_LINK_DOWN);
4430			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4431			mvpp2_egress_enable(port);
4432			mvpp2_ingress_enable(port);
4433		} else {
4434			mvpp2_ingress_disable(port);
4435			mvpp2_egress_disable(port);
4436		}
4437	}
4438}
4439
4440/* Main RX/TX processing routines */
4441
4442/* Display more error info */
4443static void mvpp2_rx_error(struct mvpp2_port *port,
4444			   struct mvpp2_rx_desc *rx_desc)
4445{
4446	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
4447	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
4448
4449	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
4450	case MVPP2_RXD_ERR_CRC:
4451		dev_err(port->phy_dev->dev,
4452			"bad rx status %08x (crc error), size=%zu\n", status,
4453			sz);
4454		break;
4455	case MVPP2_RXD_ERR_OVERRUN:
4456		dev_err(port->phy_dev->dev,
4457			"bad rx status %08x (overrun error), size=%zu\n",
4458			status, sz);
4459		break;
4460	case MVPP2_RXD_ERR_RESOURCE:
4461		dev_err(port->phy_dev->dev,
4462			"bad rx status %08x (resource error), size=%zu\n",
4463			status, sz);
4464		break;
4465	}
4466}
4467
4468/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
4469static int mvpp2_rx_refill(struct mvpp2_port *port,
4470			   struct mvpp2_bm_pool *bm_pool,
4471			   u32 bm, dma_addr_t dma_addr)
4472{
4473	mvpp2_pool_refill(port, bm, dma_addr, (unsigned long)dma_addr);
4474	return 0;
4475}
4476
4477/* Set hw internals when starting port */
4478static void mvpp2_start_dev(struct mvpp2_port *port)
4479{
4480	switch (port->phy_interface) {
4481	case PHY_INTERFACE_MODE_RGMII:
4482	case PHY_INTERFACE_MODE_RGMII_ID:
4483	case PHY_INTERFACE_MODE_SGMII:
4484	case PHY_INTERFACE_MODE_1000BASEX:
4485	case PHY_INTERFACE_MODE_2500BASEX:
4486		mvpp2_gmac_max_rx_size_set(port);
4487	default:
4488		break;
4489	}
4490
4491	mvpp2_txp_max_tx_size_set(port);
4492
4493	if (port->priv->hw_version == MVPP21)
4494		mvpp2_port_enable(port);
4495	else
4496		gop_port_enable(port, 1);
4497}
4498
4499/* Set hw internals when stopping port */
4500static void mvpp2_stop_dev(struct mvpp2_port *port)
4501{
4502	/* Stop new packets from arriving to RXQs */
4503	mvpp2_ingress_disable(port);
4504
4505	mvpp2_egress_disable(port);
4506
4507	if (port->priv->hw_version == MVPP21)
4508		mvpp2_port_disable(port);
4509	else
4510		gop_port_enable(port, 0);
4511}
4512
4513static void mvpp2_phy_connect(struct udevice *dev, struct mvpp2_port *port)
4514{
4515	struct phy_device *phy_dev;
4516
4517	if (!port->init || port->link == 0) {
4518		phy_dev = dm_mdio_phy_connect(port->mdio_dev, port->phyaddr,
4519					      dev, port->phy_interface);
4520
4521		/*
4522		 * If the phy doesn't match with any existing u-boot drivers the
4523		 * phy framework will connect it to generic one which
4524		 * uid == 0xffffffff. In this case act as if the phy wouldn't be
4525		 * declared in dts. Otherwise in case of 3310 (for which the
4526		 * driver doesn't exist) the link will not be correctly
4527		 * detected. Removing phy entry from dts in case of 3310 is not
4528		 * an option because it is required for the phy_fw_down
4529		 * procedure.
4530		 */
4531		if (phy_dev &&
4532		    phy_dev->drv->uid == 0xffffffff) {/* Generic phy */
4533			dev_warn(port->phy_dev->dev,
4534				 "Marking phy as invalid, link will not be checked\n");
4535			/* set phy_addr to invalid value */
4536			port->phyaddr = PHY_MAX_ADDR;
4537			mvpp2_egress_enable(port);
4538			mvpp2_ingress_enable(port);
4539
4540			return;
4541		}
4542
4543		port->phy_dev = phy_dev;
4544		if (!phy_dev) {
4545			dev_err(port->phy_dev->dev, "cannot connect to phy\n");
4546			return;
4547		}
4548		phy_dev->supported &= PHY_GBIT_FEATURES;
4549		phy_dev->advertising = phy_dev->supported;
4550
4551		port->phy_dev = phy_dev;
4552		port->link    = 0;
4553		port->duplex  = 0;
4554		port->speed   = 0;
4555
4556		phy_config(phy_dev);
4557		phy_startup(phy_dev);
4558		if (!phy_dev->link)
4559			printf("%s: No link\n", phy_dev->dev->name);
4560		else
4561			port->init = 1;
4562	} else {
4563		mvpp2_egress_enable(port);
4564		mvpp2_ingress_enable(port);
4565	}
4566}
4567
4568static int mvpp2_open(struct udevice *dev, struct mvpp2_port *port)
4569{
4570	unsigned char mac_bcast[ETH_ALEN] = {
4571			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
4572	int err;
4573
4574	err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
4575	if (err) {
4576		dev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
4577		return err;
4578	}
4579	err = mvpp2_prs_mac_da_accept(port->priv, port->id,
4580				      port->dev_addr, true);
4581	if (err) {
4582		dev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
4583		return err;
4584	}
4585	err = mvpp2_prs_def_flow(port);
4586	if (err) {
4587		dev_err(dev, "mvpp2_prs_def_flow failed\n");
4588		return err;
4589	}
4590
4591	/* Allocate the Rx/Tx queues */
4592	err = mvpp2_setup_rxqs(port);
4593	if (err) {
4594		dev_err(port->phy_dev->dev, "cannot allocate Rx queues\n");
4595		return err;
4596	}
4597
4598	err = mvpp2_setup_txqs(port);
4599	if (err) {
4600		dev_err(port->phy_dev->dev, "cannot allocate Tx queues\n");
4601		return err;
4602	}
4603
4604	if (port->phyaddr < PHY_MAX_ADDR) {
4605		mvpp2_phy_connect(dev, port);
4606		mvpp2_link_event(port);
4607	} else {
4608		mvpp2_egress_enable(port);
4609		mvpp2_ingress_enable(port);
4610	}
4611
4612	mvpp2_start_dev(port);
4613
4614	return 0;
4615}
4616
4617/* No Device ops here in U-Boot */
4618
4619/* Driver initialization */
4620
4621static void mvpp2_port_power_up(struct mvpp2_port *port)
4622{
4623	struct mvpp2 *priv = port->priv;
4624
4625	/* On PPv2.2 the GoP / interface configuration has already been done */
4626	if (priv->hw_version == MVPP21)
4627		mvpp2_port_mii_set(port);
4628	mvpp2_port_periodic_xon_disable(port);
4629	if (priv->hw_version == MVPP21)
4630		mvpp2_port_fc_adv_enable(port);
4631	mvpp2_port_reset(port);
4632}
4633
4634/* Initialize port HW */
4635static int mvpp2_port_init(struct udevice *dev, struct mvpp2_port *port)
4636{
4637	struct mvpp2 *priv = port->priv;
4638	struct mvpp2_txq_pcpu *txq_pcpu;
4639	int queue, cpu, err;
4640
4641	if (port->first_rxq + rxq_number >
4642	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
4643		return -EINVAL;
4644
4645	/* Disable port */
4646	mvpp2_egress_disable(port);
4647	if (priv->hw_version == MVPP21)
4648		mvpp2_port_disable(port);
4649	else
4650		gop_port_enable(port, 0);
4651
4652	port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
4653				  GFP_KERNEL);
4654	if (!port->txqs)
4655		return -ENOMEM;
4656
4657	/* Associate physical Tx queues to this port and initialize.
4658	 * The mapping is predefined.
4659	 */
4660	for (queue = 0; queue < txq_number; queue++) {
4661		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
4662		struct mvpp2_tx_queue *txq;
4663
4664		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
4665		if (!txq)
4666			return -ENOMEM;
4667
4668		txq->pcpu = devm_kzalloc(dev, sizeof(struct mvpp2_txq_pcpu),
4669					 GFP_KERNEL);
4670		if (!txq->pcpu)
4671			return -ENOMEM;
4672
4673		txq->id = queue_phy_id;
4674		txq->log_id = queue;
4675		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
4676		for_each_present_cpu(cpu) {
4677			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4678			txq_pcpu->cpu = cpu;
4679		}
4680
4681		port->txqs[queue] = txq;
4682	}
4683
4684	port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
4685				  GFP_KERNEL);
4686	if (!port->rxqs)
4687		return -ENOMEM;
4688
4689	/* Allocate and initialize Rx queue for this port */
4690	for (queue = 0; queue < rxq_number; queue++) {
4691		struct mvpp2_rx_queue *rxq;
4692
4693		/* Map physical Rx queue to port's logical Rx queue */
4694		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
4695		if (!rxq)
4696			return -ENOMEM;
4697		/* Map this Rx queue to a physical queue */
4698		rxq->id = port->first_rxq + queue;
4699		rxq->port = port->id;
4700		rxq->logic_rxq = queue;
4701
4702		port->rxqs[queue] = rxq;
4703	}
4704
4705
4706	/* Create Rx descriptor rings */
4707	for (queue = 0; queue < rxq_number; queue++) {
4708		struct mvpp2_rx_queue *rxq = port->rxqs[queue];
4709
4710		rxq->size = port->rx_ring_size;
4711		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
4712		rxq->time_coal = MVPP2_RX_COAL_USEC;
4713	}
4714
4715	mvpp2_ingress_disable(port);
4716
4717	/* Port default configuration */
4718	mvpp2_defaults_set(port);
4719
4720	/* Port's classifier configuration */
4721	mvpp2_cls_oversize_rxq_set(port);
4722	mvpp2_cls_port_config(port);
4723
4724	/* Provide an initial Rx packet size */
4725	port->pkt_size = MVPP2_RX_PKT_SIZE(PKTSIZE_ALIGN);
4726
4727	/* Initialize pools for swf */
4728	err = mvpp2_swf_bm_pool_init(port);
4729	if (err)
4730		return err;
4731
4732	return 0;
4733}
4734
4735static int phy_info_parse(struct udevice *dev, struct mvpp2_port *port)
4736{
4737	int port_node = dev_of_offset(dev);
4738	int phy_node;
4739	u32 id;
4740	u32 phyaddr = 0;
4741	int fixed_link = 0;
4742	int ret;
4743
4744	phy_node = fdtdec_lookup_phandle(gd->fdt_blob, port_node, "phy");
4745	fixed_link = fdt_subnode_offset(gd->fdt_blob, port_node, "fixed-link");
4746
4747	if (phy_node > 0) {
4748		int parent;
4749
4750		if (fixed_link != -FDT_ERR_NOTFOUND) {
4751			/* phy_addr is set to invalid value for fixed links */
4752			phyaddr = PHY_MAX_ADDR;
4753		} else {
4754			phyaddr = fdtdec_get_int(gd->fdt_blob, phy_node,
4755						 "reg", 0);
4756			if (phyaddr < 0) {
4757				dev_err(dev, "could not find phy address\n");
4758				return -1;
4759			}
4760		}
4761		parent = fdt_parent_offset(gd->fdt_blob, phy_node);
4762		ret = uclass_get_device_by_of_offset(UCLASS_MDIO, parent,
4763						     &port->mdio_dev);
4764		if (ret)
4765			return ret;
4766	} else {
4767		/* phy_addr is set to invalid value */
4768		phyaddr = PHY_MAX_ADDR;
4769	}
4770
4771	port->phy_interface = dev_read_phy_mode(dev);
4772	if (port->phy_interface == PHY_INTERFACE_MODE_NA) {
4773		dev_err(dev, "incorrect phy mode\n");
4774		return -EINVAL;
4775	}
4776
4777	id = fdtdec_get_int(gd->fdt_blob, port_node, "port-id", -1);
4778	if (id == -1) {
4779		dev_err(dev, "missing port-id value\n");
4780		return -EINVAL;
4781	}
4782
4783#if CONFIG_IS_ENABLED(DM_GPIO)
4784	gpio_request_by_name(dev, "phy-reset-gpios", 0,
4785			     &port->phy_reset_gpio, GPIOD_IS_OUT);
4786	gpio_request_by_name(dev, "marvell,sfp-tx-disable-gpio", 0,
4787			     &port->phy_tx_disable_gpio, GPIOD_IS_OUT);
4788#endif
4789
4790	port->id = id;
4791	if (port->priv->hw_version == MVPP21)
4792		port->first_rxq = port->id * rxq_number;
4793	else
4794		port->first_rxq = port->id * port->priv->max_port_rxqs;
4795	port->phyaddr = phyaddr;
4796
4797	return 0;
4798}
4799
4800#if CONFIG_IS_ENABLED(DM_GPIO)
4801/* Port GPIO initialization */
4802static void mvpp2_gpio_init(struct mvpp2_port *port)
4803{
4804	if (dm_gpio_is_valid(&port->phy_reset_gpio)) {
4805		dm_gpio_set_value(&port->phy_reset_gpio, 1);
4806		mdelay(10);
4807		dm_gpio_set_value(&port->phy_reset_gpio, 0);
4808	}
4809
4810	if (dm_gpio_is_valid(&port->phy_tx_disable_gpio))
4811		dm_gpio_set_value(&port->phy_tx_disable_gpio, 0);
4812}
4813#endif
4814
4815/* Ports initialization */
4816static int mvpp2_port_probe(struct udevice *dev,
4817			    struct mvpp2_port *port,
4818			    int port_node,
4819			    struct mvpp2 *priv)
4820{
4821	int err;
4822
4823	port->tx_ring_size = MVPP2_MAX_TXD;
4824	port->rx_ring_size = MVPP2_MAX_RXD;
4825
4826	err = mvpp2_port_init(dev, port);
4827	if (err < 0) {
4828		dev_err(dev, "failed to init port %d\n", port->id);
4829		return err;
4830	}
4831	mvpp2_port_power_up(port);
4832
4833#if CONFIG_IS_ENABLED(DM_GPIO)
4834	mvpp2_gpio_init(port);
4835#endif
4836
4837	priv->port_list[port->id] = port;
4838	priv->num_ports++;
4839	return 0;
4840}
4841
4842/* Initialize decoding windows */
4843static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
4844				    struct mvpp2 *priv)
4845{
4846	u32 win_enable;
4847	int i;
4848
4849	for (i = 0; i < 6; i++) {
4850		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
4851		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
4852
4853		if (i < 4)
4854			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
4855	}
4856
4857	win_enable = 0;
4858
4859	for (i = 0; i < dram->num_cs; i++) {
4860		const struct mbus_dram_window *cs = dram->cs + i;
4861
4862		mvpp2_write(priv, MVPP2_WIN_BASE(i),
4863			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
4864			    dram->mbus_dram_target_id);
4865
4866		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
4867			    (cs->size - 1) & 0xffff0000);
4868
4869		win_enable |= (1 << i);
4870	}
4871
4872	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
4873}
4874
4875/* Initialize Rx FIFO's */
4876static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
4877{
4878	int port;
4879
4880	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
4881		if (priv->hw_version == MVPP22) {
4882			if (port == 0) {
4883				mvpp2_write(priv,
4884					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
4885					    MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE);
4886				mvpp2_write(priv,
4887					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
4888					    MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE);
4889			} else if (port == 1) {
4890				mvpp2_write(priv,
4891					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
4892					    MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE);
4893				mvpp2_write(priv,
4894					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
4895					    MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE);
4896			} else {
4897				mvpp2_write(priv,
4898					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
4899					    MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE);
4900				mvpp2_write(priv,
4901					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
4902					    MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE);
4903			}
4904		} else {
4905			mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
4906				    MVPP21_RX_FIFO_PORT_DATA_SIZE);
4907			mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
4908				    MVPP21_RX_FIFO_PORT_ATTR_SIZE);
4909		}
4910	}
4911
4912	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
4913		    MVPP2_RX_FIFO_PORT_MIN_PKT);
4914	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
4915}
4916
4917/* Initialize Tx FIFO's */
4918static void mvpp2_tx_fifo_init(struct mvpp2 *priv)
4919{
4920	int port, val;
4921
4922	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
4923		/* Port 0 supports 10KB TX FIFO */
4924		if (port == 0) {
4925			val = MVPP2_TX_FIFO_DATA_SIZE_10KB &
4926				MVPP22_TX_FIFO_SIZE_MASK;
4927		} else {
4928			val = MVPP2_TX_FIFO_DATA_SIZE_3KB &
4929				MVPP22_TX_FIFO_SIZE_MASK;
4930		}
4931		mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), val);
4932	}
4933}
4934
4935static void mvpp2_axi_init(struct mvpp2 *priv)
4936{
4937	u32 val, rdval, wrval;
4938
4939	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
4940
4941	/* AXI Bridge Configuration */
4942
4943	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
4944		<< MVPP22_AXI_ATTR_CACHE_OFFS;
4945	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
4946		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;
4947
4948	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
4949		<< MVPP22_AXI_ATTR_CACHE_OFFS;
4950	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
4951		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;
4952
4953	/* BM */
4954	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
4955	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
4956
4957	/* Descriptors */
4958	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
4959	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
4960	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
4961	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
4962
4963	/* Buffer Data */
4964	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
4965	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
4966
4967	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
4968		<< MVPP22_AXI_CODE_CACHE_OFFS;
4969	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
4970		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
4971	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
4972	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
4973
4974	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
4975		<< MVPP22_AXI_CODE_CACHE_OFFS;
4976	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
4977		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
4978
4979	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
4980
4981	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
4982		<< MVPP22_AXI_CODE_CACHE_OFFS;
4983	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
4984		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
4985
4986	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
4987}
4988
4989/* Initialize network controller common part HW */
4990static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv)
4991{
4992	const struct mbus_dram_target_info *dram_target_info;
4993	int err, i;
4994	u32 val;
4995
4996	/* Checks for hardware constraints (U-Boot uses only one rxq) */
4997	if ((rxq_number > priv->max_port_rxqs) ||
4998	    (txq_number > MVPP2_MAX_TXQ)) {
4999		dev_err(dev, "invalid queue size parameter\n");
5000		return -EINVAL;
5001	}
5002
5003	if (priv->hw_version == MVPP22)
5004		mvpp2_axi_init(priv);
5005	else {
5006		/* MBUS windows configuration */
5007		dram_target_info = mvebu_mbus_dram_info();
5008		if (dram_target_info)
5009			mvpp2_conf_mbus_windows(dram_target_info, priv);
5010	}
5011
5012	if (priv->hw_version == MVPP21) {
5013		/* Disable HW PHY polling */
5014		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
5015		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
5016		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
5017	} else {
5018		/* Enable HW PHY polling */
5019		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
5020		val |= MVPP22_SMI_POLLING_EN;
5021		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
5022	}
5023
5024	/* Allocate and initialize aggregated TXQs */
5025	priv->aggr_txqs = devm_kcalloc(dev, num_present_cpus(),
5026				       sizeof(struct mvpp2_tx_queue),
5027				       GFP_KERNEL);
5028	if (!priv->aggr_txqs)
5029		return -ENOMEM;
5030
5031	for_each_present_cpu(i) {
5032		priv->aggr_txqs[i].id = i;
5033		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
5034		err = mvpp2_aggr_txq_init(dev, &priv->aggr_txqs[i],
5035					  MVPP2_AGGR_TXQ_SIZE, i, priv);
5036		if (err < 0)
5037			return err;
5038	}
5039
5040	/* Rx Fifo Init */
5041	mvpp2_rx_fifo_init(priv);
5042
5043	/* Tx Fifo Init */
5044	if (priv->hw_version == MVPP22)
5045		mvpp2_tx_fifo_init(priv);
5046
5047	if (priv->hw_version == MVPP21)
5048		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
5049		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
5050
5051	/* Allow cache snoop when transmiting packets */
5052	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
5053
5054	/* Buffer Manager initialization */
5055	err = mvpp2_bm_init(dev, priv);
5056	if (err < 0)
5057		return err;
5058
5059	/* Parser default initialization */
5060	err = mvpp2_prs_default_init(dev, priv);
5061	if (err < 0)
5062		return err;
5063
5064	/* Classifier default initialization */
5065	mvpp2_cls_init(priv);
5066
5067	return 0;
5068}
5069
5070static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp)
5071{
5072	struct mvpp2_port *port = dev_get_priv(dev);
5073	struct mvpp2_rx_desc *rx_desc;
5074	struct mvpp2_bm_pool *bm_pool;
5075	dma_addr_t dma_addr;
5076	u32 bm, rx_status;
5077	int pool, rx_bytes, err;
5078	int rx_received;
5079	struct mvpp2_rx_queue *rxq;
5080	u8 *data;
5081
5082	if (port->phyaddr < PHY_MAX_ADDR)
5083		if (!port->phy_dev->link)
5084			return 0;
5085
5086	/* Process RX packets */
5087	rxq = port->rxqs[0];
5088
5089	/* Get number of received packets and clamp the to-do */
5090	rx_received = mvpp2_rxq_received(port, rxq->id);
5091
5092	/* Return if no packets are received */
5093	if (!rx_received)
5094		return 0;
5095
5096	rx_desc = mvpp2_rxq_next_desc_get(rxq);
5097	rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
5098	rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
5099	rx_bytes -= MVPP2_MH_SIZE;
5100	dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
5101
5102	bm = mvpp2_bm_cookie_build(port, rx_desc);
5103	pool = mvpp2_bm_cookie_pool_get(bm);
5104	bm_pool = &port->priv->bm_pools[pool];
5105
5106	/* In case of an error, release the requested buffer pointer
5107	 * to the Buffer Manager. This request process is controlled
5108	 * by the hardware, and the information about the buffer is
5109	 * comprised by the RX descriptor.
5110	 */
5111	if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
5112		mvpp2_rx_error(port, rx_desc);
5113		/* Return the buffer to the pool */
5114		mvpp2_pool_refill(port, bm, dma_addr, dma_addr);
5115		return 0;
5116	}
5117
5118	err = mvpp2_rx_refill(port, bm_pool, bm, dma_addr);
5119	if (err) {
5120		dev_err(port->phy_dev->dev, "failed to refill BM pools\n");
5121		return 0;
5122	}
5123
5124	/* Update Rx queue management counters */
5125	mb();
5126	mvpp2_rxq_status_update(port, rxq->id, 1, 1);
5127
5128	/* give packet to stack - skip on first n bytes */
5129	data = (u8 *)dma_addr + 2 + 32;
5130
5131	if (rx_bytes <= 0)
5132		return 0;
5133
5134	/*
5135	 * No cache invalidation needed here, since the rx_buffer's are
5136	 * located in a uncached memory region
5137	 */
5138	*packetp = data;
5139
5140	return rx_bytes;
5141}
5142
5143static int mvpp2_send(struct udevice *dev, void *packet, int length)
5144{
5145	struct mvpp2_port *port = dev_get_priv(dev);
5146	struct mvpp2_tx_queue *txq, *aggr_txq;
5147	struct mvpp2_tx_desc *tx_desc;
5148	int tx_done;
5149	int timeout;
5150
5151	if (port->phyaddr < PHY_MAX_ADDR)
5152		if (!port->phy_dev->link)
5153			return 0;
5154
5155	txq = port->txqs[0];
5156	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
5157
5158	/* Get a descriptor for the first part of the packet */
5159	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5160	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
5161	mvpp2_txdesc_size_set(port, tx_desc, length);
5162	mvpp2_txdesc_offset_set(port, tx_desc,
5163				(dma_addr_t)packet & MVPP2_TX_DESC_ALIGN);
5164	mvpp2_txdesc_dma_addr_set(port, tx_desc,
5165				  (dma_addr_t)packet & ~MVPP2_TX_DESC_ALIGN);
5166	/* First and Last descriptor */
5167	mvpp2_txdesc_cmd_set(port, tx_desc,
5168			     MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE
5169			     | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC);
5170
5171	/* Flush tx data */
5172	flush_dcache_range((unsigned long)packet,
5173			   (unsigned long)packet + ALIGN(length, PKTALIGN));
5174
5175	/* Enable transmit */
5176	mb();
5177	mvpp2_aggr_txq_pend_desc_add(port, 1);
5178
5179	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
5180
5181	timeout = 0;
5182	do {
5183		if (timeout++ > 10000) {
5184			printf("timeout: packet not sent from aggregated to phys TXQ\n");
5185			return 0;
5186		}
5187		tx_done = mvpp2_txq_pend_desc_num_get(port, txq);
5188	} while (tx_done);
5189
5190	timeout = 0;
5191	do {
5192		if (timeout++ > 10000) {
5193			printf("timeout: packet not sent\n");
5194			return 0;
5195		}
5196		tx_done = mvpp2_txq_sent_desc_proc(port, txq);
5197	} while (!tx_done);
5198
5199	return 0;
5200}
5201
5202static int mvpp2_start(struct udevice *dev)
5203{
5204	struct eth_pdata *pdata = dev_get_plat(dev);
5205	struct mvpp2_port *port = dev_get_priv(dev);
5206
5207	/* Load current MAC address */
5208	memcpy(port->dev_addr, pdata->enetaddr, ETH_ALEN);
5209
5210	/* Reconfigure parser accept the original MAC address */
5211	mvpp2_prs_update_mac_da(port, port->dev_addr);
5212
5213	switch (port->phy_interface) {
5214	case PHY_INTERFACE_MODE_RGMII:
5215	case PHY_INTERFACE_MODE_RGMII_ID:
5216	case PHY_INTERFACE_MODE_SGMII:
5217	case PHY_INTERFACE_MODE_1000BASEX:
5218	case PHY_INTERFACE_MODE_2500BASEX:
5219		mvpp2_port_power_up(port);
5220	default:
5221		break;
5222	}
5223
5224	mvpp2_open(dev, port);
5225
5226	return 0;
5227}
5228
5229static void mvpp2_stop(struct udevice *dev)
5230{
5231	struct mvpp2_port *port = dev_get_priv(dev);
5232
5233	mvpp2_stop_dev(port);
5234	mvpp2_cleanup_rxqs(port);
5235	mvpp2_cleanup_txqs(port);
5236}
5237
5238static int mvpp2_write_hwaddr(struct udevice *dev)
5239{
5240	struct mvpp2_port *port = dev_get_priv(dev);
5241
5242	return mvpp2_prs_update_mac_da(port, port->dev_addr);
5243}
5244
5245static int mvpp2_base_probe(struct udevice *dev)
5246{
5247	struct mvpp2 *priv = dev_get_priv(dev);
5248	void *bd_space;
5249	u32 size = 0;
5250	int i;
5251
5252	/* Save hw-version */
5253	priv->hw_version = dev_get_driver_data(dev);
5254
5255	/*
5256	 * U-Boot special buffer handling:
5257	 *
5258	 * Allocate buffer area for descs and rx_buffers. This is only
5259	 * done once for all interfaces. As only one interface can
5260	 * be active. Make this area DMA-safe by disabling the D-cache
5261	 */
5262
5263	if (!buffer_loc_init) {
5264		/* Align buffer area for descs and rx_buffers to 1MiB */
5265		bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
5266		mmu_set_region_dcache_behaviour((unsigned long)bd_space,
5267						BD_SPACE, DCACHE_OFF);
5268
5269		buffer_loc.aggr_tx_descs = (struct mvpp2_tx_desc *)bd_space;
5270		size += MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE;
5271
5272		buffer_loc.tx_descs =
5273			(struct mvpp2_tx_desc *)((unsigned long)bd_space + size);
5274		size += MVPP2_MAX_TXD * MVPP2_DESC_ALIGNED_SIZE;
5275
5276		buffer_loc.rx_descs =
5277			(struct mvpp2_rx_desc *)((unsigned long)bd_space + size);
5278		size += MVPP2_MAX_RXD * MVPP2_DESC_ALIGNED_SIZE;
5279
5280		for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
5281			buffer_loc.bm_pool[i] =
5282				(unsigned long *)((unsigned long)bd_space + size);
5283			if (priv->hw_version == MVPP21)
5284				size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u32);
5285			else
5286				size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u64);
5287		}
5288
5289		for (i = 0; i < MVPP2_BM_LONG_BUF_NUM; i++) {
5290			buffer_loc.rx_buffer[i] =
5291				(unsigned long *)((unsigned long)bd_space + size);
5292			size += RX_BUFFER_SIZE;
5293		}
5294
5295		/* Clear the complete area so that all descriptors are cleared */
5296		memset(bd_space, 0, size);
5297
5298		buffer_loc_init = 1;
5299	}
5300
5301	/* Save base addresses for later use */
5302	priv->base = devfdt_get_addr_index_ptr(dev, 0);
5303	if (!priv->base)
5304		return -EINVAL;
5305
5306	if (priv->hw_version == MVPP21) {
5307		priv->lms_base = devfdt_get_addr_index_ptr(dev, 1);
5308		if (!priv->lms_base)
5309			return -EINVAL;
5310	} else {
5311		priv->iface_base = devfdt_get_addr_index_ptr(dev, 1);
5312		if (!priv->iface_base)
5313			return -EINVAL;
5314
5315		/* Store common base addresses for all ports */
5316		priv->mpcs_base = priv->iface_base + MVPP22_MPCS;
5317		priv->xpcs_base = priv->iface_base + MVPP22_XPCS;
5318		priv->rfu1_base = priv->iface_base + MVPP22_RFU1;
5319	}
5320
5321	if (priv->hw_version == MVPP21)
5322		priv->max_port_rxqs = 8;
5323	else
5324		priv->max_port_rxqs = 32;
5325
5326	return 0;
5327}
5328
5329static int mvpp2_probe(struct udevice *dev)
5330{
5331	struct mvpp2_port *port = dev_get_priv(dev);
5332	struct mvpp2 *priv = dev_get_priv(dev->parent);
5333	int err;
5334
5335	/* Only call the probe function for the parent once */
5336	if (!priv->probe_done)
5337		err = mvpp2_base_probe(dev->parent);
5338
5339	port->priv = priv;
5340
5341	err = phy_info_parse(dev, port);
5342	if (err)
5343		return err;
5344
5345	/*
5346	 * We need the port specific io base addresses at this stage, since
5347	 * gop_port_init() accesses these registers
5348	 */
5349	if (priv->hw_version == MVPP21) {
5350		int priv_common_regs_num = 2;
5351
5352		port->base = devfdt_get_addr_index_ptr(
5353			dev->parent, priv_common_regs_num + port->id);
5354		if (!port->base)
5355			return -EINVAL;
5356	} else {
5357		port->gop_id = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev),
5358					      "gop-port-id", -1);
5359		if (port->id == -1) {
5360			dev_err(dev, "missing gop-port-id value\n");
5361			return -EINVAL;
5362		}
5363
5364		port->base = priv->iface_base + MVPP22_PORT_BASE +
5365			port->gop_id * MVPP22_PORT_OFFSET;
5366
5367		/* GoP Init */
5368		gop_port_init(port);
5369	}
5370
5371	if (!priv->probe_done) {
5372		/* Initialize network controller */
5373		err = mvpp2_init(dev, priv);
5374		if (err < 0) {
5375			dev_err(dev, "failed to initialize controller\n");
5376			return err;
5377		}
5378		priv->num_ports = 0;
5379		priv->probe_done = 1;
5380	}
5381
5382	err = mvpp2_port_probe(dev, port, dev_of_offset(dev), priv);
5383	if (err)
5384		return err;
5385
5386	if (priv->hw_version == MVPP22) {
5387		priv->netc_config |= mvpp2_netc_cfg_create(port->gop_id,
5388							   port->phy_interface);
5389
5390		/* Netcomplex configurations for all ports */
5391		gop_netc_init(priv, MV_NETC_FIRST_PHASE);
5392		gop_netc_init(priv, MV_NETC_SECOND_PHASE);
5393	}
5394
5395	return 0;
5396}
5397
5398/*
5399 * Empty BM pool and stop its activity before the OS is started
5400 */
5401static int mvpp2_remove(struct udevice *dev)
5402{
5403	struct mvpp2_port *port = dev_get_priv(dev);
5404	struct mvpp2 *priv = port->priv;
5405	int i;
5406
5407	priv->num_ports--;
5408
5409	if (priv->num_ports)
5410		return 0;
5411
5412	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++)
5413		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
5414
5415	return 0;
5416}
5417
5418static const struct eth_ops mvpp2_ops = {
5419	.start		= mvpp2_start,
5420	.send		= mvpp2_send,
5421	.recv		= mvpp2_recv,
5422	.stop		= mvpp2_stop,
5423	.write_hwaddr	= mvpp2_write_hwaddr
5424};
5425
5426static struct driver mvpp2_driver = {
5427	.name	= "mvpp2",
5428	.id	= UCLASS_ETH,
5429	.probe	= mvpp2_probe,
5430	.remove = mvpp2_remove,
5431	.ops	= &mvpp2_ops,
5432	.priv_auto	= sizeof(struct mvpp2_port),
5433	.plat_auto	= sizeof(struct eth_pdata),
5434	.flags	= DM_FLAG_ACTIVE_DMA,
5435};
5436
5437/*
5438 * Use a MISC device to bind the n instances (child nodes) of the
5439 * network base controller in UCLASS_ETH.
5440 */
5441static int mvpp2_base_bind(struct udevice *parent)
5442{
5443	const void *blob = gd->fdt_blob;
5444	int node = dev_of_offset(parent);
5445	struct uclass_driver *drv;
5446	struct udevice *dev;
5447	struct eth_pdata *plat;
5448	char *name;
5449	int subnode;
5450	u32 id;
5451	int base_id_add;
5452
5453	/* Lookup eth driver */
5454	drv = lists_uclass_lookup(UCLASS_ETH);
5455	if (!drv) {
5456		puts("Cannot find eth driver\n");
5457		return -ENOENT;
5458	}
5459
5460	base_id_add = base_id;
5461
5462	fdt_for_each_subnode(subnode, blob, node) {
5463		/* Increment base_id for all subnodes, also the disabled ones */
5464		base_id++;
5465
5466		/* Skip disabled ports */
5467		if (!fdtdec_get_is_enabled(blob, subnode))
5468			continue;
5469
5470		plat = calloc(1, sizeof(*plat));
5471		if (!plat)
5472			return -ENOMEM;
5473
5474		id = fdtdec_get_int(blob, subnode, "port-id", -1);
5475		id += base_id_add;
5476
5477		name = calloc(1, 16);
5478		if (!name) {
5479			free(plat);
5480			return -ENOMEM;
5481		}
5482		sprintf(name, "mvpp2-%d", id);
5483
5484		/* Create child device UCLASS_ETH and bind it */
5485		device_bind(parent, &mvpp2_driver, name, plat,
5486			    offset_to_ofnode(subnode), &dev);
5487	}
5488
5489	return 0;
5490}
5491
5492static const struct udevice_id mvpp2_ids[] = {
5493	{
5494		.compatible = "marvell,armada-375-pp2",
5495		.data = MVPP21,
5496	},
5497	{
5498		.compatible = "marvell,armada-7k-pp22",
5499		.data = MVPP22,
5500	},
5501	{ }
5502};
5503
5504U_BOOT_DRIVER(mvpp2_base) = {
5505	.name	= "mvpp2_base",
5506	.id	= UCLASS_MISC,
5507	.of_match = mvpp2_ids,
5508	.bind	= mvpp2_base_bind,
5509	.priv_auto	= sizeof(struct mvpp2),
5510};
5511