// SPDX-License-Identifier: GPL-2.0-only
/* 10G controller driver for Samsung SoCs
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/jiffies.h>

#include "sxgbe_mtl.h"
#include "sxgbe_reg.h"

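/* Select the Tx queue scheduling (ETS) algorithm and the Rx queue
 * arbitration algorithm in the MTL operation mode register.
 */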
static void sxgbe_mtl_init(void __iomem *ioaddr, unsigned int etsalg,
			   unsigned int raa)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_MTL_OP_MODE_REG);
	reg_val &= ETS_RST;

	/* ETS algorithm */
	switch (etsalg & SXGBE_MTL_OPMODE_ESTMASK) {
	case ETS_WRR:
		reg_val &= ETS_WRR;
		break;
	case ETS_WFQ:
		reg_val |= ETS_WFQ;
		break;
	case ETS_DWRR:
		reg_val |= ETS_DWRR;
		break;
	}
	writel(reg_val, ioaddr + SXGBE_MTL_OP_MODE_REG);

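	/* Rx queue arbitration algorithm */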
	switch (raa & SXGBE_MTL_OPMODE_RAAMASK) {
	case RAA_SP:
		reg_val &= RAA_SP;
		break;
	case RAA_WSP:
		reg_val |= RAA_WSP;
		break;
	}
	writel(reg_val, ioaddr + SXGBE_MTL_OP_MODE_REG);
}

/* Select dynamic DMA channel mapping for the Rx queues */
static void sxgbe_mtl_dma_dm_rxqueue(void __iomem *ioaddr)
{
	writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP0_REG);
	writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP1_REG);
	writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP2_REG);
}

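/* Program the Tx queue FIFO size (encoded in 256-byte units, 0 = 256 bytes) */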
static void sxgbe_mtl_set_txfifosize(void __iomem *ioaddr, int queue_num,
				     int queue_fifo)
{
	u32 fifo_bits, reg_val;

	/* 0 means 256 bytes */
	fifo_bits = (queue_fifo / SXGBE_MTL_TX_FIFO_DIV) - 1;
	reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
	reg_val |= (fifo_bits << SXGBE_MTL_FIFO_LSHIFT);
	writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
}

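/* Program the Rx queue FIFO size (encoded in 256-byte units, 0 = 256 bytes) */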
static void sxgbe_mtl_set_rxfifosize(void __iomem *ioaddr, int queue_num,
				     int queue_fifo)
{
	u32 fifo_bits, reg_val;

	/* 0 means 256 bytes */
	fifo_bits = (queue_fifo / SXGBE_MTL_RX_FIFO_DIV) - 1;
	reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
	reg_val |= (fifo_bits << SXGBE_MTL_FIFO_LSHIFT);
	writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
}

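/* Enable the Tx queue in the MTL Tx queue operation mode register */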
static void sxgbe_mtl_enable_txqueue(void __iomem *ioaddr, int queue_num)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
	reg_val |= SXGBE_MTL_ENABLE_QUEUE;
	writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
}

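/* Disable the Tx queue in the MTL Tx queue operation mode register */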
static void sxgbe_mtl_disable_txqueue(void __iomem *ioaddr, int queue_num)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
	reg_val &= ~SXGBE_MTL_ENABLE_QUEUE;
	writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
}

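/* Set the Rx queue fill threshold at which flow control is activated */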
static void sxgbe_mtl_fc_active(void __iomem *ioaddr, int queue_num,
				int threshold)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
	reg_val &= ~(SXGBE_MTL_FCMASK << RX_FC_ACTIVE);
	reg_val |= (threshold << RX_FC_ACTIVE);

	writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
}

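/* Enable hardware flow control on the Rx queue */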
static void sxgbe_mtl_fc_enable(void __iomem *ioaddr, int queue_num)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
	reg_val |= SXGBE_MTL_ENABLE_FC;
	writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
}

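/* Set the Rx queue fill threshold at which flow control is deactivated */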
static void sxgbe_mtl_fc_deactive(void __iomem *ioaddr, int queue_num,
				  int threshold)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
	reg_val &= ~(SXGBE_MTL_FCMASK << RX_FC_DEACTIVE);
	reg_val |= (threshold << RX_FC_DEACTIVE);

	writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
}

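/* Set the FEP (forward error packets) bit for the Rx queue */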
static void sxgbe_mtl_fep_enable(void __iomem *ioaddr, int queue_num)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
	reg_val |= SXGBE_MTL_RXQ_OP_FEP;

	writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
}

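/* Clear the FEP (forward error packets) bit for the Rx queue */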
static void sxgbe_mtl_fep_disable(void __iomem *ioaddr, int queue_num)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
	reg_val &= ~(SXGBE_MTL_RXQ_OP_FEP);

	writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
}

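/* Set the FUP (forward undersized good packets) bit for the Rx queue */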
static void sxgbe_mtl_fup_enable(void __iomem *ioaddr, int queue_num)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
	reg_val |= SXGBE_MTL_RXQ_OP_FUP;

	writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
}

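/* Clear the FUP (forward undersized good packets) bit for the Rx queue */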
static void sxgbe_mtl_fup_disable(void __iomem *ioaddr, int queue_num)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
	reg_val &= ~(SXGBE_MTL_RXQ_OP_FUP);

	writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
}

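/* Select either store-and-forward mode or a transmit threshold (TTC)
 * for the Tx queue, based on tx_mode.
 */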
static void sxgbe_set_tx_mtl_mode(void __iomem *ioaddr, int queue_num,
				  int tx_mode)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
	/* TX specific MTL mode settings */
	if (tx_mode == SXGBE_MTL_SFMODE) {
		reg_val |= SXGBE_MTL_SFMODE;
	} else {
		/* set the TTC values */
		if (tx_mode <= 64)
			reg_val |= MTL_CONTROL_TTC_64;
		else if (tx_mode <= 96)
			reg_val |= MTL_CONTROL_TTC_96;
		else if (tx_mode <= 128)
			reg_val |= MTL_CONTROL_TTC_128;
		else if (tx_mode <= 192)
			reg_val |= MTL_CONTROL_TTC_192;
		else if (tx_mode <= 256)
			reg_val |= MTL_CONTROL_TTC_256;
		else if (tx_mode <= 384)
			reg_val |= MTL_CONTROL_TTC_384;
		else
			reg_val |= MTL_CONTROL_TTC_512;
	}

	/* write into TXQ operation register */
	writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
}

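/* Select either store-and-forward mode or a receive threshold (RTC)
 * for the Rx queue, based on rx_mode.
 */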
static void sxgbe_set_rx_mtl_mode(void __iomem *ioaddr, int queue_num,
				  int rx_mode)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
	/* RX specific MTL mode settings */
	if (rx_mode == SXGBE_RX_MTL_SFMODE) {
		reg_val |= SXGBE_RX_MTL_SFMODE;
	} else {
		if (rx_mode <= 64)
			reg_val |= MTL_CONTROL_RTC_64;
		else if (rx_mode <= 96)
			reg_val |= MTL_CONTROL_RTC_96;
		else if (rx_mode <= 128)
			reg_val |= MTL_CONTROL_RTC_128;
	}

	/* write into RXQ operation register */
	writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
}

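/* MTL operations exported to the rest of the driver */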
static const struct sxgbe_mtl_ops mtl_ops = {
	.mtl_set_txfifosize		= sxgbe_mtl_set_txfifosize,
	.mtl_set_rxfifosize		= sxgbe_mtl_set_rxfifosize,
	.mtl_enable_txqueue		= sxgbe_mtl_enable_txqueue,
	.mtl_disable_txqueue		= sxgbe_mtl_disable_txqueue,
	.mtl_dynamic_dma_rxqueue	= sxgbe_mtl_dma_dm_rxqueue,
	.set_tx_mtl_mode		= sxgbe_set_tx_mtl_mode,
	.set_rx_mtl_mode		= sxgbe_set_rx_mtl_mode,
	.mtl_init			= sxgbe_mtl_init,
	.mtl_fc_active			= sxgbe_mtl_fc_active,
	.mtl_fc_deactive		= sxgbe_mtl_fc_deactive,
	.mtl_fc_enable			= sxgbe_mtl_fc_enable,
	.mtl_fep_enable			= sxgbe_mtl_fep_enable,
	.mtl_fep_disable		= sxgbe_mtl_fep_disable,
	.mtl_fup_enable			= sxgbe_mtl_fup_enable,
	.mtl_fup_disable		= sxgbe_mtl_fup_disable
};

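/* Return the MTL operations table */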
const struct sxgbe_mtl_ops *sxgbe_get_mtl_ops(void)
{
	return &mtl_ops;
}