// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2018 Xilinx
 *
 * Xilinx ZynqMP Generic Quad-SPI (QSPI) controller driver (master mode only)
 */

#define LOG_CATEGORY UCLASS_SPI

#include <cpu_func.h>
#include <log.h>
#include <asm/arch/sys_proto.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <clk.h>
#include <dm.h>
#include <malloc.h>
#include <memalign.h>
#include <spi.h>
#include <spi-mem.h>
#include <ubi_uboot.h>
#include <wait_bit.h>
#include <dm/device_compat.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <zynqmp_firmware.h>

#define GQSPI_GFIFO_STRT_MODE_MASK	BIT(29)
#define GQSPI_CONFIG_MODE_EN_MASK	(3 << 30)
#define GQSPI_CONFIG_DMA_MODE		(2 << 30)
#define GQSPI_CONFIG_CPHA_MASK		BIT(2)
#define GQSPI_CONFIG_CPOL_MASK		BIT(1)

/*
 * QSPI Interrupt Registers bit Masks
 *
 * All the four interrupt registers (Status/Mask/Enable/Disable) have the same
 * bit definitions.
 */
#define GQSPI_IXR_TXNFULL_MASK		0x00000004 /* QSPI TX FIFO Not Full */
#define GQSPI_IXR_TXFULL_MASK		0x00000008 /* QSPI TX FIFO is full */
#define GQSPI_IXR_TXFIFOEMPTY_MASK	0x00000100 /* QSPI TX FIFO is Empty */
#define GQSPI_IXR_RXNEMTY_MASK		0x00000010 /* QSPI RX FIFO Not Empty */
#define GQSPI_IXR_GFEMTY_MASK		0x00000080 /* QSPI Generic FIFO Empty */
#define GQSPI_IXR_GFNFULL_MASK		0x00000200 /* QSPI GENFIFO not full */
#define GQSPI_IXR_ALL_MASK		(GQSPI_IXR_TXNFULL_MASK | \
					 GQSPI_IXR_RXNEMTY_MASK)

/*
 * QSPI Enable Register bit Masks
 *
 * This register is used to enable or disable the QSPI controller
 */
#define GQSPI_ENABLE_ENABLE_MASK	0x00000001 /* QSPI Enable Bit Mask */

#define GQSPI_GFIFO_LOW_BUS		BIT(14)
#define GQSPI_GFIFO_CS_LOWER		BIT(12)
#define GQSPI_GFIFO_UP_BUS		BIT(15)
#define GQSPI_GFIFO_CS_UPPER		BIT(13)
#define GQSPI_SPI_MODE_QSPI		(3 << 10)
#define GQSPI_SPI_MODE_SPI		BIT(10)
#define GQSPI_SPI_MODE_DUAL_SPI		(2 << 10)
#define GQSPI_IMD_DATA_CS_ASSERT	5
#define GQSPI_IMD_DATA_CS_DEASSERT	5
#define GQSPI_GFIFO_TX			BIT(16)
#define GQSPI_GFIFO_RX			BIT(17)
#define GQSPI_GFIFO_STRIPE_MASK		BIT(18)
#define GQSPI_GFIFO_IMD_MASK		0xFF
#define GQSPI_GFIFO_EXP_MASK		BIT(9)
#define GQSPI_GFIFO_DATA_XFR_MASK	BIT(8)
#define GQSPI_STRT_GEN_FIFO		BIT(28)
#define GQSPI_GEN_FIFO_STRT_MOD		BIT(29)
#define GQSPI_GFIFO_WP_HOLD		BIT(19)
#define GQSPI_BAUD_DIV_MASK		(7 << 3)
#define GQSPI_DFLT_BAUD_RATE_DIV	BIT(3)
#define GQSPI_GFIFO_ALL_INT_MASK	0xFBE
#define GQSPI_DMA_DST_I_STS_DONE	BIT(1)
#define GQSPI_DMA_DST_I_STS_MASK	0xFE
#define MODEBITS			0x6

#define GQSPI_GFIFO_SELECT		BIT(0)
#define GQSPI_FIFO_THRESHOLD		1
#define GQSPI_GENFIFO_THRESHOLD		31

#define SPI_XFER_ON_BOTH		0
#define SPI_XFER_ON_LOWER		1
#define SPI_XFER_ON_UPPER		2

#define GQSPI_DMA_ALIGN			0x4
#define GQSPI_MAX_BAUD_RATE_VAL		7
#define GQSPI_DFLT_BAUD_RATE_VAL	2

#define GQSPI_TIMEOUT			100000000

#define GQSPI_BAUD_DIV_SHIFT		2
#define GQSPI_LPBK_DLY_ADJ_LPBK_SHIFT	5
#define GQSPI_LPBK_DLY_ADJ_DLY_1	0x1
#define GQSPI_LPBK_DLY_ADJ_DLY_1_SHIFT	3
#define GQSPI_LPBK_DLY_ADJ_DLY_0	0x3
#define GQSPI_USE_DATA_DLY		0x1
#define GQSPI_USE_DATA_DLY_SHIFT	31
#define GQSPI_DATA_DLY_ADJ_VALUE	0x2
#define GQSPI_DATA_DLY_ADJ_SHIFT	28
#define TAP_DLY_BYPASS_LQSPI_RX_VALUE	0x1
#define TAP_DLY_BYPASS_LQSPI_RX_SHIFT	2
#define GQSPI_DATA_DLY_ADJ_OFST		0x000001F8
#define IOU_TAPDLY_BYPASS_OFST !(IS_ENABLED(CONFIG_ARCH_VERSAL) || \
				 IS_ENABLED(CONFIG_ARCH_VERSAL_NET)) ? \
				0xFF180390 : 0xF103003C
#define GQSPI_LPBK_DLY_ADJ_LPBK_MASK	0x00000020
#define GQSPI_FREQ_37_5MHZ		37500000
#define GQSPI_FREQ_40MHZ		40000000
#define GQSPI_FREQ_100MHZ		100000000
#define GQSPI_FREQ_150MHZ		150000000
#define IOU_TAPDLY_BYPASS_MASK		0x7

#define GQSPI_REG_OFFSET		0x100
#define GQSPI_DMA_REG_OFFSET		0x800

/* QSPI register offsets */
struct zynqmp_qspi_regs {
	u32 confr;	/* 0x00 */
	u32 isr;	/* 0x04 */
	u32 ier;	/* 0x08 */
	u32 idisr;	/* 0x0C */
	u32 imaskr;	/* 0x10 */
	u32 enbr;	/* 0x14 */
	u32 dr;		/* 0x18 */
	u32 txd0r;	/* 0x1C */
	u32 drxr;	/* 0x20 */
	u32 sicr;	/* 0x24 */
	u32 txftr;	/* 0x28 */
	u32 rxftr;	/* 0x2C */
	u32 gpior;	/* 0x30 */
	u32 reserved0;	/* 0x34 */
	u32 lpbkdly;	/* 0x38 */
	u32 reserved1;	/* 0x3C */
	u32 genfifo;	/* 0x40 */
	u32 gqspisel;	/* 0x44 */
	u32 reserved2;	/* 0x48 */
	u32 gqfifoctrl;	/* 0x4C */
	u32 gqfthr;	/* 0x50 */
	u32 gqpollcfg;	/* 0x54 */
	u32 gqpollto;	/* 0x58 */
	u32 gqxfersts;	/* 0x5C */
	u32 gqfifosnap;	/* 0x60 */
	u32 gqrxcpy;	/* 0x64 */
	u32 reserved3[36];	/* 0x68 */
	u32 gqspidlyadj;	/* 0xF8 */
};

struct zynqmp_qspi_dma_regs {
	u32 dmadst;	/* 0x00 */
	u32 dmasize;	/* 0x04 */
	u32 dmasts;	/* 0x08 */
	u32 dmactrl;	/* 0x0C */
	u32 reserved0;	/* 0x10 */
	u32 dmaisr;	/* 0x14 */
	u32 dmaier;	/* 0x18 */
	u32 dmaidr;	/* 0x1C */
	u32 dmaimr;	/* 0x20 */
	u32 dmactrl2;	/* 0x24 */
	u32 dmadstmsb;	/* 0x28 */
};

struct zynqmp_qspi_plat {
	struct zynqmp_qspi_regs *regs;
	struct zynqmp_qspi_dma_regs *dma_regs;
	u32 frequency;
	u32 speed_hz;
	unsigned int io_mode;
};

struct zynqmp_qspi_priv {
	struct zynqmp_qspi_regs *regs;
	struct zynqmp_qspi_dma_regs *dma_regs;
	const void *tx_buf;
	void *rx_buf;
	unsigned int len;
	unsigned int io_mode;
	int bytes_to_transfer;
	int bytes_to_receive;
	const struct spi_mem_op *op;
};

__weak int zynqmp_mmio_write(const u32 address, const u32 mask, const u32 value)
{
	return 0;
}

static int zynqmp_qspi_of_to_plat(struct udevice *bus)
{
	struct zynqmp_qspi_plat *plat = dev_get_plat(bus);

	plat->regs = (struct zynqmp_qspi_regs *)(dev_read_addr(bus) +
						 GQSPI_REG_OFFSET);
	plat->dma_regs = (struct zynqmp_qspi_dma_regs *)
			  (dev_read_addr(bus) + GQSPI_DMA_REG_OFFSET);

	plat->io_mode = dev_read_bool(bus, "has-io-mode");

	return 0;
}

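/*
 * Bring the controller to a known state: select the generic Quad-SPI
 * interface, mask and clear all interrupts, program the FIFO thresholds,
 * enable manual-start mode with the default baud divisor and, unless the
 * "has-io-mode" property was set, enable DMA mode for receive transfers.
 */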
static void zynqmp_qspi_init_hw(struct zynqmp_qspi_priv *priv)
{
	u32 config_reg;
	struct zynqmp_qspi_regs *regs = priv->regs;

	writel(GQSPI_GFIFO_SELECT, &regs->gqspisel);
	writel(GQSPI_GFIFO_ALL_INT_MASK, &regs->idisr);
	writel(GQSPI_FIFO_THRESHOLD, &regs->txftr);
	writel(GQSPI_FIFO_THRESHOLD, &regs->rxftr);
	writel(GQSPI_GENFIFO_THRESHOLD, &regs->gqfthr);
	writel(GQSPI_GFIFO_ALL_INT_MASK, &regs->isr);
	writel(~GQSPI_ENABLE_ENABLE_MASK, &regs->enbr);

	config_reg = readl(&regs->confr);
	config_reg &= ~(GQSPI_GFIFO_STRT_MODE_MASK |
			GQSPI_CONFIG_MODE_EN_MASK);
	config_reg |= GQSPI_GFIFO_WP_HOLD | GQSPI_DFLT_BAUD_RATE_DIV;
	config_reg |= GQSPI_GFIFO_STRT_MODE_MASK;
	if (!priv->io_mode)
		config_reg |= GQSPI_CONFIG_DMA_MODE;

	writel(config_reg, &regs->confr);

	writel(GQSPI_ENABLE_ENABLE_MASK, &regs->enbr);
}

static u32 zynqmp_qspi_bus_select(struct zynqmp_qspi_priv *priv)
{
	u32 gqspi_fifo_reg = 0;

	gqspi_fifo_reg = GQSPI_GFIFO_LOW_BUS |
			 GQSPI_GFIFO_CS_LOWER;

	return gqspi_fifo_reg;
}

static u32 zynqmp_qspi_genfifo_mode(u8 buswidth)
{
	switch (buswidth) {
	case 1:
		return GQSPI_SPI_MODE_SPI;
	case 2:
		return GQSPI_SPI_MODE_DUAL_SPI;
	case 4:
		return GQSPI_SPI_MODE_QSPI;
	default:
		log_warning("Unsupported bus width %u\n", buswidth);
		return GQSPI_SPI_MODE_SPI;
	}
}

static void zynqmp_qspi_fill_gen_fifo(struct zynqmp_qspi_priv *priv,
				      u32 gqspi_fifo_reg)
{
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 config_reg, ier;
	int ret = 0;

	log_content("%s, GFIFO_CMD: 0x%X\n", __func__, gqspi_fifo_reg);

	writel(gqspi_fifo_reg, &regs->genfifo);

	config_reg = readl(&regs->confr);
	/* Manual start if needed */
	config_reg |= GQSPI_STRT_GEN_FIFO;
	writel(config_reg, &regs->confr);

	/* Enable interrupts */
	ier = readl(&regs->ier);
	ier |= GQSPI_IXR_GFEMTY_MASK;
	writel(ier, &regs->ier);

	/* Wait until the gen fifo is empty to write the new command */
	ret = wait_for_bit_le32(&regs->isr, GQSPI_IXR_GFEMTY_MASK, 1,
				GQSPI_TIMEOUT, 1);
	if (ret)
		log_warning("%s, Timeout\n", __func__);
}

static void zynqmp_qspi_chipselect(struct zynqmp_qspi_priv *priv, int is_on)
{
	u32 gqspi_fifo_reg = 0;

	log_debug("%s, assert: %d\r\n", __func__, is_on);

	if (is_on) {
		gqspi_fifo_reg = zynqmp_qspi_bus_select(priv);
		gqspi_fifo_reg |= GQSPI_SPI_MODE_SPI |
				  GQSPI_IMD_DATA_CS_ASSERT;
	} else {
		gqspi_fifo_reg = GQSPI_GFIFO_LOW_BUS;
		gqspi_fifo_reg |= GQSPI_IMD_DATA_CS_DEASSERT;
	}

	zynqmp_qspi_fill_gen_fifo(priv, gqspi_fifo_reg);
}

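/*
 * Program the loopback clock and data tap delays for the requested bus clock.
 * The frequency thresholds differ between ZynqMP and Versal/Versal NET: on
 * ZynqMP the tap-delay bypass register is written through zynqmp_mmio_write(),
 * while on Versal it is written directly with writel().
 */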
static void zynqmp_qspi_set_tapdelay(struct udevice *bus, u32 baudrateval)
{
	struct zynqmp_qspi_plat *plat = dev_get_plat(bus);
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 tapdlybypass = 0, lpbkdlyadj = 0, datadlyadj = 0, clk_rate;
	u32 reqhz = 0;

	clk_rate = plat->frequency;
	reqhz = (clk_rate / (GQSPI_BAUD_DIV_SHIFT << baudrateval));

	log_debug("%s, clk_rate:%d, baudrateval:%d, bus_clk: %d\n",
		  __func__, clk_rate, baudrateval, reqhz);

	if (!(IS_ENABLED(CONFIG_ARCH_VERSAL) ||
	      IS_ENABLED(CONFIG_ARCH_VERSAL_NET))) {
		if (reqhz <= GQSPI_FREQ_40MHZ) {
			tapdlybypass = TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
					TAP_DLY_BYPASS_LQSPI_RX_SHIFT;
		} else if (reqhz <= GQSPI_FREQ_100MHZ) {
			tapdlybypass = TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
					TAP_DLY_BYPASS_LQSPI_RX_SHIFT;
			lpbkdlyadj = GQSPI_LPBK_DLY_ADJ_LPBK_MASK;
			datadlyadj = (GQSPI_USE_DATA_DLY <<
				      GQSPI_USE_DATA_DLY_SHIFT) |
				       (GQSPI_DATA_DLY_ADJ_VALUE <<
					GQSPI_DATA_DLY_ADJ_SHIFT);
		} else if (reqhz <= GQSPI_FREQ_150MHZ) {
			lpbkdlyadj = GQSPI_LPBK_DLY_ADJ_LPBK_MASK |
				      GQSPI_LPBK_DLY_ADJ_DLY_0;
		}
		zynqmp_mmio_write(IOU_TAPDLY_BYPASS_OFST,
				  IOU_TAPDLY_BYPASS_MASK, tapdlybypass);
	} else {
		if (reqhz <= GQSPI_FREQ_37_5MHZ) {
			tapdlybypass = TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
					TAP_DLY_BYPASS_LQSPI_RX_SHIFT;
		} else if (reqhz <= GQSPI_FREQ_100MHZ) {
			tapdlybypass = TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
					TAP_DLY_BYPASS_LQSPI_RX_SHIFT;
			lpbkdlyadj = GQSPI_LPBK_DLY_ADJ_LPBK_MASK;
			datadlyadj = GQSPI_USE_DATA_DLY <<
				      GQSPI_USE_DATA_DLY_SHIFT;
		} else if (reqhz <= GQSPI_FREQ_150MHZ) {
			lpbkdlyadj = GQSPI_LPBK_DLY_ADJ_LPBK_MASK |
				      (GQSPI_LPBK_DLY_ADJ_DLY_1 <<
				       GQSPI_LPBK_DLY_ADJ_DLY_1_SHIFT);
		}
		writel(tapdlybypass, IOU_TAPDLY_BYPASS_OFST);
	}
	writel(lpbkdlyadj, &regs->lpbkdly);
	writel(datadlyadj, &regs->gqspidlyadj);
}

static int zynqmp_qspi_set_speed(struct udevice *bus, uint speed)
{
	struct zynqmp_qspi_plat *plat = dev_get_plat(bus);
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 confr;
	u8 baud_rate_val = 0;

	log_debug("%s, Speed: %d, Max: %d\n", __func__, speed, plat->frequency);

	if (speed > plat->frequency)
		speed = plat->frequency;

	if (plat->speed_hz != speed) {
		/* Set the clock frequency */
		/* If speed == 0, default to lowest speed */
		while ((baud_rate_val < 8) &&
		       ((plat->frequency /
		       (2 << baud_rate_val)) > speed))
			baud_rate_val++;

		if (baud_rate_val > GQSPI_MAX_BAUD_RATE_VAL)
			baud_rate_val = GQSPI_DFLT_BAUD_RATE_VAL;

		plat->speed_hz = plat->frequency / (2 << baud_rate_val);

		confr = readl(&regs->confr);
		confr &= ~GQSPI_BAUD_DIV_MASK;
		confr |= (baud_rate_val << 3);
		writel(confr, &regs->confr);

		zynqmp_qspi_set_tapdelay(bus, baud_rate_val);
	}

	return 0;
}

static int zynqmp_qspi_probe(struct udevice *bus)
{
	struct zynqmp_qspi_plat *plat = dev_get_plat(bus);
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct clk clk;
	unsigned long clock;
	int ret;

	priv->regs = plat->regs;
	priv->dma_regs = plat->dma_regs;
	priv->io_mode = plat->io_mode;

	ret = clk_get_by_index(bus, 0, &clk);
	if (ret < 0) {
		dev_err(bus, "failed to get clock\n");
		return ret;
	}

	clock = clk_get_rate(&clk);
	if (IS_ERR_VALUE(clock)) {
		dev_err(bus, "failed to get rate\n");
		return clock;
	}

	ret = clk_enable(&clk);
	if (ret) {
		dev_err(bus, "failed to enable clock\n");
		return ret;
	}
	plat->frequency = clock;
	plat->speed_hz = plat->frequency / 2;

	/* Initialize the QSPI controller hardware */
	zynqmp_qspi_init_hw(priv);

	log_debug("%s, Reference clock frequency: %ld\n", __func__, clock);

	return 0;
}

static int zynqmp_qspi_set_mode(struct udevice *bus, uint mode)
{
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 confr;

	log_debug("%s, 0x%X\n", __func__, mode);

	/* Set the SPI Clock phase and polarities */
	confr = readl(&regs->confr);
	confr &= ~(GQSPI_CONFIG_CPHA_MASK | GQSPI_CONFIG_CPOL_MASK);

	if (mode & SPI_CPHA)
		confr |= GQSPI_CONFIG_CPHA_MASK;
	if (mode & SPI_CPOL)
		confr |= GQSPI_CONFIG_CPOL_MASK;

	writel(confr, &regs->confr);

	return 0;
}

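/*
 * Push 'size' bytes from priv->tx_buf into the TX FIFO, waiting for the
 * TX-not-full flag before each word and padding the final partial word with
 * ones. Waits for the TX FIFO to drain before returning; returns 0 on
 * success or a negative error code on timeout.
 */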
static int zynqmp_qspi_fill_tx_fifo(struct zynqmp_qspi_priv *priv, u32 size)
{
	u32 data;
	int ret = 0;
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 *buf = (u32 *)priv->tx_buf;
	u32 len = size;

	while (size) {
		ret = wait_for_bit_le32(&regs->isr, GQSPI_IXR_TXNFULL_MASK, 1,
					GQSPI_TIMEOUT, 1);
		if (ret)
			return log_msg_ret("Timeout\n", ret);

		if (size >= 4) {
			writel(*buf, &regs->txd0r);
			buf++;
			size -= 4;
		} else {
			switch (size) {
			case 1:
				data = *((u8 *)buf);
				buf += 1;
				data |= GENMASK(31, 8);
				break;
			case 2:
				data = *((u16 *)buf);
				buf += 2;
				data |= GENMASK(31, 16);
				break;
			case 3:
				data = *buf;
				buf += 3;
				data |= GENMASK(31, 24);
				break;
			}
			writel(data, &regs->txd0r);
			size = 0;
		}
	}

	ret = wait_for_bit_le32(&regs->isr, GQSPI_IXR_TXFIFOEMPTY_MASK, 1,
				GQSPI_TIMEOUT, 1);
	if (ret)
		return log_msg_ret("Timeout\n", ret);

	priv->tx_buf += len;
	return 0;
}

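/*
 * Push the command phase of the current spi-mem operation into the generic
 * FIFO: one entry for the opcode, one entry per address byte (MSB first) and,
 * if required, one entry that clocks out the dummy cycles.
 */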
static void zynqmp_qspi_genfifo_cmd(struct zynqmp_qspi_priv *priv)
{
	const struct spi_mem_op *op = priv->op;
	u32 gen_fifo_cmd;
	u8 i, dummy_cycles, addr;

	log_debug("%s, opcode: 0x%0X, addr.nbytes: %d, dummy.nbytes: %d\r\n",
		  __func__, op->cmd.opcode, op->addr.nbytes, op->dummy.nbytes);

	/* Send opcode */
	gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
	gen_fifo_cmd |= zynqmp_qspi_genfifo_mode(op->cmd.buswidth);
	gen_fifo_cmd |= GQSPI_GFIFO_TX;
	gen_fifo_cmd |= op->cmd.opcode;
	zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);

	/* Send address */
	for (i = 0; i < op->addr.nbytes; i++) {
		addr = op->addr.val >> (8 * (op->addr.nbytes - i - 1));

		gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
		gen_fifo_cmd |= zynqmp_qspi_genfifo_mode(op->addr.buswidth);
		gen_fifo_cmd |= GQSPI_GFIFO_TX;
		gen_fifo_cmd |= addr;

		zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);
	}

	/* Send dummy */
	if (op->dummy.nbytes) {
		dummy_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;

		gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
		gen_fifo_cmd |= zynqmp_qspi_genfifo_mode(op->dummy.buswidth);
		gen_fifo_cmd &= ~(GQSPI_GFIFO_TX | GQSPI_GFIFO_RX);
		gen_fifo_cmd |= GQSPI_GFIFO_DATA_XFR_MASK;
		gen_fifo_cmd |= dummy_cycles;
		zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);
	}
}

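/*
 * The GENFIFO immediate-data field is only 8 bits wide; longer transfers use
 * the exponent mode, where an entry transfers 2^imm bytes. This helper peels
 * off one chunk of the remaining length at a time: for lengths above 255 it
 * picks a set bit (>= bit 8) of priv->len and returns that exponent, otherwise
 * it places the whole remainder in the immediate field and returns the byte
 * count. The GENFIFO word is updated accordingly and priv->len is decremented.
 */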
static u32 zynqmp_qspi_calc_exp(struct zynqmp_qspi_priv *priv,
				u32 *gen_fifo_cmd)
{
	u32 expval = 8;
	u32 len;

	while (1) {
		if (priv->len > 255) {
			if (priv->len & (1 << expval)) {
				*gen_fifo_cmd &= ~GQSPI_GFIFO_IMD_MASK;
				*gen_fifo_cmd |= GQSPI_GFIFO_EXP_MASK;
				*gen_fifo_cmd |= expval;
				priv->len -= (1 << expval);
				return expval;
			}
			expval++;
		} else {
			*gen_fifo_cmd &= ~(GQSPI_GFIFO_IMD_MASK |
					  GQSPI_GFIFO_EXP_MASK);
			*gen_fifo_cmd |= (u8)priv->len;
			len = (u8)priv->len;
			priv->len = 0;
			return len;
		}
	}
}

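/*
 * Transmit priv->len bytes: build a TX data-transfer GENFIFO word, then
 * alternate between queueing a GENFIFO entry and filling the TX FIFO with the
 * matching number of bytes until the whole buffer has been sent.
 */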
static int zynqmp_qspi_genfifo_fill_tx(struct zynqmp_qspi_priv *priv)
{
	u32 gen_fifo_cmd;
	u32 len;
	int ret = 0;

	log_debug("%s, length: %d\r\n", __func__, priv->len);

	gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
	gen_fifo_cmd |= zynqmp_qspi_genfifo_mode(priv->op->data.buswidth);
	gen_fifo_cmd |= GQSPI_GFIFO_TX | GQSPI_GFIFO_DATA_XFR_MASK;

	while (priv->len) {
		len = zynqmp_qspi_calc_exp(priv, &gen_fifo_cmd);
		zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);

		if (gen_fifo_cmd & GQSPI_GFIFO_EXP_MASK)
			ret = zynqmp_qspi_fill_tx_fifo(priv, 1 << len);
		else
			ret = zynqmp_qspi_fill_tx_fifo(priv, len);

		if (ret)
			return ret;
	}
	return ret;
}

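/*
 * Receive data in IO (polled) mode: for each GENFIFO chunk, trigger a manual
 * start and drain the RX FIFO word by word into 'buf' until the expected
 * number of bytes has been read or the poll times out.
 */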
static int zynqmp_qspi_start_io(struct zynqmp_qspi_priv *priv,
				u32 gen_fifo_cmd, u32 *buf)
{
	u32 len;
	u32 config_reg, ier, isr;
	u32 timeout = GQSPI_TIMEOUT;
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 last_bits;
	u32 *traverse = buf;

	while (priv->len) {
		len = zynqmp_qspi_calc_exp(priv, &gen_fifo_cmd);
		/* If the exponent bit is set, the chunk length is 2^len bytes */
		if (gen_fifo_cmd & GQSPI_GFIFO_EXP_MASK)
			priv->bytes_to_receive = (1 << len);
		else
			priv->bytes_to_receive = len;
		zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);

		/* Manual start */
		config_reg = readl(&regs->confr);
		config_reg |= GQSPI_STRT_GEN_FIFO;
		writel(config_reg, &regs->confr);
		/* Enable RX interrupts for IO mode */
		ier = readl(&regs->ier);
		ier |= GQSPI_IXR_ALL_MASK;
		writel(ier, &regs->ier);
		while (priv->bytes_to_receive && timeout) {
			isr = readl(&regs->isr);
			if (isr & GQSPI_IXR_RXNEMTY_MASK) {
				if (priv->bytes_to_receive >= 4) {
					*traverse = readl(&regs->drxr);
					traverse++;
					priv->bytes_to_receive -= 4;
				} else {
					last_bits = readl(&regs->drxr);
					memcpy(traverse, &last_bits,
					       priv->bytes_to_receive);
					priv->bytes_to_receive = 0;
				}
				timeout = GQSPI_TIMEOUT;
			} else {
				udelay(1);
				timeout--;
			}
		}

		if (!timeout) {
			log_warning("%s, Timeout\n", __func__);
			return -ETIMEDOUT;
		}
	}

	return 0;
}

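/*
 * Receive data using the controller's DMA engine. Transfers of 512 MiB or
 * more are processed in 256 MiB chunks. The destination range is invalidated
 * around the transfer, and when 'buf' is a bounce buffer the received data is
 * copied back into priv->rx_buf afterwards.
 */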
static int zynqmp_qspi_start_dma(struct zynqmp_qspi_priv *priv,
				 u32 gen_fifo_cmd, u32 *buf)
{
	unsigned long addr;
	u32 size;
	u32 actuallen = priv->len;
	u32 totallen = priv->len;
	int ret = 0;
	struct zynqmp_qspi_dma_regs *dma_regs = priv->dma_regs;

	while (totallen) {
		if (totallen >= SZ_512M)
			priv->len = SZ_256M;
		else
			priv->len = totallen;

		totallen -= priv->len; /* Remaining bytes left to read */
		actuallen = priv->len; /* Bytes read in this iteration */

		writel(lower_32_bits((unsigned long)buf), &dma_regs->dmadst);
		writel(upper_32_bits((unsigned long)buf) & GENMASK(11, 0),
							&dma_regs->dmadstmsb);
		writel(roundup(priv->len, GQSPI_DMA_ALIGN), &dma_regs->dmasize);
		writel(GQSPI_DMA_DST_I_STS_MASK, &dma_regs->dmaier);
		addr = (unsigned long)buf;
		size = roundup(priv->len, GQSPI_DMA_ALIGN);
		invalidate_dcache_range(addr, addr + size);

		while (priv->len) {
			zynqmp_qspi_calc_exp(priv, &gen_fifo_cmd);
			zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);
		}

		ret = wait_for_bit_le32(&dma_regs->dmaisr,
					GQSPI_DMA_DST_I_STS_DONE, 1,
					GQSPI_TIMEOUT, 1);
		if (ret)
			return log_msg_ret("Timeout\n", ret);

		invalidate_dcache_range(addr, addr + size);

		writel(GQSPI_DMA_DST_I_STS_DONE, &dma_regs->dmaisr);

		if (buf != priv->rx_buf)
			memcpy(priv->rx_buf, buf, actuallen);

		buf = (u32 *)((u8 *)buf + actuallen);
		priv->rx_buf = (u8 *)priv->rx_buf + actuallen;
	}

	return 0;
}

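/*
 * Receive priv->len bytes: build an RX data-transfer GENFIFO word, then hand
 * the transfer to the IO-mode or DMA-mode receive path, going through a
 * cache-aligned bounce buffer when the caller's buffer is unsuitable for DMA.
 */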
static int zynqmp_qspi_genfifo_fill_rx(struct zynqmp_qspi_priv *priv)
{
	u32 gen_fifo_cmd;
	u32 *buf;
	u32 actuallen = priv->len;

	log_debug("%s, length: %d\r\n", __func__, priv->len);

	gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
	gen_fifo_cmd |= zynqmp_qspi_genfifo_mode(priv->op->data.buswidth);
	gen_fifo_cmd |= GQSPI_GFIFO_RX | GQSPI_GFIFO_DATA_XFR_MASK;

	/*
	 * DMA can target the caller's buffer directly only when it is 4-byte
	 * aligned and the length is a multiple of 4 bytes; otherwise receive
	 * into a cache-aligned bounce buffer. IO mode always reads straight
	 * into the caller's buffer.
	 */
	if ((!((unsigned long)priv->rx_buf & (GQSPI_DMA_ALIGN - 1)) &&
	     !(actuallen % GQSPI_DMA_ALIGN)) || priv->io_mode) {
		buf = (u32 *)priv->rx_buf;
		if (priv->io_mode)
			return zynqmp_qspi_start_io(priv, gen_fifo_cmd, buf);
		else
			return zynqmp_qspi_start_dma(priv, gen_fifo_cmd, buf);
	}

	ALLOC_CACHE_ALIGN_BUFFER(u8, tmp, roundup(priv->len, GQSPI_DMA_ALIGN));
	buf = (u32 *)tmp;
	return zynqmp_qspi_start_dma(priv, gen_fifo_cmd, buf);
}

static int zynqmp_qspi_claim_bus(struct udevice *dev)
{
	struct udevice *bus = dev->parent;
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;

	writel(GQSPI_ENABLE_ENABLE_MASK, &regs->enbr);

	return 0;
}

static int zynqmp_qspi_release_bus(struct udevice *dev)
{
	struct udevice *bus = dev->parent;
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;

	writel(~GQSPI_ENABLE_ENABLE_MASK, &regs->enbr);

	return 0;
}

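/*
 * spi-mem exec_op hook: assert the chip select, push the opcode, address and
 * dummy phases through the generic FIFO, run the data phase in the direction
 * requested by the operation, and de-assert the chip select again.
 */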
static int zynqmp_qspi_exec_op(struct spi_slave *slave,
			       const struct spi_mem_op *op)
{
	struct zynqmp_qspi_priv *priv = dev_get_priv(slave->dev->parent);
	int ret = 0;

	priv->op = op;
	priv->tx_buf = op->data.buf.out;
	priv->rx_buf = op->data.buf.in;
	priv->len = op->data.nbytes;

	zynqmp_qspi_chipselect(priv, 1);

	/* Send opcode, addr, dummy */
	zynqmp_qspi_genfifo_cmd(priv);

	/* Request the transfer */
	if (op->data.dir == SPI_MEM_DATA_IN)
		ret = zynqmp_qspi_genfifo_fill_rx(priv);
	else if (op->data.dir == SPI_MEM_DATA_OUT)
		ret = zynqmp_qspi_genfifo_fill_tx(priv);

	zynqmp_qspi_chipselect(priv, 0);

	return ret;
}

static const struct spi_controller_mem_ops zynqmp_qspi_mem_ops = {
	.exec_op = zynqmp_qspi_exec_op,
};

static const struct dm_spi_ops zynqmp_qspi_ops = {
	.claim_bus      = zynqmp_qspi_claim_bus,
	.release_bus    = zynqmp_qspi_release_bus,
	.set_speed      = zynqmp_qspi_set_speed,
	.set_mode       = zynqmp_qspi_set_mode,
	.mem_ops        = &zynqmp_qspi_mem_ops,
};

static const struct udevice_id zynqmp_qspi_ids[] = {
	{ .compatible = "xlnx,zynqmp-qspi-1.0" },
	{ .compatible = "xlnx,versal-qspi-1.0" },
	{ }
};

U_BOOT_DRIVER(zynqmp_qspi) = {
	.name   = "zynqmp_qspi",
	.id     = UCLASS_SPI,
	.of_match = zynqmp_qspi_ids,
	.ops    = &zynqmp_qspi_ops,
	.of_to_plat = zynqmp_qspi_of_to_plat,
	.plat_auto	= sizeof(struct zynqmp_qspi_plat),
	.priv_auto	= sizeof(struct zynqmp_qspi_priv),
	.probe  = zynqmp_qspi_probe,
};