// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2011, Marvell Semiconductor Inc.
 * Lei Wen <leiwen@marvell.com>
 *
 * Back ported to the 8xx platform (from the 8260 platform) by
 * Murray.Jensen@cmst.csiro.au, 27-Jan-01.
 */

#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <mmc.h>
#include <sdhci.h>
#include <time.h>
#include <asm/cache.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/printk.h>
#include <phys2bus.h>
#include <power/regulator.h>

static void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;

	/* Wait max 100 ms */
	timeout = 100;
	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			printf("%s: Reset 0x%x never completed.\n",
			       __func__, (int)mask);
			return;
		}
		timeout--;
		udelay(1000);
	}
}

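/*
 * Copy the command response from the response registers into cmd->response.
 * For 136-bit (R2) responses the controller strips the CRC, so each 32-bit
 * word is shifted left by 8 bits and the low byte is refilled from the
 * neighbouring response register.
 */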
static void sdhci_cmd_done(struct sdhci_host *host, struct mmc_cmd *cmd)
{
	int i;
	if (cmd->resp_type & MMC_RSP_136) {
		/* CRC is stripped so we need to do some shifting. */
		for (i = 0; i < 4; i++) {
			cmd->response[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
			if (i != 3)
				cmd->response[i] |= sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
		}
	} else {
		cmd->response[0] = sdhci_readl(host, SDHCI_RESPONSE);
	}
}

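/*
 * Move one block to or from the controller's buffer data port register
 * (SDHCI_BUFFER), one 32-bit word at a time (PIO mode).
 */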
static void sdhci_transfer_pio(struct sdhci_host *host, struct mmc_data *data)
{
	int i;
	char *offs;
	for (i = 0; i < data->blocksize; i += 4) {
		offs = data->dest + i;
		if (data->flags == MMC_DATA_READ)
			*(u32 *)offs = sdhci_readl(host, SDHCI_BUFFER);
		else
			sdhci_writel(host, *(u32 *)offs, SDHCI_BUFFER);
	}
}

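/*
 * Set up the DMA engine for a transfer: select ADMA32/ADMA64 or SDMA in the
 * host control register, fall back to the bounce (align) buffer when SDMA is
 * used with a buffer that does not meet the controller's alignment
 * requirements, then map the buffer for DMA and program the descriptor/start
 * address registers.
 */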
#if (CONFIG_IS_ENABLED(MMC_SDHCI_SDMA) || CONFIG_IS_ENABLED(MMC_SDHCI_ADMA))
static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data,
			      int *is_aligned, int trans_bytes)
{
	dma_addr_t dma_addr;
	unsigned char ctrl;
	void *buf;

	if (data->flags == MMC_DATA_READ)
		buf = data->dest;
	else
		buf = (void *)data->src;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	if (host->flags & USE_ADMA64)
		ctrl |= SDHCI_CTRL_ADMA64;
	else if (host->flags & USE_ADMA)
		ctrl |= SDHCI_CTRL_ADMA32;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	if (host->flags & USE_SDMA &&
	    (host->force_align_buffer ||
	     (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR &&
	      ((unsigned long)buf & 0x7) != 0x0))) {
		*is_aligned = 0;
		if (data->flags != MMC_DATA_READ)
			memcpy(host->align_buffer, buf, trans_bytes);
		buf = host->align_buffer;
	}

	host->start_addr = dma_map_single(buf, trans_bytes,
					  mmc_get_dma_dir(data));

	if (host->flags & USE_SDMA) {
		dma_addr = dev_phys_to_bus(mmc_to_dev(host->mmc), host->start_addr);
		sdhci_writel(host, dma_addr, SDHCI_DMA_ADDRESS);
	}
#if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA)
	else if (host->flags & (USE_ADMA | USE_ADMA64)) {
		sdhci_prepare_adma_table(host, host->adma_desc_table, data,
					 host->start_addr);

		sdhci_writel(host, lower_32_bits(host->adma_addr),
			     SDHCI_ADMA_ADDRESS);
		if (host->flags & USE_ADMA64)
			sdhci_writel(host, upper_32_bits(host->adma_addr),
				     SDHCI_ADMA_ADDRESS_HI);
	}
#endif
}
#else
static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data,
			      int *is_aligned, int trans_bytes)
{}
#endif
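
/*
 * Poll the interrupt status register until the transfer completes
 * (SDHCI_INT_DATA_END). In PIO mode each available block is moved through
 * sdhci_transfer_pio(); in SDMA mode the DMA address register is rewritten on
 * every boundary interrupt so the transfer continues with the next
 * SDHCI_DEFAULT_BOUNDARY_SIZE chunk.
 */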
static int sdhci_transfer_data(struct sdhci_host *host, struct mmc_data *data)
{
	dma_addr_t start_addr = host->start_addr;
	unsigned int stat, rdy, mask, timeout, block = 0;
	bool transfer_done = false;

	timeout = 1000000;
	rdy = SDHCI_INT_SPACE_AVAIL | SDHCI_INT_DATA_AVAIL;
	mask = SDHCI_DATA_AVAILABLE | SDHCI_SPACE_AVAILABLE;
	do {
		stat = sdhci_readl(host, SDHCI_INT_STATUS);
		if (stat & SDHCI_INT_ERROR) {
			pr_debug("%s: Error detected in status(0x%X)!\n",
				 __func__, stat);
			return -EIO;
		}
		if (!transfer_done && (stat & rdy)) {
			if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & mask))
				continue;
			sdhci_writel(host, rdy, SDHCI_INT_STATUS);
			sdhci_transfer_pio(host, data);
			data->dest += data->blocksize;
			if (++block >= data->blocks) {
				/* Keep polling until SDHCI_INT_DATA_END is
				 * set, even though all blocks have already
				 * been transferred.
				 */
				transfer_done = true;
				continue;
			}
		}
		if ((host->flags & USE_DMA) && !transfer_done &&
		    (stat & SDHCI_INT_DMA_END)) {
			sdhci_writel(host, SDHCI_INT_DMA_END, SDHCI_INT_STATUS);
			if (host->flags & USE_SDMA) {
				start_addr &=
				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1);
				start_addr += SDHCI_DEFAULT_BOUNDARY_SIZE;
				start_addr = dev_phys_to_bus(mmc_to_dev(host->mmc),
							     start_addr);
				sdhci_writel(host, start_addr, SDHCI_DMA_ADDRESS);
			}
		}
		if (timeout-- > 0)
			udelay(10);
		else {
			printf("%s: Transfer data timeout\n", __func__);
			return -ETIMEDOUT;
		}
	} while (!(stat & SDHCI_INT_DATA_END));

#if (CONFIG_IS_ENABLED(MMC_SDHCI_SDMA) || CONFIG_IS_ENABLED(MMC_SDHCI_ADMA))
	dma_unmap_single(host->start_addr, data->blocks * data->blocksize,
			 mmc_get_dma_dir(data));
#endif

	return 0;
}

/*
 * The driver will not send a command while the card is busy, so it must wait
 * for the card to become ready first.
 * Each time the card is still busy when the timeout expires, the (last)
 * timeout value is doubled, but only as long as it does not exceed the
 * globally defined maximum. Every call starts from the last timeout value.
 */
#define SDHCI_CMD_MAX_TIMEOUT			3200
#define SDHCI_CMD_DEFAULT_TIMEOUT		100
#define SDHCI_READ_STATUS_TIMEOUT		1000

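/*
 * Send a command and an optional data transfer: wait for the CMD/DATA inhibit
 * bits to clear, set up transfer mode, block size/count and DMA when needed,
 * write the argument and command registers, then poll the interrupt status
 * for completion. On failure the CMD and DATA lines are reset.
 */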
#ifdef CONFIG_DM_MMC
static int sdhci_send_command(struct udevice *dev, struct mmc_cmd *cmd,
			      struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

#else
static int sdhci_send_command(struct mmc *mmc, struct mmc_cmd *cmd,
			      struct mmc_data *data)
{
#endif
	struct sdhci_host *host = mmc->priv;
	unsigned int stat = 0;
	int ret = 0;
	int trans_bytes = 0, is_aligned = 1;
	u32 mask, flags, mode = 0;
	unsigned int time = 0;
	int mmc_dev = mmc_get_blk_desc(mmc)->devnum;
	ulong start = get_timer(0);

	host->start_addr = 0;
	/* Timeout unit - ms */
	static unsigned int cmd_timeout = SDHCI_CMD_DEFAULT_TIMEOUT;

	mask = SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT;

	/*
	 * We shouldn't wait for data inhibit for stop commands, even
	 * though they might use busy signaling
	 */
	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION ||
	    ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
	      cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (time >= cmd_timeout) {
			printf("%s: MMC: %d busy ", __func__, mmc_dev);
			if (2 * cmd_timeout <= SDHCI_CMD_MAX_TIMEOUT) {
				cmd_timeout += cmd_timeout;
				printf("timeout increasing to: %u ms.\n",
				       cmd_timeout);
			} else {
				puts("timeout.\n");
				return -ECOMM;
			}
		}
		time++;
		udelay(1000);
	}

	sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);

	mask = SDHCI_INT_RESPONSE;
	if ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
	     cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data)
		mask = SDHCI_INT_DATA_AVAIL;

	if (!(cmd->resp_type & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->resp_type & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->resp_type & MMC_RSP_BUSY) {
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
		mask |= SDHCI_INT_DATA_END;
	} else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->resp_type & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;
	if (data || cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
	    cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	/* Set the transfer mode according to the data flag */
	if (data) {
		sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);

		if (!(host->quirks & SDHCI_QUIRK_SUPPORT_SINGLE))
			mode = SDHCI_TRNS_BLK_CNT_EN;
		trans_bytes = data->blocks * data->blocksize;
		if (data->blocks > 1)
			mode |= SDHCI_TRNS_MULTI | SDHCI_TRNS_BLK_CNT_EN;

		if (data->flags == MMC_DATA_READ)
			mode |= SDHCI_TRNS_READ;

		if (host->flags & USE_DMA) {
			mode |= SDHCI_TRNS_DMA;
			sdhci_prepare_dma(host, data, &is_aligned, trans_bytes);
		}

		sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
				data->blocksize),
				SDHCI_BLOCK_SIZE);
		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
		sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
	} else if (cmd->resp_type & MMC_RSP_BUSY) {
		sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
	}

	sdhci_writel(host, cmd->cmdarg, SDHCI_ARGUMENT);
	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->cmdidx, flags), SDHCI_COMMAND);
	start = get_timer(0);
	do {
		stat = sdhci_readl(host, SDHCI_INT_STATUS);
		if (stat & SDHCI_INT_ERROR)
			break;

		if (host->quirks & SDHCI_QUIRK_BROKEN_R1B &&
		    cmd->resp_type & MMC_RSP_BUSY && !data) {
			unsigned int state =
				sdhci_readl(host, SDHCI_PRESENT_STATE);

			if (!(state & SDHCI_DAT_ACTIVE))
				return 0;
		}

		if (get_timer(start) >= SDHCI_READ_STATUS_TIMEOUT) {
			printf("%s: Timeout for status update: %08x %08x\n",
			       __func__, stat, mask);
			return -ETIMEDOUT;
		}
	} while ((stat & mask) != mask);

	if ((stat & (SDHCI_INT_ERROR | mask)) == mask) {
		sdhci_cmd_done(host, cmd);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);
	} else
		ret = -1;

	if (!ret && data)
		ret = sdhci_transfer_data(host, data);

	if (host->quirks & SDHCI_QUIRK_WAIT_SEND_CMD)
		udelay(1000);

	stat = sdhci_readl(host, SDHCI_INT_STATUS);
	sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);
	if (!ret) {
		if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
				!is_aligned && (data->flags == MMC_DATA_READ))
			memcpy(data->dest, host->align_buffer, trans_bytes);
		return 0;
	}

	sdhci_reset(host, SDHCI_RESET_CMD);
	sdhci_reset(host, SDHCI_RESET_DATA);
	if (stat & SDHCI_INT_TIMEOUT)
		return -ETIMEDOUT;
	else
		return -ECOMM;
}

#if defined(CONFIG_DM_MMC) && CONFIG_IS_ENABLED(MMC_SUPPORTS_TUNING)
static int sdhci_execute_tuning(struct udevice *dev, uint opcode)
{
	int err;
	struct mmc *mmc = mmc_get_mmc_dev(dev);
	struct sdhci_host *host = mmc->priv;

	debug("%s\n", __func__);

	if (host->ops && host->ops->platform_execute_tuning) {
		err = host->ops->platform_execute_tuning(mmc, opcode);
		if (err)
			return err;
		return 0;
	}
	return 0;
}
#endif
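
/*
 * Set the SD clock: wait for the CMD/DATA lines to go idle, stop the clock,
 * compute the divider (programmable clock mode or a divided clock for
 * SDHCI 3.00, a power-of-two divider for SDHCI 2.00), enable the internal
 * clock, wait for it to stabilise and finally enable the clock to the card.
 */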
int sdhci_set_clock(struct mmc *mmc, unsigned int clock)
{
	struct sdhci_host *host = mmc->priv;
	unsigned int div, clk = 0, timeout;
	int ret;

	/* Wait max 20 ms */
	timeout = 200;
	while (sdhci_readl(host, SDHCI_PRESENT_STATE) &
			   (SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT)) {
		if (timeout == 0) {
			printf("%s: Timeout waiting for cmd & data inhibit\n",
			       __func__);
			return -EBUSY;
		}

		timeout--;
		udelay(100);
	}

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return 0;

	if (host->ops && host->ops->set_delay) {
		ret = host->ops->set_delay(host);
		if (ret) {
			printf("%s: Error while setting tap delay\n", __func__);
			return ret;
		}
	}

	if (host->ops && host->ops->config_dll) {
		ret = host->ops->config_dll(host, clock, false);
		if (ret) {
			printf("%s: Error while configuring dll\n", __func__);
			return ret;
		}
	}

	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk / div) <= clock)
					break;
			}

			/*
			 * Set Programmable Clock Mode in the Clock
			 * Control register.
			 */
			clk = SDHCI_PROG_CLOCK_MODE;
			div--;
		} else {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock) {
				div = 1;
			} else {
				for (div = 2;
				     div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			div >>= 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		div >>= 1;
	}

	if (host->ops && host->ops->set_clock)
		host->ops->set_clock(host, div);

	if (host->ops && host->ops->config_dll) {
		ret = host->ops->config_dll(host, clock, true);
		if (ret) {
			printf("%s: Error while configuring dll\n", __func__);
			return ret;
		}
	}

	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			printf("%s: Internal clock never stabilised.\n",
			       __func__);
			return -EBUSY;
		}
		timeout--;
		udelay(1000);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
	return 0;
}

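/*
 * Translate the selected voltage bit into the SDHCI power control encoding
 * (1.8V/3.0V/3.3V) and switch bus power on, or off when no usable voltage
 * was selected.
 */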
static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
	u8 pwr = 0;

	if (power != (unsigned short)-1) {
		switch (1 << power) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		}
	}

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		return;
	}

	pwr |= SDHCI_POWER_ON;

	sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
}

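/*
 * Program the UHS mode select field in HOST_CONTROL2 to match the currently
 * selected bus speed mode (SDR12/25/50/104, DDR50, HS400), defaulting to
 * SDR12.
 */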
void sdhci_set_uhs_timing(struct sdhci_host *host)
{
	struct mmc *mmc = host->mmc;
	u32 reg;

	reg = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	reg &= ~SDHCI_CTRL_UHS_MASK;

	switch (mmc->selected_mode) {
	case UHS_SDR25:
	case MMC_HS:
		reg |= SDHCI_CTRL_UHS_SDR25;
		break;
	case UHS_SDR50:
	case MMC_HS_52:
		reg |= SDHCI_CTRL_UHS_SDR50;
		break;
	case UHS_DDR50:
	case MMC_DDR_52:
		reg |= SDHCI_CTRL_UHS_DDR50;
		break;
	case UHS_SDR104:
	case MMC_HS_200:
		reg |= SDHCI_CTRL_UHS_SDR104;
		break;
	case MMC_HS_400:
	case MMC_HS_400_ES:
		reg |= SDHCI_CTRL_HS400;
		break;
	default:
		reg |= SDHCI_CTRL_UHS_SDR12;
	}

	sdhci_writew(host, reg, SDHCI_HOST_CONTROL2);
}

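/*
 * Switch the I/O signalling voltage between 3.3V and 1.8V (only when
 * CONFIG_MMC_IO_VOLTAGE is enabled): reprogram the vqmmc regulator when one
 * is available and, for SD cards, toggle the 1.8V enable bit in
 * HOST_CONTROL2, allowing 5 ms for the supply to settle.
 */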
static void sdhci_set_voltage(struct sdhci_host *host)
{
	if (IS_ENABLED(CONFIG_MMC_IO_VOLTAGE)) {
		struct mmc *mmc = (struct mmc *)host->mmc;
		u32 ctrl;

		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		switch (mmc->signal_voltage) {
		case MMC_SIGNAL_VOLTAGE_330:
#if CONFIG_IS_ENABLED(DM_REGULATOR)
			if (mmc->vqmmc_supply) {
				if (regulator_set_enable_if_allowed(mmc->vqmmc_supply, false)) {
					pr_err("failed to disable vqmmc-supply\n");
					return;
				}

				if (regulator_set_value(mmc->vqmmc_supply, 3300000)) {
					pr_err("failed to set vqmmc-voltage to 3.3V\n");
					return;
				}

				if (regulator_set_enable_if_allowed(mmc->vqmmc_supply, true)) {
					pr_err("failed to enable vqmmc-supply\n");
					return;
				}
			}
#endif
			if (IS_SD(mmc)) {
				ctrl &= ~SDHCI_CTRL_VDD_180;
				sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
			}

			/* Wait for 5 ms */
			mdelay(5);

			/* 3.3V regulator output should be stable within 5 ms */
			if (IS_SD(mmc)) {
				if (ctrl & SDHCI_CTRL_VDD_180) {
					pr_err("3.3V regulator output did not become stable\n");
					return;
				}
			}

			break;
		case MMC_SIGNAL_VOLTAGE_180:
#if CONFIG_IS_ENABLED(DM_REGULATOR)
			if (mmc->vqmmc_supply) {
				if (regulator_set_enable_if_allowed(mmc->vqmmc_supply, false)) {
					pr_err("failed to disable vqmmc-supply\n");
					return;
				}

				if (regulator_set_value(mmc->vqmmc_supply, 1800000)) {
					pr_err("failed to set vqmmc-voltage to 1.8V\n");
					return;
				}

				if (regulator_set_enable_if_allowed(mmc->vqmmc_supply, true)) {
					pr_err("failed to enable vqmmc-supply\n");
					return;
				}
			}
#endif
			if (IS_SD(mmc)) {
				ctrl |= SDHCI_CTRL_VDD_180;
				sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
			}

			/* Wait for 5 ms */
			mdelay(5);

			/* 1.8V regulator output has to be stable within 5 ms */
			if (IS_SD(mmc)) {
				if (!(ctrl & SDHCI_CTRL_VDD_180)) {
					pr_err("1.8V regulator output did not become stable\n");
					return;
				}
			}

			break;
		default:
			/* No signal voltage switch required */
			return;
		}
	}
}

void sdhci_set_control_reg(struct sdhci_host *host)
{
	sdhci_set_voltage(host);
	sdhci_set_uhs_timing(host);
}

#ifdef CONFIG_DM_MMC
static int sdhci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int sdhci_set_ios(struct mmc *mmc)
{
#endif
	u32 ctrl;
	struct sdhci_host *host = mmc->priv;
	bool no_hispd_bit = false;

	if (host->ops && host->ops->set_control_reg)
		host->ops->set_control_reg(host);

	if (mmc->clock != host->clock)
		sdhci_set_clock(mmc, mmc->clock);

	if (mmc->clk_disable)
		sdhci_set_clock(mmc, 0);

	/* Set bus width */
	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (mmc->bus_width == 8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
				(host->quirks & SDHCI_QUIRK_USE_WIDE8))
			ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
				(host->quirks & SDHCI_QUIRK_USE_WIDE8))
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (mmc->bus_width == 4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}

	if ((host->quirks & SDHCI_QUIRK_NO_HISPD_BIT) ||
	    (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE)) {
		ctrl &= ~SDHCI_CTRL_HISPD;
		no_hispd_bit = true;
	}

	if (!no_hispd_bit) {
		if (mmc->selected_mode == MMC_HS ||
		    mmc->selected_mode == SD_HS ||
		    mmc->selected_mode == MMC_HS_52 ||
		    mmc->selected_mode == MMC_DDR_52 ||
		    mmc->selected_mode == MMC_HS_200 ||
		    mmc->selected_mode == MMC_HS_400 ||
		    mmc->selected_mode == MMC_HS_400_ES ||
		    mmc->selected_mode == UHS_SDR25 ||
		    mmc->selected_mode == UHS_SDR50 ||
		    mmc->selected_mode == UHS_SDR104 ||
		    mmc->selected_mode == UHS_DDR50)
			ctrl |= SDHCI_CTRL_HISPD;
		else
			ctrl &= ~SDHCI_CTRL_HISPD;
	}

	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/* If available, call the driver specific "post" set_ios() function */
	if (host->ops && host->ops->set_ios_post)
		return host->ops->set_ios_post(host);

	return 0;
}

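/*
 * One-time controller setup: claim the card-detect GPIO (when driver model
 * GPIO support is enabled), reset the controller, assign or allocate the
 * bounce buffer used for unaligned SDMA, apply bus power and enable the
 * interrupt status bits the driver polls while masking the signal outputs.
 */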
static int sdhci_init(struct mmc *mmc)
{
	struct sdhci_host *host = mmc->priv;
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_GPIO)
	struct udevice *dev = mmc->dev;

	gpio_request_by_name(dev, "cd-gpios", 0,
			     &host->cd_gpio, GPIOD_IS_IN);
#endif

	sdhci_reset(host, SDHCI_RESET_ALL);

#if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
	host->align_buffer = (void *)CONFIG_FIXED_SDHCI_ALIGNED_BUFFER;
	/*
	 * Always use this bounce-buffer when CONFIG_FIXED_SDHCI_ALIGNED_BUFFER
	 * is defined.
	 */
	host->force_align_buffer = true;
#else
	if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) {
		host->align_buffer = memalign(8, 512 * 1024);
		if (!host->align_buffer) {
			printf("%s: Aligned buffer alloc failed!!!\n",
			       __func__);
			return -ENOMEM;
		}
	}
#endif

	sdhci_set_power(host, fls(mmc->cfg->voltages) - 1);

	if (host->ops && host->ops->get_cd)
		host->ops->get_cd(host);

	/* Enable only interrupts served by the SD controller */
	sdhci_writel(host, SDHCI_INT_DATA_MASK | SDHCI_INT_CMD_MASK,
		     SDHCI_INT_ENABLE);
	/* Mask all sdhci interrupt sources */
	sdhci_writel(host, 0x0, SDHCI_SIGNAL_ENABLE);

	return 0;
}

#ifdef CONFIG_DM_MMC
int sdhci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return sdhci_init(mmc);
}

static int sdhci_deferred_probe(struct udevice *dev)
{
	int err;
	struct mmc *mmc = mmc_get_mmc_dev(dev);
	struct sdhci_host *host = mmc->priv;

	if (host->ops && host->ops->deferred_probe) {
		err = host->ops->deferred_probe(host);
		if (err)
			return err;
	}
	return 0;
}

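/*
 * Report card presence: non-removable or polled cards are always treated as
 * present; otherwise use the card-detect GPIO when one was found and fall
 * back to the controller's present-state register.
 */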
static int sdhci_get_cd(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
	struct sdhci_host *host = mmc->priv;
	int value;

	/* If nonremovable, assume that the card is always present. */
	if (mmc->cfg->host_caps & MMC_CAP_NONREMOVABLE)
		return 1;
	/* If polling, assume that the card is always present. */
	if (mmc->cfg->host_caps & MMC_CAP_NEEDS_POLL)
		return 1;

#if CONFIG_IS_ENABLED(DM_GPIO)
	value = dm_gpio_get_value(&host->cd_gpio);
	if (value >= 0) {
		if (mmc->cfg->host_caps & MMC_CAP_CD_ACTIVE_HIGH)
			return !value;
		else
			return value;
	}
#endif
	value = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
		   SDHCI_CARD_PRESENT);
	if (mmc->cfg->host_caps & MMC_CAP_CD_ACTIVE_HIGH)
		return !value;
	else
		return value;
}

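/*
 * Busy-wait until DAT0 reaches the requested level or timeout_us expires
 * (a timeout of 0 means wait forever).
 */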
static int sdhci_wait_dat0(struct udevice *dev, int state,
			   int timeout_us)
{
	int tmp;
	struct mmc *mmc = mmc_get_mmc_dev(dev);
	struct sdhci_host *host = mmc->priv;
	unsigned long timeout = timer_get_us() + timeout_us;

	// readx_poll_timeout is unsuitable because sdhci_readl accepts
	// two arguments
	do {
		tmp = sdhci_readl(host, SDHCI_PRESENT_STATE);
		if (!!(tmp & SDHCI_DATA_0_LVL_MASK) == !!state)
			return 0;
	} while (!timeout_us || !time_after(timer_get_us(), timeout));

	return -ETIMEDOUT;
}

#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
static int sdhci_set_enhanced_strobe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
	struct sdhci_host *host = mmc->priv;

	if (host->ops && host->ops->set_enhanced_strobe)
		return host->ops->set_enhanced_strobe(host);

	return -ENOTSUPP;
}
#endif

const struct dm_mmc_ops sdhci_ops = {
	.send_cmd	= sdhci_send_command,
	.set_ios	= sdhci_set_ios,
	.get_cd		= sdhci_get_cd,
	.deferred_probe	= sdhci_deferred_probe,
#if CONFIG_IS_ENABLED(MMC_SUPPORTS_TUNING)
	.execute_tuning	= sdhci_execute_tuning,
#endif
	.wait_dat0	= sdhci_wait_dat0,
#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	.set_enhanced_strobe = sdhci_set_enhanced_strobe,
#endif
};
#else
static const struct mmc_ops sdhci_ops = {
	.send_cmd	= sdhci_send_command,
	.set_ios	= sdhci_set_ios,
	.init		= sdhci_init,
};
#endif

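/*
 * Fill in the mmc_config from the controller capability registers, optionally
 * adjusted by the "sdhci-caps"/"sdhci-caps-mask" DT properties: DMA support,
 * base clock and clock multiplier, min/max frequency, supported voltages and
 * bus widths, and the UHS/HS200/HS400 speed modes.
 */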
int sdhci_setup_cfg(struct mmc_config *cfg, struct sdhci_host *host,
		u32 f_max, u32 f_min)
{
	u32 caps, caps_1 = 0;
#if CONFIG_IS_ENABLED(DM_MMC)
	u64 dt_caps, dt_caps_mask;

	dt_caps_mask = dev_read_u64_default(host->mmc->dev,
					    "sdhci-caps-mask", 0);
	dt_caps = dev_read_u64_default(host->mmc->dev,
				       "sdhci-caps", 0);
	caps = ~lower_32_bits(dt_caps_mask) &
	       sdhci_readl(host, SDHCI_CAPABILITIES);
	caps |= lower_32_bits(dt_caps);
#else
	caps = sdhci_readl(host, SDHCI_CAPABILITIES);
#endif
	debug("%s, caps: 0x%x\n", __func__, caps);

#if CONFIG_IS_ENABLED(MMC_SDHCI_SDMA)
	if ((caps & SDHCI_CAN_DO_SDMA)) {
		host->flags |= USE_SDMA;
	} else {
		debug("%s: Your controller doesn't support SDMA!!\n",
		      __func__);
	}
#endif
#if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA)
	if (!(caps & SDHCI_CAN_DO_ADMA2)) {
		printf("%s: Your controller doesn't support ADMA!!\n",
		       __func__);
		return -EINVAL;
	}
	if (!host->adma_desc_table) {
		host->adma_desc_table = sdhci_adma_init();
		host->adma_addr = virt_to_phys(host->adma_desc_table);
	}

	if (IS_ENABLED(CONFIG_MMC_SDHCI_ADMA_64BIT))
		host->flags |= USE_ADMA64;
	else
		host->flags |= USE_ADMA;
#endif
	if (host->quirks & SDHCI_QUIRK_REG32_RW)
		host->version =
			sdhci_readl(host, SDHCI_HOST_VERSION - 2) >> 16;
	else
		host->version = sdhci_readw(host, SDHCI_HOST_VERSION);

	cfg->name = host->name;
#ifndef CONFIG_DM_MMC
	cfg->ops = &sdhci_ops;
#endif

	/* Check whether the clock multiplier is supported or not */
	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
#if CONFIG_IS_ENABLED(DM_MMC)
		caps_1 = ~upper_32_bits(dt_caps_mask) &
			 sdhci_readl(host, SDHCI_CAPABILITIES_1);
		caps_1 |= upper_32_bits(dt_caps);
#else
		caps_1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
#endif
		debug("%s, caps_1: 0x%x\n", __func__, caps_1);
		host->clk_mul = (caps_1 & SDHCI_CLOCK_MUL_MASK) >>
				SDHCI_CLOCK_MUL_SHIFT;

		/*
		 * In case the value in Clock Multiplier is 0, then programmable
		 * clock mode is not supported, otherwise the actual clock
		 * multiplier is one more than the value of Clock Multiplier
		 * in the Capabilities Register.
		 */
		if (host->clk_mul)
			host->clk_mul += 1;
	}

	if (host->max_clk == 0) {
		if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
			host->max_clk = (caps & SDHCI_CLOCK_V3_BASE_MASK) >>
				SDHCI_CLOCK_BASE_SHIFT;
		else
			host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) >>
				SDHCI_CLOCK_BASE_SHIFT;
		host->max_clk *= 1000000;
		if (host->clk_mul)
			host->max_clk *= host->clk_mul;
	}
	if (host->max_clk == 0) {
		printf("%s: Hardware doesn't specify base clock frequency\n",
		       __func__);
		return -EINVAL;
	}
	if (f_max && (f_max < host->max_clk))
		cfg->f_max = f_max;
	else
		cfg->f_max = host->max_clk;
	if (f_min)
		cfg->f_min = f_min;
	else {
		if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
			cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_300;
		else
			cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_200;
	}
	cfg->voltages = 0;
	if (caps & SDHCI_CAN_VDD_330)
		cfg->voltages |= MMC_VDD_32_33 | MMC_VDD_33_34;
	if (caps & SDHCI_CAN_VDD_300)
		cfg->voltages |= MMC_VDD_29_30 | MMC_VDD_30_31;
	if (caps & SDHCI_CAN_VDD_180)
		cfg->voltages |= MMC_VDD_165_195;

	if (host->quirks & SDHCI_QUIRK_BROKEN_VOLTAGE)
		cfg->voltages |= host->voltages;

	if (caps & SDHCI_CAN_DO_HISPD)
		cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;

	cfg->host_caps |= MMC_MODE_4BIT;

	/* Since Host Controller spec version 3.00 */
	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
		if (!(caps & SDHCI_CAN_DO_8BIT))
			cfg->host_caps &= ~MMC_MODE_8BIT;
	}

	if (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE) {
		cfg->host_caps &= ~MMC_MODE_HS;
		cfg->host_caps &= ~MMC_MODE_HS_52MHz;
	}

	if (!(cfg->voltages & MMC_VDD_165_195) ||
	    (host->quirks & SDHCI_QUIRK_NO_1_8_V))
		caps_1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			    SDHCI_SUPPORT_DDR50);

	if (caps_1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
		      SDHCI_SUPPORT_DDR50))
		cfg->host_caps |= MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25);

	if (caps_1 & SDHCI_SUPPORT_SDR104) {
		cfg->host_caps |= MMC_CAP(UHS_SDR104) | MMC_CAP(UHS_SDR50);
		/*
		 * SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		cfg->host_caps |= MMC_CAP(MMC_HS_200);
	} else if (caps_1 & SDHCI_SUPPORT_SDR50) {
		cfg->host_caps |= MMC_CAP(UHS_SDR50);
	}

	if ((host->quirks & SDHCI_QUIRK_CAPS_BIT63_FOR_HS400) &&
	    (caps_1 & SDHCI_SUPPORT_HS400))
		cfg->host_caps |= MMC_CAP(MMC_HS_400);

	if (caps_1 & SDHCI_SUPPORT_DDR50)
		cfg->host_caps |= MMC_CAP(UHS_DDR50);

	if (host->host_caps)
		cfg->host_caps |= host->host_caps;

	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;

	return 0;
}

#ifdef CONFIG_BLK
int sdhci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
int add_sdhci(struct sdhci_host *host, u32 f_max, u32 f_min)
{
	int ret;

	ret = sdhci_setup_cfg(&host->cfg, host, f_max, f_min);
	if (ret)
		return ret;

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL) {
		printf("%s: mmc create fail!\n", __func__);
		return -ENOMEM;
	}

	return 0;
}
#endif