1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Copyright 2008, Freescale Semiconductor, Inc
4 * Copyright 2020 NXP
5 * Andy Fleming
6 *
7 * Based vaguely on the Linux code
8 */
9
10#include <config.h>
11#include <common.h>
12#include <blk.h>
13#include <command.h>
14#include <dm.h>
15#include <log.h>
16#include <dm/device-internal.h>
17#include <errno.h>
18#include <mmc.h>
19#include <part.h>
20#include <linux/bitops.h>
21#include <linux/delay.h>
22#include <linux/printk.h>
23#include <power/regulator.h>
24#include <malloc.h>
25#include <memalign.h>
26#include <linux/list.h>
27#include <div64.h>
28#include "mmc_private.h"
29
30#define DEFAULT_CMD6_TIMEOUT_MS  500
31
32static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
33
34#if !CONFIG_IS_ENABLED(DM_MMC)
35
36static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
37{
38	if (mmc->cfg->ops->wait_dat0)
39		return mmc->cfg->ops->wait_dat0(mmc, state, timeout_us);
40
41	return -ENOSYS;
42}
43
/* Board hook for the write-protect state; -1 means "not implemented here" */
__weak int board_mmc_getwp(struct mmc *mmc)
{
	return -1;
}
48
49int mmc_getwp(struct mmc *mmc)
50{
51	int wp;
52
53	wp = board_mmc_getwp(mmc);
54
55	if (wp < 0) {
56		if (mmc->cfg->ops->getwp)
57			wp = mmc->cfg->ops->getwp(mmc);
58		else
59			wp = 0;
60	}
61
62	return wp;
63}
64
/* Board hook for the card-detect state; -1 means "not implemented here" */
__weak int board_mmc_getcd(struct mmc *mmc)
{
	return -1;
}
69#endif
70
71#ifdef CONFIG_MMC_TRACE
/* Log the command index and argument before sending (CONFIG_MMC_TRACE) */
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
{
	printf("CMD_SEND:%d\n", cmd->cmdidx);
	printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
}
77
/*
 * mmmc_trace_after_send() - log the result of a command (CONFIG_MMC_TRACE)
 *
 * Prints the error code when @ret is non-zero, otherwise dumps the raw
 * response words according to the response type of @cmd.  For R2 (136-bit)
 * responses the data is additionally dumped byte by byte, most significant
 * byte of each 32-bit word first.
 */
void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;

	if (ret) {
		printf("\t\tRET\t\t\t %d\n", ret);
	} else {
		switch (cmd->resp_type) {
		case MMC_RSP_NONE:
			printf("\t\tMMC_RSP_NONE\n");
			break;
		case MMC_RSP_R1:
			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R1b:
			printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R2:
			printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
				cmd->response[0]);
			printf("\t\t          \t\t 0x%08x \n",
				cmd->response[1]);
			printf("\t\t          \t\t 0x%08x \n",
				cmd->response[2]);
			printf("\t\t          \t\t 0x%08x \n",
				cmd->response[3]);
			printf("\n");
			printf("\t\t\t\t\tDUMPING DATA\n");
			for (i = 0; i < 4; i++) {
				int j;
				printf("\t\t\t\t\t%03d - ", i*4);
				/* walk each response word from its top byte down */
				ptr = (u8 *)&cmd->response[i];
				ptr += 3;
				for (j = 0; j < 4; j++)
					printf("%02x ", *ptr--);
				printf("\n");
			}
			break;
		case MMC_RSP_R3:
			printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
				cmd->response[0]);
			break;
		default:
			printf("\t\tERROR MMC rsp not supported\n");
			break;
		}
	}
}
129
130void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
131{
132	int status;
133
134	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
135	printf("CURR STATE:%d\n", status);
136}
137#endif
138
139#if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG) || CONFIG_VAL(LOGLEVEL) >= LOGL_DEBUG
140const char *mmc_mode_name(enum bus_mode mode)
141{
142	static const char *const names[] = {
143	      [MMC_LEGACY]	= "MMC legacy",
144	      [MMC_HS]		= "MMC High Speed (26MHz)",
145	      [SD_HS]		= "SD High Speed (50MHz)",
146	      [UHS_SDR12]	= "UHS SDR12 (25MHz)",
147	      [UHS_SDR25]	= "UHS SDR25 (50MHz)",
148	      [UHS_SDR50]	= "UHS SDR50 (100MHz)",
149	      [UHS_SDR104]	= "UHS SDR104 (208MHz)",
150	      [UHS_DDR50]	= "UHS DDR50 (50MHz)",
151	      [MMC_HS_52]	= "MMC High Speed (52MHz)",
152	      [MMC_DDR_52]	= "MMC DDR52 (52MHz)",
153	      [MMC_HS_200]	= "HS200 (200MHz)",
154	      [MMC_HS_400]	= "HS400 (200MHz)",
155	      [MMC_HS_400_ES]	= "HS400ES (200MHz)",
156	};
157
158	if (mode >= MMC_MODES_END)
159		return "Unknown mode";
160	else
161		return names[mode];
162}
163#endif
164
165static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
166{
167	static const int freqs[] = {
168	      [MMC_LEGACY]	= 25000000,
169	      [MMC_HS]		= 26000000,
170	      [SD_HS]		= 50000000,
171	      [MMC_HS_52]	= 52000000,
172	      [MMC_DDR_52]	= 52000000,
173	      [UHS_SDR12]	= 25000000,
174	      [UHS_SDR25]	= 50000000,
175	      [UHS_SDR50]	= 100000000,
176	      [UHS_DDR50]	= 50000000,
177	      [UHS_SDR104]	= 208000000,
178	      [MMC_HS_200]	= 200000000,
179	      [MMC_HS_400]	= 200000000,
180	      [MMC_HS_400_ES]	= 200000000,
181	};
182
183	if (mode == MMC_LEGACY)
184		return mmc->legacy_speed;
185	else if (mode >= MMC_MODES_END)
186		return 0;
187	else
188		return freqs[mode];
189}
190
/*
 * mmc_select_mode() - record the selected bus mode in the mmc struct
 *
 * Updates selected_mode, the nominal transfer speed for that mode and the
 * DDR flag.  Does not reconfigure the host controller; always returns 0.
 */
static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
{
	mmc->selected_mode = mode;
	mmc->tran_speed = mmc_mode2freq(mmc, mode);
	mmc->ddr_mode = mmc_is_mode_ddr(mode);
	pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
		 mmc->tran_speed / 1000000);
	return 0;
}
200
201#if !CONFIG_IS_ENABLED(DM_MMC)
202int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
203{
204	int ret;
205
206	mmmc_trace_before_send(mmc, cmd);
207	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
208	mmmc_trace_after_send(mmc, cmd, ret);
209
210	return ret;
211}
212#endif
213
214/**
215 * mmc_send_cmd_retry() - send a command to the mmc device, retrying on error
216 *
217 * @dev:	device to receive the command
218 * @cmd:	command to send
219 * @data:	additional data to send/receive
220 * @retries:	how many times to retry; mmc_send_cmd is always called at least
221 *              once
222 * Return: 0 if ok, -ve on error
223 */
224static int mmc_send_cmd_retry(struct mmc *mmc, struct mmc_cmd *cmd,
225			      struct mmc_data *data, uint retries)
226{
227	int ret;
228
229	do {
230		ret = mmc_send_cmd(mmc, cmd, data);
231	} while (ret && retries--);
232
233	return ret;
234}
235
236/**
237 * mmc_send_cmd_quirks() - send a command to the mmc device, retrying if a
238 *                         specific quirk is enabled
239 *
240 * @dev:	device to receive the command
241 * @cmd:	command to send
242 * @data:	additional data to send/receive
243 * @quirk:	retry only if this quirk is enabled
244 * @retries:	how many times to retry; mmc_send_cmd is always called at least
245 *              once
246 * Return: 0 if ok, -ve on error
247 */
248static int mmc_send_cmd_quirks(struct mmc *mmc, struct mmc_cmd *cmd,
249			       struct mmc_data *data, u32 quirk, uint retries)
250{
251	if (IS_ENABLED(CONFIG_MMC_QUIRKS) && mmc->quirks & quirk)
252		return mmc_send_cmd_retry(mmc, cmd, data, retries);
253	else
254		return mmc_send_cmd(mmc, cmd, data);
255}
256
257int mmc_send_status(struct mmc *mmc, unsigned int *status)
258{
259	struct mmc_cmd cmd;
260	int ret;
261
262	cmd.cmdidx = MMC_CMD_SEND_STATUS;
263	cmd.resp_type = MMC_RSP_R1;
264	if (!mmc_host_is_spi(mmc))
265		cmd.cmdarg = mmc->rca << 16;
266
267	ret = mmc_send_cmd_retry(mmc, &cmd, NULL, 4);
268	mmc_trace_state(mmc, &cmd);
269	if (!ret)
270		*status = cmd.response[0];
271
272	return ret;
273}
274
/*
 * mmc_poll_for_busy() - wait for the card to leave the busy/programming state
 *
 * @mmc:	mmc device
 * @timeout_ms:	maximum time to wait, in milliseconds
 *
 * Uses the host's DAT0 polling hook when available; otherwise falls back
 * to polling CMD13 (SEND_STATUS) once per millisecond.
 * Return: 0 when ready, -ECOMM on a status error, -ETIMEDOUT on timeout,
 *	   or a negative error from the status command
 */
int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
{
	unsigned int status;
	int err;

	/* hardware DAT0 polling, if the driver implements it */
	err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
	if (err != -ENOSYS)
		return err;

	while (1) {
		err = mmc_send_status(mmc, &status);
		if (err)
			return err;

		/* ready for data and no longer programming: done */
		if ((status & MMC_STATUS_RDY_FOR_DATA) &&
		    (status & MMC_STATUS_CURR_STATE) !=
		     MMC_STATE_PRG)
			break;

		if (status & MMC_STATUS_MASK) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("Status Error: 0x%08x\n", status);
#endif
			return -ECOMM;
		}

		if (timeout_ms-- <= 0)
			break;

		udelay(1000);
	}

	/* timeout_ms went negative only if the loop above ran out of time */
	if (timeout_ms <= 0) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("Timeout waiting card ready\n");
#endif
		return -ETIMEDOUT;
	}

	return 0;
}
316
317int mmc_set_blocklen(struct mmc *mmc, int len)
318{
319	struct mmc_cmd cmd;
320
321	if (mmc->ddr_mode)
322		return 0;
323
324	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
325	cmd.resp_type = MMC_RSP_R1;
326	cmd.cmdarg = len;
327
328	return mmc_send_cmd_quirks(mmc, &cmd, NULL,
329				   MMC_QUIRK_RETRY_SET_BLOCKLEN, 4);
330}
331
332#ifdef MMC_SUPPORTS_TUNING
/*
 * Expected tuning block (64 bytes) for a 4-bit bus; mmc_send_tuning()
 * compares the data returned by the tuning command against this pattern.
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};
343
/*
 * Expected tuning block (128 bytes) for an 8-bit bus; mmc_send_tuning()
 * compares the data returned by the tuning command against this pattern.
 */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
362
/*
 * mmc_send_tuning() - execute one tuning command and verify the data block
 *
 * @mmc:	mmc device
 * @opcode:	tuning command index to issue (chosen by the caller for the
 *		card type, e.g. CMD19/CMD21 - TODO confirm per caller)
 *
 * Reads a single tuning block and compares it against the expected pattern
 * for the current bus width (4 or 8 bits).
 * Return: 0 if the pattern matched, -EINVAL for other bus widths,
 *	   -EIO on mismatch, or the error from mmc_send_cmd()
 */
int mmc_send_tuning(struct mmc *mmc, u32 opcode)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	const u8 *tuning_block_pattern;
	int size, err;

	if (mmc->bus_width == 8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (mmc->bus_width == 4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else {
		return -EINVAL;
	}

	ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);

	cmd.cmdidx = opcode;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	data.dest = (void *)data_buf;
	data.blocks = 1;
	data.blocksize = size;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err)
		return err;

	if (memcmp(data_buf, tuning_block_pattern, size))
		return -EIO;

	return 0;
}
400#endif
401
402int mmc_send_stop_transmission(struct mmc *mmc, bool write)
403{
404	struct mmc_cmd cmd;
405
406	cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
407	cmd.cmdarg = 0;
408	/*
409	 * JEDEC Standard No. 84-B51 Page 126
410	 * CMD12 STOP_TRANSMISSION R1/R1b[3]
411	 * NOTE 3 R1 for read cases and R1b for write cases.
412	 *
413	 * Physical Layer Simplified Specification Version 9.00
414	 * 7.3.1.3 Detailed Command Description
415	 * CMD12 R1b
416	 */
417	cmd.resp_type = (IS_SD(mmc) || write) ? MMC_RSP_R1b : MMC_RSP_R1;
418
419	return mmc_send_cmd(mmc, &cmd, NULL);
420}
421
/*
 * mmc_read_blocks() - read a contiguous run of blocks from the card
 *
 * @mmc:	mmc device
 * @dst:	destination buffer (blkcnt * read_bl_len bytes)
 * @start:	first block number
 * @blkcnt:	number of blocks to read
 * Return: number of blocks read (blkcnt) on success, 0 on failure
 */
static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
			   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	if (blkcnt > 1)
		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
	else
		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;

	/* high-capacity cards are block-addressed, others byte-addressed */
	if (mmc->high_capacity)
		cmd.cmdarg = start;
	else
		cmd.cmdarg = start * mmc->read_bl_len;

	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	if (mmc_send_cmd(mmc, &cmd, &data))
		return 0;

	/* multi-block reads must be terminated with CMD12 */
	if (blkcnt > 1) {
		if (mmc_send_stop_transmission(mmc, false)) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("mmc fail to send stop cmd\n");
#endif
			return 0;
		}
	}

	return blkcnt;
}
459
460#if !CONFIG_IS_ENABLED(DM_MMC)
461static int mmc_get_b_max(struct mmc *mmc, void *dst, lbaint_t blkcnt)
462{
463	if (mmc->cfg->ops->get_b_max)
464		return mmc->cfg->ops->get_b_max(mmc, dst, blkcnt);
465	else
466		return mmc->cfg->b_max;
467}
468#endif
469
/*
 * mmc_bread() - block-device read entry point for MMC
 *
 * Reads @blkcnt blocks starting at @start into @dst, after selecting the
 * hardware partition recorded in the block descriptor.  The transfer is
 * split into chunks of at most b_max blocks.
 * Return: number of blocks read, or 0 on any error (block-layer convention)
 */
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_plat(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;
	uint b_max;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	/* make sure the requested hardware partition is selected */
	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	/* reject reads past the end of the device */
	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		pr_debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	b_max = mmc_get_b_max(mmc, dst, blkcnt);

	/* transfer in chunks the host can handle */
	do {
		cur = (blocks_todo > b_max) ? b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			pr_debug("%s: Failed to read blocks\n", __func__);
			return 0;
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}
528
529static int mmc_go_idle(struct mmc *mmc)
530{
531	struct mmc_cmd cmd;
532	int err;
533
534	udelay(1000);
535
536	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
537	cmd.cmdarg = 0;
538	cmd.resp_type = MMC_RSP_NONE;
539
540	err = mmc_send_cmd(mmc, &cmd, NULL);
541
542	if (err)
543		return err;
544
545	udelay(2000);
546
547	return 0;
548}
549
550#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * mmc_switch_voltage() - switch SD signalling to the requested voltage
 *
 * @mmc:	mmc device
 * @signal_voltage: MMC_SIGNAL_VOLTAGE_330 or MMC_SIGNAL_VOLTAGE_180
 *
 * For a 1.8V request, sends CMD11 and performs the clock-gating sequence
 * the SD spec requires around the switch; for 3.3V only the host-side
 * voltage is changed (no CMD11 exists for switching back).
 * Return: 0 on success, -EIO if the card flagged an error, -ETIMEDOUT when
 *	   the card did not drive/release DAT as expected, or another error
 */
static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
{
	struct mmc_cmd cmd;
	int err = 0;

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return mmc_set_signal_voltage(mmc, signal_voltage);

	cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 100 us to be sure
	 */
	err = mmc_wait_dat0(mmc, 0, 100);
	if (err == -ENOSYS)
		udelay(100);
	else if (err)
		return -ETIMEDOUT;

	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);

	err = mmc_set_signal_voltage(mmc, signal_voltage);
	if (err)
		return err;

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mdelay(10);
	mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low. Wait for at least 1 ms according to spec
	 */
	err = mmc_wait_dat0(mmc, 1, 1000);
	if (err == -ENOSYS)
		udelay(1000);
	else if (err)
		return -ETIMEDOUT;

	return 0;
}
610#endif
611
/*
 * sd_send_op_cond() - power up an SD card with ACMD41
 *
 * @mmc:	mmc device
 * @uhs_en:	request 1.8V signalling (S18R) so UHS modes become possible
 *
 * Loops on CMD55+ACMD41 until the card reports power-up completion
 * (OCR busy bit reads back set), then records the SD version, OCR,
 * high-capacity status and RCA, and performs the 1.8V switch if the card
 * accepted the request.
 * Return: 0 on success, -EOPNOTSUPP if the card never leaves busy,
 *	   or a command error
 */
static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some reserved bits
		 * in the ocr are set. However, Some controller
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low voltages SD card is not yet
		 * specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
			(mmc->cfg->voltages & 0xff8000);

		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		if (uhs_en)
			cmd.cmdarg |= OCR_S18R;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		/* power-up complete when the busy bit reads back set */
		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* 0x41000000 = bits 30 and 24: CCS plus S18A, i.e. the card
	 * accepted the 1.8V request - perform the voltage switch */
	if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
	    == 0x41000000) {
		err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
		if (err)
			return err;
	}
#endif

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}
691
692static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
693{
694	struct mmc_cmd cmd;
695	int err;
696
697	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
698	cmd.resp_type = MMC_RSP_R3;
699	cmd.cmdarg = 0;
700	if (use_arg && !mmc_host_is_spi(mmc))
701		cmd.cmdarg = OCR_HCS |
702			(mmc->cfg->voltages &
703			(mmc->ocr & OCR_VOLTAGE_MASK)) |
704			(mmc->ocr & OCR_ACCESS_MODE);
705
706	err = mmc_send_cmd(mmc, &cmd, NULL);
707	if (err)
708		return err;
709	mmc->ocr = cmd.response[0];
710	return 0;
711}
712
/*
 * mmc_send_op_cond() - start eMMC power-up negotiation with CMD1
 *
 * The first iteration sends CMD1 with a zero argument to query the card's
 * OCR; later iterations feed the negotiated voltage window back.  Sets
 * op_cond_pending so mmc_complete_op_cond() finishes the handshake later.
 * Return: 0 on success, -ETIMEDOUT or a command error otherwise
 */
static int mmc_send_op_cond(struct mmc *mmc)
{
	int err, i;
	int timeout = 1000;
	uint start;

	/* Some cards seem to need this */
	mmc_go_idle(mmc);

	start = get_timer(0);
	/* Asking to the card its capabilities */
	for (i = 0; ; i++) {
		err = mmc_send_op_cond_iter(mmc, i != 0);
		if (err)
			return err;

		/* exit if not busy (flag seems to be inverted) */
		if (mmc->ocr & OCR_BUSY)
			break;

		if (get_timer(start) > timeout)
			return -ETIMEDOUT;
		udelay(100);
	}
	mmc->op_cond_pending = 1;
	return 0;
}
740
/*
 * mmc_complete_op_cond() - finish the CMD1 power-up started earlier
 *
 * If the card has not yet signalled power-up completion (OCR_BUSY clear),
 * keeps issuing CMD1 until it does.  For SPI hosts the OCR is then read
 * explicitly via CMD58.  Finally records the high-capacity status and
 * assigns the default RCA of 1.
 * Return: 0 on success, -EOPNOTSUPP on power-up timeout, or a command error
 */
static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	ulong start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	/* the exact version is refined later from CSD/EXT_CSD */
	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}
786
787
788int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
789{
790	struct mmc_cmd cmd;
791	struct mmc_data data;
792	int err;
793
794	/* Get the Card Status Register */
795	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
796	cmd.resp_type = MMC_RSP_R1;
797	cmd.cmdarg = 0;
798
799	data.dest = (char *)ext_csd;
800	data.blocks = 1;
801	data.blocksize = MMC_MAX_BLOCK_LEN;
802	data.flags = MMC_DATA_READ;
803
804	err = mmc_send_cmd(mmc, &cmd, &data);
805
806	return err;
807}
808
/*
 * __mmc_switch() - write one EXT_CSD byte via CMD6 and wait for completion
 *
 * @mmc:	mmc device
 * @set:	command set selector
 * @index:	EXT_CSD byte index to write
 * @value:	value to write
 * @send_status: if true, poll CMD13 until the card leaves the busy state;
 *		 if false and DAT0 polling is unavailable, just wait out the
 *		 stated switch timeout
 * Return: 0 on success, -EIO on a switch error, -ETIMEDOUT, or another
 *	   negative error code
 */
static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
			bool send_status)
{
	unsigned int status, start;
	struct mmc_cmd cmd;
	int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
	bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
			      (index == EXT_CSD_PART_CONF);
	int ret;

	/* prefer the timeouts the card advertises in EXT_CSD, when known */
	if (mmc->gen_cmd6_time)
		timeout_ms = mmc->gen_cmd6_time * 10;

	if (is_part_switch  && mmc->part_switch_time)
		timeout_ms = mmc->part_switch_time * 10;

	cmd.cmdidx = MMC_CMD_SWITCH;
	cmd.resp_type = MMC_RSP_R1b;
	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
				 (index << 16) |
				 (value << 8);

	ret = mmc_send_cmd_retry(mmc, &cmd, NULL, 3);
	if (ret)
		return ret;

	start = get_timer(0);

	/* poll dat0 for ready/busy status */
	ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
	if (ret && ret != -ENOSYS)
		return ret;

	/*
	 * In cases when we are neither allowed to poll by using CMD13 nor
	 * capable of polling by using mmc_wait_dat0, rely on waiting the
	 * stated timeout to be sufficient.
	 */
	if (ret == -ENOSYS && !send_status) {
		mdelay(timeout_ms);
		return 0;
	}

	if (!send_status)
		return 0;

	/* Finally wait until the card is ready or indicates a failure
	 * to switch. It doesn't hurt to use CMD13 here even if send_status
	 * is false, because by now (after 'timeout_ms' ms) the bus should be
	 * reliable.
	 */
	do {
		ret = mmc_send_status(mmc, &status);

		if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
			pr_debug("switch failed %d/%d/0x%x !\n", set, index,
				 value);
			return -EIO;
		}
		if (!ret && (status & MMC_STATUS_RDY_FOR_DATA) &&
		    (status & MMC_STATUS_CURR_STATE) == MMC_STATE_TRANS)
			return 0;
		udelay(100);
	} while (get_timer(start) < timeout_ms);

	return -ETIMEDOUT;
}
876
/* Write one EXT_CSD byte via CMD6 with CMD13 completion polling enabled */
int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
{
	return __mmc_switch(mmc, set, index, value, true);
}
881
/* Write 1 to the EXT_CSD BOOT_WP byte (power-on write protection for the
 * boot area; see mmc_boot_wp_single_partition() for per-partition control)
 */
int mmc_boot_wp(struct mmc *mmc)
{
	return mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 1);
}
886
887int mmc_boot_wp_single_partition(struct mmc *mmc, int partition)
888{
889	u8 value;
890	int ret;
891
892	value = EXT_CSD_BOOT_WP_B_PWR_WP_EN;
893
894	if (partition == 0) {
895		value |= EXT_CSD_BOOT_WP_B_SEC_WP_SEL;
896		ret = mmc_switch(mmc,
897				 EXT_CSD_CMD_SET_NORMAL,
898				 EXT_CSD_BOOT_WP,
899				 value);
900	} else if (partition == 1) {
901		value |= EXT_CSD_BOOT_WP_B_SEC_WP_SEL;
902		value |= EXT_CSD_BOOT_WP_B_PWR_WP_SEC_SEL;
903		ret = mmc_switch(mmc,
904				 EXT_CSD_CMD_SET_NORMAL,
905				 EXT_CSD_BOOT_WP,
906				 value);
907	} else {
908		ret = mmc_boot_wp(mmc);
909	}
910
911	return ret;
912}
913
914#if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * mmc_set_card_speed() - program the eMMC HS_TIMING field for a bus mode
 *
 * @mmc:	mmc device
 * @mode:	target bus mode
 * @hsdowngrade: true when downgrading from HS200/HS400 to HS; CMD13 status
 *		 polling is then skipped and the host clock is dropped to HS
 *		 right after the switch, because the card is still clocked
 *		 faster than the new mode allows
 * Return: 0 on success, -EINVAL for an unsupported mode, -ENOTSUPP if the
 *	   card did not accept high-speed timing, or a switch error
 */
static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
			      bool hsdowngrade)
{
	int err;
	int speed_bits;

	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);

	switch (mode) {
	case MMC_HS:
	case MMC_HS_52:
	case MMC_DDR_52:
		speed_bits = EXT_CSD_TIMING_HS;
		break;
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	case MMC_HS_200:
		speed_bits = EXT_CSD_TIMING_HS200;
		break;
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	case MMC_HS_400:
		speed_bits = EXT_CSD_TIMING_HS400;
		break;
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	case MMC_HS_400_ES:
		speed_bits = EXT_CSD_TIMING_HS400;
		break;
#endif
	case MMC_LEGACY:
		speed_bits = EXT_CSD_TIMING_LEGACY;
		break;
	default:
		return -EINVAL;
	}

	err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			   speed_bits, !hsdowngrade);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
    CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	/*
	 * In case the eMMC is in HS200/HS400 mode and we are downgrading
	 * to HS mode, the card clock are still running much faster than
	 * the supported HS mode clock, so we can not reliably read out
	 * Extended CSD. Reconfigure the controller to run at HS mode.
	 */
	if (hsdowngrade) {
		mmc_select_mode(mmc, MMC_HS);
		mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
	}
#endif

	if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
		/* Now check to see that it worked */
		err = mmc_send_ext_csd(mmc, test_csd);
		if (err)
			return err;

		/* No high-speed support */
		if (!test_csd[EXT_CSD_HS_TIMING])
			return -ENOTSUPP;
	}

	return 0;
}
983
/*
 * mmc_get_capabilities() - derive the card capability mask from EXT_CSD
 *
 * Fills mmc->card_caps (bus widths and speed modes) and mmc->cardtype from
 * the CARD_TYPE byte of the previously read EXT_CSD.  SPI hosts and cards
 * older than eMMC 4.0 only get the legacy capabilities.
 * Return: 0 on success, -ENOTSUPP if no EXT_CSD was cached
 */
static int mmc_get_capabilities(struct mmc *mmc)
{
	u8 *ext_csd = mmc->ext_csd;
	char cardtype;

	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!ext_csd) {
		pr_err("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	cardtype = ext_csd[EXT_CSD_CARD_TYPE];
	mmc->cardtype = cardtype;

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
			EXT_CSD_CARD_TYPE_HS200_1_8V)) {
		mmc->card_caps |= MMC_MODE_HS200;
	}
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
	CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
			EXT_CSD_CARD_TYPE_HS400_1_8V)) {
		mmc->card_caps |= MMC_MODE_HS400;
	}
#endif
	if (cardtype & EXT_CSD_CARD_TYPE_52) {
		if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
			mmc->card_caps |= MMC_MODE_DDR_52MHz;
		mmc->card_caps |= MMC_MODE_HS_52MHz;
	}
	if (cardtype & EXT_CSD_CARD_TYPE_26)
		mmc->card_caps |= MMC_MODE_HS;

#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	/* enhanced strobe is only meaningful when HS400 itself is supported */
	if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
	    (mmc->card_caps & MMC_MODE_HS400)) {
		mmc->card_caps |= MMC_MODE_HS400_ES;
	}
#endif

	return 0;
}
1038#endif
1039
1040static int mmc_set_capacity(struct mmc *mmc, int part_num)
1041{
1042	switch (part_num) {
1043	case 0:
1044		mmc->capacity = mmc->capacity_user;
1045		break;
1046	case 1:
1047	case 2:
1048		mmc->capacity = mmc->capacity_boot;
1049		break;
1050	case 3:
1051		mmc->capacity = mmc->capacity_rpmb;
1052		break;
1053	case 4:
1054	case 5:
1055	case 6:
1056	case 7:
1057		mmc->capacity = mmc->capacity_gp[part_num - 4];
1058		break;
1059	default:
1060		return -1;
1061	}
1062
1063	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
1064
1065	return 0;
1066}
1067
/*
 * mmc_switch_part() - select a hardware partition via the PART_CONF byte
 *
 * @mmc:	mmc device
 * @part_num:	hardware partition number (see mmc_set_capacity())
 *
 * Retries the CMD6 up to three extra times.  On success - or when a card
 * reporting -ENODEV is being returned to the user area - the capacity and
 * hwpart fields of the block descriptor are updated to match.
 * Return: 0 on success, negative error otherwise
 */
int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
{
	int ret;
	int retry = 3;

	do {
		ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONF,
				 (mmc->part_config & ~PART_ACCESS_MASK)
				 | (part_num & PART_ACCESS_MASK));
	} while (ret && retry--);

	/*
	 * Set the capacity if the switch succeeded or was intended
	 * to return to representing the raw device.
	 */
	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
		ret = mmc_set_capacity(mmc, part_num);
		mmc_get_blk_desc(mmc)->hwpart = part_num;
	}

	return ret;
}
1091
1092#if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
/*
 * mmc_hwpart_config() - check, set or complete eMMC hardware partitioning
 *
 * @mmc:	mmc device
 * @conf:	requested enhanced-area / general-purpose partition layout
 * @mode:	MMC_HWPART_CONF_CHECK only validates the request,
 *		MMC_HWPART_CONF_SET writes the configuration, and
 *		MMC_HWPART_CONF_COMPLETE additionally sets
 *		PARTITION_SETTING_COMPLETED (effective after a power cycle)
 * Return: 0 on success, -EINVAL for a malformed request, -EMEDIUMTYPE when
 *	   the card cannot honour it, -EPERM if already partitioned, or an
 *	   error from the EXT_CSD accesses
 */
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		pr_err("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		pr_err("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		pr_err("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			pr_err("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			/* byte-addressed cards take the start in bytes */
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			pr_err("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
		pr_err("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	/* MAX_ENH_SIZE_MULT is a 24-bit little-endian EXT_CSD field */
	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed. */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		pr_err("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

#if CONFIG_IS_ENABLED(MMC_WRITE)
		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
#endif

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
			if (err)
				return err;
		}
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
	if (err)
		return err;

	if (mode == MMC_HWPART_CONF_SET)
		return 0;

	/* The WR_REL_SET is a write-once register but shall be
	 * written before setting PART_SETTING_COMPLETED. As it is
	 * write-once we can only write it when completing the
	 * partitioning. */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_WR_REL_SET, wr_rel_set);
		if (err)
			return err;
	}

	/* Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration but it only becomes effective after power
	 * cycle, so we do not adjust the partition related settings
	 * in the mmc struct. */

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITION_SETTING,
			 EXT_CSD_PARTITION_SETTING_COMPLETED);
	if (err)
		return err;

	return 0;
}
1288#endif
1289
1290#if !CONFIG_IS_ENABLED(DM_MMC)
1291int mmc_getcd(struct mmc *mmc)
1292{
1293	int cd;
1294
1295	cd = board_mmc_getcd(mmc);
1296
1297	if (cd < 0) {
1298		if (mmc->cfg->ops->getcd)
1299			cd = mmc->cfg->ops->getcd(mmc);
1300		else
1301			cd = 1;
1302	}
1303
1304	return cd;
1305}
1306#endif
1307
1308#if !CONFIG_IS_ENABLED(MMC_TINY)
1309static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1310{
1311	struct mmc_cmd cmd;
1312	struct mmc_data data;
1313
1314	/* Switch the frequency */
1315	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1316	cmd.resp_type = MMC_RSP_R1;
1317	cmd.cmdarg = (mode << 31) | 0xffffff;
1318	cmd.cmdarg &= ~(0xf << (group * 4));
1319	cmd.cmdarg |= value << (group * 4);
1320
1321	data.dest = (char *)resp;
1322	data.blocksize = 64;
1323	data.blocks = 1;
1324	data.flags = MMC_DATA_READ;
1325
1326	return mmc_send_cmd(mmc, &cmd, &data);
1327}
1328
/*
 * Discover an SD card's capabilities (bus width, speed modes).
 *
 * Reads the SCR register to determine the spec version and 4-bit
 * support, then uses CMD6 in check mode to probe high-speed (and,
 * when enabled, UHS) support. Results accumulate in mmc->card_caps
 * and mmc->version.
 *
 * Return: 0 on success (even if only legacy mode is supported),
 * negative error code on command failure
 */
static int sd_get_capabilities(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
	struct mmc_data data;
	int timeout;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	u32 sd3_bus_mode;
#endif

	/* Start from the minimal assumption: 1-bit legacy only */
	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);

	/* SPI hosts cannot switch modes; keep the defaults */
	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	/* SCR is an 8-byte big-endian register, read as a data block */
	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd_retry(mmc, &cmd, &data, 3);

	if (err)
		return err;

	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	/* SD_SPEC field (bits 59:56 of SCR) gives the spec version */
	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		/* SD_SPEC3 bit distinguishes 3.x from 2.0 */
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	/* Poll CMD6 in check mode until the high-speed function is ready */
	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy.  Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* Record high-speed capability if the card advertises it */
	if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
		mmc->card_caps |= MMC_CAP(SD_HS);

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* Versions before 3.0 don't support UHS modes */
	if (mmc->version < SD_VERSION_3)
		return 0;

	/* Bits 20:16 of this status word describe the supported UHS modes */
	sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
	if (sd3_bus_mode & SD_MODE_UHS_SDR104)
		mmc->card_caps |= MMC_CAP(UHS_SDR104);
	if (sd3_bus_mode & SD_MODE_UHS_SDR50)
		mmc->card_caps |= MMC_CAP(UHS_SDR50);
	if (sd3_bus_mode & SD_MODE_UHS_SDR25)
		mmc->card_caps |= MMC_CAP(UHS_SDR25);
	if (sd3_bus_mode & SD_MODE_UHS_SDR12)
		mmc->card_caps |= MMC_CAP(UHS_SDR12);
	if (sd3_bus_mode & SD_MODE_UHS_DDR50)
		mmc->card_caps |= MMC_CAP(UHS_DDR50);
#endif

	return 0;
}
1434
1435static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1436{
1437	int err;
1438
1439	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1440	int speed;
1441
1442	/* SD version 1.00 and 1.01 does not support CMD 6 */
1443	if (mmc->version == SD_VERSION_1_0)
1444		return 0;
1445
1446	switch (mode) {
1447	case MMC_LEGACY:
1448		speed = UHS_SDR12_BUS_SPEED;
1449		break;
1450	case SD_HS:
1451		speed = HIGH_SPEED_BUS_SPEED;
1452		break;
1453#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1454	case UHS_SDR12:
1455		speed = UHS_SDR12_BUS_SPEED;
1456		break;
1457	case UHS_SDR25:
1458		speed = UHS_SDR25_BUS_SPEED;
1459		break;
1460	case UHS_SDR50:
1461		speed = UHS_SDR50_BUS_SPEED;
1462		break;
1463	case UHS_DDR50:
1464		speed = UHS_DDR50_BUS_SPEED;
1465		break;
1466	case UHS_SDR104:
1467		speed = UHS_SDR104_BUS_SPEED;
1468		break;
1469#endif
1470	default:
1471		return -EINVAL;
1472	}
1473
1474	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1475	if (err)
1476		return err;
1477
1478	if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
1479		return -ENOTSUPP;
1480
1481	return 0;
1482}
1483
1484static int sd_select_bus_width(struct mmc *mmc, int w)
1485{
1486	int err;
1487	struct mmc_cmd cmd;
1488
1489	if ((w != 4) && (w != 1))
1490		return -EINVAL;
1491
1492	cmd.cmdidx = MMC_CMD_APP_CMD;
1493	cmd.resp_type = MMC_RSP_R1;
1494	cmd.cmdarg = mmc->rca << 16;
1495
1496	err = mmc_send_cmd(mmc, &cmd, NULL);
1497	if (err)
1498		return err;
1499
1500	cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1501	cmd.resp_type = MMC_RSP_R1;
1502	if (w == 4)
1503		cmd.cmdarg = 2;
1504	else if (w == 1)
1505		cmd.cmdarg = 0;
1506	err = mmc_send_cmd(mmc, &cmd, NULL);
1507	if (err)
1508		return err;
1509
1510	return 0;
1511}
1512#endif
1513
1514#if CONFIG_IS_ENABLED(MMC_WRITE)
1515static int sd_read_ssr(struct mmc *mmc)
1516{
1517	static const unsigned int sd_au_size[] = {
1518		0,		SZ_16K / 512,		SZ_32K / 512,
1519		SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
1520		SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
1521		SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
1522		SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,
1523		SZ_64M / 512,
1524	};
1525	int err, i;
1526	struct mmc_cmd cmd;
1527	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1528	struct mmc_data data;
1529	unsigned int au, eo, et, es;
1530
1531	cmd.cmdidx = MMC_CMD_APP_CMD;
1532	cmd.resp_type = MMC_RSP_R1;
1533	cmd.cmdarg = mmc->rca << 16;
1534
1535	err = mmc_send_cmd_quirks(mmc, &cmd, NULL, MMC_QUIRK_RETRY_APP_CMD, 4);
1536	if (err)
1537		return err;
1538
1539	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1540	cmd.resp_type = MMC_RSP_R1;
1541	cmd.cmdarg = 0;
1542
1543	data.dest = (char *)ssr;
1544	data.blocksize = 64;
1545	data.blocks = 1;
1546	data.flags = MMC_DATA_READ;
1547
1548	err = mmc_send_cmd_retry(mmc, &cmd, &data, 3);
1549	if (err)
1550		return err;
1551
1552	for (i = 0; i < 16; i++)
1553		ssr[i] = be32_to_cpu(ssr[i]);
1554
1555	au = (ssr[2] >> 12) & 0xF;
1556	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1557		mmc->ssr.au = sd_au_size[au];
1558		es = (ssr[3] >> 24) & 0xFF;
1559		es |= (ssr[2] & 0xFF) << 8;
1560		et = (ssr[3] >> 18) & 0x3F;
1561		if (es && et) {
1562			eo = (ssr[3] >> 16) & 0x3;
1563			mmc->ssr.erase_timeout = (et * 1000) / es;
1564			mmc->ssr.erase_offset = eo * 1000;
1565		}
1566	} else {
1567		pr_debug("Invalid Allocation Unit Size.\n");
1568	}
1569
1570	return 0;
1571}
1572#endif
1573/*
1574 * TRAN_SPEED bits 0:2 encode the frequency unit:
1575 * 0 = 100KHz, 1 = 1MHz, 2 = 10MHz, 3 = 100MHz, values 4 - 7 are reserved.
1576 * The values in fbase[] are divided by 10 to avoid floats in multiplier[].
1577 */
1578static const int fbase[] = {
1579	10000,
1580	100000,
1581	1000000,
1582	10000000,
1583	0,	/* reserved */
1584	0,	/* reserved */
1585	0,	/* reserved */
1586	0,	/* reserved */
1587};
1588
1589/* Multiplier values for TRAN_SPEED.  Multiplied by 10 to be nice
1590 * to platforms without floating point.
1591 */
1592static const u8 multipliers[] = {
1593	0,	/* reserved */
1594	10,
1595	12,
1596	13,
1597	15,
1598	20,
1599	25,
1600	30,
1601	35,
1602	40,
1603	45,
1604	50,
1605	55,
1606	60,
1607	70,
1608	80,
1609};
1610
1611static inline int bus_width(uint cap)
1612{
1613	if (cap == MMC_MODE_8BIT)
1614		return 8;
1615	if (cap == MMC_MODE_4BIT)
1616		return 4;
1617	if (cap == MMC_MODE_1BIT)
1618		return 1;
1619	pr_warn("invalid bus witdh capability 0x%x\n", cap);
1620	return 0;
1621}
1622
1623#if !CONFIG_IS_ENABLED(DM_MMC)
1624#ifdef MMC_SUPPORTS_TUNING
/* Non-DM stub: tuning is not implemented without DM_MMC ops */
static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
{
	return -ENOTSUPP;
}
1629#endif
1630
1631static int mmc_set_ios(struct mmc *mmc)
1632{
1633	int ret = 0;
1634
1635	if (mmc->cfg->ops->set_ios)
1636		ret = mmc->cfg->ops->set_ios(mmc);
1637
1638	return ret;
1639}
1640
1641static int mmc_host_power_cycle(struct mmc *mmc)
1642{
1643	int ret = 0;
1644
1645	if (mmc->cfg->ops->host_power_cycle)
1646		ret = mmc->cfg->ops->host_power_cycle(mmc);
1647
1648	return ret;
1649}
1650#endif
1651
1652int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1653{
1654	if (!disable) {
1655		if (clock > mmc->cfg->f_max)
1656			clock = mmc->cfg->f_max;
1657
1658		if (clock < mmc->cfg->f_min)
1659			clock = mmc->cfg->f_min;
1660	}
1661
1662	mmc->clock = clock;
1663	mmc->clk_disable = disable;
1664
1665	debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1666
1667	return mmc_set_ios(mmc);
1668}
1669
/* Record the bus width and apply it through the host driver */
static int mmc_set_bus_width(struct mmc *mmc, uint width)
{
	mmc->bus_width = width;

	return mmc_set_ios(mmc);
}
1676
1677#if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1678/*
1679 * helper function to display the capabilities in a human
1680 * friendly manner. The capabilities include bus width and
1681 * supported modes.
1682 */
1683void mmc_dump_capabilities(const char *text, uint caps)
1684{
1685	enum bus_mode mode;
1686
1687	pr_debug("%s: widths [", text);
1688	if (caps & MMC_MODE_8BIT)
1689		pr_debug("8, ");
1690	if (caps & MMC_MODE_4BIT)
1691		pr_debug("4, ");
1692	if (caps & MMC_MODE_1BIT)
1693		pr_debug("1, ");
1694	pr_debug("\b\b] modes [");
1695	for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1696		if (MMC_CAP(mode) & caps)
1697			pr_debug("%s, ", mmc_mode_name(mode));
1698	pr_debug("\b\b]\n");
1699}
1700#endif
1701
/* One candidate bus configuration tried during mode negotiation */
struct mode_width_tuning {
	enum bus_mode mode;	/* bus speed mode to attempt */
	uint widths;		/* MMC_MODE_xBIT flags valid in this mode */
#ifdef MMC_SUPPORTS_TUNING
	uint tuning;		/* tuning command opcode; 0 if none needed */
#endif
};
1709
1710#if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1711int mmc_voltage_to_mv(enum mmc_voltage voltage)
1712{
1713	switch (voltage) {
1714	case MMC_SIGNAL_VOLTAGE_000: return 0;
1715	case MMC_SIGNAL_VOLTAGE_330: return 3300;
1716	case MMC_SIGNAL_VOLTAGE_180: return 1800;
1717	case MMC_SIGNAL_VOLTAGE_120: return 1200;
1718	}
1719	return -EINVAL;
1720}
1721
1722static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1723{
1724	int err;
1725
1726	if (mmc->signal_voltage == signal_voltage)
1727		return 0;
1728
1729	mmc->signal_voltage = signal_voltage;
1730	err = mmc_set_ios(mmc);
1731	if (err)
1732		pr_debug("unable to set voltage (err %d)\n", err);
1733
1734	return err;
1735}
1736#else
/* Stub when MMC_IO_VOLTAGE is disabled: voltage switching unsupported */
static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
{
	return 0;
}
1741#endif
1742
1743#if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * SD bus modes in decreasing order of preference: negotiation walks
 * this table and uses the first mode both card and host advertise.
 * MMC_LEGACY must remain last as the guaranteed fallback.
 */
static const struct mode_width_tuning sd_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
#ifdef MMC_SUPPORTS_TUNING
	{
		.mode = UHS_SDR104,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK
	},
#endif
	{
		.mode = UHS_SDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_DDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_SDR25,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = SD_HS,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	{
		.mode = UHS_SDR12,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = MMC_LEGACY,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};
1781
/* Iterate over sd_modes_by_pref, visiting only modes present in @caps */
#define for_each_sd_mode_by_pref(caps, mwt) \
	for (mwt = sd_modes_by_pref;\
	     mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
	     mwt++) \
		if (caps & MMC_CAP(mwt->mode))
1787
/*
 * Negotiate the best bus mode and width for an SD card.
 *
 * Walks sd_modes_by_pref fastest-first, restricted to capabilities
 * shared by card and host, trying 4-bit before 1-bit for each mode.
 * On any failure the bus is dropped back to legacy before trying the
 * next combination.
 *
 * @mmc: device to configure
 * @card_caps: capability mask reported by the card
 * Return: 0 on success, -ENOTSUPP if no mode/width combination works
 */
static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err;
	uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
	const struct mode_width_tuning *mwt;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* UHS needs the card to have accepted 1.8V signalling (S18R) */
	bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
#else
	bool uhs_en = false;
#endif
	uint caps;

#ifdef DEBUG
	mmc_dump_capabilities("sd card", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	/* SPI hosts use a fixed 1-bit legacy bus; nothing to negotiate */
	if (mmc_host_is_spi(mmc)) {
		mmc_set_bus_width(mmc, 1);
		mmc_select_mode(mmc, MMC_LEGACY);
		mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
#if CONFIG_IS_ENABLED(MMC_WRITE)
		err = sd_read_ssr(mmc);
		if (err)
			pr_warn("unable to read ssr\n");
#endif
		return 0;
	}

	/* Restrict card's capabilities by what the host can do */
	caps = card_caps & mmc->host_caps;

	if (!uhs_en)
		caps &= ~UHS_CAPS;

	for_each_sd_mode_by_pref(caps, mwt) {
		uint *w;

		for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
			if (*w & caps & mwt->widths) {
				pr_debug("trying mode %s width %d (at %d MHz)\n",
					 mmc_mode_name(mwt->mode),
					 bus_width(*w),
					 mmc_mode2freq(mmc, mwt->mode) / 1000000);

				/* configure the bus width (card + host) */
				err = sd_select_bus_width(mmc, bus_width(*w));
				if (err)
					goto error;
				mmc_set_bus_width(mmc, bus_width(*w));

				/* configure the bus mode (card) */
				err = sd_set_card_speed(mmc, mwt->mode);
				if (err)
					goto error;

				/* configure the bus mode (host) */
				mmc_select_mode(mmc, mwt->mode);
				mmc_set_clock(mmc, mmc->tran_speed,
						MMC_CLK_ENABLE);

#ifdef MMC_SUPPORTS_TUNING
				/* execute tuning if needed */
				if (mwt->tuning && !mmc_host_is_spi(mmc)) {
					err = mmc_execute_tuning(mmc,
								 mwt->tuning);
					if (err) {
						pr_debug("tuning failed\n");
						goto error;
					}
				}
#endif

#if CONFIG_IS_ENABLED(MMC_WRITE)
				/* sanity-check the link with a real transfer */
				err = sd_read_ssr(mmc);
				if (err)
					pr_warn("unable to read ssr\n");
#endif
				if (!err)
					return 0;

error:
				/* revert to a safer bus speed */
				mmc_select_mode(mmc, MMC_LEGACY);
				mmc_set_clock(mmc, mmc->tran_speed,
						MMC_CLK_ENABLE);
			}
		}
	}

	pr_err("unable to select a mode\n");
	return -ENOTSUPP;
}
1881
1882/*
1883 * read the compare the part of ext csd that is constant.
1884 * This can be used to check that the transfer is working
1885 * as expected.
1886 */
1887static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1888{
1889	int err;
1890	const u8 *ext_csd = mmc->ext_csd;
1891	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1892
1893	if (mmc->version < MMC_VERSION_4)
1894		return 0;
1895
1896	err = mmc_send_ext_csd(mmc, test_csd);
1897	if (err)
1898		return err;
1899
1900	/* Only compare read only fields */
1901	if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1902		== test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1903	    ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1904		== test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1905	    ext_csd[EXT_CSD_REV]
1906		== test_csd[EXT_CSD_REV] &&
1907	    ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1908		== test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1909	    memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1910		   &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1911		return 0;
1912
1913	return -EBADMSG;
1914}
1915
1916#if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1917static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1918				  uint32_t allowed_mask)
1919{
1920	u32 card_mask = 0;
1921
1922	switch (mode) {
1923	case MMC_HS_400_ES:
1924	case MMC_HS_400:
1925	case MMC_HS_200:
1926		if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1927		    EXT_CSD_CARD_TYPE_HS400_1_8V))
1928			card_mask |= MMC_SIGNAL_VOLTAGE_180;
1929		if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1930		    EXT_CSD_CARD_TYPE_HS400_1_2V))
1931			card_mask |= MMC_SIGNAL_VOLTAGE_120;
1932		break;
1933	case MMC_DDR_52:
1934		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1935			card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1936				     MMC_SIGNAL_VOLTAGE_180;
1937		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1938			card_mask |= MMC_SIGNAL_VOLTAGE_120;
1939		break;
1940	default:
1941		card_mask |= MMC_SIGNAL_VOLTAGE_330;
1942		break;
1943	}
1944
1945	while (card_mask & allowed_mask) {
1946		enum mmc_voltage best_match;
1947
1948		best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1949		if (!mmc_set_signal_voltage(mmc,  best_match))
1950			return 0;
1951
1952		allowed_mask &= ~best_match;
1953	}
1954
1955	return -ENOTSUPP;
1956}
1957#else
/* Stub when MMC_IO_VOLTAGE is disabled: keep the current voltage */
static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
					 uint32_t allowed_mask)
{
	return 0;
}
1963#endif
1964
/*
 * eMMC bus modes in decreasing order of preference: negotiation walks
 * this table and uses the first mode both card and host advertise.
 * MMC_LEGACY must remain last as the guaranteed fallback.
 */
static const struct mode_width_tuning mmc_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	{
		.mode = MMC_HS_400_ES,
		.widths = MMC_MODE_8BIT,
	},
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	{
		.mode = MMC_HS_400,
		.widths = MMC_MODE_8BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
	},
#endif
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	{
		.mode = MMC_HS_200,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
	},
#endif
	{
		.mode = MMC_DDR_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
	},
	{
		.mode = MMC_HS_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_HS,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_LEGACY,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};
2003
/* Iterate over mmc_modes_by_pref, visiting only modes present in @caps */
#define for_each_mmc_mode_by_pref(caps, mwt) \
	for (mwt = mmc_modes_by_pref;\
	    mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
	    mwt++) \
		if (caps & MMC_CAP(mwt->mode))
2009
/*
 * Mapping from a width capability flag (and whether the mode is DDR)
 * to the value written to the EXT_CSD BUS_WIDTH byte. Ordered
 * widest-first so iteration prefers wider buses.
 */
static const struct ext_csd_bus_width {
	uint cap;		/* MMC_MODE_xBIT capability flag */
	bool is_ddr;		/* true for the DDR encodings */
	uint ext_csd_bits;	/* value for EXT_CSD_BUS_WIDTH */
} ext_csd_bus_width[] = {
	{MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
	{MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
	{MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
	{MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
	{MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
};
2021
2022#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * Switch an eMMC device into HS400 mode.
 *
 * The card is first put into HS200 to run tuning, dropped back to
 * HS, switched to 8-bit DDR bus width, and only then given HS400
 * timing and the final clock (the order the eMMC spec prescribes).
 *
 * @mmc: device to configure
 * Return: 0 on success, negative error code otherwise
 */
static int mmc_select_hs400(struct mmc *mmc)
{
	int err;

	/* Set timing to HS200 for tuning */
	err = mmc_set_card_speed(mmc, MMC_HS_200, false);
	if (err)
		return err;

	/* configure the bus mode (host) */
	mmc_select_mode(mmc, MMC_HS_200);
	mmc_set_clock(mmc, mmc->tran_speed, false);

	/* execute tuning if needed */
	mmc->hs400_tuning = true;
	err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
	mmc->hs400_tuning = false;
	if (err) {
		debug("tuning failed\n");
		return err;
	}

	/* Set back to HS */
	/* NOTE(review): the return value of this switch is ignored */
	mmc_set_card_speed(mmc, MMC_HS, true);

	/* Host-side hook — presumably prepares the controller for DDR */
	err = mmc_hs400_prepare_ddr(mmc);
	if (err)
		return err;

	/* HS400 runs on an 8-bit DDR bus (see mmc_modes_by_pref) */
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
	if (err)
		return err;

	err = mmc_set_card_speed(mmc, MMC_HS_400, false);
	if (err)
		return err;

	mmc_select_mode(mmc, MMC_HS_400);
	err = mmc_set_clock(mmc, mmc->tran_speed, false);
	if (err)
		return err;

	return 0;
}
2068#else
/* Stub when MMC_HS400_SUPPORT is disabled */
static int mmc_select_hs400(struct mmc *mmc)
{
	return -ENOTSUPP;
}
2073#endif
2074
2075#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
2076#if !CONFIG_IS_ENABLED(DM_MMC)
/* Non-DM stub: enhanced strobe requires DM_MMC driver support */
static int mmc_set_enhanced_strobe(struct mmc *mmc)
{
	return -ENOTSUPP;
}
2081#endif
/*
 * Switch an eMMC device into HS400 Enhanced Strobe mode.
 *
 * Unlike plain HS400 this path runs no tuning: the card is put in
 * HS, the 8-bit DDR width is enabled together with the strobe bit,
 * then HS400ES timing is selected and the host strobe turned on.
 *
 * @mmc: device to configure
 * Return: 0 on success, negative error code otherwise
 */
static int mmc_select_hs400es(struct mmc *mmc)
{
	int err;

	err = mmc_set_card_speed(mmc, MMC_HS, true);
	if (err)
		return err;

	/* 8-bit DDR bus width with the enhanced-strobe flag set */
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
			 EXT_CSD_BUS_WIDTH_STROBE);
	if (err) {
		printf("switch to bus width for hs400 failed\n");
		return err;
	}
	/* TODO: driver strength */
	err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
	if (err)
		return err;

	mmc_select_mode(mmc, MMC_HS_400_ES);
	err = mmc_set_clock(mmc, mmc->tran_speed, false);
	if (err)
		return err;

	/* Finally enable the strobe on the host side */
	return mmc_set_enhanced_strobe(mmc);
}
2109#else
/* Stub when MMC_HS400_ES_SUPPORT is disabled */
static int mmc_select_hs400es(struct mmc *mmc)
{
	return -ENOTSUPP;
}
2114#endif
2115
/* Iterate over ext_csd_bus_width entries matching @caps and @ddr */
#define for_each_supported_width(caps, ddr, ecbv) \
	for (ecbv = ext_csd_bus_width;\
	    ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
	    ecbv++) \
		if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
2121
/*
 * Negotiate the best bus mode and width for an eMMC device.
 *
 * Walks mmc_modes_by_pref fastest-first, restricted to capabilities
 * shared by card and host, and tries every compatible bus width for
 * each mode. A trial EXT_CSD read-back validates the configuration;
 * on failure the bus is reverted to 1-bit legacy before the next
 * combination is tried.
 *
 * @mmc: device to configure
 * @card_caps: capability mask derived from the card
 * Return: 0 on success, -ENOTSUPP if no combination works
 */
static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err = 0;
	const struct mode_width_tuning *mwt;
	const struct ext_csd_bus_width *ecbw;

#ifdef DEBUG
	mmc_dump_capabilities("mmc", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	/* SPI hosts use a fixed 1-bit legacy bus; nothing to negotiate */
	if (mmc_host_is_spi(mmc)) {
		mmc_set_bus_width(mmc, 1);
		mmc_select_mode(mmc, MMC_LEGACY);
		mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
		return 0;
	}

	/* Restrict card's capabilities by what the host can do */
	card_caps &= mmc->host_caps;

	/* Only version 4 of MMC supports wider bus widths */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!mmc->ext_csd) {
		pr_debug("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
    CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
    CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	/*
	 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
	 * before doing anything else, since a transition from either of
	 * the HS200/HS400 mode directly to legacy mode is not supported.
	 */
	if (mmc->selected_mode == MMC_HS_200 ||
	    mmc->selected_mode == MMC_HS_400 ||
	    mmc->selected_mode == MMC_HS_400_ES)
		mmc_set_card_speed(mmc, MMC_HS, true);
	else
#endif
		mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);

	for_each_mmc_mode_by_pref(card_caps, mwt) {
		for_each_supported_width(card_caps & mwt->widths,
					 mmc_is_mode_ddr(mwt->mode), ecbw) {
			enum mmc_voltage old_voltage;
			pr_debug("trying mode %s width %d (at %d MHz)\n",
				 mmc_mode_name(mwt->mode),
				 bus_width(ecbw->cap),
				 mmc_mode2freq(mmc, mwt->mode) / 1000000);
			/* remember the voltage so it can be restored on error */
			old_voltage = mmc->signal_voltage;
			err = mmc_set_lowest_voltage(mmc, mwt->mode,
						     MMC_ALL_SIGNAL_VOLTAGE);
			if (err)
				continue;

			/* configure the bus width (card + host) */
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				    EXT_CSD_BUS_WIDTH,
				    ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
			if (err)
				goto error;
			mmc_set_bus_width(mmc, bus_width(ecbw->cap));

			if (mwt->mode == MMC_HS_400) {
				err = mmc_select_hs400(mmc);
				if (err) {
					printf("Select HS400 failed %d\n", err);
					goto error;
				}
			} else if (mwt->mode == MMC_HS_400_ES) {
				err = mmc_select_hs400es(mmc);
				if (err) {
					printf("Select HS400ES failed %d\n",
					       err);
					goto error;
				}
			} else {
				/* configure the bus speed (card) */
				err = mmc_set_card_speed(mmc, mwt->mode, false);
				if (err)
					goto error;

				/*
				 * configure the bus width AND the ddr mode
				 * (card). The host side will be taken care
				 * of in the next step
				 */
				if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
					err = mmc_switch(mmc,
							 EXT_CSD_CMD_SET_NORMAL,
							 EXT_CSD_BUS_WIDTH,
							 ecbw->ext_csd_bits);
					if (err)
						goto error;
				}

				/* configure the bus mode (host) */
				mmc_select_mode(mmc, mwt->mode);
				mmc_set_clock(mmc, mmc->tran_speed,
					      MMC_CLK_ENABLE);
#ifdef MMC_SUPPORTS_TUNING

				/* execute tuning if needed */
				if (mwt->tuning) {
					err = mmc_execute_tuning(mmc,
								 mwt->tuning);
					if (err) {
						pr_debug("tuning failed : %d\n", err);
						goto error;
					}
				}
#endif
			}

			/* do a transfer to check the configuration */
			err = mmc_read_and_compare_ext_csd(mmc);
			if (!err)
				return 0;
error:
			mmc_set_signal_voltage(mmc, old_voltage);
			/* if an error occurred, revert to a safer bus mode */
			mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
			mmc_select_mode(mmc, MMC_LEGACY);
			mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
			mmc_set_bus_width(mmc, 1);
		}
	}

	pr_err("unable to select a mode : %d\n", err);

	return -ENOTSUPP;
}
2260#else
2261static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
2262{
2263	return 0;
2264};
2265
2266static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
2267{
2268	return 0;
2269};
2270#endif
2271
#if CONFIG_IS_ENABLED(MMC_TINY)
/* Static EXT_CSD backup buffer used instead of malloc in tiny builds */
DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
#endif
2275
2276static int mmc_startup_v4(struct mmc *mmc)
2277{
2278	int err, i;
2279	u64 capacity;
2280	bool has_parts = false;
2281	bool part_completed;
2282	static const u32 mmc_versions[] = {
2283		MMC_VERSION_4,
2284		MMC_VERSION_4_1,
2285		MMC_VERSION_4_2,
2286		MMC_VERSION_4_3,
2287		MMC_VERSION_4_4,
2288		MMC_VERSION_4_41,
2289		MMC_VERSION_4_5,
2290		MMC_VERSION_5_0,
2291		MMC_VERSION_5_1
2292	};
2293
2294#if CONFIG_IS_ENABLED(MMC_TINY)
2295	u8 *ext_csd = ext_csd_bkup;
2296
2297	if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2298		return 0;
2299
2300	if (!mmc->ext_csd)
2301		memset(ext_csd_bkup, 0, MMC_MAX_BLOCK_LEN);
2302
2303	err = mmc_send_ext_csd(mmc, ext_csd);
2304	if (err)
2305		goto error;
2306
2307	/* store the ext csd for future reference */
2308	if (!mmc->ext_csd)
2309		mmc->ext_csd = ext_csd;
2310#else
2311	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2312
2313	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2314		return 0;
2315
2316	/* check  ext_csd version and capacity */
2317	err = mmc_send_ext_csd(mmc, ext_csd);
2318	if (err)
2319		goto error;
2320
2321	/* store the ext csd for future reference */
2322	if (!mmc->ext_csd)
2323		mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2324	if (!mmc->ext_csd)
2325		return -ENOMEM;
2326	memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
2327#endif
2328	if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2329		return -EINVAL;
2330
2331	mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2332
2333	if (mmc->version >= MMC_VERSION_4_2) {
2334		/*
2335		 * According to the JEDEC Standard, the value of
2336		 * ext_csd's capacity is valid if the value is more
2337		 * than 2GB
2338		 */
2339		capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2340				| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2341				| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2342				| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2343		capacity *= MMC_MAX_BLOCK_LEN;
2344		if ((capacity >> 20) > 2 * 1024)
2345			mmc->capacity_user = capacity;
2346	}
2347
2348	if (mmc->version >= MMC_VERSION_4_5)
2349		mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2350
2351	/* The partition data may be non-zero but it is only
2352	 * effective if PARTITION_SETTING_COMPLETED is set in
2353	 * EXT_CSD, so ignore any data if this bit is not set,
2354	 * except for enabling the high-capacity group size
2355	 * definition (see below).
2356	 */
2357	part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2358			    EXT_CSD_PARTITION_SETTING_COMPLETED);
2359
2360	mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2361	/* Some eMMC set the value too low so set a minimum */
2362	if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2363		mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2364
2365	/* store the partition info of emmc */
2366	mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2367	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2368	    ext_csd[EXT_CSD_BOOT_MULT])
2369		mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2370	if (part_completed &&
2371	    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2372		mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
2373
2374	mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2375
2376	mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
2377
2378	for (i = 0; i < 4; i++) {
2379		int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2380		uint mult = (ext_csd[idx + 2] << 16) +
2381			(ext_csd[idx + 1] << 8) + ext_csd[idx];
2382		if (mult)
2383			has_parts = true;
2384		if (!part_completed)
2385			continue;
2386		mmc->capacity_gp[i] = mult;
2387		mmc->capacity_gp[i] *=
2388			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2389		mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2390		mmc->capacity_gp[i] <<= 19;
2391	}
2392
2393#ifndef CONFIG_SPL_BUILD
2394	if (part_completed) {
2395		mmc->enh_user_size =
2396			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2397			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2398			ext_csd[EXT_CSD_ENH_SIZE_MULT];
2399		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2400		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2401		mmc->enh_user_size <<= 19;
2402		mmc->enh_user_start =
2403			(ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2404			(ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2405			(ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2406			ext_csd[EXT_CSD_ENH_START_ADDR];
2407		if (mmc->high_capacity)
2408			mmc->enh_user_start <<= 9;
2409	}
2410#endif
2411
2412	/*
2413	 * Host needs to enable ERASE_GRP_DEF bit if device is
2414	 * partitioned. This bit will be lost every time after a reset
2415	 * or power off. This will affect erase size.
2416	 */
2417	if (part_completed)
2418		has_parts = true;
2419	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2420	    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2421		has_parts = true;
2422	if (has_parts) {
2423		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2424				 EXT_CSD_ERASE_GROUP_DEF, 1);
2425
2426		if (err)
2427			goto error;
2428
2429		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2430	}
2431
2432	if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2433#if CONFIG_IS_ENABLED(MMC_WRITE)
2434		/* Read out group size from ext_csd */
2435		mmc->erase_grp_size =
2436			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2437#endif
2438		/*
2439		 * if high capacity and partition setting completed
2440		 * SEC_COUNT is valid even if it is smaller than 2 GiB
2441		 * JEDEC Standard JESD84-B45, 6.2.4
2442		 */
2443		if (mmc->high_capacity && part_completed) {
2444			capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2445				(ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2446				(ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2447				(ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2448			capacity *= MMC_MAX_BLOCK_LEN;
2449			mmc->capacity_user = capacity;
2450		}
2451	}
2452#if CONFIG_IS_ENABLED(MMC_WRITE)
2453	else {
2454		/* Calculate the group size from the csd value. */
2455		int erase_gsz, erase_gmul;
2456
2457		erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2458		erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2459		mmc->erase_grp_size = (erase_gsz + 1)
2460			* (erase_gmul + 1);
2461	}
2462#endif
2463#if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2464	mmc->hc_wp_grp_size = 1024
2465		* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2466		* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2467#endif
2468
2469	mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2470
2471	mmc->can_trim =
2472		!!(ext_csd[EXT_CSD_SEC_FEATURE] & EXT_CSD_SEC_FEATURE_TRIM_EN);
2473
2474	return 0;
2475error:
2476	if (mmc->ext_csd) {
2477#if !CONFIG_IS_ENABLED(MMC_TINY)
2478		free(mmc->ext_csd);
2479#endif
2480		mmc->ext_csd = NULL;
2481	}
2482	return err;
2483}
2484
/*
 * mmc_startup() - bring an identified card into Transfer State and read
 * its metadata.
 *
 * Runs the identification sequence after the operating conditions have
 * been negotiated: reads CID and CSD, assigns/fetches the RCA, derives
 * capacity and block lengths, selects the card, completes v4+ setup
 * (EXT_CSD) via mmc_startup_v4(), switches to the best supported bus
 * mode/width and fills in the block device descriptor.
 *
 * @mmc: card to initialise (operating conditions already set)
 * Return: 0 on success, negative errno on failure
 */
static int mmc_startup(struct mmc *mmc)
{
	int err, i;
	uint mult, freq;
	u64 cmult, csize;
	struct mmc_cmd cmd;
	struct blk_desc *bdesc;

#ifdef CONFIG_MMC_SPI_CRC_ON
	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 1;
		err = mmc_send_cmd(mmc, &cmd, NULL);
		if (err)
			return err;
	}
#endif

	/* Put the Card in Identify Mode */
	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = 0;

	/* Some cards need the CID request retried (MMC_QUIRK_RETRY_SEND_CID) */
	err = mmc_send_cmd_quirks(mmc, &cmd, NULL, MMC_QUIRK_RETRY_SEND_CID, 4);
	if (err)
		return err;

	memcpy(mmc->cid, cmd.response, 16);

	/*
	 * For MMC cards, set the Relative Address.
	 * For SD cards, get the Relative Address.
	 * This also puts the cards into Standby State
	 */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
		cmd.cmdarg = mmc->rca << 16;
		cmd.resp_type = MMC_RSP_R6;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		/* SD cards publish their own RCA in the R6 response */
		if (IS_SD(mmc))
			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
	}

	/* Get the Card-Specific Data */
	cmd.cmdidx = MMC_CMD_SEND_CSD;
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	mmc->csd[0] = cmd.response[0];
	mmc->csd[1] = cmd.response[1];
	mmc->csd[2] = cmd.response[2];
	mmc->csd[3] = cmd.response[3];

	/* Derive the MMC spec version from the CSD SPEC_VERS field */
	if (mmc->version == MMC_VERSION_UNKNOWN) {
		int version = (cmd.response[0] >> 26) & 0xf;

		switch (version) {
		case 0:
			mmc->version = MMC_VERSION_1_2;
			break;
		case 1:
			mmc->version = MMC_VERSION_1_4;
			break;
		case 2:
			mmc->version = MMC_VERSION_2_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_3;
			break;
		case 4:
			mmc->version = MMC_VERSION_4;
			break;
		default:
			mmc->version = MMC_VERSION_1_2;
			break;
		}
	}

	/* divide frequency by 10, since the mults are 10x bigger */
	freq = fbase[(cmd.response[0] & 0x7)];
	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];

	/* TRAN_SPEED from the CSD: max clock in legacy timing */
	mmc->legacy_speed = freq * mult;
	if (!mmc->legacy_speed)
		log_debug("TRAN_SPEED: reserved value");
	mmc_select_mode(mmc, MMC_LEGACY);

	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
#if CONFIG_IS_ENABLED(MMC_WRITE)

	if (IS_SD(mmc))
		mmc->write_bl_len = mmc->read_bl_len;
	else
		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
#endif

	/*
	 * Capacity from C_SIZE/C_SIZE_MULT; high-capacity cards use the
	 * wider CSD 2.0 layout with an implied multiplier of 2^8.
	 */
	if (mmc->high_capacity) {
		csize = (mmc->csd[1] & 0x3f) << 16
			| (mmc->csd[2] & 0xffff0000) >> 16;
		cmult = 8;
	} else {
		csize = (mmc->csd[1] & 0x3ff) << 2
			| (mmc->csd[2] & 0xc0000000) >> 30;
		cmult = (mmc->csd[2] & 0x00038000) >> 15;
	}

	mmc->capacity_user = (csize + 1) << (cmult + 2);
	mmc->capacity_user *= mmc->read_bl_len;
	mmc->capacity_boot = 0;
	mmc->capacity_rpmb = 0;
	for (i = 0; i < 4; i++)
		mmc->capacity_gp[i] = 0;

	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;

#if CONFIG_IS_ENABLED(MMC_WRITE)
	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
#endif

	/* Program the DSR if the card implements it and one was configured */
	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
		cmd.cmdidx = MMC_CMD_SET_DSR;
		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
		cmd.resp_type = MMC_RSP_NONE;
		if (mmc_send_cmd(mmc, &cmd, NULL))
			pr_warn("MMC: SET_DSR failed\n");
	}

	/* Select the card, and put it into Transfer Mode */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = MMC_CMD_SELECT_CARD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = mmc->rca << 16;
		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	/*
	 * For SD, its erase group is always one sector
	 */
#if CONFIG_IS_ENABLED(MMC_WRITE)
	mmc->erase_grp_size = 1;
#endif
	mmc->part_config = MMCPART_NOAVAILABLE;

	/* Version 4+ setup: reads EXT_CSD, may refine the values above */
	err = mmc_startup_v4(mmc);
	if (err)
		return err;

	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(MMC_TINY)
	mmc_set_clock(mmc, mmc->legacy_speed, false);
	mmc_select_mode(mmc, MMC_LEGACY);
	mmc_set_bus_width(mmc, 1);
#else
	if (IS_SD(mmc)) {
		err = sd_get_capabilities(mmc);
		if (err)
			return err;
		err = sd_select_mode_and_width(mmc, mmc->card_caps);
	} else {
		err = mmc_get_capabilities(mmc);
		if (err)
			return err;
		err = mmc_select_mode_and_width(mmc, mmc->card_caps);
	}
#endif
	if (err)
		return err;

	mmc->best_mode = mmc->selected_mode;

	/* Fix the block length for DDR mode */
	if (mmc->ddr_mode) {
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
#if CONFIG_IS_ENABLED(MMC_WRITE)
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
#endif
	}

	/* fill in device description */
	bdesc = mmc_get_blk_desc(mmc);
	bdesc->lun = 0;
	bdesc->hwpart = 0;
	bdesc->type = 0;
	bdesc->blksz = mmc->read_bl_len;
	bdesc->log2blksz = LOG2(bdesc->blksz);
	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
#if !defined(CONFIG_SPL_BUILD) || \
		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
		!CONFIG_IS_ENABLED(USE_TINY_PRINTF))
	/* Decode manufacturer/serial/product/revision from the CID */
	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
		(mmc->cid[3] >> 16) & 0xffff);
	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
		(mmc->cid[2] >> 24) & 0xff);
	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
		(mmc->cid[2] >> 16) & 0xf);
#else
	bdesc->vendor[0] = 0;
	bdesc->product[0] = 0;
	bdesc->revision[0] = 0;
#endif

#if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
	part_init(bdesc);
#endif

	return 0;
}
2716
2717static int mmc_send_if_cond(struct mmc *mmc)
2718{
2719	struct mmc_cmd cmd;
2720	int err;
2721
2722	cmd.cmdidx = SD_CMD_SEND_IF_COND;
2723	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2724	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2725	cmd.resp_type = MMC_RSP_R7;
2726
2727	err = mmc_send_cmd(mmc, &cmd, NULL);
2728
2729	if (err)
2730		return err;
2731
2732	if ((cmd.response[0] & 0xff) != 0xaa)
2733		return -EOPNOTSUPP;
2734	else
2735		mmc->version = SD_VERSION_2;
2736
2737	return 0;
2738}
2739
#if !CONFIG_IS_ENABLED(DM_MMC)
/* board-specific MMC power initializations. */
__weak void board_mmc_power_init(void)
{
	/* Default no-op; boards override this to switch card power rails */
}
#endif
2746
/*
 * mmc_power_init() - resolve the card's power supplies.
 *
 * With DM_MMC + DM_REGULATOR, look up the vmmc-supply (card power) and
 * vqmmc-supply (I/O voltage) regulators; a missing supply is only
 * logged, not fatal. Without DM_MMC, call the legacy board power hook.
 *
 * Return: always 0
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if CONFIG_IS_ENABLED(DM_REGULATOR)
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &mmc->vmmc_supply);
	if (ret)
		pr_debug("%s: No vmmc supply\n", mmc->dev->name);

	ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
					  &mmc->vqmmc_supply);
	if (ret)
		pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}
2772
2773/*
2774 * put the host in the initial state:
2775 * - turn on Vdd (card power supply)
2776 * - configure the bus width and clock to minimal values
2777 */
2778static void mmc_set_initial_state(struct mmc *mmc)
2779{
2780	int err;
2781
2782	/* First try to set 3.3V. If it fails set to 1.8V */
2783	err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2784	if (err != 0)
2785		err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2786	if (err != 0)
2787		pr_warn("mmc: failed to set signal voltage\n");
2788
2789	mmc_select_mode(mmc, MMC_LEGACY);
2790	mmc_set_bus_width(mmc, 1);
2791	mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
2792}
2793
/*
 * mmc_power_on() - enable the card's vmmc supply, if one is bound.
 *
 * Without DM regulator support this is a no-op. -ENOSYS from the
 * regulator core (enable control not implemented) is tolerated.
 *
 * Return: 0 on success, regulator error code otherwise
 */
static int mmc_power_on(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vmmc_supply) {
		int ret = regulator_set_enable_if_allowed(mmc->vmmc_supply,
							  true);

		if (ret && ret != -ENOSYS) {
			printf("Error enabling VMMC supply : %d\n", ret);
			return ret;
		}
	}
#endif
	return 0;
}
2809
/*
 * mmc_power_off() - gate the bus clock and disable the vmmc supply.
 *
 * The clock is always disabled; the regulator is only touched when DM
 * regulator support is built in. -ENOSYS from the regulator core is
 * tolerated.
 *
 * Return: 0 on success, regulator error code otherwise
 */
static int mmc_power_off(struct mmc *mmc)
{
	mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vmmc_supply) {
		int ret = regulator_set_enable_if_allowed(mmc->vmmc_supply,
							  false);

		if (ret && ret != -ENOSYS) {
			pr_debug("Error disabling VMMC supply : %d\n", ret);
			return ret;
		}
	}
#endif
	return 0;
}
2826
/*
 * mmc_power_cycle() - power the card fully off and back on.
 *
 * Return: 0 on success, or the first error from power-off, the host
 * power-cycle hook, or power-on.
 */
static int mmc_power_cycle(struct mmc *mmc)
{
	int err;

	err = mmc_power_off(mmc);
	if (err)
		return err;

	err = mmc_host_power_cycle(mmc);
	if (err)
		return err;

	/*
	 * The SD spec recommends keeping power off for at least 1ms;
	 * wait 2ms for extra margin.
	 */
	udelay(2000);

	return mmc_power_on(mmc);
}
2846
/*
 * mmc_get_op_cond() - power the card and negotiate operating conditions.
 *
 * Initialises supplies, power cycles the card (disabling UHS modes if a
 * full cycle is impossible, since UHS errors would be unrecoverable),
 * resets it with CMD0 and probes: SD v2 check (CMD8), SD ACMD41, then
 * MMC CMD1 if the SD probe timed out. A failed UHS attempt triggers one
 * power cycle and a retry with UHS disabled.
 *
 * @mmc: card to probe
 * @quiet: suppress the "did not respond" error message
 * Return: 0 on success, -EOPNOTSUPP if no card answered, or another
 * negative errno
 */
int mmc_get_op_cond(struct mmc *mmc, bool quiet)
{
	bool uhs_en = supports_uhs(mmc->cfg->host_caps);
	int err;

	if (mmc->has_init)
		return 0;

	err = mmc_power_init(mmc);
	if (err)
		return err;

#ifdef CONFIG_MMC_QUIRKS
	mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
		      MMC_QUIRK_RETRY_SEND_CID |
		      MMC_QUIRK_RETRY_APP_CMD;
#endif

	err = mmc_power_cycle(mmc);
	if (err) {
		/*
		 * if power cycling is not supported, we should not try
		 * to use the UHS modes, because we wouldn't be able to
		 * recover from an error during the UHS initialization.
		 */
		pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
		uhs_en = false;
		mmc->host_caps &= ~UHS_CAPS;
		err = mmc_power_on(mmc);
	}
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/*
	 * Re-initialization is needed to clear old configuration for
	 * mmc rescan.
	 */
	err = mmc_reinit(mmc);
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
#endif
	if (err)
		return err;
	mmc->ddr_mode = 0;

retry:
	mmc_set_initial_state(mmc);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to user partition(0) at every CMD0 */
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/*
	 * Test for SD version 2. The result is deliberately discarded:
	 * SD 1.x cards fail CMD8 but are still probed by ACMD41 below.
	 */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc, uhs_en);
	if (err && uhs_en) {
		uhs_en = false;
		mmc_power_cycle(mmc);
		goto retry;
	}

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			if (!quiet)
				pr_err("Card did not respond to voltage select! : %d\n", err);
#endif
			return -EOPNOTSUPP;
		}
	}

	return err;
}
2932
/*
 * mmc_start_init() - begin card initialisation (non-blocking part).
 *
 * Builds the host capability mask (optionally narrowed to a single
 * user-requested speed mode), checks card presence, and negotiates the
 * operating conditions. On success the init is left "in progress" to be
 * finished by mmc_complete_init().
 *
 * Return: 0 on success, -ENOMEDIUM if no card is present, -EINVAL for
 * an unsupported user speed mode, or another negative errno
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	int err = 0;

	/*
	 * all hosts are capable of 1 bit bus-width and able to use the legacy
	 * timings.
	 */
	mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(MMC_LEGACY) |
			 MMC_MODE_1BIT;

	if (IS_ENABLED(CONFIG_MMC_SPEED_MODE_SET)) {
		if (mmc->user_speed_mode != MMC_MODES_END) {
			int i;
			/* set host caps */
			if (mmc->host_caps & MMC_CAP(mmc->user_speed_mode)) {
				/* Remove all existing speed capabilities */
				for (i = MMC_LEGACY; i < MMC_MODES_END; i++)
					mmc->host_caps &= ~MMC_CAP(i);
				mmc->host_caps |= (MMC_CAP(mmc->user_speed_mode)
						   | MMC_CAP(MMC_LEGACY) |
						   MMC_MODE_1BIT);
			} else {
				pr_err("bus_mode requested is not supported\n");
				return -EINVAL;
			}
		}
	}
#if CONFIG_IS_ENABLED(DM_MMC)
	mmc_deferred_probe(mmc);
#endif
#if !defined(CONFIG_MMC_BROKEN_CD)
	/* Card-detect: treat "not detected" as no medium */
	no_card = mmc_getcd(mmc) == 0;
#else
	no_card = 0;
#endif
#if !CONFIG_IS_ENABLED(DM_MMC)
	/* we pretend there's no card when init is NULL */
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	err = mmc_get_op_cond(mmc, false);

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}
2989
2990static int mmc_complete_init(struct mmc *mmc)
2991{
2992	int err = 0;
2993
2994	mmc->init_in_progress = 0;
2995	if (mmc->op_cond_pending)
2996		err = mmc_complete_op_cond(mmc);
2997
2998	if (!err)
2999		err = mmc_startup(mmc);
3000	if (err)
3001		mmc->has_init = 0;
3002	else
3003		mmc->has_init = 1;
3004	return err;
3005}
3006
/*
 * mmc_init() - fully initialise a card, unless already done.
 *
 * Runs mmc_start_init() (if not already in progress) followed by
 * mmc_complete_init(), logging the elapsed time on failure.
 *
 * Return: 0 on success, negative errno on failure
 */
int mmc_init(struct mmc *mmc)
{
	int err = 0;
	__maybe_unused ulong start;
#if CONFIG_IS_ENABLED(DM_MMC)
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);

	upriv->mmc = mmc;
#endif
	if (mmc->has_init)
		return 0;

	start = get_timer(0);

	if (!mmc->init_in_progress)
		err = mmc_start_init(mmc);

	if (!err)
		err = mmc_complete_init(mmc);
	if (err)
		pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));

	return err;
}
3031
3032int mmc_deinit(struct mmc *mmc)
3033{
3034	u32 caps_filtered;
3035
3036	if (!CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) &&
3037	    !CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) &&
3038	    !CONFIG_IS_ENABLED(MMC_HS400_SUPPORT))
3039		return 0;
3040
3041	if (!mmc->has_init)
3042		return 0;
3043
3044	if (IS_SD(mmc)) {
3045		caps_filtered = mmc->card_caps &
3046			~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
3047			  MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
3048			  MMC_CAP(UHS_SDR104));
3049
3050		return sd_select_mode_and_width(mmc, caps_filtered);
3051	} else {
3052		caps_filtered = mmc->card_caps &
3053			~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400) | MMC_CAP(MMC_HS_400_ES));
3054
3055		return mmc_select_mode_and_width(mmc, caps_filtered);
3056	}
3057}
3058
3059int mmc_set_dsr(struct mmc *mmc, u16 val)
3060{
3061	mmc->dsr = val;
3062	return 0;
3063}
3064
/* CPU-specific MMC initializations */
__weak int cpu_mmc_init(struct bd_info *bis)
{
	/* Default: no CPU-level controllers registered */
	return -1;
}
3070
/* board-specific MMC initializations. */
__weak int board_mmc_init(struct bd_info *bis)
{
	/* Default: no board-level controllers registered */
	return -1;
}
3076
/* Mark a device for early (pre-command) initialisation by mmc_do_preinit() */
void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}
3081
3082#if CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_probe() - bind and probe all MMC devices (driver-model variant).
 *
 * First walks sequence numbers to bind devices in order, then probes
 * every device in the uclass; probe failures are reported but do not
 * abort the loop.
 *
 * Return: 0 on success, or the error from uclass_get()
 */
static int mmc_probe(struct bd_info *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			pr_err("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
3111#else
/*
 * mmc_probe() - legacy (non-DM) probe: try the board hook first, and
 * fall back to the CPU hook only if the board registered nothing.
 */
static int mmc_probe(struct bd_info *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
3119#endif
3120
/*
 * mmc_initialize() - one-time subsystem setup.
 *
 * Probes all controllers, lists the devices (non-SPL) and kicks off
 * pre-initialisation for devices marked with mmc_set_preinit().
 * Subsequent calls are no-ops.
 *
 * Return: 0 on success, probe error otherwise
 */
int mmc_initialize(struct bd_info *bis)
{
	static int initialized = 0;
	int ret;
	if (initialized)	/* Avoid initializing mmc multiple times */
		return 0;
	initialized = 1;

#if !CONFIG_IS_ENABLED(BLK)
#if !CONFIG_IS_ENABLED(MMC_TINY)
	mmc_list_init();
#endif
#endif
	ret = mmc_probe(bis);
	if (ret)
		return ret;

#ifndef CONFIG_SPL_BUILD
	print_mmc_devices(',');
#endif

	mmc_do_preinit();
	return 0;
}
3145
3146#if CONFIG_IS_ENABLED(DM_MMC)
3147int mmc_init_device(int num)
3148{
3149	struct udevice *dev;
3150	struct mmc *m;
3151	int ret;
3152
3153	if (uclass_get_device_by_seq(UCLASS_MMC, num, &dev)) {
3154		ret = uclass_get_device(UCLASS_MMC, num, &dev);
3155		if (ret)
3156			return ret;
3157	}
3158
3159	m = mmc_get_mmc_dev(dev);
3160	if (!m)
3161		return 0;
3162
3163	/* Initialising user set speed mode */
3164	m->user_speed_mode = MMC_MODES_END;
3165
3166	if (m->preinit)
3167		mmc_start_init(m);
3168
3169	return 0;
3170}
3171#endif
3172
3173#ifdef CONFIG_CMD_BKOPS_ENABLE
3174int mmc_set_bkops_enable(struct mmc *mmc, bool autobkops, bool enable)
3175{
3176	int err;
3177	u32 bit = autobkops ? BIT(1) : BIT(0);
3178	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
3179
3180	err = mmc_send_ext_csd(mmc, ext_csd);
3181	if (err) {
3182		puts("Could not get ext_csd register values\n");
3183		return err;
3184	}
3185
3186	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
3187		puts("Background operations not supported on device\n");
3188		return -EMEDIUMTYPE;
3189	}
3190
3191	if (enable && (ext_csd[EXT_CSD_BKOPS_EN] & bit)) {
3192		puts("Background operations already enabled\n");
3193		return 0;
3194	}
3195
3196	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN,
3197			 enable ? bit : 0);
3198	if (err) {
3199		printf("Failed to %sable manual background operations\n",
3200		       enable ? "en" : "dis");
3201		return err;
3202	}
3203
3204	printf("%sabled %s background operations\n",
3205	       enable ? "En" : "Dis", autobkops ? "auto" : "manual");
3206
3207	return 0;
3208}
3209#endif
3210
/*
 * mmc_get_env_dev() - device number holding the U-Boot environment.
 * Weak default: CONFIG_SYS_MMC_ENV_DEV when defined, else device 0.
 */
__weak int mmc_get_env_dev(void)
{
#ifdef CONFIG_SYS_MMC_ENV_DEV
	return CONFIG_SYS_MMC_ENV_DEV;
#else
	return 0;
#endif
}
3219