1/*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2008 Alexander Motin <mav@FreeBSD.org>
5 * Copyright (c) 2017 Marius Strobl <marius@FreeBSD.org>
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <sys/param.h>
30#include <sys/systm.h>
31#include <sys/bus.h>
32#include <sys/callout.h>
33#include <sys/conf.h>
34#include <sys/kernel.h>
35#include <sys/kobj.h>
36#include <sys/libkern.h>
37#include <sys/lock.h>
38#include <sys/malloc.h>
39#include <sys/module.h>
40#include <sys/mutex.h>
41#include <sys/resource.h>
42#include <sys/rman.h>
43#include <sys/sysctl.h>
44#include <sys/taskqueue.h>
45#include <sys/sbuf.h>
46
47#include <machine/bus.h>
48#include <machine/resource.h>
49#include <machine/stdarg.h>
50
51#include <dev/mmc/bridge.h>
52#include <dev/mmc/mmcreg.h>
53#include <dev/mmc/mmcbrvar.h>
54
55#include <dev/sdhci/sdhci.h>
56
57#include <cam/cam.h>
58#include <cam/cam_ccb.h>
59#include <cam/cam_debug.h>
60#include <cam/cam_sim.h>
61#include <cam/cam_xpt_sim.h>
62
63#include "mmcbr_if.h"
64#include "sdhci_if.h"
65
66#include "opt_mmccam.h"
67
68SYSCTL_NODE(_hw, OID_AUTO, sdhci, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
69    "sdhci driver");
70
71static int sdhci_debug = 0;
72SYSCTL_INT(_hw_sdhci, OID_AUTO, debug, CTLFLAG_RWTUN, &sdhci_debug, 0,
73    "Debug level");
u_int sdhci_quirk_clear = 0;
SYSCTL_UINT(_hw_sdhci, OID_AUTO, quirk_clear, CTLFLAG_RWTUN, &sdhci_quirk_clear,
    0, "Mask of quirks to clear");
u_int sdhci_quirk_set = 0;
SYSCTL_UINT(_hw_sdhci, OID_AUTO, quirk_set, CTLFLAG_RWTUN, &sdhci_quirk_set, 0,
    "Mask of quirks to set");
80
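/* Register access wrappers dispatching to the bridge driver via sdhci_if. */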
81#define	RD1(slot, off)	SDHCI_READ_1((slot)->bus, (slot), (off))
82#define	RD2(slot, off)	SDHCI_READ_2((slot)->bus, (slot), (off))
83#define	RD4(slot, off)	SDHCI_READ_4((slot)->bus, (slot), (off))
84#define	RD_MULTI_4(slot, off, ptr, count)	\
85    SDHCI_READ_MULTI_4((slot)->bus, (slot), (off), (ptr), (count))
86
87#define	WR1(slot, off, val)	SDHCI_WRITE_1((slot)->bus, (slot), (off), (val))
88#define	WR2(slot, off, val)	SDHCI_WRITE_2((slot)->bus, (slot), (off), (val))
89#define	WR4(slot, off, val)	SDHCI_WRITE_4((slot)->bus, (slot), (off), (val))
90#define	WR_MULTI_4(slot, off, ptr, count)	\
91    SDHCI_WRITE_MULTI_4((slot)->bus, (slot), (off), (ptr), (count))
92
93static void sdhci_acmd_irq(struct sdhci_slot *slot, uint16_t acmd_err);
94static void sdhci_card_poll(void *arg);
95static void sdhci_card_task(void *arg, int pending);
96static void sdhci_cmd_irq(struct sdhci_slot *slot, uint32_t intmask);
97static void sdhci_data_irq(struct sdhci_slot *slot, uint32_t intmask);
98static int sdhci_exec_tuning(struct sdhci_slot *slot, bool reset);
99static void sdhci_handle_card_present_locked(struct sdhci_slot *slot,
100    bool is_present);
101static void sdhci_finish_command(struct sdhci_slot *slot);
102static void sdhci_init(struct sdhci_slot *slot);
103static void sdhci_read_block_pio(struct sdhci_slot *slot);
104static void sdhci_req_done(struct sdhci_slot *slot);
105static void sdhci_req_wakeup(struct mmc_request *req);
106static void sdhci_retune(void *arg);
107static void sdhci_set_clock(struct sdhci_slot *slot, uint32_t clock);
108static void sdhci_set_power(struct sdhci_slot *slot, u_char power);
109static void sdhci_set_transfer_mode(struct sdhci_slot *slot,
110   const struct mmc_data *data);
111static void sdhci_start(struct sdhci_slot *slot);
112static void sdhci_timeout(void *arg);
113static void sdhci_start_command(struct sdhci_slot *slot,
114   struct mmc_command *cmd);
115static void sdhci_start_data(struct sdhci_slot *slot,
116   const struct mmc_data *data);
117static void sdhci_write_block_pio(struct sdhci_slot *slot);
118static void sdhci_transfer_pio(struct sdhci_slot *slot);
119
120#ifdef MMCCAM
121/* CAM-related */
122static void sdhci_cam_action(struct cam_sim *sim, union ccb *ccb);
123static int sdhci_cam_get_possible_host_clock(const struct sdhci_slot *slot,
124    int proposed_clock);
125static void sdhci_cam_poll(struct cam_sim *sim);
126static int sdhci_cam_request(struct sdhci_slot *slot, union ccb *ccb);
127static int sdhci_cam_settran_settings(struct sdhci_slot *slot, union ccb *ccb);
128static int sdhci_cam_update_ios(struct sdhci_slot *slot);
129#endif
130
131/* helper routines */
132static int sdhci_dma_alloc(struct sdhci_slot *slot);
133static void sdhci_dma_free(struct sdhci_slot *slot);
134static void sdhci_dumpcaps(struct sdhci_slot *slot);
135static void sdhci_dumpcaps_buf(struct sdhci_slot *slot, struct sbuf *s);
136static void sdhci_dumpregs(struct sdhci_slot *slot);
137static void sdhci_dumpregs_buf(struct sdhci_slot *slot, struct sbuf *s);
static int sdhci_sysctl_dumpcaps(SYSCTL_HANDLER_ARGS);
static int sdhci_sysctl_dumpregs(SYSCTL_HANDLER_ARGS);
140static void sdhci_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs,
141    int error);
142static int slot_printf(const struct sdhci_slot *slot, const char * fmt, ...)
143    __printflike(2, 3);
144static int slot_sprintf(const struct sdhci_slot *slot, struct sbuf *s,
145    const char * fmt, ...) __printflike(3, 4);
146static uint32_t sdhci_tuning_intmask(const struct sdhci_slot *slot);
147
148#define	SDHCI_LOCK(_slot)		mtx_lock(&(_slot)->mtx)
149#define	SDHCI_UNLOCK(_slot)		mtx_unlock(&(_slot)->mtx)
150#define	SDHCI_LOCK_INIT(_slot) \
151	mtx_init(&_slot->mtx, "SD slot mtx", "sdhci", MTX_DEF)
152#define	SDHCI_LOCK_DESTROY(_slot)	mtx_destroy(&_slot->mtx);
153#define	SDHCI_ASSERT_LOCKED(_slot)	mtx_assert(&_slot->mtx, MA_OWNED);
154#define	SDHCI_ASSERT_UNLOCKED(_slot)	mtx_assert(&_slot->mtx, MA_NOTOWNED);
155
156#define	SDHCI_DEFAULT_MAX_FREQ	50
157
158#define	SDHCI_200_MAX_DIVIDER	256
159#define	SDHCI_300_MAX_DIVIDER	2046
160
161#define	SDHCI_CARD_PRESENT_TICKS	(hz / 5)
162#define	SDHCI_INSERT_DELAY_TICKS	(hz / 2)
163
164/*
165 * Broadcom BCM577xx Controller Constants
166 */
167/* Maximum divider supported by the default clock source. */
168#define	BCM577XX_DEFAULT_MAX_DIVIDER	256
169/* Alternative clock's base frequency. */
170#define	BCM577XX_ALT_CLOCK_BASE		63000000
171
172#define	BCM577XX_HOST_CONTROL		0x198
173#define	BCM577XX_CTRL_CLKSEL_MASK	0xFFFFCFFF
174#define	BCM577XX_CTRL_CLKSEL_SHIFT	12
175#define	BCM577XX_CTRL_CLKSEL_DEFAULT	0x0
176#define	BCM577XX_CTRL_CLKSEL_64MHZ	0x3
177
178static void
179sdhci_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
180{
181
182	if (error != 0) {
183		printf("getaddr: error %d\n", error);
184		return;
185	}
186	*(bus_addr_t *)arg = segs[0].ds_addr;
187}
188
189static int
190slot_printf(const struct sdhci_slot *slot, const char * fmt, ...)
191{
192	char buf[128];
193	va_list ap;
194	int retval;
195
196	/*
197	 * Make sure we print a single line all together rather than in two
198	 * halves to avoid console gibberish bingo.
199	 */
200	va_start(ap, fmt);
201	retval = vsnprintf(buf, sizeof(buf), fmt, ap);
202	va_end(ap);
203
204	retval += printf("%s-slot%d: %s",
205	    device_get_nameunit(slot->bus), slot->num, buf);
206	return (retval);
207}
208
209static int
210slot_sprintf(const struct sdhci_slot *slot, struct sbuf *s,
211    const char * fmt, ...)
212{
213	va_list ap;
214	int retval;
215
	retval = sbuf_printf(s, "%s-slot%d: ", device_get_nameunit(slot->bus),
	    slot->num);
217
218	va_start(ap, fmt);
219	retval += sbuf_vprintf(s, fmt, ap);
220	va_end(ap);
221
222	return (retval);
223}
224
225static void
226sdhci_dumpregs_buf(struct sdhci_slot *slot, struct sbuf *s)
227{
228	slot_sprintf(slot, s,  "============== REGISTER DUMP ==============\n");
229
230	slot_sprintf(slot, s,  "Sys addr: 0x%08x | Version:  0x%08x\n",
231	    RD4(slot, SDHCI_DMA_ADDRESS), RD2(slot, SDHCI_HOST_VERSION));
232	slot_sprintf(slot, s,  "Blk size: 0x%08x | Blk cnt:  0x%08x\n",
233	    RD2(slot, SDHCI_BLOCK_SIZE), RD2(slot, SDHCI_BLOCK_COUNT));
234	slot_sprintf(slot, s,  "Argument: 0x%08x | Trn mode: 0x%08x\n",
235	    RD4(slot, SDHCI_ARGUMENT), RD2(slot, SDHCI_TRANSFER_MODE));
236	slot_sprintf(slot, s,  "Present:  0x%08x | Host ctl: 0x%08x\n",
237	    RD4(slot, SDHCI_PRESENT_STATE), RD1(slot, SDHCI_HOST_CONTROL));
238	slot_sprintf(slot, s,  "Power:    0x%08x | Blk gap:  0x%08x\n",
239	    RD1(slot, SDHCI_POWER_CONTROL), RD1(slot, SDHCI_BLOCK_GAP_CONTROL));
240	slot_sprintf(slot, s,  "Wake-up:  0x%08x | Clock:    0x%08x\n",
241	    RD1(slot, SDHCI_WAKE_UP_CONTROL), RD2(slot, SDHCI_CLOCK_CONTROL));
242	slot_sprintf(slot, s,  "Timeout:  0x%08x | Int stat: 0x%08x\n",
243	    RD1(slot, SDHCI_TIMEOUT_CONTROL), RD4(slot, SDHCI_INT_STATUS));
244	slot_sprintf(slot, s,  "Int enab: 0x%08x | Sig enab: 0x%08x\n",
245	    RD4(slot, SDHCI_INT_ENABLE), RD4(slot, SDHCI_SIGNAL_ENABLE));
246	slot_sprintf(slot, s,  "AC12 err: 0x%08x | Host ctl2:0x%08x\n",
247	    RD2(slot, SDHCI_ACMD12_ERR), RD2(slot, SDHCI_HOST_CONTROL2));
248	slot_sprintf(slot, s,  "Caps:     0x%08x | Caps2:    0x%08x\n",
249	    RD4(slot, SDHCI_CAPABILITIES), RD4(slot, SDHCI_CAPABILITIES2));
250	slot_sprintf(slot, s,  "Max curr: 0x%08x | ADMA err: 0x%08x\n",
251	    RD4(slot, SDHCI_MAX_CURRENT), RD1(slot, SDHCI_ADMA_ERR));
252	slot_sprintf(slot, s,  "ADMA addr:0x%08x | Slot int: 0x%08x\n",
253	    RD4(slot, SDHCI_ADMA_ADDRESS_LO), RD2(slot, SDHCI_SLOT_INT_STATUS));
254
255	slot_sprintf(slot, s,  "===========================================\n");
256}
257
258static void
259sdhci_dumpregs(struct sdhci_slot *slot)
260{
261	struct sbuf s;
262
263	if (sbuf_new(&s, NULL, 1024, SBUF_NOWAIT | SBUF_AUTOEXTEND) == NULL) {
		slot_printf(slot,
		    "sdhci_dumpregs: Failed to allocate memory for sbuf\n");
265		return;
266	}
267
268	sbuf_set_drain(&s, &sbuf_printf_drain, NULL);
269	sdhci_dumpregs_buf(slot, &s);
270	sbuf_finish(&s);
271	sbuf_delete(&s);
272}
273
274static int
sdhci_sysctl_dumpregs(SYSCTL_HANDLER_ARGS)
276{
277	struct sdhci_slot *slot = arg1;
278	struct sbuf s;
279
280	sbuf_new_for_sysctl(&s, NULL, 1024, req);
281	sbuf_putc(&s, '\n');
282	sdhci_dumpregs_buf(slot, &s);
283	sbuf_finish(&s);
284	sbuf_delete(&s);
285
286	return (0);
287}
288
289static void
290sdhci_dumpcaps_buf(struct sdhci_slot *slot, struct sbuf *s)
291{
292	int host_caps = slot->host.caps;
293	int caps = slot->caps;
294
295	slot_sprintf(slot, s,
296	    "%uMHz%s %s VDD:%s%s%s VCCQ: 3.3V%s%s DRV: B%s%s%s %s %s\n",
297	    slot->max_clk / 1000000,
298	    (caps & SDHCI_CAN_DO_HISPD) ? " HS" : "",
299	    (host_caps & MMC_CAP_8_BIT_DATA) ? "8bits" :
300	    ((host_caps & MMC_CAP_4_BIT_DATA) ? "4bits" : "1bit"),
301	    (caps & SDHCI_CAN_VDD_330) ? " 3.3V" : "",
302	    (caps & SDHCI_CAN_VDD_300) ? " 3.0V" : "",
303	    ((caps & SDHCI_CAN_VDD_180) &&
304	    (slot->opt & SDHCI_SLOT_EMBEDDED)) ? " 1.8V" : "",
305	    (host_caps & MMC_CAP_SIGNALING_180) ? " 1.8V" : "",
306	    (host_caps & MMC_CAP_SIGNALING_120) ? " 1.2V" : "",
307	    (host_caps & MMC_CAP_DRIVER_TYPE_A) ? "A" : "",
308	    (host_caps & MMC_CAP_DRIVER_TYPE_C) ? "C" : "",
309	    (host_caps & MMC_CAP_DRIVER_TYPE_D) ? "D" : "",
310	    (slot->opt & SDHCI_HAVE_DMA) ? "DMA" : "PIO",
311	    (slot->opt & SDHCI_SLOT_EMBEDDED) ? "embedded" :
312	    (slot->opt & SDHCI_NON_REMOVABLE) ? "non-removable" :
313	    "removable");
314	if (host_caps & (MMC_CAP_MMC_DDR52 | MMC_CAP_MMC_HS200 |
315	    MMC_CAP_MMC_HS400 | MMC_CAP_MMC_ENH_STROBE))
316		slot_sprintf(slot, s, "eMMC:%s%s%s%s\n",
317		    (host_caps & MMC_CAP_MMC_DDR52) ? " DDR52" : "",
318		    (host_caps & MMC_CAP_MMC_HS200) ? " HS200" : "",
319		    (host_caps & MMC_CAP_MMC_HS400) ? " HS400" : "",
320		    ((host_caps &
321		    (MMC_CAP_MMC_HS400 | MMC_CAP_MMC_ENH_STROBE)) ==
322		    (MMC_CAP_MMC_HS400 | MMC_CAP_MMC_ENH_STROBE)) ?
323		    " HS400ES" : "");
324	if (host_caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
325	    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104))
326		slot_sprintf(slot, s, "UHS-I:%s%s%s%s%s\n",
327		    (host_caps & MMC_CAP_UHS_SDR12) ? " SDR12" : "",
328		    (host_caps & MMC_CAP_UHS_SDR25) ? " SDR25" : "",
329		    (host_caps & MMC_CAP_UHS_SDR50) ? " SDR50" : "",
330		    (host_caps & MMC_CAP_UHS_SDR104) ? " SDR104" : "",
331		    (host_caps & MMC_CAP_UHS_DDR50) ? " DDR50" : "");
332	if (slot->opt & SDHCI_TUNING_SUPPORTED)
333		slot_sprintf(slot, s,
334		    "Re-tuning count %d secs, mode %d\n",
335		    slot->retune_count, slot->retune_mode + 1);
336}
337
338static void
339sdhci_dumpcaps(struct sdhci_slot *slot)
340{
341	struct sbuf s;
342
343	if (sbuf_new(&s, NULL, 1024, SBUF_NOWAIT | SBUF_AUTOEXTEND) == NULL) {
		slot_printf(slot,
		    "sdhci_dumpcaps: Failed to allocate memory for sbuf\n");
345		return;
346	}
347
348	sbuf_set_drain(&s, &sbuf_printf_drain, NULL);
349	sdhci_dumpcaps_buf(slot, &s);
350	sbuf_finish(&s);
351	sbuf_delete(&s);
352}
353
354static int
sdhci_sysctl_dumpcaps(SYSCTL_HANDLER_ARGS)
356{
357	struct sdhci_slot *slot = arg1;
358	struct sbuf s;
359
360	sbuf_new_for_sysctl(&s, NULL, 1024, req);
361	sbuf_putc(&s, '\n');
362	sdhci_dumpcaps_buf(slot, &s);
363	sbuf_finish(&s);
364	sbuf_delete(&s);
365
366	return (0);
367}
368
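/*
 * Interrupt bits needed while tuning is enabled: the tuning-error interrupt
 * always, plus the re-tuning request interrupt for re-tuning modes 2 and 3.
 */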
369static uint32_t
370sdhci_tuning_intmask(const struct sdhci_slot *slot)
371{
372	uint32_t intmask;
373
374	intmask = 0;
375	if (slot->opt & SDHCI_TUNING_ENABLED) {
376		intmask |= SDHCI_INT_TUNEERR;
377		if (slot->retune_mode == SDHCI_RETUNE_MODE_2 ||
378		    slot->retune_mode == SDHCI_RETUNE_MODE_3)
379			intmask |= SDHCI_INT_RETUNE;
380	}
381	return (intmask);
382}
383
384static void
385sdhci_init(struct sdhci_slot *slot)
386{
387
388	SDHCI_RESET(slot->bus, slot, SDHCI_RESET_ALL);
389
390	/* Enable interrupts. */
391	slot->intmask = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
392	    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
393	    SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
394	    SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL |
395	    SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE |
396	    SDHCI_INT_ACMD12ERR;
397
398	if (!(slot->quirks & SDHCI_QUIRK_POLL_CARD_PRESENT) &&
399	    !(slot->opt & SDHCI_NON_REMOVABLE)) {
400		slot->intmask |= SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT;
401	}
402
403	WR4(slot, SDHCI_INT_ENABLE, slot->intmask);
404	WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask);
405}
406
407static void
408sdhci_set_clock(struct sdhci_slot *slot, uint32_t clock)
409{
410	uint32_t clk_base;
411	uint32_t clk_sel;
412	uint32_t res;
413	uint16_t clk;
414	uint16_t div;
415	int timeout;
416
417	if (clock == slot->clock)
418		return;
419	clock = SDHCI_SET_CLOCK(slot->bus, slot, clock);
420	slot->clock = clock;
421
422	/* Turn off the clock. */
423	clk = RD2(slot, SDHCI_CLOCK_CONTROL);
424	WR2(slot, SDHCI_CLOCK_CONTROL, clk & ~SDHCI_CLOCK_CARD_EN);
425	/* If no clock requested - leave it so. */
426	if (clock == 0)
427		return;
428
429	/* Determine the clock base frequency */
430	clk_base = slot->max_clk;
431	if (slot->quirks & SDHCI_QUIRK_BCM577XX_400KHZ_CLKSRC) {
432		clk_sel = RD2(slot, BCM577XX_HOST_CONTROL) &
433		    BCM577XX_CTRL_CLKSEL_MASK;
434
435		/*
436		 * Select clock source appropriate for the requested frequency.
437		 */
438		if ((clk_base / BCM577XX_DEFAULT_MAX_DIVIDER) > clock) {
439			clk_base = BCM577XX_ALT_CLOCK_BASE;
440			clk_sel |= (BCM577XX_CTRL_CLKSEL_64MHZ <<
441			    BCM577XX_CTRL_CLKSEL_SHIFT);
442		} else {
443			clk_sel |= (BCM577XX_CTRL_CLKSEL_DEFAULT <<
444			    BCM577XX_CTRL_CLKSEL_SHIFT);
445		}
446
447		WR2(slot, BCM577XX_HOST_CONTROL, clk_sel);
448	}
449
450	/* Recalculate timeout clock frequency based on the new sd clock. */
451	if (slot->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
452		slot->timeout_clk = slot->clock / 1000;
453
454	if (slot->version < SDHCI_SPEC_300) {
455		/* Looking for highest freq <= clock. */
456		res = clk_base;
457		for (div = 1; div < SDHCI_200_MAX_DIVIDER; div <<= 1) {
458			if (res <= clock)
459				break;
460			res >>= 1;
461		}
462		/* Divider 1:1 is 0x00, 2:1 is 0x01, 256:1 is 0x80 ... */
463		div >>= 1;
464	} else {
465		/* Version 3.0 divisors are multiples of two up to 1023 * 2 */
466		if (clock >= clk_base)
467			div = 0;
468		else {
469			for (div = 2; div < SDHCI_300_MAX_DIVIDER; div += 2) {
470				if ((clk_base / div) <= clock)
471					break;
472			}
473		}
474		div >>= 1;
475	}
476
477	if (bootverbose || sdhci_debug)
478		slot_printf(slot, "Divider %d for freq %d (base %d)\n",
479			div, clock, clk_base);
480
481	/* Now we have got divider, set it. */
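	/* The 3.0 divider is 10 bits, split across low and high fields. */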
482	clk = (div & SDHCI_DIVIDER_MASK) << SDHCI_DIVIDER_SHIFT;
483	clk |= ((div >> SDHCI_DIVIDER_MASK_LEN) & SDHCI_DIVIDER_HI_MASK)
484		<< SDHCI_DIVIDER_HI_SHIFT;
485
486	WR2(slot, SDHCI_CLOCK_CONTROL, clk);
487	/* Enable clock. */
488	clk |= SDHCI_CLOCK_INT_EN;
489	WR2(slot, SDHCI_CLOCK_CONTROL, clk);
	/* Wait up to 10 ms until it stabilizes. */
491	timeout = 10;
492	while (!((clk = RD2(slot, SDHCI_CLOCK_CONTROL))
493		& SDHCI_CLOCK_INT_STABLE)) {
494		if (timeout == 0) {
495			slot_printf(slot,
496			    "Internal clock never stabilised.\n");
497			sdhci_dumpregs(slot);
498			return;
499		}
500		timeout--;
501		DELAY(1000);
502	}
503	/* Pass clock signal to the bus. */
504	clk |= SDHCI_CLOCK_CARD_EN;
505	WR2(slot, SDHCI_CLOCK_CONTROL, clk);
506}
507
508static void
509sdhci_set_power(struct sdhci_slot *slot, u_char power)
510{
511	int i;
512	uint8_t pwr;
513
514	if (slot->power == power)
515		return;
516
517	slot->power = power;
518
519	/* Turn off the power. */
520	pwr = 0;
521	WR1(slot, SDHCI_POWER_CONTROL, pwr);
522	/* If power down requested - leave it so. */
523	if (power == 0)
524		return;
525	/* Set voltage. */
526	switch (1 << power) {
527	case MMC_OCR_LOW_VOLTAGE:
528		pwr |= SDHCI_POWER_180;
529		break;
530	case MMC_OCR_290_300:
531	case MMC_OCR_300_310:
532		pwr |= SDHCI_POWER_300;
533		break;
534	case MMC_OCR_320_330:
535	case MMC_OCR_330_340:
536		pwr |= SDHCI_POWER_330;
537		break;
538	}
539	WR1(slot, SDHCI_POWER_CONTROL, pwr);
540	/*
541	 * Turn on VDD1 power.  Note that at least some Intel controllers can
542	 * fail to enable bus power on the first try after transiting from D3
543	 * to D0, so we give them up to 2 ms.
544	 */
545	pwr |= SDHCI_POWER_ON;
546	for (i = 0; i < 20; i++) {
547		WR1(slot, SDHCI_POWER_CONTROL, pwr);
548		if (RD1(slot, SDHCI_POWER_CONTROL) & SDHCI_POWER_ON)
549			break;
550		DELAY(100);
551	}
552	if (!(RD1(slot, SDHCI_POWER_CONTROL) & SDHCI_POWER_ON))
553		slot_printf(slot, "Bus power failed to enable\n");
554
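	/*
	 * Some Intel controllers also want a pulse on a reserved bit of the
	 * power control register after power-up (see
	 * SDHCI_QUIRK_INTEL_POWER_UP_RESET).
	 */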
555	if (slot->quirks & SDHCI_QUIRK_INTEL_POWER_UP_RESET) {
556		WR1(slot, SDHCI_POWER_CONTROL, pwr | 0x10);
557		DELAY(10);
558		WR1(slot, SDHCI_POWER_CONTROL, pwr);
559		DELAY(300);
560	}
561}
562
563static void
564sdhci_read_block_pio(struct sdhci_slot *slot)
565{
566	uint32_t data;
567	char *buffer;
568	size_t left;
569
570	buffer = slot->curcmd->data->data;
571	buffer += slot->offset;
572	/* Transfer one block at a time. */
573#ifdef MMCCAM
574	if (slot->curcmd->data->flags & MMC_DATA_BLOCK_SIZE)
575		left = min(slot->curcmd->data->block_size,
576		    slot->curcmd->data->len - slot->offset);
577	else
578#endif
579		left = min(512, slot->curcmd->data->len - slot->offset);
580	slot->offset += left;
581
582	/* If we are too fast, broken controllers return zeroes. */
583	if (slot->quirks & SDHCI_QUIRK_BROKEN_TIMINGS)
584		DELAY(10);
585	/* Handle unaligned and aligned buffer cases. */
586	if ((intptr_t)buffer & 3) {
587		while (left > 3) {
588			data = RD4(slot, SDHCI_BUFFER);
589			buffer[0] = data;
590			buffer[1] = (data >> 8);
591			buffer[2] = (data >> 16);
592			buffer[3] = (data >> 24);
593			buffer += 4;
594			left -= 4;
595		}
596	} else {
597		RD_MULTI_4(slot, SDHCI_BUFFER,
598		    (uint32_t *)buffer, left >> 2);
599		left &= 3;
600	}
601	/* Handle uneven size case. */
602	if (left > 0) {
603		data = RD4(slot, SDHCI_BUFFER);
604		while (left > 0) {
605			*(buffer++) = data;
606			data >>= 8;
607			left--;
608		}
609	}
610}
611
612static void
613sdhci_write_block_pio(struct sdhci_slot *slot)
614{
615	uint32_t data = 0;
616	char *buffer;
617	size_t left;
618
619	buffer = slot->curcmd->data->data;
620	buffer += slot->offset;
621	/* Transfer one block at a time. */
622#ifdef MMCCAM
623	if (slot->curcmd->data->flags & MMC_DATA_BLOCK_SIZE) {
624		left = min(slot->curcmd->data->block_size,
625		    slot->curcmd->data->len - slot->offset);
626	} else
627#endif
628		left = min(512, slot->curcmd->data->len - slot->offset);
629	slot->offset += left;
630
631	/* Handle unaligned and aligned buffer cases. */
632	if ((intptr_t)buffer & 3) {
633		while (left > 3) {
634			data = buffer[0] +
635			    (buffer[1] << 8) +
636			    (buffer[2] << 16) +
637			    (buffer[3] << 24);
638			left -= 4;
639			buffer += 4;
640			WR4(slot, SDHCI_BUFFER, data);
641		}
642	} else {
643		WR_MULTI_4(slot, SDHCI_BUFFER,
644		    (uint32_t *)buffer, left >> 2);
645		left &= 3;
646	}
647	/* Handle uneven size case. */
648	if (left > 0) {
649		while (left > 0) {
650			data <<= 8;
651			data += *(buffer++);
652			left--;
653		}
654		WR4(slot, SDHCI_BUFFER, data);
655	}
656}
657
658static void
659sdhci_transfer_pio(struct sdhci_slot *slot)
660{
661
662	/* Read as many blocks as possible. */
663	if (slot->curcmd->data->flags & MMC_DATA_READ) {
664		while (RD4(slot, SDHCI_PRESENT_STATE) &
665		    SDHCI_DATA_AVAILABLE) {
666			sdhci_read_block_pio(slot);
667			if (slot->offset >= slot->curcmd->data->len)
668				break;
669		}
670	} else {
671		while (RD4(slot, SDHCI_PRESENT_STATE) &
672		    SDHCI_SPACE_AVAILABLE) {
673			sdhci_write_block_pio(slot);
674			if (slot->offset >= slot->curcmd->data->len)
675				break;
676		}
677	}
678}
679
680static void
681sdhci_card_task(void *arg, int pending __unused)
682{
683	struct sdhci_slot *slot = arg;
684#ifndef MMCCAM
685	device_t d;
686#endif
687
688	SDHCI_LOCK(slot);
689	if (SDHCI_GET_CARD_PRESENT(slot->bus, slot)) {
690#ifdef MMCCAM
691		if (slot->card_present == 0) {
692#else
693		if (slot->dev == NULL) {
694#endif
695			/* If card is present - attach mmc bus. */
696			if (bootverbose || sdhci_debug)
697				slot_printf(slot, "Card inserted\n");
698#ifdef MMCCAM
699			slot->card_present = 1;
700			mmccam_start_discovery(slot->sim);
701			SDHCI_UNLOCK(slot);
702#else
703			d = slot->dev = device_add_child(slot->bus, "mmc", -1);
704			SDHCI_UNLOCK(slot);
705			if (d) {
706				device_set_ivars(d, slot);
707				(void)device_probe_and_attach(d);
708			}
709#endif
710		} else
711			SDHCI_UNLOCK(slot);
712	} else {
713#ifdef MMCCAM
714		if (slot->card_present == 1) {
715#else
716		if (slot->dev != NULL) {
717			d = slot->dev;
718#endif
719			/* If no card present - detach mmc bus. */
720			if (bootverbose || sdhci_debug)
721				slot_printf(slot, "Card removed\n");
722			slot->dev = NULL;
723#ifdef MMCCAM
724			slot->card_present = 0;
725			mmccam_start_discovery(slot->sim);
726			SDHCI_UNLOCK(slot);
727#else
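			/* Tear down tuning state before detaching the child. */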
728			slot->intmask &= ~sdhci_tuning_intmask(slot);
729			WR4(slot, SDHCI_INT_ENABLE, slot->intmask);
730			WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask);
731			slot->opt &= ~SDHCI_TUNING_ENABLED;
732			SDHCI_UNLOCK(slot);
733			callout_drain(&slot->retune_callout);
734			device_delete_child(slot->bus, d);
735#endif
736		} else
737			SDHCI_UNLOCK(slot);
738	}
739}
740
741static void
742sdhci_handle_card_present_locked(struct sdhci_slot *slot, bool is_present)
743{
744	bool was_present;
745
746	/*
747	 * If there was no card and now there is one, schedule the task to
748	 * create the child device after a short delay.  The delay is to
749	 * debounce the card insert (sometimes the card detect pin stabilizes
750	 * before the other pins have made good contact).
751	 *
752	 * If there was a card present and now it's gone, immediately schedule
753	 * the task to delete the child device.  No debouncing -- gone is gone,
754	 * because once power is removed, a full card re-init is needed, and
755	 * that happens by deleting and recreating the child device.
756	 */
757#ifdef MMCCAM
758	was_present = slot->card_present;
759#else
760	was_present = slot->dev != NULL;
761#endif
762	if (!was_present && is_present) {
763		taskqueue_enqueue_timeout(taskqueue_swi_giant,
764		    &slot->card_delayed_task, -SDHCI_INSERT_DELAY_TICKS);
765	} else if (was_present && !is_present) {
766		taskqueue_enqueue(taskqueue_swi_giant, &slot->card_task);
767	}
768}
769
770void
771sdhci_handle_card_present(struct sdhci_slot *slot, bool is_present)
772{
773
774	SDHCI_LOCK(slot);
775	sdhci_handle_card_present_locked(slot, is_present);
776	SDHCI_UNLOCK(slot);
777}
778
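/*
 * Poll card presence for controllers that cannot generate card-detect
 * interrupts (SDHCI_QUIRK_POLL_CARD_PRESENT); the callout reschedules itself.
 */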
779static void
780sdhci_card_poll(void *arg)
781{
782	struct sdhci_slot *slot = arg;
783
784	sdhci_handle_card_present(slot,
785	    SDHCI_GET_CARD_PRESENT(slot->bus, slot));
786	callout_reset(&slot->card_poll_callout, SDHCI_CARD_PRESENT_TICKS,
787	    sdhci_card_poll, slot);
788}
789
790static int
791sdhci_dma_alloc(struct sdhci_slot *slot)
792{
793	int err;
794
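	/*
	 * Pick the smallest SDMA boundary (and thus bounce buffer size) that
	 * still covers maxphys, capped at 512K, unless the controller's
	 * boundary handling is known to be broken.
	 */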
795	if (!(slot->quirks & SDHCI_QUIRK_BROKEN_SDMA_BOUNDARY)) {
796		if (maxphys <= 1024 * 4)
797			slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_4K;
798		else if (maxphys <= 1024 * 8)
799			slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_8K;
800		else if (maxphys <= 1024 * 16)
801			slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_16K;
802		else if (maxphys <= 1024 * 32)
803			slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_32K;
804		else if (maxphys <= 1024 * 64)
805			slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_64K;
806		else if (maxphys <= 1024 * 128)
807			slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_128K;
808		else if (maxphys <= 1024 * 256)
809			slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_256K;
810		else
811			slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_512K;
812	}
813	slot->sdma_bbufsz = SDHCI_SDMA_BNDRY_TO_BBUFSZ(slot->sdma_boundary);
814
815	/*
816	 * Allocate the DMA tag for an SDMA bounce buffer.
817	 * Note that the SDHCI specification doesn't state any alignment
818	 * constraint for the SDMA system address.  However, controllers
819	 * typically ignore the SDMA boundary bits in SDHCI_DMA_ADDRESS when
820	 * forming the actual address of data, requiring the SDMA buffer to
821	 * be aligned to the SDMA boundary.
822	 */
823	err = bus_dma_tag_create(bus_get_dma_tag(slot->bus), slot->sdma_bbufsz,
824	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
825	    slot->sdma_bbufsz, 1, slot->sdma_bbufsz, BUS_DMA_ALLOCNOW,
826	    NULL, NULL, &slot->dmatag);
827	if (err != 0) {
828		slot_printf(slot, "Can't create DMA tag for SDMA\n");
829		return (err);
830	}
831	/* Allocate DMA memory for the SDMA bounce buffer. */
832	err = bus_dmamem_alloc(slot->dmatag, (void **)&slot->dmamem,
833	    BUS_DMA_NOWAIT, &slot->dmamap);
834	if (err != 0) {
835		slot_printf(slot, "Can't alloc DMA memory for SDMA\n");
836		bus_dma_tag_destroy(slot->dmatag);
837		return (err);
838	}
839	/* Map the memory of the SDMA bounce buffer. */
840	err = bus_dmamap_load(slot->dmatag, slot->dmamap,
841	    (void *)slot->dmamem, slot->sdma_bbufsz, sdhci_getaddr,
842	    &slot->paddr, 0);
843	if (err != 0 || slot->paddr == 0) {
844		slot_printf(slot, "Can't load DMA memory for SDMA\n");
845		bus_dmamem_free(slot->dmatag, slot->dmamem, slot->dmamap);
846		bus_dma_tag_destroy(slot->dmatag);
847		if (err)
848			return (err);
849		else
850			return (EFAULT);
851	}
852
853	return (0);
854}
855
856static void
857sdhci_dma_free(struct sdhci_slot *slot)
858{
859
860	bus_dmamap_unload(slot->dmatag, slot->dmamap);
861	bus_dmamem_free(slot->dmatag, slot->dmamem, slot->dmamap);
862	bus_dma_tag_destroy(slot->dmatag);
863}
864
865int
866sdhci_init_slot(device_t dev, struct sdhci_slot *slot, int num)
867{
868	kobjop_desc_t kobj_desc;
869	kobj_method_t *kobj_method;
870	uint32_t caps, caps2, freq, host_caps;
871	int err;
872	char node_name[8];
873	struct sysctl_oid *node_oid;
874
875	SDHCI_LOCK_INIT(slot);
876
877	slot->num = num;
878	slot->bus = dev;
879
880	slot->version = (RD2(slot, SDHCI_HOST_VERSION)
881		>> SDHCI_SPEC_VER_SHIFT) & SDHCI_SPEC_VER_MASK;
882	if (slot->quirks & SDHCI_QUIRK_MISSING_CAPS) {
883		caps = slot->caps;
884		caps2 = slot->caps2;
885	} else {
886		caps = RD4(slot, SDHCI_CAPABILITIES);
887		if (slot->version >= SDHCI_SPEC_300)
888			caps2 = RD4(slot, SDHCI_CAPABILITIES2);
889		else
890			caps2 = 0;
891	}
892	if (slot->version >= SDHCI_SPEC_300) {
893		if ((caps & SDHCI_SLOTTYPE_MASK) != SDHCI_SLOTTYPE_REMOVABLE &&
894		    (caps & SDHCI_SLOTTYPE_MASK) != SDHCI_SLOTTYPE_EMBEDDED) {
895			slot_printf(slot,
896			    "Driver doesn't support shared bus slots\n");
897			SDHCI_LOCK_DESTROY(slot);
898			return (ENXIO);
899		} else if ((caps & SDHCI_SLOTTYPE_MASK) ==
900		    SDHCI_SLOTTYPE_EMBEDDED) {
901			slot->opt |= SDHCI_SLOT_EMBEDDED | SDHCI_NON_REMOVABLE;
902		}
903	}
904	/* Calculate base clock frequency. */
905	if (slot->version >= SDHCI_SPEC_300)
906		freq = (caps & SDHCI_CLOCK_V3_BASE_MASK) >>
907		    SDHCI_CLOCK_BASE_SHIFT;
908	else
909		freq = (caps & SDHCI_CLOCK_BASE_MASK) >>
910		    SDHCI_CLOCK_BASE_SHIFT;
911	if (freq != 0)
912		slot->max_clk = freq * 1000000;
	/*
	 * If the frequency wasn't in the capabilities and the hardware driver
	 * hasn't already set max_clk, we're probably not going to work right
	 * with a guessed default, so complain about it.
	 */
918	if (slot->max_clk == 0) {
919		slot->max_clk = SDHCI_DEFAULT_MAX_FREQ * 1000000;
920		slot_printf(slot, "Hardware doesn't specify base clock "
921		    "frequency, using %dMHz as default.\n",
922		    SDHCI_DEFAULT_MAX_FREQ);
923	}
924	/* Calculate/set timeout clock frequency. */
925	if (slot->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) {
926		slot->timeout_clk = slot->max_clk / 1000;
927	} else if (slot->quirks & SDHCI_QUIRK_DATA_TIMEOUT_1MHZ) {
928		slot->timeout_clk = 1000;
929	} else {
930		slot->timeout_clk = (caps & SDHCI_TIMEOUT_CLK_MASK) >>
931		    SDHCI_TIMEOUT_CLK_SHIFT;
932		if (caps & SDHCI_TIMEOUT_CLK_UNIT)
933			slot->timeout_clk *= 1000;
934	}
	/*
	 * If the frequency wasn't in the capabilities and the hardware driver
	 * hasn't already set timeout_clk, we'll probably work okay using the
	 * maximum timeout, but still mention it.
	 */
940	if (slot->timeout_clk == 0) {
941		slot_printf(slot, "Hardware doesn't specify timeout clock "
942		    "frequency, setting BROKEN_TIMEOUT quirk.\n");
943		slot->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
944	}
945
946	slot->host.f_min = SDHCI_MIN_FREQ(slot->bus, slot);
947	slot->host.f_max = slot->max_clk;
948	slot->host.host_ocr = 0;
949	if (caps & SDHCI_CAN_VDD_330)
950	    slot->host.host_ocr |= MMC_OCR_320_330 | MMC_OCR_330_340;
951	if (caps & SDHCI_CAN_VDD_300)
952	    slot->host.host_ocr |= MMC_OCR_290_300 | MMC_OCR_300_310;
953	/*
954	 * 1.8V VDD is not supposed to be used for removable cards.  Hardware
955	 * prior to v3.0 had no way to indicate embedded slots, but did
956	 * sometimes support 1.8v for non-removable devices.
957	 */
958	if ((caps & SDHCI_CAN_VDD_180) && (slot->version < SDHCI_SPEC_300 ||
959	    (slot->opt & SDHCI_SLOT_EMBEDDED)))
960	    slot->host.host_ocr |= MMC_OCR_LOW_VOLTAGE;
961	if (slot->host.host_ocr == 0) {
		slot_printf(slot, "Hardware doesn't report any "
		    "supported voltages.\n");
964	}
965
966	host_caps = slot->host.caps;
967	host_caps |= MMC_CAP_4_BIT_DATA;
968	if (caps & SDHCI_CAN_DO_8BITBUS)
969		host_caps |= MMC_CAP_8_BIT_DATA;
970	if (caps & SDHCI_CAN_DO_HISPD)
971		host_caps |= MMC_CAP_HSPEED;
972	if (slot->quirks & SDHCI_QUIRK_BOOT_NOACC)
973		host_caps |= MMC_CAP_BOOT_NOACC;
974	if (slot->quirks & SDHCI_QUIRK_WAIT_WHILE_BUSY)
975		host_caps |= MMC_CAP_WAIT_WHILE_BUSY;
976
977	/* Determine supported UHS-I and eMMC modes. */
978	if (caps2 & (SDHCI_CAN_SDR50 | SDHCI_CAN_SDR104 | SDHCI_CAN_DDR50))
979		host_caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
980	if (caps2 & SDHCI_CAN_SDR104) {
981		host_caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
982		if (!(slot->quirks & SDHCI_QUIRK_BROKEN_MMC_HS200))
983			host_caps |= MMC_CAP_MMC_HS200;
984	} else if (caps2 & SDHCI_CAN_SDR50)
985		host_caps |= MMC_CAP_UHS_SDR50;
986	if (caps2 & SDHCI_CAN_DDR50 &&
987	    !(slot->quirks & SDHCI_QUIRK_BROKEN_UHS_DDR50))
988		host_caps |= MMC_CAP_UHS_DDR50;
989	if (slot->quirks & SDHCI_QUIRK_MMC_DDR52)
990		host_caps |= MMC_CAP_MMC_DDR52;
991	if (slot->quirks & SDHCI_QUIRK_CAPS_BIT63_FOR_MMC_HS400 &&
992	    caps2 & SDHCI_CAN_MMC_HS400)
993		host_caps |= MMC_CAP_MMC_HS400;
994	if (slot->quirks & SDHCI_QUIRK_MMC_HS400_IF_CAN_SDR104 &&
995	    caps2 & SDHCI_CAN_SDR104)
996		host_caps |= MMC_CAP_MMC_HS400;
997
998	/*
999	 * Disable UHS-I and eMMC modes if the set_uhs_timing method is the
1000	 * default NULL implementation.
1001	 */
1002	kobj_desc = &sdhci_set_uhs_timing_desc;
1003	kobj_method = kobj_lookup_method(((kobj_t)dev)->ops->cls, NULL,
1004	    kobj_desc);
1005	if (kobj_method == &kobj_desc->deflt)
1006		host_caps &= ~(MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
1007		    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 | MMC_CAP_UHS_SDR104 |
1008		    MMC_CAP_MMC_DDR52 | MMC_CAP_MMC_HS200 | MMC_CAP_MMC_HS400);
1009
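/* UHS-I and eMMC modes whose use depends on tuning support. */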
1010#define	SDHCI_CAP_MODES_TUNING(caps2)					\
1011    (((caps2) & SDHCI_TUNE_SDR50 ? MMC_CAP_UHS_SDR50 : 0) |		\
1012    MMC_CAP_UHS_DDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_MMC_HS200 |	\
1013    MMC_CAP_MMC_HS400)
1014
1015	/*
1016	 * Disable UHS-I and eMMC modes that require (re-)tuning if either
1017	 * the tune or re-tune method is the default NULL implementation.
1018	 */
1019	kobj_desc = &mmcbr_tune_desc;
1020	kobj_method = kobj_lookup_method(((kobj_t)dev)->ops->cls, NULL,
1021	    kobj_desc);
1022	if (kobj_method == &kobj_desc->deflt)
1023		goto no_tuning;
1024	kobj_desc = &mmcbr_retune_desc;
1025	kobj_method = kobj_lookup_method(((kobj_t)dev)->ops->cls, NULL,
1026	    kobj_desc);
1027	if (kobj_method == &kobj_desc->deflt) {
1028no_tuning:
1029		host_caps &= ~(SDHCI_CAP_MODES_TUNING(caps2));
1030	}
1031
1032	/* Allocate tuning structures and determine tuning parameters. */
1033	if (host_caps & SDHCI_CAP_MODES_TUNING(caps2)) {
1034		slot->opt |= SDHCI_TUNING_SUPPORTED;
1035		slot->tune_req = malloc(sizeof(*slot->tune_req), M_DEVBUF,
1036		    M_WAITOK);
1037		slot->tune_cmd = malloc(sizeof(*slot->tune_cmd), M_DEVBUF,
1038		    M_WAITOK);
1039		slot->tune_data = malloc(sizeof(*slot->tune_data), M_DEVBUF,
1040		    M_WAITOK);
1041		if (caps2 & SDHCI_TUNE_SDR50)
1042			slot->opt |= SDHCI_SDR50_NEEDS_TUNING;
1043		slot->retune_mode = (caps2 & SDHCI_RETUNE_MODES_MASK) >>
1044		    SDHCI_RETUNE_MODES_SHIFT;
1045		if (slot->retune_mode == SDHCI_RETUNE_MODE_1) {
1046			slot->retune_count = (caps2 & SDHCI_RETUNE_CNT_MASK) >>
1047			    SDHCI_RETUNE_CNT_SHIFT;
1048			if (slot->retune_count > 0xb) {
1049				slot_printf(slot, "Unknown re-tuning count "
1050				    "%x, using 1 sec\n", slot->retune_count);
1051				slot->retune_count = 1;
1052			} else if (slot->retune_count != 0)
1053				slot->retune_count =
1054				    1 << (slot->retune_count - 1);
1055		}
1056	}
1057
1058#undef SDHCI_CAP_MODES_TUNING
1059
1060	/* Determine supported VCCQ signaling levels. */
1061	host_caps |= MMC_CAP_SIGNALING_330;
1062	if (host_caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
1063	    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 | MMC_CAP_UHS_SDR104 |
1064	    MMC_CAP_MMC_DDR52_180 | MMC_CAP_MMC_HS200_180 |
1065	    MMC_CAP_MMC_HS400_180))
1066		host_caps |= MMC_CAP_SIGNALING_120 | MMC_CAP_SIGNALING_180;
1067
1068	/*
1069	 * Disable 1.2 V and 1.8 V signaling if the switch_vccq method is the
1070	 * default NULL implementation.  Disable 1.2 V support if it's the
1071	 * generic SDHCI implementation.
1072	 */
1073	kobj_desc = &mmcbr_switch_vccq_desc;
1074	kobj_method = kobj_lookup_method(((kobj_t)dev)->ops->cls, NULL,
1075	    kobj_desc);
1076	if (kobj_method == &kobj_desc->deflt)
1077		host_caps &= ~(MMC_CAP_SIGNALING_120 | MMC_CAP_SIGNALING_180);
1078	else if (kobj_method->func == (kobjop_t)sdhci_generic_switch_vccq)
1079		host_caps &= ~MMC_CAP_SIGNALING_120;
1080
1081	/* Determine supported driver types (type B is always mandatory). */
1082	if (caps2 & SDHCI_CAN_DRIVE_TYPE_A)
1083		host_caps |= MMC_CAP_DRIVER_TYPE_A;
1084	if (caps2 & SDHCI_CAN_DRIVE_TYPE_C)
1085		host_caps |= MMC_CAP_DRIVER_TYPE_C;
1086	if (caps2 & SDHCI_CAN_DRIVE_TYPE_D)
1087		host_caps |= MMC_CAP_DRIVER_TYPE_D;
1088	slot->host.caps = host_caps;
1089
1090	/* Decide if we have usable DMA. */
1091	if (caps & SDHCI_CAN_DO_DMA)
1092		slot->opt |= SDHCI_HAVE_DMA;
1093
1094	if (slot->quirks & SDHCI_QUIRK_BROKEN_DMA)
1095		slot->opt &= ~SDHCI_HAVE_DMA;
1096	if (slot->quirks & SDHCI_QUIRK_FORCE_DMA)
1097		slot->opt |= SDHCI_HAVE_DMA;
1098	if (slot->quirks & SDHCI_QUIRK_ALL_SLOTS_NON_REMOVABLE)
1099		slot->opt |= SDHCI_NON_REMOVABLE;
1100
1101	/*
1102	 * Use platform-provided transfer backend
1103	 * with PIO as a fallback mechanism
1104	 */
1105	if (slot->opt & SDHCI_PLATFORM_TRANSFER)
1106		slot->opt &= ~SDHCI_HAVE_DMA;
1107
1108	if (slot->opt & SDHCI_HAVE_DMA) {
1109		err = sdhci_dma_alloc(slot);
1110		if (err != 0) {
1111			if (slot->opt & SDHCI_TUNING_SUPPORTED) {
1112				free(slot->tune_req, M_DEVBUF);
1113				free(slot->tune_cmd, M_DEVBUF);
1114				free(slot->tune_data, M_DEVBUF);
1115			}
1116			SDHCI_LOCK_DESTROY(slot);
1117			return (err);
1118		}
1119	}
1120
1121	if (bootverbose || sdhci_debug) {
1122		sdhci_dumpcaps(slot);
1123		sdhci_dumpregs(slot);
1124	}
1125
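	/* Default transfer timeout in seconds, adjustable via sysctl below. */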
1126	slot->timeout = 10;
1127	SYSCTL_ADD_INT(device_get_sysctl_ctx(slot->bus),
1128	    SYSCTL_CHILDREN(device_get_sysctl_tree(slot->bus)), OID_AUTO,
1129	    "timeout", CTLFLAG_RWTUN, &slot->timeout, 0,
1130	    "Maximum timeout for SDHCI transfers (in secs)");
1131	TASK_INIT(&slot->card_task, 0, sdhci_card_task, slot);
1132	TIMEOUT_TASK_INIT(taskqueue_swi_giant, &slot->card_delayed_task, 0,
1133		sdhci_card_task, slot);
1134	callout_init(&slot->card_poll_callout, 1);
1135	callout_init_mtx(&slot->timeout_callout, &slot->mtx, 0);
1136	callout_init_mtx(&slot->retune_callout, &slot->mtx, 0);
1137
1138	if ((slot->quirks & SDHCI_QUIRK_POLL_CARD_PRESENT) &&
1139	    !(slot->opt & SDHCI_NON_REMOVABLE)) {
1140		callout_reset(&slot->card_poll_callout,
1141		    SDHCI_CARD_PRESENT_TICKS, sdhci_card_poll, slot);
1142	}
1143
1144	sdhci_init(slot);
1145
1146	snprintf(node_name, sizeof(node_name), "slot%d", slot->num);
1147
1148	node_oid = SYSCTL_ADD_NODE(device_get_sysctl_ctx(dev),
1149	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1150	    OID_AUTO, node_name, CTLFLAG_RW, 0, "slot specific node");
1151
1152	node_oid = SYSCTL_ADD_NODE(device_get_sysctl_ctx(dev),
1153	    SYSCTL_CHILDREN(node_oid), OID_AUTO, "debug", CTLFLAG_RW, 0,
1154	    "Debugging node");
1155
1156	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(node_oid),
1157	    OID_AUTO, "dumpregs", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    slot, 0, &sdhci_sysctl_dumpregs,
1159	    "A", "Dump SDHCI registers");
1160
1161	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(node_oid),
1162	    OID_AUTO, "dumpcaps", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    slot, 0, &sdhci_sysctl_dumpcaps,
	    "A", "Dump SDHCI capabilities");
1165
1166	return (0);
1167}
1168
1169#ifndef MMCCAM
1170void
1171sdhci_start_slot(struct sdhci_slot *slot)
1172{
1173
1174	sdhci_card_task(slot, 0);
1175}
1176#endif
1177
1178int
1179sdhci_cleanup_slot(struct sdhci_slot *slot)
1180{
1181	device_t d;
1182
1183	callout_drain(&slot->timeout_callout);
1184	callout_drain(&slot->card_poll_callout);
1185	callout_drain(&slot->retune_callout);
1186	taskqueue_drain(taskqueue_swi_giant, &slot->card_task);
1187	taskqueue_drain_timeout(taskqueue_swi_giant, &slot->card_delayed_task);
1188
1189	SDHCI_LOCK(slot);
1190	d = slot->dev;
1191	slot->dev = NULL;
1192	SDHCI_UNLOCK(slot);
1193	if (d != NULL)
1194		device_delete_child(slot->bus, d);
1195
1196	SDHCI_LOCK(slot);
1197	SDHCI_RESET(slot->bus, slot, SDHCI_RESET_ALL);
1198	SDHCI_UNLOCK(slot);
1199	if (slot->opt & SDHCI_HAVE_DMA)
1200		sdhci_dma_free(slot);
1201	if (slot->opt & SDHCI_TUNING_SUPPORTED) {
1202		free(slot->tune_req, M_DEVBUF);
1203		free(slot->tune_cmd, M_DEVBUF);
1204		free(slot->tune_data, M_DEVBUF);
1205	}
1206
1207	SDHCI_LOCK_DESTROY(slot);
1208
1209	return (0);
1210}
1211
1212int
1213sdhci_generic_suspend(struct sdhci_slot *slot)
1214{
1215
	/*
	 * We expect the MMC layer to issue initial tuning after resume.
	 * Otherwise, we'd have to signal ourselves that re-tuning (including
	 * a circuit reset) is required, at least for re-tuning modes 1 and 2.
	 */
1221	callout_drain(&slot->retune_callout);
1222	SDHCI_LOCK(slot);
1223	slot->opt &= ~SDHCI_TUNING_ENABLED;
1224	SDHCI_RESET(slot->bus, slot, SDHCI_RESET_ALL);
1225	SDHCI_UNLOCK(slot);
1226
1227	return (0);
1228}
1229
1230int
1231sdhci_generic_resume(struct sdhci_slot *slot)
1232{
1233
1234	SDHCI_LOCK(slot);
1235	sdhci_init(slot);
1236	SDHCI_UNLOCK(slot);
1237
1238	return (0);
1239}
1240
1241void
1242sdhci_generic_reset(device_t brdev __unused, struct sdhci_slot *slot,
1243    uint8_t mask)
1244{
1245	int timeout;
1246	uint32_t clock;
1247
1248	if (slot->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
1249		if (!SDHCI_GET_CARD_PRESENT(slot->bus, slot))
1250			return;
1251	}
1252
1253	/* Some controllers need this kick or reset won't work. */
1254	if ((mask & SDHCI_RESET_ALL) == 0 &&
1255	    (slot->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)) {
1256		/* This is to force an update */
1257		clock = slot->clock;
1258		slot->clock = 0;
1259		sdhci_set_clock(slot, clock);
1260	}
1261
1262	if (mask & SDHCI_RESET_ALL) {
1263		slot->clock = 0;
1264		slot->power = 0;
1265	}
1266
1267	WR1(slot, SDHCI_SOFTWARE_RESET, mask);
1268
1269	if (slot->quirks & SDHCI_QUIRK_WAITFOR_RESET_ASSERTED) {
1270		/*
1271		 * Resets on TI OMAPs and AM335x are incompatible with SDHCI
1272		 * specification.  The reset bit has internal propagation delay,
1273		 * so a fast read after write returns 0 even if reset process is
1274		 * in progress.  The workaround is to poll for 1 before polling
1275		 * for 0.  In the worst case, if we miss seeing it asserted the
1276		 * time we spent waiting is enough to ensure the reset finishes.
1277		 */
1278		timeout = 10000;
1279		while ((RD1(slot, SDHCI_SOFTWARE_RESET) & mask) != mask) {
1280			if (timeout <= 0)
1281				break;
1282			timeout--;
1283			DELAY(1);
1284		}
1285	}
1286
1287	/* Wait max 100 ms */
1288	timeout = 10000;
1289	/* Controller clears the bits when it's done */
1290	while (RD1(slot, SDHCI_SOFTWARE_RESET) & mask) {
1291		if (timeout <= 0) {
1292			slot_printf(slot, "Reset 0x%x never completed.\n",
1293			    mask);
1294			sdhci_dumpregs(slot);
1295			return;
1296		}
1297		timeout--;
1298		DELAY(10);
1299	}
1300}
1301
1302uint32_t
1303sdhci_generic_min_freq(device_t brdev __unused, struct sdhci_slot *slot)
1304{
1305
1306	if (slot->version >= SDHCI_SPEC_300)
1307		return (slot->max_clk / SDHCI_300_MAX_DIVIDER);
1308	else
1309		return (slot->max_clk / SDHCI_200_MAX_DIVIDER);
1310}
1311
1312bool
1313sdhci_generic_get_card_present(device_t brdev __unused, struct sdhci_slot *slot)
1314{
1315
1316	if (slot->opt & SDHCI_NON_REMOVABLE)
1317		return true;
1318
1319	return (RD4(slot, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
1320}
1321
1322void
1323sdhci_generic_set_uhs_timing(device_t brdev __unused, struct sdhci_slot *slot)
1324{
1325	const struct mmc_ios *ios;
1326	uint16_t hostctrl2;
1327
1328	if (slot->version < SDHCI_SPEC_300)
1329		return;
1330
1331	SDHCI_ASSERT_LOCKED(slot);
1332	ios = &slot->host.ios;
1333	sdhci_set_clock(slot, 0);
1334	hostctrl2 = RD2(slot, SDHCI_HOST_CONTROL2);
1335	hostctrl2 &= ~SDHCI_CTRL2_UHS_MASK;
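	/* Map the requested clock and timing onto the UHS mode select field. */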
1336	if (ios->clock > SD_SDR50_MAX) {
1337		if (ios->timing == bus_timing_mmc_hs400 ||
1338		    ios->timing == bus_timing_mmc_hs400es)
1339			hostctrl2 |= SDHCI_CTRL2_MMC_HS400;
1340		else
1341			hostctrl2 |= SDHCI_CTRL2_UHS_SDR104;
1342	}
1343	else if (ios->clock > SD_SDR25_MAX)
1344		hostctrl2 |= SDHCI_CTRL2_UHS_SDR50;
1345	else if (ios->clock > SD_SDR12_MAX) {
1346		if (ios->timing == bus_timing_uhs_ddr50 ||
1347		    ios->timing == bus_timing_mmc_ddr52)
1348			hostctrl2 |= SDHCI_CTRL2_UHS_DDR50;
1349		else
1350			hostctrl2 |= SDHCI_CTRL2_UHS_SDR25;
1351	} else if (ios->clock > SD_MMC_CARD_ID_FREQUENCY)
1352		hostctrl2 |= SDHCI_CTRL2_UHS_SDR12;
1353	WR2(slot, SDHCI_HOST_CONTROL2, hostctrl2);
1354	sdhci_set_clock(slot, ios->clock);
1355}
1356
1357int
1358sdhci_generic_update_ios(device_t brdev, device_t reqdev)
1359{
1360	struct sdhci_slot *slot = device_get_ivars(reqdev);
1361	struct mmc_ios *ios = &slot->host.ios;
1362
1363	SDHCI_LOCK(slot);
1364	/* Do full reset on bus power down to clear from any state. */
1365	if (ios->power_mode == power_off) {
1366		WR4(slot, SDHCI_SIGNAL_ENABLE, 0);
1367		sdhci_init(slot);
1368	}
1369	/* Configure the bus. */
1370	sdhci_set_clock(slot, ios->clock);
1371	sdhci_set_power(slot, (ios->power_mode == power_off) ? 0 : ios->vdd);
1372	if (ios->bus_width == bus_width_8) {
1373		slot->hostctrl |= SDHCI_CTRL_8BITBUS;
1374		slot->hostctrl &= ~SDHCI_CTRL_4BITBUS;
1375	} else if (ios->bus_width == bus_width_4) {
1376		slot->hostctrl &= ~SDHCI_CTRL_8BITBUS;
1377		slot->hostctrl |= SDHCI_CTRL_4BITBUS;
1378	} else if (ios->bus_width == bus_width_1) {
1379		slot->hostctrl &= ~SDHCI_CTRL_8BITBUS;
1380		slot->hostctrl &= ~SDHCI_CTRL_4BITBUS;
1381	} else {
1382		panic("Invalid bus width: %d", ios->bus_width);
1383	}
1384	if (ios->clock > SD_SDR12_MAX &&
1385	    !(slot->quirks & SDHCI_QUIRK_DONT_SET_HISPD_BIT))
1386		slot->hostctrl |= SDHCI_CTRL_HISPD;
1387	else
1388		slot->hostctrl &= ~SDHCI_CTRL_HISPD;
1389	WR1(slot, SDHCI_HOST_CONTROL, slot->hostctrl);
1390	SDHCI_SET_UHS_TIMING(brdev, slot);
1391	/* Some controllers like reset after bus changes. */
1392	if (slot->quirks & SDHCI_QUIRK_RESET_ON_IOS)
1393		SDHCI_RESET(slot->bus, slot,
1394		    SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1395
1396	SDHCI_UNLOCK(slot);
1397	return (0);
1398}
1399
1400int
1401sdhci_generic_switch_vccq(device_t brdev __unused, device_t reqdev)
1402{
1403	struct sdhci_slot *slot = device_get_ivars(reqdev);
1404	enum mmc_vccq vccq;
1405	int err;
1406	uint16_t hostctrl2;
1407
1408	if (slot->version < SDHCI_SPEC_300)
1409		return (0);
1410
1411	err = 0;
1412	vccq = slot->host.ios.vccq;
1413	SDHCI_LOCK(slot);
1414	sdhci_set_clock(slot, 0);
1415	hostctrl2 = RD2(slot, SDHCI_HOST_CONTROL2);
1416	switch (vccq) {
1417	case vccq_330:
1418		if (!(hostctrl2 & SDHCI_CTRL2_S18_ENABLE))
1419			goto done;
1420		hostctrl2 &= ~SDHCI_CTRL2_S18_ENABLE;
1421		WR2(slot, SDHCI_HOST_CONTROL2, hostctrl2);
1422		DELAY(5000);
1423		hostctrl2 = RD2(slot, SDHCI_HOST_CONTROL2);
1424		if (!(hostctrl2 & SDHCI_CTRL2_S18_ENABLE))
1425			goto done;
1426		err = EAGAIN;
1427		break;
1428	case vccq_180:
1429		if (!(slot->host.caps & MMC_CAP_SIGNALING_180)) {
1430			err = EINVAL;
1431			goto done;
1432		}
1433		if (hostctrl2 & SDHCI_CTRL2_S18_ENABLE)
1434			goto done;
1435		hostctrl2 |= SDHCI_CTRL2_S18_ENABLE;
1436		WR2(slot, SDHCI_HOST_CONTROL2, hostctrl2);
1437		DELAY(5000);
1438		hostctrl2 = RD2(slot, SDHCI_HOST_CONTROL2);
1439		if (hostctrl2 & SDHCI_CTRL2_S18_ENABLE)
1440			goto done;
1441		err = EAGAIN;
1442		break;
1443	default:
1444		slot_printf(slot,
1445		    "Attempt to set unsupported signaling voltage\n");
1446		err = EINVAL;
1447		break;
1448	}
1449done:
1450	sdhci_set_clock(slot, slot->host.ios.clock);
1451	SDHCI_UNLOCK(slot);
1452	return (err);
1453}
1454
1455int
1456sdhci_generic_tune(device_t brdev __unused, device_t reqdev, bool hs400)
1457{
1458	struct sdhci_slot *slot = device_get_ivars(reqdev);
1459	const struct mmc_ios *ios = &slot->host.ios;
1460	struct mmc_command *tune_cmd;
1461	struct mmc_data *tune_data;
1462	uint32_t opcode;
1463	int err;
1464
1465	if (!(slot->opt & SDHCI_TUNING_SUPPORTED))
1466		return (0);
1467
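	/* Convert the periodic re-tuning interval from seconds to ticks. */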
1468	slot->retune_ticks = slot->retune_count * hz;
1469	opcode = MMC_SEND_TUNING_BLOCK;
1470	SDHCI_LOCK(slot);
1471	switch (ios->timing) {
1472	case bus_timing_mmc_hs400:
1473		slot_printf(slot, "HS400 must be tuned in HS200 mode\n");
1474		SDHCI_UNLOCK(slot);
1475		return (EINVAL);
1476	case bus_timing_mmc_hs200:
1477		/*
1478		 * In HS400 mode, controllers use the data strobe line to
1479		 * latch data from the devices so periodic re-tuning isn't
1480		 * expected to be required.
1481		 */
1482		if (hs400)
1483			slot->retune_ticks = 0;
1484		opcode = MMC_SEND_TUNING_BLOCK_HS200;
1485		break;
1486	case bus_timing_uhs_ddr50:
1487	case bus_timing_uhs_sdr104:
1488		break;
1489	case bus_timing_uhs_sdr50:
1490		if (slot->opt & SDHCI_SDR50_NEEDS_TUNING)
1491			break;
1492		SDHCI_UNLOCK(slot);
1493		return (0);
1494	default:
1495		slot_printf(slot, "Tuning requested but not required.\n");
1496		SDHCI_UNLOCK(slot);
1497		return (EINVAL);
1498	}
1499
1500	tune_cmd = slot->tune_cmd;
1501	memset(tune_cmd, 0, sizeof(*tune_cmd));
1502	tune_cmd->opcode = opcode;
1503	tune_cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
1504	tune_data = tune_cmd->data = slot->tune_data;
1505	memset(tune_data, 0, sizeof(*tune_data));
1506	tune_data->len = (opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
1507	    ios->bus_width == bus_width_8) ? MMC_TUNING_LEN_HS200 :
1508	    MMC_TUNING_LEN;
1509	tune_data->flags = MMC_DATA_READ;
1510	tune_data->mrq = tune_cmd->mrq = slot->tune_req;
1511
1512	slot->opt &= ~SDHCI_TUNING_ENABLED;
1513	err = sdhci_exec_tuning(slot, true);
1514	if (err == 0) {
1515		slot->opt |= SDHCI_TUNING_ENABLED;
1516		slot->intmask |= sdhci_tuning_intmask(slot);
1517		WR4(slot, SDHCI_INT_ENABLE, slot->intmask);
1518		WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask);
1519		if (slot->retune_ticks) {
1520			callout_reset(&slot->retune_callout, slot->retune_ticks,
1521			    sdhci_retune, slot);
1522		}
1523	}
1524	SDHCI_UNLOCK(slot);
1525	return (err);
1526}
1527
1528int
1529sdhci_generic_retune(device_t brdev __unused, device_t reqdev, bool reset)
1530{
1531	struct sdhci_slot *slot = device_get_ivars(reqdev);
1532	int err;
1533
1534	if (!(slot->opt & SDHCI_TUNING_ENABLED))
1535		return (0);
1536
1537	/* HS400 must be tuned in HS200 mode. */
1538	if (slot->host.ios.timing == bus_timing_mmc_hs400)
1539		return (EINVAL);
1540
1541	SDHCI_LOCK(slot);
1542	err = sdhci_exec_tuning(slot, reset);
1543	/*
1544	 * There are two ways sdhci_exec_tuning() can fail:
1545	 * EBUSY should not actually happen when requests are only issued
1546	 *	 with the host properly acquired, and
1547	 * EIO   re-tuning failed (but it did work initially).
1548	 *
1549	 * In both cases, we should retry at later point if periodic re-tuning
1550	 * is enabled.  Note that due to slot->retune_req not being cleared in
1551	 * these failure cases, the MMC layer should trigger another attempt at
1552	 * re-tuning with the next request anyway, though.
1553	 */
1554	if (slot->retune_ticks) {
1555		callout_reset(&slot->retune_callout, slot->retune_ticks,
1556		    sdhci_retune, slot);
1557	}
1558	SDHCI_UNLOCK(slot);
1559	return (err);
1560}
1561
1562static int
1563sdhci_exec_tuning(struct sdhci_slot *slot, bool reset)
1564{
1565	struct mmc_request *tune_req;
1566	struct mmc_command *tune_cmd;
1567	int i;
1568	uint32_t intmask;
1569	uint16_t hostctrl2;
1570	u_char opt;
1571
1572	SDHCI_ASSERT_LOCKED(slot);
1573	if (slot->req != NULL)
1574		return (EBUSY);
1575
1576	/* Tuning doesn't work with DMA enabled. */
1577	opt = slot->opt;
1578	slot->opt = opt & ~SDHCI_HAVE_DMA;
1579
1580	/*
1581	 * Ensure that as documented, SDHCI_INT_DATA_AVAIL is the only
1582	 * kind of interrupt we receive in response to a tuning request.
1583	 */
1584	intmask = slot->intmask;
1585	slot->intmask = SDHCI_INT_DATA_AVAIL;
1586	WR4(slot, SDHCI_INT_ENABLE, SDHCI_INT_DATA_AVAIL);
1587	WR4(slot, SDHCI_SIGNAL_ENABLE, SDHCI_INT_DATA_AVAIL);
1588
1589	hostctrl2 = RD2(slot, SDHCI_HOST_CONTROL2);
1590	if (reset)
1591		hostctrl2 &= ~SDHCI_CTRL2_SAMPLING_CLOCK;
1592	else
1593		hostctrl2 |= SDHCI_CTRL2_SAMPLING_CLOCK;
1594	WR2(slot, SDHCI_HOST_CONTROL2, hostctrl2 | SDHCI_CTRL2_EXEC_TUNING);
1595
1596	tune_req = slot->tune_req;
1597	tune_cmd = slot->tune_cmd;
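	/*
	 * Repeatedly issue the tuning command, up to MMC_TUNING_MAX times,
	 * until the controller clears the Execute Tuning bit or an attempt
	 * fails.
	 */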
1598	for (i = 0; i < MMC_TUNING_MAX; i++) {
1599		memset(tune_req, 0, sizeof(*tune_req));
1600		tune_req->cmd = tune_cmd;
1601		tune_req->done = sdhci_req_wakeup;
1602		tune_req->done_data = slot;
1603		slot->req = tune_req;
1604		slot->flags = 0;
1605		sdhci_start(slot);
1606		while (!(tune_req->flags & MMC_REQ_DONE))
1607			msleep(tune_req, &slot->mtx, 0, "sdhciet", 0);
1608		if (!(tune_req->flags & MMC_TUNE_DONE))
1609			break;
1610		hostctrl2 = RD2(slot, SDHCI_HOST_CONTROL2);
1611		if (!(hostctrl2 & SDHCI_CTRL2_EXEC_TUNING))
1612			break;
1613		if (tune_cmd->opcode == MMC_SEND_TUNING_BLOCK)
1614			DELAY(1000);
1615	}
1616
1617	/*
1618	 * Restore DMA usage and interrupts.
1619	 * Note that the interrupt aggregation code might have cleared
1620	 * SDHCI_INT_DMA_END and/or SDHCI_INT_RESPONSE in slot->intmask
1621	 * and SDHCI_SIGNAL_ENABLE respectively so ensure SDHCI_INT_ENABLE
1622	 * doesn't lose these.
1623	 */
1624	slot->opt = opt;
1625	slot->intmask = intmask;
1626	WR4(slot, SDHCI_INT_ENABLE, intmask | SDHCI_INT_DMA_END |
1627	    SDHCI_INT_RESPONSE);
1628	WR4(slot, SDHCI_SIGNAL_ENABLE, intmask);
1629
1630	if ((hostctrl2 & (SDHCI_CTRL2_EXEC_TUNING |
1631	    SDHCI_CTRL2_SAMPLING_CLOCK)) == SDHCI_CTRL2_SAMPLING_CLOCK) {
1632		slot->retune_req = 0;
1633		return (0);
1634	}
1635
1636	slot_printf(slot, "Tuning failed, using fixed sampling clock\n");
1637	WR2(slot, SDHCI_HOST_CONTROL2, hostctrl2 & ~(SDHCI_CTRL2_EXEC_TUNING |
1638	    SDHCI_CTRL2_SAMPLING_CLOCK));
1639	SDHCI_RESET(slot->bus, slot, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1640	return (EIO);
1641}
1642
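/*
 * Periodic re-tuning callout: just note that re-tuning is needed; the actual
 * re-tune happens in the context of a later request.
 */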
1643static void
1644sdhci_retune(void *arg)
1645{
1646	struct sdhci_slot *slot = arg;
1647
1648	slot->retune_req |= SDHCI_RETUNE_REQ_NEEDED;
1649}
1650
1651#ifdef MMCCAM
1652static void
1653sdhci_req_done(struct sdhci_slot *slot)
1654{
1655	union ccb *ccb;
1656
1657	if (__predict_false(sdhci_debug > 1))
1658		slot_printf(slot, "%s\n", __func__);
1659	if (slot->ccb != NULL && slot->curcmd != NULL) {
1660		callout_stop(&slot->timeout_callout);
1661		ccb = slot->ccb;
1662		slot->ccb = NULL;
1663		slot->curcmd = NULL;
1664
1665		/* Tell CAM the request is finished */
1666		struct ccb_mmcio *mmcio;
1667		mmcio = &ccb->mmcio;
1668
1669		ccb->ccb_h.status =
1670		    (mmcio->cmd.error == 0 ? CAM_REQ_CMP : CAM_REQ_CMP_ERR);
1671		xpt_done(ccb);
1672	}
1673}
1674#else
1675static void
1676sdhci_req_done(struct sdhci_slot *slot)
1677{
1678	struct mmc_request *req;
1679
1680	if (slot->req != NULL && slot->curcmd != NULL) {
1681		callout_stop(&slot->timeout_callout);
1682		req = slot->req;
1683		slot->req = NULL;
1684		slot->curcmd = NULL;
1685		req->done(req);
1686	}
1687}
1688#endif
1689
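/*
 * Completion callback that flags the request done and wakes any thread
 * sleeping on it (used for the driver's internal tuning requests).
 */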
1690static void
1691sdhci_req_wakeup(struct mmc_request *req)
1692{
1693
1694	req->flags |= MMC_REQ_DONE;
1695	wakeup(req);
1696}
1697
1698static void
1699sdhci_timeout(void *arg)
1700{
1701	struct sdhci_slot *slot = arg;
1702
1703	if (slot->curcmd != NULL) {
1704		slot_printf(slot, "Controller timeout\n");
1705		sdhci_dumpregs(slot);
1706		SDHCI_RESET(slot->bus, slot,
1707		    SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1708		slot->curcmd->error = MMC_ERR_TIMEOUT;
1709		sdhci_req_done(slot);
1710	} else {
1711		slot_printf(slot, "Spurious timeout - no active command\n");
1712	}
1713}
1714
1715static void
1716sdhci_set_transfer_mode(struct sdhci_slot *slot, const struct mmc_data *data)
1717{
1718	uint16_t mode;
1719
1720	if (data == NULL)
1721		return;
1722
1723	mode = SDHCI_TRNS_BLK_CNT_EN;
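	/*
	 * Use multi-block mode for anything beyond a single 512-byte block;
	 * enable auto CMD12 only for open-ended transfers (no predefined
	 * block count) with a stop command, unless the controller's
	 * auto-stop is broken.
	 */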
1724	if (data->len > 512 || data->block_count > 1) {
1725		mode |= SDHCI_TRNS_MULTI;
1726		if (data->block_count == 0 && __predict_true(
1727#ifdef MMCCAM
1728		    slot->ccb->mmcio.stop.opcode == MMC_STOP_TRANSMISSION &&
1729#else
1730		    slot->req->stop != NULL &&
1731#endif
1732		    !(slot->quirks & SDHCI_QUIRK_BROKEN_AUTO_STOP)))
1733			mode |= SDHCI_TRNS_ACMD12;
1734	}
1735	if (data->flags & MMC_DATA_READ)
1736		mode |= SDHCI_TRNS_READ;
1737	if (slot->flags & SDHCI_USE_DMA)
1738		mode |= SDHCI_TRNS_DMA;
1739
1740	WR2(slot, SDHCI_TRANSFER_MODE, mode);
1741}
1742
1743static void
1744sdhci_start_command(struct sdhci_slot *slot, struct mmc_command *cmd)
1745{
1746	int flags, timeout;
1747	uint32_t mask;
1748
1749	slot->curcmd = cmd;
1750	slot->cmd_done = 0;
1751
1752	cmd->error = MMC_ERR_NONE;
1753
	/* This flag combination is not supported by the controller. */
1755	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
1756		slot_printf(slot, "Unsupported response type!\n");
1757		cmd->error = MMC_ERR_FAILED;
1758		sdhci_req_done(slot);
1759		return;
1760	}
1761
1762	/*
1763	 * Do not issue command if there is no card, clock or power.
1764	 * Controller will not detect timeout without clock active.
1765	 */
1766	if (!SDHCI_GET_CARD_PRESENT(slot->bus, slot) ||
1767	    slot->power == 0 ||
1768	    slot->clock == 0) {
1769		slot_printf(slot,
1770			    "Cannot issue a command (power=%d clock=%d)\n",
1771			    slot->power, slot->clock);
1772		cmd->error = MMC_ERR_FAILED;
1773		sdhci_req_done(slot);
1774		return;
1775	}
1776	/* Always wait for free CMD bus. */
1777	mask = SDHCI_CMD_INHIBIT;
1778	/* Wait for free DAT if we have data or busy signal. */
1779	if (cmd->data != NULL || (cmd->flags & MMC_RSP_BUSY))
1780		mask |= SDHCI_DAT_INHIBIT;
1781	/*
1782	 * We shouldn't wait for DAT for stop commands or CMD19/CMD21.  Note
1783	 * that these latter are also special in that SDHCI_CMD_DATA should
1784	 * be set below but no actual data is ever read from the controller.
1785	*/
1786#ifdef MMCCAM
1787	if (cmd == &slot->ccb->mmcio.stop ||
1788#else
1789	if (cmd == slot->req->stop ||
1790#endif
1791	    __predict_false(cmd->opcode == MMC_SEND_TUNING_BLOCK ||
1792	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200))
1793		mask &= ~SDHCI_DAT_INHIBIT;
1794	/*
1795	 *  Wait for bus no more then 250 ms.  Typically there will be no wait
1796	 *  here at all, but when writing a crash dump we may be bypassing the
1797	 *  host platform's interrupt handler, and in some cases that handler
1798	 *  may be working around hardware quirks such as not respecting r1b
1799	 *  busy indications.  In those cases, this wait-loop serves the purpose
1800	 *  of waiting for the prior command and data transfers to be done, and
1801	 *  SD cards are allowed to take up to 250ms for write and erase ops.
1802	 *  (It's usually more like 20-30ms in the real world.)
1803	 */
1804	timeout = 250;
1805	while (mask & RD4(slot, SDHCI_PRESENT_STATE)) {
1806		if (timeout == 0) {
1807			slot_printf(slot, "Controller never released "
1808			    "inhibit bit(s).\n");
1809			sdhci_dumpregs(slot);
1810			cmd->error = MMC_ERR_FAILED;
1811			sdhci_req_done(slot);
1812			return;
1813		}
1814		timeout--;
1815		DELAY(1000);
1816	}
1817
1818	/* Prepare command flags. */
1819	if (!(cmd->flags & MMC_RSP_PRESENT))
1820		flags = SDHCI_CMD_RESP_NONE;
1821	else if (cmd->flags & MMC_RSP_136)
1822		flags = SDHCI_CMD_RESP_LONG;
1823	else if (cmd->flags & MMC_RSP_BUSY)
1824		flags = SDHCI_CMD_RESP_SHORT_BUSY;
1825	else
1826		flags = SDHCI_CMD_RESP_SHORT;
1827	if (cmd->flags & MMC_RSP_CRC)
1828		flags |= SDHCI_CMD_CRC;
1829	if (cmd->flags & MMC_RSP_OPCODE)
1830		flags |= SDHCI_CMD_INDEX;
1831	if (cmd->data != NULL)
1832		flags |= SDHCI_CMD_DATA;
1833	if (cmd->opcode == MMC_STOP_TRANSMISSION)
1834		flags |= SDHCI_CMD_TYPE_ABORT;
1835	/* Prepare data. */
1836	sdhci_start_data(slot, cmd->data);
1837	/*
1838	 * Interrupt aggregation: To reduce total number of interrupts
1839	 * group response interrupt with data interrupt when possible.
1840	 * If there going to be data interrupt, mask response one.
1841	 */
1842	if (slot->data_done == 0) {
1843		WR4(slot, SDHCI_SIGNAL_ENABLE,
1844		    slot->intmask &= ~SDHCI_INT_RESPONSE);
1845	}
1846	/* Set command argument. */
1847	WR4(slot, SDHCI_ARGUMENT, cmd->arg);
1848	/* Set data transfer mode. */
1849	sdhci_set_transfer_mode(slot, cmd->data);
1850	if (__predict_false(sdhci_debug > 1))
1851		slot_printf(slot, "Starting command opcode %#04x flags %#04x\n",
1852		    cmd->opcode, flags);
1853
1854	/* Start command. */
1855	WR2(slot, SDHCI_COMMAND_FLAGS, (cmd->opcode << 8) | (flags & 0xff));
1856	/* Start timeout callout. */
1857	callout_reset(&slot->timeout_callout, slot->timeout * hz,
1858	    sdhci_timeout, slot);
1859}
1860
1861static void
1862sdhci_finish_command(struct sdhci_slot *slot)
1863{
1864	int i;
1865	uint32_t val;
1866	uint8_t extra;
1867
1868	if (__predict_false(sdhci_debug > 1))
1869		slot_printf(slot, "%s: called, err %d flags %#04x\n",
1870		    __func__, slot->curcmd->error, slot->curcmd->flags);
1871	slot->cmd_done = 1;
1872	/*
1873	 * Interrupt aggregation: Restore command interrupt.
1874	 * Main restore point for the case when command interrupt
1875	 * happened first.
1876	 */
1877	if (__predict_true(slot->curcmd->opcode != MMC_SEND_TUNING_BLOCK &&
1878	    slot->curcmd->opcode != MMC_SEND_TUNING_BLOCK_HS200))
1879		WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask |=
1880		    SDHCI_INT_RESPONSE);
	/* In case of an error, reset the host and return. */
1882	if (slot->curcmd->error) {
1883		if (slot->curcmd->error == MMC_ERR_BADCRC)
1884			slot->retune_req |= SDHCI_RETUNE_REQ_RESET;
1885		SDHCI_RESET(slot->bus, slot, SDHCI_RESET_CMD);
1886		SDHCI_RESET(slot->bus, slot, SDHCI_RESET_DATA);
1887		sdhci_start(slot);
1888		return;
1889	}
	/* If the command has a response, fetch it. */
1891	if (slot->curcmd->flags & MMC_RSP_PRESENT) {
1892		if (slot->curcmd->flags & MMC_RSP_136) {
			/* The CRC is stripped, so we need a one-byte shift. */
1894			extra = 0;
1895			for (i = 0; i < 4; i++) {
1896				val = RD4(slot, SDHCI_RESPONSE + i * 4);
1897				if (slot->quirks &
1898				    SDHCI_QUIRK_DONT_SHIFT_RESPONSE)
1899					slot->curcmd->resp[3 - i] = val;
1900				else {
1901					slot->curcmd->resp[3 - i] =
1902					    (val << 8) | extra;
1903					extra = val >> 24;
1904				}
1905			}
1906		} else
1907			slot->curcmd->resp[0] = RD4(slot, SDHCI_RESPONSE);
1908	}
1909	if (__predict_false(sdhci_debug > 1))
1910		slot_printf(slot, "Resp: %#04x %#04x %#04x %#04x\n",
1911		    slot->curcmd->resp[0], slot->curcmd->resp[1],
1912		    slot->curcmd->resp[2], slot->curcmd->resp[3]);
1913
	/* If the data phase is already done, finish the request. */
1915	if (slot->data_done)
1916		sdhci_start(slot);
1917}
1918
1919static void
1920sdhci_start_data(struct sdhci_slot *slot, const struct mmc_data *data)
1921{
1922	uint32_t blkcnt, blksz, current_timeout, sdma_bbufsz, target_timeout;
1923	uint8_t div;
1924
1925	if (data == NULL && (slot->curcmd->flags & MMC_RSP_BUSY) == 0) {
1926		slot->data_done = 1;
1927		return;
1928	}
1929
1930	slot->data_done = 0;
1931
	/*
	 * Calculate and set the data timeout.
	 * XXX: We should get this from the mmc layer; for now assume 1 sec.
	 */
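	/*
	 * The controller's data timeout is 2^(13 + div) cycles of the
	 * timeout clock; search for the smallest div that covers the target.
	 */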
1934	if (slot->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) {
1935		div = 0xE;
1936	} else {
1937		target_timeout = 1000000;
1938		div = 0;
1939		current_timeout = (1 << 13) * 1000 / slot->timeout_clk;
1940		while (current_timeout < target_timeout && div < 0xE) {
1941			++div;
1942			current_timeout <<= 1;
1943		}
		/* Compensate for an off-by-one error in the CaFe chip. */
1945		if (div < 0xE &&
1946		    (slot->quirks & SDHCI_QUIRK_INCR_TIMEOUT_CONTROL)) {
1947			++div;
1948		}
1949	}
1950	WR1(slot, SDHCI_TIMEOUT_CONTROL, div);
1951
1952	if (data == NULL)
1953		return;
1954
1955	/* Use DMA if possible. */
1956	if ((slot->opt & SDHCI_HAVE_DMA))
1957		slot->flags |= SDHCI_USE_DMA;
1958	/* If data is small, broken DMA may return zeroes instead of data. */
1959	if ((slot->quirks & SDHCI_QUIRK_BROKEN_TIMINGS) &&
1960	    (data->len <= 512))
1961		slot->flags &= ~SDHCI_USE_DMA;
	/* Some controllers require the DMA length to be a multiple of 4. */
1963	if ((slot->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) &&
1964	    ((data->len) & 0x3))
1965		slot->flags &= ~SDHCI_USE_DMA;
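	/*
	 * SDMA transfers go through a single bounce buffer of sdma_bbufsz
	 * bytes; SDHCI_INT_DMA_END fires at each SDMA boundary so the buffer
	 * can be drained and refilled in sdhci_data_irq().
	 */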
1966	/* Load DMA buffer. */
1967	if (slot->flags & SDHCI_USE_DMA) {
1968		sdma_bbufsz = slot->sdma_bbufsz;
1969		if (data->flags & MMC_DATA_READ)
1970			bus_dmamap_sync(slot->dmatag, slot->dmamap,
1971			    BUS_DMASYNC_PREREAD);
1972		else {
1973			memcpy(slot->dmamem, data->data, ulmin(data->len,
1974			    sdma_bbufsz));
1975			bus_dmamap_sync(slot->dmatag, slot->dmamap,
1976			    BUS_DMASYNC_PREWRITE);
1977		}
1978		WR4(slot, SDHCI_DMA_ADDRESS, slot->paddr);
1979		/*
1980		 * Interrupt aggregation: Mask border interrupt for the last
1981		 * bounce buffer and unmask otherwise.
1982		 */
1983		if (data->len == sdma_bbufsz)
1984			slot->intmask &= ~SDHCI_INT_DMA_END;
1985		else
1986			slot->intmask |= SDHCI_INT_DMA_END;
1987		WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask);
1988	}
1989	/* Current data offset for both PIO and DMA. */
1990	slot->offset = 0;
1991#ifdef MMCCAM
1992	if (data->flags & MMC_DATA_BLOCK_SIZE) {
		/*
		 * Set the block size and request border interrupts on the
		 * SDMA boundary.
		 */
1994		blksz = SDHCI_MAKE_BLKSZ(slot->sdma_boundary, data->block_size);
1995		blkcnt = data->block_count;
1996		if (__predict_false(sdhci_debug > 0))
1997			slot_printf(slot, "SDIO Custom block params: blksz: "
1998			    "%#10x, blk cnt: %#10x\n", blksz, blkcnt);
1999	} else
2000#endif
2001	{
		/*
		 * Set the block size and request border interrupts on the
		 * SDMA boundary.
		 */
2003		blksz = SDHCI_MAKE_BLKSZ(slot->sdma_boundary, ulmin(data->len, 512));
2004		blkcnt = howmany(data->len, 512);
2005	}
2006
2007	WR2(slot, SDHCI_BLOCK_SIZE, blksz);
2008	WR2(slot, SDHCI_BLOCK_COUNT, blkcnt);
2009	if (__predict_false(sdhci_debug > 1))
2010		slot_printf(slot, "Blk size: 0x%08x | Blk cnt:  0x%08x\n",
2011		    blksz, blkcnt);
2012}
2013
2014void
2015sdhci_finish_data(struct sdhci_slot *slot)
2016{
2017	struct mmc_data *data = slot->curcmd->data;
2018	size_t left;
2019
	/*
	 * Interrupt aggregation: Restore the command interrupt.
	 * This is the auxiliary restore point for the case when the data
	 * interrupt happened first.
	 */
2023	if (!slot->cmd_done) {
2024		WR4(slot, SDHCI_SIGNAL_ENABLE,
2025		    slot->intmask |= SDHCI_INT_RESPONSE);
2026	}
	/* Unload the rest of the data from the DMA buffer. */
2028	if (!slot->data_done && (slot->flags & SDHCI_USE_DMA) &&
2029	    slot->curcmd->data != NULL) {
2030		if (data->flags & MMC_DATA_READ) {
2031			left = data->len - slot->offset;
2032			bus_dmamap_sync(slot->dmatag, slot->dmamap,
2033			    BUS_DMASYNC_POSTREAD);
2034			memcpy((u_char*)data->data + slot->offset, slot->dmamem,
2035			    ulmin(left, slot->sdma_bbufsz));
2036		} else
2037			bus_dmamap_sync(slot->dmatag, slot->dmamap,
2038			    BUS_DMASYNC_POSTWRITE);
2039	}
2040	slot->data_done = 1;
	/* If there was an error, reset the host. */
2042	if (slot->curcmd->error) {
2043		if (slot->curcmd->error == MMC_ERR_BADCRC)
2044			slot->retune_req |= SDHCI_RETUNE_REQ_RESET;
2045		SDHCI_RESET(slot->bus, slot, SDHCI_RESET_CMD);
2046		SDHCI_RESET(slot->bus, slot, SDHCI_RESET_DATA);
2047		sdhci_start(slot);
2048		return;
2049	}
	/* If we already have the command response, finish. */
2051	if (slot->cmd_done)
2052		sdhci_start(slot);
2053}
2054
2055#ifdef MMCCAM
2056static void
2057sdhci_start(struct sdhci_slot *slot)
2058{
2059	union ccb *ccb;
2060	struct ccb_mmcio *mmcio;
2061
2062	ccb = slot->ccb;
2063	if (ccb == NULL)
2064		return;
2065
2066	mmcio = &ccb->mmcio;
2067	if (!(slot->flags & CMD_STARTED)) {
2068		slot->flags |= CMD_STARTED;
2069		sdhci_start_command(slot, &mmcio->cmd);
2070		return;
2071	}
2072
2073	/*
	 * The old MMC stack doesn't use this!
	 * Enabling this code causes significant performance degradation
	 * and IRQ storms on the BBB, while the Wandboard behaves fine.
	 * Not using this code does no harm...
2078	if (!(slot->flags & STOP_STARTED) && mmcio->stop.opcode != 0) {
2079		slot->flags |= STOP_STARTED;
2080		sdhci_start_command(slot, &mmcio->stop);
2081		return;
2082	}
2083	*/
2084	if (__predict_false(sdhci_debug > 1))
2085		slot_printf(slot, "result: %d\n", mmcio->cmd.error);
2086	if (mmcio->cmd.error == 0 &&
2087	    (slot->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)) {
2088		SDHCI_RESET(slot->bus, slot, SDHCI_RESET_CMD);
2089		SDHCI_RESET(slot->bus, slot, SDHCI_RESET_DATA);
2090	}
2091
2092	sdhci_req_done(slot);
2093}
2094#else
2095static void
2096sdhci_start(struct sdhci_slot *slot)
2097{
2098	const struct mmc_request *req;
2099
2100	req = slot->req;
2101	if (req == NULL)
2102		return;
2103
2104	if (!(slot->flags & CMD_STARTED)) {
2105		slot->flags |= CMD_STARTED;
2106		sdhci_start_command(slot, req->cmd);
2107		return;
2108	}
2109	if ((slot->quirks & SDHCI_QUIRK_BROKEN_AUTO_STOP) &&
2110	    !(slot->flags & STOP_STARTED) && req->stop) {
2111		slot->flags |= STOP_STARTED;
2112		sdhci_start_command(slot, req->stop);
2113		return;
2114	}
2115	if (__predict_false(sdhci_debug > 1))
2116		slot_printf(slot, "result: %d\n", req->cmd->error);
2117	if (!req->cmd->error &&
2118	    ((slot->curcmd == req->stop &&
2119	     (slot->quirks & SDHCI_QUIRK_BROKEN_AUTO_STOP)) ||
2120	     (slot->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
2121		SDHCI_RESET(slot->bus, slot, SDHCI_RESET_CMD);
2122		SDHCI_RESET(slot->bus, slot, SDHCI_RESET_DATA);
2123	}
2124
2125	sdhci_req_done(slot);
2126}
2127#endif
2128
2129int
2130sdhci_generic_request(device_t brdev __unused, device_t reqdev,
2131    struct mmc_request *req)
2132{
2133	struct sdhci_slot *slot = device_get_ivars(reqdev);
2134
2135	SDHCI_LOCK(slot);
2136	if (slot->req != NULL) {
2137		SDHCI_UNLOCK(slot);
2138		return (EBUSY);
2139	}
2140	if (__predict_false(sdhci_debug > 1)) {
2141		slot_printf(slot,
2142		    "CMD%u arg %#x flags %#x dlen %u dflags %#x\n",
2143		    req->cmd->opcode, req->cmd->arg, req->cmd->flags,
2144		    (req->cmd->data)?(u_int)req->cmd->data->len:0,
2145		    (req->cmd->data)?req->cmd->data->flags:0);
2146	}
2147	slot->req = req;
2148	slot->flags = 0;
2149	sdhci_start(slot);
2150	SDHCI_UNLOCK(slot);
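	/*
	 * When writing a crash dump, interrupts are generally not serviced,
	 * so poll the controller until the request completes.
	 */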
2151	if (dumping) {
2152		while (slot->req != NULL) {
2153			sdhci_generic_intr(slot);
2154			DELAY(10);
2155		}
2156	}
2157	return (0);
2158}
2159
2160int
2161sdhci_generic_get_ro(device_t brdev __unused, device_t reqdev)
2162{
2163	struct sdhci_slot *slot = device_get_ivars(reqdev);
2164	uint32_t val;
2165
2166	SDHCI_LOCK(slot);
2167	val = RD4(slot, SDHCI_PRESENT_STATE);
2168	SDHCI_UNLOCK(slot);
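	/* The write-protect bit reads 1 when writes are enabled, so invert. */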
2169	return (!(val & SDHCI_WRITE_PROTECT));
2170}
2171
2172int
2173sdhci_generic_acquire_host(device_t brdev __unused, device_t reqdev)
2174{
2175	struct sdhci_slot *slot = device_get_ivars(reqdev);
2176	int err = 0;
2177
2178	SDHCI_LOCK(slot);
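	/* Sleep until any current owner of the host releases it. */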
2179	while (slot->bus_busy)
2180		msleep(slot, &slot->mtx, 0, "sdhciah", 0);
2181	slot->bus_busy++;
	/* Activate the LED. */
2183	WR1(slot, SDHCI_HOST_CONTROL, slot->hostctrl |= SDHCI_CTRL_LED);
2184	SDHCI_UNLOCK(slot);
2185	return (err);
2186}
2187
2188int
2189sdhci_generic_release_host(device_t brdev __unused, device_t reqdev)
2190{
2191	struct sdhci_slot *slot = device_get_ivars(reqdev);
2192
2193	SDHCI_LOCK(slot);
	/* Deactivate the LED. */
2195	WR1(slot, SDHCI_HOST_CONTROL, slot->hostctrl &= ~SDHCI_CTRL_LED);
2196	slot->bus_busy--;
2197	wakeup(slot);
2198	SDHCI_UNLOCK(slot);
2199	return (0);
2200}
2201
2202static void
2203sdhci_cmd_irq(struct sdhci_slot *slot, uint32_t intmask)
2204{
2205
2206	if (!slot->curcmd) {
2207		slot_printf(slot, "Got command interrupt 0x%08x, but "
2208		    "there is no active command.\n", intmask);
2209		sdhci_dumpregs(slot);
2210		return;
2211	}
2212	if (intmask & SDHCI_INT_TIMEOUT)
2213		slot->curcmd->error = MMC_ERR_TIMEOUT;
2214	else if (intmask & SDHCI_INT_CRC)
2215		slot->curcmd->error = MMC_ERR_BADCRC;
2216	else if (intmask & (SDHCI_INT_END_BIT | SDHCI_INT_INDEX))
2217		slot->curcmd->error = MMC_ERR_FIFO;
2218
2219	sdhci_finish_command(slot);
2220}
2221
2222static void
2223sdhci_data_irq(struct sdhci_slot *slot, uint32_t intmask)
2224{
2225	struct mmc_data *data;
2226	size_t left;
2227	uint32_t sdma_bbufsz;
2228
2229	if (!slot->curcmd) {
2230		slot_printf(slot, "Got data interrupt 0x%08x, but "
2231		    "there is no active command.\n", intmask);
2232		sdhci_dumpregs(slot);
2233		return;
2234	}
2235	if (slot->curcmd->data == NULL &&
2236	    (slot->curcmd->flags & MMC_RSP_BUSY) == 0) {
2237		slot_printf(slot, "Got data interrupt 0x%08x, but "
2238		    "there is no active data operation.\n",
2239		    intmask);
2240		sdhci_dumpregs(slot);
2241		return;
2242	}
2243	if (intmask & SDHCI_INT_DATA_TIMEOUT)
2244		slot->curcmd->error = MMC_ERR_TIMEOUT;
2245	else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
2246		slot->curcmd->error = MMC_ERR_BADCRC;
2247	if (slot->curcmd->data == NULL &&
2248	    (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL |
2249	    SDHCI_INT_DMA_END))) {
2250		slot_printf(slot, "Got data interrupt 0x%08x, but "
2251		    "there is busy-only command.\n", intmask);
2252		sdhci_dumpregs(slot);
2253		slot->curcmd->error = MMC_ERR_INVALID;
2254	}
2255	if (slot->curcmd->error) {
2256		/* No need to continue after any error. */
2257		goto done;
2258	}
2259
2260	/* Handle tuning completion interrupt. */
2261	if (__predict_false((intmask & SDHCI_INT_DATA_AVAIL) &&
2262	    (slot->curcmd->opcode == MMC_SEND_TUNING_BLOCK ||
2263	    slot->curcmd->opcode == MMC_SEND_TUNING_BLOCK_HS200))) {
2264		slot->req->flags |= MMC_TUNE_DONE;
2265		sdhci_finish_command(slot);
2266		sdhci_finish_data(slot);
2267		return;
2268	}
2269	/* Handle PIO interrupt. */
2270	if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) {
2271		if ((slot->opt & SDHCI_PLATFORM_TRANSFER) &&
2272		    SDHCI_PLATFORM_WILL_HANDLE(slot->bus, slot)) {
2273			SDHCI_PLATFORM_START_TRANSFER(slot->bus, slot,
2274			    &intmask);
2275			slot->flags |= PLATFORM_DATA_STARTED;
2276		} else
2277			sdhci_transfer_pio(slot);
2278	}
2279	/* Handle DMA border. */
2280	if (intmask & SDHCI_INT_DMA_END) {
2281		data = slot->curcmd->data;
2282		sdma_bbufsz = slot->sdma_bbufsz;
2283
2284		/* Unload DMA buffer ... */
2285		left = data->len - slot->offset;
2286		if (data->flags & MMC_DATA_READ) {
2287			bus_dmamap_sync(slot->dmatag, slot->dmamap,
2288			    BUS_DMASYNC_POSTREAD);
2289			memcpy((u_char*)data->data + slot->offset, slot->dmamem,
2290			    ulmin(left, sdma_bbufsz));
2291		} else {
2292			bus_dmamap_sync(slot->dmatag, slot->dmamap,
2293			    BUS_DMASYNC_POSTWRITE);
2294		}
		/* ... and reload it. */
2296		slot->offset += sdma_bbufsz;
2297		left = data->len - slot->offset;
2298		if (data->flags & MMC_DATA_READ) {
2299			bus_dmamap_sync(slot->dmatag, slot->dmamap,
2300			    BUS_DMASYNC_PREREAD);
2301		} else {
2302			memcpy(slot->dmamem, (u_char*)data->data + slot->offset,
2303			    ulmin(left, sdma_bbufsz));
2304			bus_dmamap_sync(slot->dmatag, slot->dmamap,
2305			    BUS_DMASYNC_PREWRITE);
2306		}
2307		/*
2308		 * Interrupt aggregation: Mask border interrupt for the last
2309		 * bounce buffer.
2310		 */
2311		if (left == sdma_bbufsz) {
2312			slot->intmask &= ~SDHCI_INT_DMA_END;
2313			WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask);
2314		}
2315		/* Restart DMA. */
2316		WR4(slot, SDHCI_DMA_ADDRESS, slot->paddr);
2317	}
2318	/* We have got all data. */
2319	if (intmask & SDHCI_INT_DATA_END) {
2320		if (slot->flags & PLATFORM_DATA_STARTED) {
2321			slot->flags &= ~PLATFORM_DATA_STARTED;
2322			SDHCI_PLATFORM_FINISH_TRANSFER(slot->bus, slot);
2323		} else
2324			sdhci_finish_data(slot);
2325	}
2326done:
2327	if (slot->curcmd != NULL && slot->curcmd->error != 0) {
2328		if (slot->flags & PLATFORM_DATA_STARTED) {
2329			slot->flags &= ~PLATFORM_DATA_STARTED;
2330			SDHCI_PLATFORM_FINISH_TRANSFER(slot->bus, slot);
2331		} else
2332			sdhci_finish_data(slot);
2333	}
2334}
2335
2336static void
2337sdhci_acmd_irq(struct sdhci_slot *slot, uint16_t acmd_err)
2338{
2339
2340	if (!slot->curcmd) {
2341		slot_printf(slot, "Got AutoCMD12 error 0x%04x, but "
2342		    "there is no active command.\n", acmd_err);
2343		sdhci_dumpregs(slot);
2344		return;
2345	}
2346	slot_printf(slot, "Got AutoCMD12 error 0x%04x\n", acmd_err);
2347	SDHCI_RESET(slot->bus, slot, SDHCI_RESET_CMD);
2348}
2349
2350void
2351sdhci_generic_intr(struct sdhci_slot *slot)
2352{
2353	uint32_t intmask, present;
2354	uint16_t val16;
2355
2356	SDHCI_LOCK(slot);
2357	/* Read slot interrupt status. */
2358	intmask = RD4(slot, SDHCI_INT_STATUS);
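	/*
	 * Nothing pending means the interrupt isn't ours; an all-ones read
	 * typically means the controller has gone away.
	 */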
2359	if (intmask == 0 || intmask == 0xffffffff) {
2360		SDHCI_UNLOCK(slot);
2361		return;
2362	}
2363	if (__predict_false(sdhci_debug > 2))
2364		slot_printf(slot, "Interrupt %#x\n", intmask);
2365
2366	/* Handle tuning error interrupt. */
2367	if (__predict_false(intmask & SDHCI_INT_TUNEERR)) {
2368		WR4(slot, SDHCI_INT_STATUS, SDHCI_INT_TUNEERR);
2369		slot_printf(slot, "Tuning error indicated\n");
2370		slot->retune_req |= SDHCI_RETUNE_REQ_RESET;
2371		if (slot->curcmd) {
2372			slot->curcmd->error = MMC_ERR_BADCRC;
2373			sdhci_finish_command(slot);
2374		}
2375	}
2376	/* Handle re-tuning interrupt. */
2377	if (__predict_false(intmask & SDHCI_INT_RETUNE))
2378		slot->retune_req |= SDHCI_RETUNE_REQ_NEEDED;
2379	/* Handle card presence interrupts. */
2380	if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2381		present = (intmask & SDHCI_INT_CARD_INSERT) != 0;
2382		slot->intmask &=
2383		    ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
2384		slot->intmask |= present ? SDHCI_INT_CARD_REMOVE :
2385		    SDHCI_INT_CARD_INSERT;
2386		WR4(slot, SDHCI_INT_ENABLE, slot->intmask);
2387		WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask);
2388		WR4(slot, SDHCI_INT_STATUS, intmask &
2389		    (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE));
2390		sdhci_handle_card_present_locked(slot, present);
2391	}
2392	/* Handle command interrupts. */
2393	if (intmask & SDHCI_INT_CMD_MASK) {
2394		WR4(slot, SDHCI_INT_STATUS, intmask & SDHCI_INT_CMD_MASK);
2395		sdhci_cmd_irq(slot, intmask & SDHCI_INT_CMD_MASK);
2396	}
2397	/* Handle data interrupts. */
2398	if (intmask & SDHCI_INT_DATA_MASK) {
2399		WR4(slot, SDHCI_INT_STATUS, intmask & SDHCI_INT_DATA_MASK);
		/* Don't call data_irq in case of an errored command. */
2401		if ((intmask & SDHCI_INT_CMD_ERROR_MASK) == 0)
2402			sdhci_data_irq(slot, intmask & SDHCI_INT_DATA_MASK);
2403	}
2404	/* Handle AutoCMD12 error interrupt. */
2405	if (intmask & SDHCI_INT_ACMD12ERR) {
2406		/* Clearing SDHCI_INT_ACMD12ERR may clear SDHCI_ACMD12_ERR. */
2407		val16 = RD2(slot, SDHCI_ACMD12_ERR);
2408		WR4(slot, SDHCI_INT_STATUS, SDHCI_INT_ACMD12ERR);
2409		sdhci_acmd_irq(slot, val16);
2410	}
2411	/* Handle bus power interrupt. */
2412	if (intmask & SDHCI_INT_BUS_POWER) {
2413		WR4(slot, SDHCI_INT_STATUS, SDHCI_INT_BUS_POWER);
2414		slot_printf(slot, "Card is consuming too much power!\n");
2415	}
2416	intmask &= ~(SDHCI_INT_ERROR | SDHCI_INT_TUNEERR | SDHCI_INT_RETUNE |
2417	    SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | SDHCI_INT_CMD_MASK |
2418	    SDHCI_INT_DATA_MASK | SDHCI_INT_ACMD12ERR | SDHCI_INT_BUS_POWER);
2419	/* The rest is unknown. */
2420	if (intmask) {
2421		WR4(slot, SDHCI_INT_STATUS, intmask);
2422		slot_printf(slot, "Unexpected interrupt 0x%08x.\n",
2423		    intmask);
2424		sdhci_dumpregs(slot);
2425	}
2426
2427	SDHCI_UNLOCK(slot);
2428}
2429
2430int
2431sdhci_generic_read_ivar(device_t bus, device_t child, int which,
2432    uintptr_t *result)
2433{
2434	const struct sdhci_slot *slot = device_get_ivars(child);
2435
2436	switch (which) {
2437	default:
2438		return (EINVAL);
2439	case MMCBR_IVAR_BUS_MODE:
2440		*result = slot->host.ios.bus_mode;
2441		break;
2442	case MMCBR_IVAR_BUS_WIDTH:
2443		*result = slot->host.ios.bus_width;
2444		break;
2445	case MMCBR_IVAR_CHIP_SELECT:
2446		*result = slot->host.ios.chip_select;
2447		break;
2448	case MMCBR_IVAR_CLOCK:
2449		*result = slot->host.ios.clock;
2450		break;
2451	case MMCBR_IVAR_F_MIN:
2452		*result = slot->host.f_min;
2453		break;
2454	case MMCBR_IVAR_F_MAX:
2455		*result = slot->host.f_max;
2456		break;
2457	case MMCBR_IVAR_HOST_OCR:
2458		*result = slot->host.host_ocr;
2459		break;
2460	case MMCBR_IVAR_MODE:
2461		*result = slot->host.mode;
2462		break;
2463	case MMCBR_IVAR_OCR:
2464		*result = slot->host.ocr;
2465		break;
2466	case MMCBR_IVAR_POWER_MODE:
2467		*result = slot->host.ios.power_mode;
2468		break;
2469	case MMCBR_IVAR_VDD:
2470		*result = slot->host.ios.vdd;
2471		break;
2472	case MMCBR_IVAR_RETUNE_REQ:
2473		if (slot->opt & SDHCI_TUNING_ENABLED) {
2474			if (slot->retune_req & SDHCI_RETUNE_REQ_RESET) {
2475				*result = retune_req_reset;
2476				break;
2477			}
2478			if (slot->retune_req & SDHCI_RETUNE_REQ_NEEDED) {
2479				*result = retune_req_normal;
2480				break;
2481			}
2482		}
2483		*result = retune_req_none;
2484		break;
2485	case MMCBR_IVAR_VCCQ:
2486		*result = slot->host.ios.vccq;
2487		break;
2488	case MMCBR_IVAR_CAPS:
2489		*result = slot->host.caps;
2490		break;
2491	case MMCBR_IVAR_TIMING:
2492		*result = slot->host.ios.timing;
2493		break;
2494	case MMCBR_IVAR_MAX_DATA:
2495		/*
2496		 * Re-tuning modes 1 and 2 restrict the maximum data length
2497		 * per read/write command to 4 MiB.
2498		 */
2499		if (slot->opt & SDHCI_TUNING_ENABLED &&
2500		    (slot->retune_mode == SDHCI_RETUNE_MODE_1 ||
2501		    slot->retune_mode == SDHCI_RETUNE_MODE_2)) {
2502			*result = 4 * 1024 * 1024 / MMC_SECTOR_SIZE;
2503			break;
2504		}
2505		*result = 65535;
2506		break;
2507	case MMCBR_IVAR_MAX_BUSY_TIMEOUT:
2508		/*
2509		 * Currently, sdhci_start_data() hardcodes 1 s for all CMDs.
2510		 */
2511		*result = 1000000;
2512		break;
2513	}
2514	return (0);
2515}
2516
2517int
2518sdhci_generic_write_ivar(device_t bus, device_t child, int which,
2519    uintptr_t value)
2520{
2521	struct sdhci_slot *slot = device_get_ivars(child);
2522	uint32_t clock, max_clock;
2523	int i;
2524
2525	if (sdhci_debug > 1)
2526		slot_printf(slot, "%s: var=%d\n", __func__, which);
2527	switch (which) {
2528	default:
2529		return (EINVAL);
2530	case MMCBR_IVAR_BUS_MODE:
2531		slot->host.ios.bus_mode = value;
2532		break;
2533	case MMCBR_IVAR_BUS_WIDTH:
2534		slot->host.ios.bus_width = value;
2535		break;
2536	case MMCBR_IVAR_CHIP_SELECT:
2537		slot->host.ios.chip_select = value;
2538		break;
2539	case MMCBR_IVAR_CLOCK:
2540		if (value > 0) {
2541			max_clock = slot->max_clk;
2542			clock = max_clock;
2543
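			/*
			 * Pre-3.0 hosts support only power-of-two clock
			 * dividers, while 3.0+ hosts support any even
			 * divisor; pick the highest frequency that does
			 * not exceed the requested one.
			 */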
2544			if (slot->version < SDHCI_SPEC_300) {
2545				for (i = 0; i < SDHCI_200_MAX_DIVIDER;
2546				    i <<= 1) {
2547					if (clock <= value)
2548						break;
2549					clock >>= 1;
2550				}
2551			} else {
2552				for (i = 0; i < SDHCI_300_MAX_DIVIDER;
2553				    i += 2) {
2554					if (clock <= value)
2555						break;
2556					clock = max_clock / (i + 2);
2557				}
2558			}
2559
2560			slot->host.ios.clock = clock;
2561		} else
2562			slot->host.ios.clock = 0;
2563		break;
2564	case MMCBR_IVAR_MODE:
2565		slot->host.mode = value;
2566		break;
2567	case MMCBR_IVAR_OCR:
2568		slot->host.ocr = value;
2569		break;
2570	case MMCBR_IVAR_POWER_MODE:
2571		slot->host.ios.power_mode = value;
2572		break;
2573	case MMCBR_IVAR_VDD:
2574		slot->host.ios.vdd = value;
2575		break;
2576	case MMCBR_IVAR_VCCQ:
2577		slot->host.ios.vccq = value;
2578		break;
2579	case MMCBR_IVAR_TIMING:
2580		slot->host.ios.timing = value;
2581		break;
2582	case MMCBR_IVAR_CAPS:
2583	case MMCBR_IVAR_HOST_OCR:
2584	case MMCBR_IVAR_F_MIN:
2585	case MMCBR_IVAR_F_MAX:
2586	case MMCBR_IVAR_MAX_DATA:
2587	case MMCBR_IVAR_RETUNE_REQ:
2588		return (EINVAL);
2589	}
2590	return (0);
2591}
2592
2593#ifdef MMCCAM
2594void
2595sdhci_start_slot(struct sdhci_slot *slot)
2596{
2597
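	/* Create a CAM SIM for this slot and register it as a bus. */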
2598	if ((slot->devq = cam_simq_alloc(1)) == NULL)
2599		goto fail;
2600
2601	mtx_init(&slot->sim_mtx, "sdhcisim", NULL, MTX_DEF);
2602	slot->sim = cam_sim_alloc(sdhci_cam_action, sdhci_cam_poll,
2603	    "sdhci_slot", slot, device_get_unit(slot->bus),
2604	    &slot->sim_mtx, 1, 1, slot->devq);
2605
2606	if (slot->sim == NULL) {
2607		cam_simq_free(slot->devq);
2608		slot_printf(slot, "cannot allocate CAM SIM\n");
2609		goto fail;
2610	}
2611
2612	mtx_lock(&slot->sim_mtx);
2613	if (xpt_bus_register(slot->sim, slot->bus, 0) != 0) {
2614		slot_printf(slot, "cannot register SCSI pass-through bus\n");
2615		cam_sim_free(slot->sim, FALSE);
2616		cam_simq_free(slot->devq);
2617		mtx_unlock(&slot->sim_mtx);
2618		goto fail;
2619	}
2620	mtx_unlock(&slot->sim_mtx);
2621
2622	/* End CAM-specific init */
2623	slot->card_present = 0;
2624	sdhci_card_task(slot, 0);
2625	return;
2626
2627fail:
2628	if (slot->sim != NULL) {
2629		mtx_lock(&slot->sim_mtx);
2630		xpt_bus_deregister(cam_sim_path(slot->sim));
2631		cam_sim_free(slot->sim, FALSE);
2632		mtx_unlock(&slot->sim_mtx);
2633	}
2634
2635	if (slot->devq != NULL)
2636		cam_simq_free(slot->devq);
2637}
2638
2639void
2640sdhci_cam_action(struct cam_sim *sim, union ccb *ccb)
2641{
2642	struct sdhci_slot *slot;
2643
2644	slot = cam_sim_softc(sim);
2645	if (slot == NULL) {
2646		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
2647		xpt_done(ccb);
2648		return;
2649	}
2650
2651	mtx_assert(&slot->sim_mtx, MA_OWNED);
2652
2653	switch (ccb->ccb_h.func_code) {
2654	case XPT_PATH_INQ:
2655		mmc_path_inq(&ccb->cpi, "Deglitch Networks", sim, maxphys);
2656		break;
2657
2658	case XPT_MMC_GET_TRAN_SETTINGS:
2659	case XPT_GET_TRAN_SETTINGS:
2660	{
2661		struct ccb_trans_settings *cts = &ccb->cts;
2662		uint32_t max_data;
2663
2664		if (sdhci_debug > 1)
2665			slot_printf(slot, "Got XPT_GET_TRAN_SETTINGS\n");
2666
2667		cts->protocol = PROTO_MMCSD;
2668		cts->protocol_version = 1;
2669		cts->transport = XPORT_MMCSD;
2670		cts->transport_version = 1;
2671		cts->xport_specific.valid = 0;
2672		cts->proto_specific.mmc.host_ocr = slot->host.host_ocr;
2673		cts->proto_specific.mmc.host_f_min = slot->host.f_min;
2674		cts->proto_specific.mmc.host_f_max = slot->host.f_max;
2675		cts->proto_specific.mmc.host_caps = slot->host.caps;
2676		/*
2677		 * Re-tuning modes 1 and 2 restrict the maximum data length
2678		 * per read/write command to 4 MiB.
2679		 */
2680		if (slot->opt & SDHCI_TUNING_ENABLED &&
2681		    (slot->retune_mode == SDHCI_RETUNE_MODE_1 ||
2682		    slot->retune_mode == SDHCI_RETUNE_MODE_2)) {
2683			max_data = 4 * 1024 * 1024 / MMC_SECTOR_SIZE;
2684		} else {
2685			max_data = 65535;
2686		}
2687		cts->proto_specific.mmc.host_max_data = max_data;
2688
		memcpy(&cts->proto_specific.mmc.ios, &slot->host.ios,
		    sizeof(struct mmc_ios));
2690		ccb->ccb_h.status = CAM_REQ_CMP;
2691		break;
2692	}
2693	case XPT_MMC_SET_TRAN_SETTINGS:
2694	case XPT_SET_TRAN_SETTINGS:
2695		if (sdhci_debug > 1)
2696			slot_printf(slot, "Got XPT_SET_TRAN_SETTINGS\n");
2697		sdhci_cam_settran_settings(slot, ccb);
2698		ccb->ccb_h.status = CAM_REQ_CMP;
2699		break;
2700	case XPT_RESET_BUS:
2701		if (sdhci_debug > 1)
2702			slot_printf(slot, "Got XPT_RESET_BUS, ACK it...\n");
2703		ccb->ccb_h.status = CAM_REQ_CMP;
2704		break;
2705	case XPT_MMC_IO:
2706		/*
2707		 * Here is the HW-dependent part of
2708		 * sending the command to the underlying h/w
2709		 * At some point in the future an interrupt comes.
2710		 * Then the request will be marked as completed.
2711		 */
2712		if (__predict_false(sdhci_debug > 1))
2713			slot_printf(slot, "Got XPT_MMC_IO\n");
2714		ccb->ccb_h.status = CAM_REQ_INPROG;
2715
2716		sdhci_cam_request(cam_sim_softc(sim), ccb);
2717		return;
2718	default:
2719		ccb->ccb_h.status = CAM_REQ_INVALID;
2720		break;
2721	}
2722	xpt_done(ccb);
2723	return;
2724}
2725
2726void
2727sdhci_cam_poll(struct cam_sim *sim)
2728{
2729	sdhci_generic_intr(cam_sim_softc(sim));
2730}
2731
2732static int
2733sdhci_cam_get_possible_host_clock(const struct sdhci_slot *slot,
2734    int proposed_clock)
2735{
2736	int max_clock, clock, i;
2737
2738	if (proposed_clock == 0)
2739		return 0;
2740	max_clock = slot->max_clk;
2741	clock = max_clock;
2742
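	/* Same divider selection as in sdhci_generic_write_ivar(). */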
2743	if (slot->version < SDHCI_SPEC_300) {
2744		for (i = 0; i < SDHCI_200_MAX_DIVIDER; i <<= 1) {
2745			if (clock <= proposed_clock)
2746				break;
2747			clock >>= 1;
2748		}
2749	} else {
2750		for (i = 0; i < SDHCI_300_MAX_DIVIDER; i += 2) {
2751			if (clock <= proposed_clock)
2752				break;
2753			clock = max_clock / (i + 2);
2754		}
2755	}
2756	return clock;
2757}
2758
2759static int
2760sdhci_cam_settran_settings(struct sdhci_slot *slot, union ccb *ccb)
2761{
2762	struct mmc_ios *ios;
2763	const struct mmc_ios *new_ios;
2764	const struct ccb_trans_settings_mmc *cts;
2765
2766	ios = &slot->host.ios;
2767	cts = &ccb->cts.proto_specific.mmc;
2768	new_ios = &cts->ios;
2769
	/* Update only the requested fields. */
2771	if (cts->ios_valid & MMC_CLK) {
		ios->clock = sdhci_cam_get_possible_host_clock(slot,
		    new_ios->clock);
2773		if (sdhci_debug > 1)
2774			slot_printf(slot, "Clock => %d\n", ios->clock);
2775	}
2776	if (cts->ios_valid & MMC_VDD) {
2777		ios->vdd = new_ios->vdd;
2778		if (sdhci_debug > 1)
2779			slot_printf(slot, "VDD => %d\n", ios->vdd);
2780	}
2781	if (cts->ios_valid & MMC_CS) {
2782		ios->chip_select = new_ios->chip_select;
2783		if (sdhci_debug > 1)
2784			slot_printf(slot, "CS => %d\n", ios->chip_select);
2785	}
2786	if (cts->ios_valid & MMC_BW) {
2787		ios->bus_width = new_ios->bus_width;
2788		if (sdhci_debug > 1)
2789			slot_printf(slot, "Bus width => %d\n", ios->bus_width);
2790	}
2791	if (cts->ios_valid & MMC_PM) {
2792		ios->power_mode = new_ios->power_mode;
2793		if (sdhci_debug > 1)
2794			slot_printf(slot, "Power mode => %d\n", ios->power_mode);
2795	}
2796	if (cts->ios_valid & MMC_BT) {
2797		ios->timing = new_ios->timing;
2798		if (sdhci_debug > 1)
2799			slot_printf(slot, "Timing => %d\n", ios->timing);
2800	}
2801	if (cts->ios_valid & MMC_BM) {
2802		ios->bus_mode = new_ios->bus_mode;
2803		if (sdhci_debug > 1)
2804			slot_printf(slot, "Bus mode => %d\n", ios->bus_mode);
2805	}
2806	if (cts->ios_valid & MMC_VCCQ) {
2807		ios->vccq = new_ios->vccq;
2808		if (sdhci_debug > 1)
2809			slot_printf(slot, "VCCQ => %d\n", ios->vccq);
2810	}
2811
2812	/* XXX Provide a way to call a chip-specific IOS update, required for TI */
2813	return (sdhci_cam_update_ios(slot));
2814}
2815
2816static int
2817sdhci_cam_update_ios(struct sdhci_slot *slot)
2818{
2819	struct mmc_ios *ios = &slot->host.ios;
2820
2821	if (sdhci_debug > 1)
2822		slot_printf(slot, "%s: power_mode=%d, clk=%d, bus_width=%d, timing=%d\n",
2823		    __func__, ios->power_mode, ios->clock, ios->bus_width, ios->timing);
2824	SDHCI_LOCK(slot);
	/* Do a full reset on bus power-down to clear the controller state. */
2826	if (ios->power_mode == power_off) {
2827		WR4(slot, SDHCI_SIGNAL_ENABLE, 0);
2828		sdhci_init(slot);
2829	}
2830	/* Configure the bus. */
2831	sdhci_set_clock(slot, ios->clock);
2832	sdhci_set_power(slot, (ios->power_mode == power_off) ? 0 : ios->vdd);
2833	if (ios->bus_width == bus_width_8) {
2834		slot->hostctrl |= SDHCI_CTRL_8BITBUS;
2835		slot->hostctrl &= ~SDHCI_CTRL_4BITBUS;
2836	} else if (ios->bus_width == bus_width_4) {
2837		slot->hostctrl &= ~SDHCI_CTRL_8BITBUS;
2838		slot->hostctrl |= SDHCI_CTRL_4BITBUS;
2839	} else if (ios->bus_width == bus_width_1) {
2840		slot->hostctrl &= ~SDHCI_CTRL_8BITBUS;
2841		slot->hostctrl &= ~SDHCI_CTRL_4BITBUS;
2842	} else {
2843		panic("Invalid bus width: %d", ios->bus_width);
2844	}
2845	if (ios->timing == bus_timing_hs &&
2846	    !(slot->quirks & SDHCI_QUIRK_DONT_SET_HISPD_BIT))
2847		slot->hostctrl |= SDHCI_CTRL_HISPD;
2848	else
2849		slot->hostctrl &= ~SDHCI_CTRL_HISPD;
2850	WR1(slot, SDHCI_HOST_CONTROL, slot->hostctrl);
	/* Some controllers like a reset after bus changes. */
	if (slot->quirks & SDHCI_QUIRK_RESET_ON_IOS)
2853		SDHCI_RESET(slot->bus, slot,
2854		    SDHCI_RESET_CMD | SDHCI_RESET_DATA);
2855
2856	SDHCI_UNLOCK(slot);
2857	return (0);
2858}
2859
2860static int
2861sdhci_cam_request(struct sdhci_slot *slot, union ccb *ccb)
2862{
2863	const struct ccb_mmcio *mmcio;
2864
2865	mmcio = &ccb->mmcio;
2866
2867	SDHCI_LOCK(slot);
2868/*	if (slot->req != NULL) {
2869		SDHCI_UNLOCK(slot);
2870		return (EBUSY);
2871	}
2872*/
2873	if (__predict_false(sdhci_debug > 1)) {
2874		slot_printf(slot, "CMD%u arg %#x flags %#x dlen %u dflags %#x "
2875		    "blksz=%zu blkcnt=%zu\n",
2876		    mmcio->cmd.opcode, mmcio->cmd.arg, mmcio->cmd.flags,
2877		    mmcio->cmd.data != NULL ? (unsigned int) mmcio->cmd.data->len : 0,
2878		    mmcio->cmd.data != NULL ? mmcio->cmd.data->flags : 0,
2879		    mmcio->cmd.data != NULL ? mmcio->cmd.data->block_size : 0,
2880		    mmcio->cmd.data != NULL ? mmcio->cmd.data->block_count : 0);
2881	}
2882	if (mmcio->cmd.data != NULL) {
2883		if (mmcio->cmd.data->len == 0 || mmcio->cmd.data->flags == 0)
2884			panic("data->len = %d, data->flags = %d -- something is b0rked",
2885			    (int)mmcio->cmd.data->len, mmcio->cmd.data->flags);
2886	}
2887	slot->ccb = ccb;
2888	slot->flags = 0;
2889	sdhci_start(slot);
2890	SDHCI_UNLOCK(slot);
2891	return (0);
2892}
2893#endif /* MMCCAM */
2894
2895MODULE_VERSION(sdhci, SDHCI_VERSION);
2896