Deleted Added
full compact
a10_mmc.c (308324) a10_mmc.c (309756)
1/*-
2 * Copyright (c) 2013 Alexander Fedorov
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 2013 Alexander Fedorov
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: stable/11/sys/arm/allwinner/a10_mmc.c 308324 2016-11-05 04:17:32Z mmel $");
28__FBSDID("$FreeBSD: stable/11/sys/arm/allwinner/a10_mmc.c 309756 2016-12-09 20:07:01Z manu $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/bus.h>
33#include <sys/kernel.h>
34#include <sys/lock.h>
35#include <sys/malloc.h>
36#include <sys/module.h>
37#include <sys/mutex.h>
38#include <sys/resource.h>
39#include <sys/rman.h>
40#include <sys/sysctl.h>
41
42#include <machine/bus.h>
43
44#include <dev/ofw/ofw_bus.h>
45#include <dev/ofw/ofw_bus_subr.h>
46
47#include <dev/mmc/bridge.h>
48#include <dev/mmc/mmcreg.h>
49#include <dev/mmc/mmcbrvar.h>
50
51#include <arm/allwinner/a10_mmc.h>
52#include <dev/extres/clk/clk.h>
53#include <dev/extres/hwreset/hwreset.h>
54
55#define A10_MMC_MEMRES 0
56#define A10_MMC_IRQRES 1
57#define A10_MMC_RESSZ 2
58#define A10_MMC_DMA_SEGS ((MAXPHYS / PAGE_SIZE) + 1)
59#define A10_MMC_DMA_MAX_SIZE 0x2000
60#define A10_MMC_DMA_FTRGLEVEL 0x20070008
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/bus.h>
33#include <sys/kernel.h>
34#include <sys/lock.h>
35#include <sys/malloc.h>
36#include <sys/module.h>
37#include <sys/mutex.h>
38#include <sys/resource.h>
39#include <sys/rman.h>
40#include <sys/sysctl.h>
41
42#include <machine/bus.h>
43
44#include <dev/ofw/ofw_bus.h>
45#include <dev/ofw/ofw_bus_subr.h>
46
47#include <dev/mmc/bridge.h>
48#include <dev/mmc/mmcreg.h>
49#include <dev/mmc/mmcbrvar.h>
50
51#include <arm/allwinner/a10_mmc.h>
52#include <dev/extres/clk/clk.h>
53#include <dev/extres/hwreset/hwreset.h>
54
55#define A10_MMC_MEMRES 0
56#define A10_MMC_IRQRES 1
57#define A10_MMC_RESSZ 2
58#define A10_MMC_DMA_SEGS ((MAXPHYS / PAGE_SIZE) + 1)
59#define A10_MMC_DMA_MAX_SIZE 0x2000
60#define A10_MMC_DMA_FTRGLEVEL 0x20070008
61#define A10_MMC_RESET_RETRY 1000
61
62#define CARD_ID_FREQUENCY 400000
63
64static struct ofw_compat_data compat_data[] = {
65 {"allwinner,sun4i-a10-mmc", 1},
66 {"allwinner,sun5i-a13-mmc", 1},
67 {NULL, 0}
68};
69
70struct a10_mmc_softc {
71 device_t a10_dev;
72 clk_t a10_clk_ahb;
73 clk_t a10_clk_mmc;
74 hwreset_t a10_rst_ahb;
75 int a10_bus_busy;
76 int a10_resid;
77 int a10_timeout;
78 struct callout a10_timeoutc;
79 struct mmc_host a10_host;
80 struct mmc_request * a10_req;
81 struct mtx a10_mtx;
82 struct resource * a10_res[A10_MMC_RESSZ];
83 uint32_t a10_intr;
84 uint32_t a10_intr_wait;
85 void * a10_intrhand;
86
87 /* Fields required for DMA access. */
88 bus_addr_t a10_dma_desc_phys;
89 bus_dmamap_t a10_dma_map;
90 bus_dma_tag_t a10_dma_tag;
91 void * a10_dma_desc;
92 bus_dmamap_t a10_dma_buf_map;
93 bus_dma_tag_t a10_dma_buf_tag;
94 int a10_dma_map_err;
95};
96
97static struct resource_spec a10_mmc_res_spec[] = {
98 { SYS_RES_MEMORY, 0, RF_ACTIVE },
99 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
100 { -1, 0, 0 }
101};
102
103static int a10_mmc_probe(device_t);
104static int a10_mmc_attach(device_t);
105static int a10_mmc_detach(device_t);
106static int a10_mmc_setup_dma(struct a10_mmc_softc *);
107static int a10_mmc_reset(struct a10_mmc_softc *);
108static void a10_mmc_intr(void *);
109static int a10_mmc_update_clock(struct a10_mmc_softc *, uint32_t);
110
111static int a10_mmc_update_ios(device_t, device_t);
112static int a10_mmc_request(device_t, device_t, struct mmc_request *);
113static int a10_mmc_get_ro(device_t, device_t);
114static int a10_mmc_acquire_host(device_t, device_t);
115static int a10_mmc_release_host(device_t, device_t);
116
117#define A10_MMC_LOCK(_sc) mtx_lock(&(_sc)->a10_mtx)
118#define A10_MMC_UNLOCK(_sc) mtx_unlock(&(_sc)->a10_mtx)
119#define A10_MMC_READ_4(_sc, _reg) \
120 bus_read_4((_sc)->a10_res[A10_MMC_MEMRES], _reg)
121#define A10_MMC_WRITE_4(_sc, _reg, _value) \
122 bus_write_4((_sc)->a10_res[A10_MMC_MEMRES], _reg, _value)
123
124static int
125a10_mmc_probe(device_t dev)
126{
127
128 if (!ofw_bus_status_okay(dev))
129 return (ENXIO);
130 if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
131 return (ENXIO);
132
133 device_set_desc(dev, "Allwinner Integrated MMC/SD controller");
134
135 return (BUS_PROBE_DEFAULT);
136}
137
138static int
139a10_mmc_attach(device_t dev)
140{
141 device_t child;
142 struct a10_mmc_softc *sc;
143 struct sysctl_ctx_list *ctx;
144 struct sysctl_oid_list *tree;
145 uint32_t bus_width;
146 phandle_t node;
147 int error;
148
149 node = ofw_bus_get_node(dev);
150 sc = device_get_softc(dev);
151 sc->a10_dev = dev;
152 sc->a10_req = NULL;
153 if (bus_alloc_resources(dev, a10_mmc_res_spec, sc->a10_res) != 0) {
154 device_printf(dev, "cannot allocate device resources\n");
155 return (ENXIO);
156 }
157 if (bus_setup_intr(dev, sc->a10_res[A10_MMC_IRQRES],
158 INTR_TYPE_MISC | INTR_MPSAFE, NULL, a10_mmc_intr, sc,
159 &sc->a10_intrhand)) {
160 bus_release_resources(dev, a10_mmc_res_spec, sc->a10_res);
161 device_printf(dev, "cannot setup interrupt handler\n");
162 return (ENXIO);
163 }
164 mtx_init(&sc->a10_mtx, device_get_nameunit(sc->a10_dev), "a10_mmc",
165 MTX_DEF);
166 callout_init_mtx(&sc->a10_timeoutc, &sc->a10_mtx, 0);
167
168 /* De-assert reset */
169 if (hwreset_get_by_ofw_name(dev, 0, "ahb", &sc->a10_rst_ahb) == 0) {
170 error = hwreset_deassert(sc->a10_rst_ahb);
171 if (error != 0) {
172 device_printf(dev, "cannot de-assert reset\n");
173 goto fail;
174 }
175 }
176
177 /* Activate the module clock. */
178 error = clk_get_by_ofw_name(dev, 0, "ahb", &sc->a10_clk_ahb);
179 if (error != 0) {
180 device_printf(dev, "cannot get ahb clock\n");
181 goto fail;
182 }
183 error = clk_enable(sc->a10_clk_ahb);
184 if (error != 0) {
185 device_printf(dev, "cannot enable ahb clock\n");
186 goto fail;
187 }
188 error = clk_get_by_ofw_name(dev, 0, "mmc", &sc->a10_clk_mmc);
189 if (error != 0) {
190 device_printf(dev, "cannot get mmc clock\n");
191 goto fail;
192 }
193 error = clk_set_freq(sc->a10_clk_mmc, CARD_ID_FREQUENCY,
194 CLK_SET_ROUND_DOWN);
195 if (error != 0) {
196 device_printf(dev, "cannot init mmc clock\n");
197 goto fail;
198 }
199 error = clk_enable(sc->a10_clk_mmc);
200 if (error != 0) {
201 device_printf(dev, "cannot enable mmc clock\n");
202 goto fail;
203 }
204
205 sc->a10_timeout = 10;
206 ctx = device_get_sysctl_ctx(dev);
207 tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
208 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW,
209 &sc->a10_timeout, 0, "Request timeout in seconds");
210
211 /* Hardware reset */
212 A10_MMC_WRITE_4(sc, A10_MMC_HWRST, 1);
213 DELAY(100);
214 A10_MMC_WRITE_4(sc, A10_MMC_HWRST, 0);
215 DELAY(500);
216
217 /* Soft Reset controller. */
218 if (a10_mmc_reset(sc) != 0) {
219 device_printf(dev, "cannot reset the controller\n");
220 goto fail;
221 }
222
223 if (a10_mmc_setup_dma(sc) != 0) {
224 device_printf(sc->a10_dev, "Couldn't setup DMA!\n");
225 goto fail;
226 }
227
228 if (OF_getencprop(node, "bus-width", &bus_width, sizeof(uint32_t)) <= 0)
229 bus_width = 4;
230
231 sc->a10_host.f_min = 400000;
62
/* Module clock frequency used during the card identification phase. */
#define CARD_ID_FREQUENCY	400000

/* FDT "compatible" strings served by this driver. */
static struct ofw_compat_data compat_data[] = {
	{"allwinner,sun4i-a10-mmc", 1},
	{"allwinner,sun5i-a13-mmc", 1},
	{NULL, 0}
};

/* Per-device software state. */
struct a10_mmc_softc {
	device_t		a10_dev;	/* our device */
	clk_t			a10_clk_ahb;	/* AHB bus clock */
	clk_t			a10_clk_mmc;	/* MMC module clock */
	hwreset_t		a10_rst_ahb;	/* AHB reset line (optional) */
	int			a10_bus_busy;	/* host acquired flag */
	int			a10_resid;	/* 32-bit words transferred */
	int			a10_timeout;	/* request timeout, seconds */
	struct callout		a10_timeoutc;	/* request watchdog */
	struct mmc_host		a10_host;	/* mmc bridge host state */
	struct mmc_request *	a10_req;	/* in-flight request or NULL */
	struct mtx		a10_mtx;	/* protects request state */
	struct resource *	a10_res[A10_MMC_RESSZ];
	uint32_t		a10_intr;	/* status bits seen so far */
	uint32_t		a10_intr_wait;	/* bits needed to complete */
	void *			a10_intrhand;

	/* Fields required for DMA access. */
	bus_addr_t	 	a10_dma_desc_phys;	/* ring bus address */
	bus_dmamap_t		a10_dma_map;
	bus_dma_tag_t 		a10_dma_tag;
	void * 			a10_dma_desc;	/* descriptor ring (KVA) */
	bus_dmamap_t		a10_dma_buf_map;
	bus_dma_tag_t		a10_dma_buf_tag;
	int			a10_dma_map_err;	/* sticky cb error */
};

/* Memory and IRQ resources, indexed by A10_MMC_MEMRES/A10_MMC_IRQRES. */
static struct resource_spec a10_mmc_res_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,	0 }
};

static int a10_mmc_probe(device_t);
static int a10_mmc_attach(device_t);
static int a10_mmc_detach(device_t);
static int a10_mmc_setup_dma(struct a10_mmc_softc *);
static int a10_mmc_reset(struct a10_mmc_softc *);
static void a10_mmc_intr(void *);
static int a10_mmc_update_clock(struct a10_mmc_softc *, uint32_t);

static int a10_mmc_update_ios(device_t, device_t);
static int a10_mmc_request(device_t, device_t, struct mmc_request *);
static int a10_mmc_get_ro(device_t, device_t);
static int a10_mmc_acquire_host(device_t, device_t);
static int a10_mmc_release_host(device_t, device_t);

/* Softc lock and register access helpers. */
#define	A10_MMC_LOCK(_sc)	mtx_lock(&(_sc)->a10_mtx)
#define	A10_MMC_UNLOCK(_sc)	mtx_unlock(&(_sc)->a10_mtx)
#define	A10_MMC_READ_4(_sc, _reg)					\
	bus_read_4((_sc)->a10_res[A10_MMC_MEMRES], _reg)
#define	A10_MMC_WRITE_4(_sc, _reg, _value)				\
	bus_write_4((_sc)->a10_res[A10_MMC_MEMRES], _reg, _value)
124
125static int
126a10_mmc_probe(device_t dev)
127{
128
129 if (!ofw_bus_status_okay(dev))
130 return (ENXIO);
131 if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
132 return (ENXIO);
133
134 device_set_desc(dev, "Allwinner Integrated MMC/SD controller");
135
136 return (BUS_PROBE_DEFAULT);
137}
138
/*
 * Attach: allocate bus resources, wire the interrupt, bring the
 * controller out of reset, start its clocks, soft-reset it, set up
 * DMA and publish an mmc(4) child bus.  Returns 0 or ENXIO.
 */
static int
a10_mmc_attach(device_t dev)
{
	device_t child;
	struct a10_mmc_softc *sc;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *tree;
	uint32_t bus_width;
	phandle_t node;
	int error;

	node = ofw_bus_get_node(dev);
	sc = device_get_softc(dev);
	sc->a10_dev = dev;
	sc->a10_req = NULL;
	if (bus_alloc_resources(dev, a10_mmc_res_spec, sc->a10_res) != 0) {
		device_printf(dev, "cannot allocate device resources\n");
		return (ENXIO);
	}
	if (bus_setup_intr(dev, sc->a10_res[A10_MMC_IRQRES],
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, a10_mmc_intr, sc,
	    &sc->a10_intrhand)) {
		bus_release_resources(dev, a10_mmc_res_spec, sc->a10_res);
		device_printf(dev, "cannot setup interrupt handler\n");
		return (ENXIO);
	}
	mtx_init(&sc->a10_mtx, device_get_nameunit(sc->a10_dev), "a10_mmc",
	    MTX_DEF);
	callout_init_mtx(&sc->a10_timeoutc, &sc->a10_mtx, 0);

	/* De-assert reset (the reset line is optional in the FDT). */
	if (hwreset_get_by_ofw_name(dev, 0, "ahb", &sc->a10_rst_ahb) == 0) {
		error = hwreset_deassert(sc->a10_rst_ahb);
		if (error != 0) {
			device_printf(dev, "cannot de-assert reset\n");
			goto fail;
		}
	}

	/* Activate the module clock. */
	error = clk_get_by_ofw_name(dev, 0, "ahb", &sc->a10_clk_ahb);
	if (error != 0) {
		device_printf(dev, "cannot get ahb clock\n");
		goto fail;
	}
	error = clk_enable(sc->a10_clk_ahb);
	if (error != 0) {
		device_printf(dev, "cannot enable ahb clock\n");
		goto fail;
	}
	error = clk_get_by_ofw_name(dev, 0, "mmc", &sc->a10_clk_mmc);
	if (error != 0) {
		device_printf(dev, "cannot get mmc clock\n");
		goto fail;
	}
	/* Start at the card-identification frequency (400 kHz). */
	error = clk_set_freq(sc->a10_clk_mmc, CARD_ID_FREQUENCY,
	    CLK_SET_ROUND_DOWN);
	if (error != 0) {
		device_printf(dev, "cannot init mmc clock\n");
		goto fail;
	}
	error = clk_enable(sc->a10_clk_mmc);
	if (error != 0) {
		device_printf(dev, "cannot enable mmc clock\n");
		goto fail;
	}

	/* Default request timeout, tunable at runtime via sysctl. */
	sc->a10_timeout = 10;
	ctx = device_get_sysctl_ctx(dev);
	tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
	SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW,
	    &sc->a10_timeout, 0, "Request timeout in seconds");

	/* Hardware reset */
	A10_MMC_WRITE_4(sc, A10_MMC_HWRST, 1);
	DELAY(100);
	A10_MMC_WRITE_4(sc, A10_MMC_HWRST, 0);
	DELAY(500);

	/* Soft Reset controller. */
	if (a10_mmc_reset(sc) != 0) {
		device_printf(dev, "cannot reset the controller\n");
		goto fail;
	}

	if (a10_mmc_setup_dma(sc) != 0) {
		device_printf(sc->a10_dev, "Couldn't setup DMA!\n");
		goto fail;
	}

	/* Default to a 4-bit bus when the FDT does not say otherwise. */
	if (OF_getencprop(node, "bus-width", &bus_width, sizeof(uint32_t)) <= 0)
		bus_width = 4;

	/* Advertise host capabilities to the mmc(4) stack. */
	sc->a10_host.f_min = 400000;
	sc->a10_host.f_max = 52000000;
	sc->a10_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
	sc->a10_host.mode = mode_sd;
	sc->a10_host.caps = MMC_CAP_HSPEED;
	if (bus_width >= 4)
		sc->a10_host.caps |= MMC_CAP_4_BIT_DATA;
	if (bus_width >= 8)
		sc->a10_host.caps |= MMC_CAP_8_BIT_DATA;

	child = device_add_child(dev, "mmc", -1);
	if (child == NULL) {
		device_printf(dev, "attaching MMC bus failed!\n");
		goto fail;
	}
	if (device_probe_and_attach(child) != 0) {
		device_printf(dev, "attaching MMC child failed!\n");
		device_delete_child(dev, child);
		goto fail;
	}

	return (0);

fail:
	/* Unwind everything acquired before the failure point. */
	callout_drain(&sc->a10_timeoutc);
	mtx_destroy(&sc->a10_mtx);
	bus_teardown_intr(dev, sc->a10_res[A10_MMC_IRQRES], sc->a10_intrhand);
	bus_release_resources(dev, a10_mmc_res_spec, sc->a10_res);

	return (ENXIO);
}
262
263static int
264a10_mmc_detach(device_t dev)
265{
266
267 return (EBUSY);
268}
269
270static void
271a10_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
272{
273 struct a10_mmc_softc *sc;
274
275 sc = (struct a10_mmc_softc *)arg;
276 if (err) {
277 sc->a10_dma_map_err = err;
278 return;
279 }
280 sc->a10_dma_desc_phys = segs[0].ds_addr;
281}
282
283static int
284a10_mmc_setup_dma(struct a10_mmc_softc *sc)
285{
286 int dma_desc_size, error;
287
288 /* Allocate the DMA descriptor memory. */
289 dma_desc_size = sizeof(struct a10_mmc_dma_desc) * A10_MMC_DMA_SEGS;
290 error = bus_dma_tag_create(bus_get_dma_tag(sc->a10_dev),
291 A10_MMC_DMA_ALIGN, 0,
292 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
293 dma_desc_size, 1, dma_desc_size, 0, NULL, NULL, &sc->a10_dma_tag);
294 if (error)
295 return (error);
296 error = bus_dmamem_alloc(sc->a10_dma_tag, &sc->a10_dma_desc,
297 BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->a10_dma_map);
298 if (error)
299 return (error);
300
301 error = bus_dmamap_load(sc->a10_dma_tag, sc->a10_dma_map,
302 sc->a10_dma_desc, dma_desc_size, a10_dma_desc_cb, sc, 0);
303 if (error)
304 return (error);
305 if (sc->a10_dma_map_err)
306 return (sc->a10_dma_map_err);
307
308 /* Create the DMA map for data transfers. */
309 error = bus_dma_tag_create(bus_get_dma_tag(sc->a10_dev),
310 A10_MMC_DMA_ALIGN, 0,
311 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
312 A10_MMC_DMA_MAX_SIZE * A10_MMC_DMA_SEGS, A10_MMC_DMA_SEGS,
313 A10_MMC_DMA_MAX_SIZE, BUS_DMA_ALLOCNOW, NULL, NULL,
314 &sc->a10_dma_buf_tag);
315 if (error)
316 return (error);
317 error = bus_dmamap_create(sc->a10_dma_buf_tag, 0,
318 &sc->a10_dma_buf_map);
319 if (error)
320 return (error);
321
322 return (0);
323}
324
325static void
326a10_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
327{
328 int i;
329 struct a10_mmc_dma_desc *dma_desc;
330 struct a10_mmc_softc *sc;
331
332 sc = (struct a10_mmc_softc *)arg;
333 sc->a10_dma_map_err = err;
334
335 if (err)
336 return;
337
338 dma_desc = sc->a10_dma_desc;
339 for (i = 0; i < nsegs; i++) {
340 dma_desc[i].buf_size = segs[i].ds_len;
341 dma_desc[i].buf_addr = segs[i].ds_addr;
342 dma_desc[i].config = A10_MMC_DMA_CONFIG_CH |
343 A10_MMC_DMA_CONFIG_OWN;
344 if (i == 0)
345 dma_desc[i].config |= A10_MMC_DMA_CONFIG_FD;
346 if (i < (nsegs - 1)) {
347 dma_desc[i].config |= A10_MMC_DMA_CONFIG_DIC;
348 dma_desc[i].next = sc->a10_dma_desc_phys +
349 ((i + 1) * sizeof(struct a10_mmc_dma_desc));
350 } else {
351 dma_desc[i].config |= A10_MMC_DMA_CONFIG_LD |
352 A10_MMC_DMA_CONFIG_ER;
353 dma_desc[i].next = 0;
354 }
355 }
356}
357
358static int
359a10_mmc_prepare_dma(struct a10_mmc_softc *sc)
360{
361 bus_dmasync_op_t sync_op;
362 int error;
363 struct mmc_command *cmd;
364 uint32_t val;
365
366 cmd = sc->a10_req->cmd;
367 if (cmd->data->len > A10_MMC_DMA_MAX_SIZE * A10_MMC_DMA_SEGS)
368 return (EFBIG);
369 error = bus_dmamap_load(sc->a10_dma_buf_tag, sc->a10_dma_buf_map,
370 cmd->data->data, cmd->data->len, a10_dma_cb, sc, 0);
371 if (error)
372 return (error);
373 if (sc->a10_dma_map_err)
374 return (sc->a10_dma_map_err);
375
376 if (cmd->data->flags & MMC_DATA_WRITE)
377 sync_op = BUS_DMASYNC_PREWRITE;
378 else
379 sync_op = BUS_DMASYNC_PREREAD;
380 bus_dmamap_sync(sc->a10_dma_buf_tag, sc->a10_dma_buf_map, sync_op);
381 bus_dmamap_sync(sc->a10_dma_tag, sc->a10_dma_map, BUS_DMASYNC_PREWRITE);
382
383 /* Enable DMA */
384 val = A10_MMC_READ_4(sc, A10_MMC_GCTL);
385 val &= ~A10_MMC_CTRL_FIFO_AC_MOD;
386 val |= A10_MMC_CTRL_DMA_ENB;
387 A10_MMC_WRITE_4(sc, A10_MMC_GCTL, val);
388
389 /* Reset DMA */
390 val |= A10_MMC_CTRL_DMA_RST;
391 A10_MMC_WRITE_4(sc, A10_MMC_GCTL, val);
392
393 A10_MMC_WRITE_4(sc, A10_MMC_DMAC, A10_MMC_DMAC_IDMAC_SOFT_RST);
394 A10_MMC_WRITE_4(sc, A10_MMC_DMAC,
395 A10_MMC_DMAC_IDMAC_IDMA_ON | A10_MMC_DMAC_IDMAC_FIX_BURST);
396
397 /* Enable RX or TX DMA interrupt */
398 if (cmd->data->flags & MMC_DATA_WRITE)
399 val |= A10_MMC_IDST_TX_INT;
400 else
401 val |= A10_MMC_IDST_RX_INT;
402 A10_MMC_WRITE_4(sc, A10_MMC_IDIE, val);
403
404 /* Set DMA descritptor list address */
405 A10_MMC_WRITE_4(sc, A10_MMC_DLBA, sc->a10_dma_desc_phys);
406
407 /* FIFO trigger level */
408 A10_MMC_WRITE_4(sc, A10_MMC_FWLR, A10_MMC_DMA_FTRGLEVEL);
409
410 return (0);
411}
412
413static int
414a10_mmc_reset(struct a10_mmc_softc *sc)
415{
416 int timeout;
417
418 A10_MMC_WRITE_4(sc, A10_MMC_GCTL, A10_MMC_RESET);
419 timeout = 1000;
420 while (--timeout > 0) {
421 if ((A10_MMC_READ_4(sc, A10_MMC_GCTL) & A10_MMC_RESET) == 0)
422 break;
423 DELAY(100);
424 }
425 if (timeout == 0)
426 return (ETIMEDOUT);
427
428 /* Set the timeout. */
429 A10_MMC_WRITE_4(sc, A10_MMC_TMOR,
430 A10_MMC_TMOR_DTO_LMT_SHIFT(A10_MMC_TMOR_DTO_LMT_MASK) |
431 A10_MMC_TMOR_RTO_LMT_SHIFT(A10_MMC_TMOR_RTO_LMT_MASK));
432
433 /* Clear pending interrupts. */
434 A10_MMC_WRITE_4(sc, A10_MMC_RISR, 0xffffffff);
435 A10_MMC_WRITE_4(sc, A10_MMC_IDST, 0xffffffff);
436 /* Unmask interrupts. */
437 A10_MMC_WRITE_4(sc, A10_MMC_IMKR,
438 A10_MMC_INT_CMD_DONE | A10_MMC_INT_ERR_BIT |
439 A10_MMC_INT_DATA_OVER | A10_MMC_INT_AUTO_STOP_DONE);
440 /* Enable interrupts and AHB access. */
441 A10_MMC_WRITE_4(sc, A10_MMC_GCTL,
442 A10_MMC_READ_4(sc, A10_MMC_GCTL) | A10_MMC_CTRL_INT_ENB);
443
444 return (0);
445}
446
447static void
448a10_mmc_req_done(struct a10_mmc_softc *sc)
449{
450 struct mmc_command *cmd;
451 struct mmc_request *req;
234 sc->a10_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
235 sc->a10_host.mode = mode_sd;
236 sc->a10_host.caps = MMC_CAP_HSPEED;
237 if (bus_width >= 4)
238 sc->a10_host.caps |= MMC_CAP_4_BIT_DATA;
239 if (bus_width >= 8)
240 sc->a10_host.caps |= MMC_CAP_8_BIT_DATA;
241
242 child = device_add_child(dev, "mmc", -1);
243 if (child == NULL) {
244 device_printf(dev, "attaching MMC bus failed!\n");
245 goto fail;
246 }
247 if (device_probe_and_attach(child) != 0) {
248 device_printf(dev, "attaching MMC child failed!\n");
249 device_delete_child(dev, child);
250 goto fail;
251 }
252
253 return (0);
254
255fail:
256 callout_drain(&sc->a10_timeoutc);
257 mtx_destroy(&sc->a10_mtx);
258 bus_teardown_intr(dev, sc->a10_res[A10_MMC_IRQRES], sc->a10_intrhand);
259 bus_release_resources(dev, a10_mmc_res_spec, sc->a10_res);
260
261 return (ENXIO);
262}
263
/*
 * Detach is not supported: once attached the controller stays for the
 * life of the system.
 */
static int
a10_mmc_detach(device_t dev)
{

	return (EBUSY);
}
270
271static void
272a10_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
273{
274 struct a10_mmc_softc *sc;
275
276 sc = (struct a10_mmc_softc *)arg;
277 if (err) {
278 sc->a10_dma_map_err = err;
279 return;
280 }
281 sc->a10_dma_desc_phys = segs[0].ds_addr;
282}
283
/*
 * Create the busdma tags and maps used for transfers: a single-segment
 * tag for the in-memory descriptor ring and a multi-segment tag for
 * the data buffers.  Returns 0 or a busdma error code.
 */
static int
a10_mmc_setup_dma(struct a10_mmc_softc *sc)
{
	int dma_desc_size, error;

	/* Allocate the DMA descriptor memory. */
	dma_desc_size = sizeof(struct a10_mmc_dma_desc) * A10_MMC_DMA_SEGS;
	error = bus_dma_tag_create(bus_get_dma_tag(sc->a10_dev),
	    A10_MMC_DMA_ALIGN, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    dma_desc_size, 1, dma_desc_size, 0, NULL, NULL, &sc->a10_dma_tag);
	if (error)
		return (error);
	error = bus_dmamem_alloc(sc->a10_dma_tag, &sc->a10_dma_desc,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->a10_dma_map);
	if (error)
		return (error);

	/* Load the ring so a10_dma_desc_cb records its bus address. */
	error = bus_dmamap_load(sc->a10_dma_tag, sc->a10_dma_map,
	    sc->a10_dma_desc, dma_desc_size, a10_dma_desc_cb, sc, 0);
	if (error)
		return (error);
	if (sc->a10_dma_map_err)
		return (sc->a10_dma_map_err);

	/* Create the DMA map for data transfers. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->a10_dev),
	    A10_MMC_DMA_ALIGN, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    A10_MMC_DMA_MAX_SIZE * A10_MMC_DMA_SEGS, A10_MMC_DMA_SEGS,
	    A10_MMC_DMA_MAX_SIZE, BUS_DMA_ALLOCNOW, NULL, NULL,
	    &sc->a10_dma_buf_tag);
	if (error)
		return (error);
	error = bus_dmamap_create(sc->a10_dma_buf_tag, 0,
	    &sc->a10_dma_buf_map);
	if (error)
		return (error);

	return (0);
}
325
326static void
327a10_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
328{
329 int i;
330 struct a10_mmc_dma_desc *dma_desc;
331 struct a10_mmc_softc *sc;
332
333 sc = (struct a10_mmc_softc *)arg;
334 sc->a10_dma_map_err = err;
335
336 if (err)
337 return;
338
339 dma_desc = sc->a10_dma_desc;
340 for (i = 0; i < nsegs; i++) {
341 dma_desc[i].buf_size = segs[i].ds_len;
342 dma_desc[i].buf_addr = segs[i].ds_addr;
343 dma_desc[i].config = A10_MMC_DMA_CONFIG_CH |
344 A10_MMC_DMA_CONFIG_OWN;
345 if (i == 0)
346 dma_desc[i].config |= A10_MMC_DMA_CONFIG_FD;
347 if (i < (nsegs - 1)) {
348 dma_desc[i].config |= A10_MMC_DMA_CONFIG_DIC;
349 dma_desc[i].next = sc->a10_dma_desc_phys +
350 ((i + 1) * sizeof(struct a10_mmc_dma_desc));
351 } else {
352 dma_desc[i].config |= A10_MMC_DMA_CONFIG_LD |
353 A10_MMC_DMA_CONFIG_ER;
354 dma_desc[i].next = 0;
355 }
356 }
357}
358
/*
 * Map the current request's data buffer for DMA, build the descriptor
 * ring (via a10_dma_cb) and program the controller's internal DMA
 * engine.  Called with sc->a10_req set.  Returns 0 or an errno.
 */
static int
a10_mmc_prepare_dma(struct a10_mmc_softc *sc)
{
	bus_dmasync_op_t sync_op;
	int error;
	struct mmc_command *cmd;
	uint32_t val;

	cmd = sc->a10_req->cmd;
	if (cmd->data->len > A10_MMC_DMA_MAX_SIZE * A10_MMC_DMA_SEGS)
		return (EFBIG);
	error = bus_dmamap_load(sc->a10_dma_buf_tag, sc->a10_dma_buf_map,
	    cmd->data->data, cmd->data->len, a10_dma_cb, sc, 0);
	if (error)
		return (error);
	if (sc->a10_dma_map_err)
		return (sc->a10_dma_map_err);

	if (cmd->data->flags & MMC_DATA_WRITE)
		sync_op = BUS_DMASYNC_PREWRITE;
	else
		sync_op = BUS_DMASYNC_PREREAD;
	bus_dmamap_sync(sc->a10_dma_buf_tag, sc->a10_dma_buf_map, sync_op);
	bus_dmamap_sync(sc->a10_dma_tag, sc->a10_dma_map, BUS_DMASYNC_PREWRITE);

	/* Enable DMA */
	val = A10_MMC_READ_4(sc, A10_MMC_GCTL);
	val &= ~A10_MMC_CTRL_FIFO_AC_MOD;
	val |= A10_MMC_CTRL_DMA_ENB;
	A10_MMC_WRITE_4(sc, A10_MMC_GCTL, val);

	/* Reset DMA */
	val |= A10_MMC_CTRL_DMA_RST;
	A10_MMC_WRITE_4(sc, A10_MMC_GCTL, val);

	A10_MMC_WRITE_4(sc, A10_MMC_DMAC, A10_MMC_DMAC_IDMAC_SOFT_RST);
	A10_MMC_WRITE_4(sc, A10_MMC_DMAC,
	    A10_MMC_DMAC_IDMAC_IDMA_ON | A10_MMC_DMAC_IDMAC_FIX_BURST);

	/*
	 * Enable RX or TX DMA interrupt.
	 * NOTE(review): 'val' still carries GCTL bits here, so this IDIE
	 * write sets more than just the RX/TX bits — looks suspicious;
	 * confirm against the A10 user manual whether a fresh value was
	 * intended.
	 */
	if (cmd->data->flags & MMC_DATA_WRITE)
		val |= A10_MMC_IDST_TX_INT;
	else
		val |= A10_MMC_IDST_RX_INT;
	A10_MMC_WRITE_4(sc, A10_MMC_IDIE, val);

	/* Set DMA descriptor list address */
	A10_MMC_WRITE_4(sc, A10_MMC_DLBA, sc->a10_dma_desc_phys);

	/* FIFO trigger level */
	A10_MMC_WRITE_4(sc, A10_MMC_FWLR, A10_MMC_DMA_FTRGLEVEL);

	return (0);
}
413
414static int
415a10_mmc_reset(struct a10_mmc_softc *sc)
416{
417 int timeout;
418
419 A10_MMC_WRITE_4(sc, A10_MMC_GCTL, A10_MMC_RESET);
420 timeout = 1000;
421 while (--timeout > 0) {
422 if ((A10_MMC_READ_4(sc, A10_MMC_GCTL) & A10_MMC_RESET) == 0)
423 break;
424 DELAY(100);
425 }
426 if (timeout == 0)
427 return (ETIMEDOUT);
428
429 /* Set the timeout. */
430 A10_MMC_WRITE_4(sc, A10_MMC_TMOR,
431 A10_MMC_TMOR_DTO_LMT_SHIFT(A10_MMC_TMOR_DTO_LMT_MASK) |
432 A10_MMC_TMOR_RTO_LMT_SHIFT(A10_MMC_TMOR_RTO_LMT_MASK));
433
434 /* Clear pending interrupts. */
435 A10_MMC_WRITE_4(sc, A10_MMC_RISR, 0xffffffff);
436 A10_MMC_WRITE_4(sc, A10_MMC_IDST, 0xffffffff);
437 /* Unmask interrupts. */
438 A10_MMC_WRITE_4(sc, A10_MMC_IMKR,
439 A10_MMC_INT_CMD_DONE | A10_MMC_INT_ERR_BIT |
440 A10_MMC_INT_DATA_OVER | A10_MMC_INT_AUTO_STOP_DONE);
441 /* Enable interrupts and AHB access. */
442 A10_MMC_WRITE_4(sc, A10_MMC_GCTL,
443 A10_MMC_READ_4(sc, A10_MMC_GCTL) | A10_MMC_CTRL_INT_ENB);
444
445 return (0);
446}
447
/*
 * Complete the in-flight request.  On error, the FIFO and DMA engines
 * are reset and the card clock is re-kicked before the request is
 * handed back; all per-request state is then cleared and the MMC
 * stack's completion callback is invoked.
 */
static void
a10_mmc_req_done(struct a10_mmc_softc *sc)
{
	struct mmc_command *cmd;
	struct mmc_request *req;
	uint32_t val, mask;
	int retry;

	cmd = sc->a10_req->cmd;
	if (cmd->error != MMC_ERR_NONE) {
		/* Reset the FIFO and DMA engines. */
		mask = A10_MMC_CTRL_FIFO_RST | A10_MMC_CTRL_DMA_RST;
		val = A10_MMC_READ_4(sc, A10_MMC_GCTL);
		A10_MMC_WRITE_4(sc, A10_MMC_GCTL, val | mask);

		/* The reset bits self-clear once the engines are idle. */
		retry = A10_MMC_RESET_RETRY;
		while (--retry > 0) {
			val = A10_MMC_READ_4(sc, A10_MMC_GCTL);
			if ((val & mask) == 0)
				break;
			DELAY(10);
		}
		if (retry == 0)
			device_printf(sc->a10_dev,
			    "timeout resetting DMA/FIFO\n");
		a10_mmc_update_clock(sc, 1);
	}

	/* Clear all per-request state before calling back. */
	req = sc->a10_req;
	callout_stop(&sc->a10_timeoutc);
	sc->a10_req = NULL;
	sc->a10_intr = 0;
	sc->a10_resid = 0;
	sc->a10_dma_map_err = 0;
	sc->a10_intr_wait = 0;
	req->done(req);
}
468
/*
 * Successful completion path: wait for the card to leave the busy
 * state, latch the response registers into the command, verify that
 * all data (if any) was transferred, then finish the request.
 */
static void
a10_mmc_req_ok(struct a10_mmc_softc *sc)
{
	int timeout;
	struct mmc_command *cmd;
	uint32_t status;

	/* Busy-wait up to ~1s for the card to go idle. */
	timeout = 1000;
	while (--timeout > 0) {
		status = A10_MMC_READ_4(sc, A10_MMC_STAR);
		if ((status & A10_MMC_STAR_CARD_BUSY) == 0)
			break;
		DELAY(1000);
	}
	cmd = sc->a10_req->cmd;
	if (timeout == 0) {
		cmd->error = MMC_ERR_FAILED;
		a10_mmc_req_done(sc);
		return;
	}
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* 136-bit responses are spread over RESP0..RESP3. */
			cmd->resp[0] = A10_MMC_READ_4(sc, A10_MMC_RESP3);
			cmd->resp[1] = A10_MMC_READ_4(sc, A10_MMC_RESP2);
			cmd->resp[2] = A10_MMC_READ_4(sc, A10_MMC_RESP1);
			cmd->resp[3] = A10_MMC_READ_4(sc, A10_MMC_RESP0);
		} else
			cmd->resp[0] = A10_MMC_READ_4(sc, A10_MMC_RESP0);
	}
	/* All data has been transferred ? (a10_resid is in 32-bit words) */
	if (cmd->data != NULL && (sc->a10_resid << 2) < cmd->data->len)
		cmd->error = MMC_ERR_FAILED;
	a10_mmc_req_done(sc);
}
503
504static void
505a10_mmc_timeout(void *arg)
506{
507 struct a10_mmc_softc *sc;
508
509 sc = (struct a10_mmc_softc *)arg;
510 if (sc->a10_req != NULL) {
511 device_printf(sc->a10_dev, "controller timeout\n");
512 sc->a10_req->cmd->error = MMC_ERR_TIMEOUT;
513 a10_mmc_req_done(sc);
514 } else
515 device_printf(sc->a10_dev,
516 "Spurious timeout - no active request\n");
517}
518
/*
 * Interrupt handler: gather controller (RISR) and internal-DMA (IDST)
 * status, fail the in-flight request on any error bit, and complete it
 * once every bit in a10_intr_wait has been observed.  All status bits
 * seen are acknowledged on exit.
 */
static void
a10_mmc_intr(void *arg)
{
	bus_dmasync_op_t sync_op;
	struct a10_mmc_softc *sc;
	struct mmc_data *data;
	uint32_t idst, imask, rint;

	sc = (struct a10_mmc_softc *)arg;
	A10_MMC_LOCK(sc);
	rint = A10_MMC_READ_4(sc, A10_MMC_RISR);
	idst = A10_MMC_READ_4(sc, A10_MMC_IDST);
	imask = A10_MMC_READ_4(sc, A10_MMC_IMKR);
	if (idst == 0 && imask == 0 && rint == 0) {
		/* Not ours - the IRQ line is shared. */
		A10_MMC_UNLOCK(sc);
		return;
	}
#ifdef DEBUG
	device_printf(sc->a10_dev, "idst: %#x, imask: %#x, rint: %#x\n",
	    idst, imask, rint);
#endif
	if (sc->a10_req == NULL) {
		device_printf(sc->a10_dev,
		    "Spurious interrupt - no active request, rint: 0x%08X\n",
		    rint);
		goto end;
	}
	if (rint & A10_MMC_INT_ERR_BIT) {
		device_printf(sc->a10_dev, "error rint: 0x%08X\n", rint);
		if (rint & A10_MMC_INT_RESP_TIMEOUT)
			sc->a10_req->cmd->error = MMC_ERR_TIMEOUT;
		else
			sc->a10_req->cmd->error = MMC_ERR_FAILED;
		a10_mmc_req_done(sc);
		goto end;
	}
	if (idst & A10_MMC_IDST_ERROR) {
		device_printf(sc->a10_dev, "error idst: 0x%08x\n", idst);
		sc->a10_req->cmd->error = MMC_ERR_FAILED;
		a10_mmc_req_done(sc);
		goto end;
	}

	/* Accumulate status until everything we wait for has fired. */
	sc->a10_intr |= rint;
	data = sc->a10_req->cmd->data;
	if (data != NULL && (idst & A10_MMC_IDST_COMPLETE) != 0) {
		/* DMA finished: sync and unload the data buffer. */
		if (data->flags & MMC_DATA_WRITE)
			sync_op = BUS_DMASYNC_POSTWRITE;
		else
			sync_op = BUS_DMASYNC_POSTREAD;
		bus_dmamap_sync(sc->a10_dma_buf_tag, sc->a10_dma_buf_map,
		    sync_op);
		bus_dmamap_sync(sc->a10_dma_tag, sc->a10_dma_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->a10_dma_buf_tag, sc->a10_dma_buf_map);
		sc->a10_resid = data->len >> 2;	/* bytes -> 32-bit words */
	}
	if ((sc->a10_intr & sc->a10_intr_wait) == sc->a10_intr_wait)
		a10_mmc_req_ok(sc);

end:
	/* Acknowledge every status bit we observed. */
	A10_MMC_WRITE_4(sc, A10_MMC_IDST, idst);
	A10_MMC_WRITE_4(sc, A10_MMC_RISR, rint);
	A10_MMC_UNLOCK(sc);
}
584
/*
 * mmcbr request method: program and fire one command, with an optional
 * DMA data phase.  Only a single request may be outstanding; returns
 * EBUSY if one already is, 0 otherwise.  Completion is reported
 * asynchronously through req->done() from the interrupt/timeout paths.
 */
static int
a10_mmc_request(device_t bus, device_t child, struct mmc_request *req)
{
	int blksz;
	struct a10_mmc_softc *sc;
	struct mmc_command *cmd;
	uint32_t cmdreg;
	int err;

	sc = device_get_softc(bus);
	A10_MMC_LOCK(sc);
	if (sc->a10_req) {
		A10_MMC_UNLOCK(sc);
		return (EBUSY);
	}
	sc->a10_req = req;
	cmd = req->cmd;
	/* Translate the command flags into CMDR register bits. */
	cmdreg = A10_MMC_CMDR_LOAD;
	if (cmd->opcode == MMC_GO_IDLE_STATE)
		cmdreg |= A10_MMC_CMDR_SEND_INIT_SEQ;
	if (cmd->flags & MMC_RSP_PRESENT)
		cmdreg |= A10_MMC_CMDR_RESP_RCV;
	if (cmd->flags & MMC_RSP_136)
		cmdreg |= A10_MMC_CMDR_LONG_RESP;
	if (cmd->flags & MMC_RSP_CRC)
		cmdreg |= A10_MMC_CMDR_CHK_RESP_CRC;

	sc->a10_intr = 0;
	sc->a10_resid = 0;
	sc->a10_intr_wait = A10_MMC_INT_CMD_DONE;
	cmd->error = MMC_ERR_NONE;
	if (cmd->data != NULL) {
		/* Data phase: also wait for the transfer-complete bits. */
		sc->a10_intr_wait |= A10_MMC_INT_DATA_OVER;
		cmdreg |= A10_MMC_CMDR_DATA_TRANS | A10_MMC_CMDR_WAIT_PRE_OVER;
		if (cmd->data->flags & MMC_DATA_MULTI) {
			/* Multi-block: let the controller auto-stop. */
			cmdreg |= A10_MMC_CMDR_STOP_CMD_FLAG;
			sc->a10_intr_wait |= A10_MMC_INT_AUTO_STOP_DONE;
		}
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdreg |= A10_MMC_CMDR_DIR_WRITE;
		blksz = min(cmd->data->len, MMC_SECTOR_SIZE);
		A10_MMC_WRITE_4(sc, A10_MMC_BKSR, blksz);
		A10_MMC_WRITE_4(sc, A10_MMC_BYCR, cmd->data->len);

		err = a10_mmc_prepare_dma(sc);
		if (err != 0)
			device_printf(sc->a10_dev, "prepare_dma failed: %d\n", err);
	}

	/* Fire the command and arm the request watchdog. */
	A10_MMC_WRITE_4(sc, A10_MMC_CAGR, cmd->arg);
	A10_MMC_WRITE_4(sc, A10_MMC_CMDR, cmdreg | cmd->opcode);
	callout_reset(&sc->a10_timeoutc, sc->a10_timeout * hz,
	    a10_mmc_timeout, sc);
	A10_MMC_UNLOCK(sc);

	return (0);
}
642
643static int
644a10_mmc_read_ivar(device_t bus, device_t child, int which,
645 uintptr_t *result)
646{
647 struct a10_mmc_softc *sc;
648
649 sc = device_get_softc(bus);
650 switch (which) {
651 default:
652 return (EINVAL);
653 case MMCBR_IVAR_BUS_MODE:
654 *(int *)result = sc->a10_host.ios.bus_mode;
655 break;
656 case MMCBR_IVAR_BUS_WIDTH:
657 *(int *)result = sc->a10_host.ios.bus_width;
658 break;
659 case MMCBR_IVAR_CHIP_SELECT:
660 *(int *)result = sc->a10_host.ios.chip_select;
661 break;
662 case MMCBR_IVAR_CLOCK:
663 *(int *)result = sc->a10_host.ios.clock;
664 break;
665 case MMCBR_IVAR_F_MIN:
666 *(int *)result = sc->a10_host.f_min;
667 break;
668 case MMCBR_IVAR_F_MAX:
669 *(int *)result = sc->a10_host.f_max;
670 break;
671 case MMCBR_IVAR_HOST_OCR:
672 *(int *)result = sc->a10_host.host_ocr;
673 break;
674 case MMCBR_IVAR_MODE:
675 *(int *)result = sc->a10_host.mode;
676 break;
677 case MMCBR_IVAR_OCR:
678 *(int *)result = sc->a10_host.ocr;
679 break;
680 case MMCBR_IVAR_POWER_MODE:
681 *(int *)result = sc->a10_host.ios.power_mode;
682 break;
683 case MMCBR_IVAR_VDD:
684 *(int *)result = sc->a10_host.ios.vdd;
685 break;
686 case MMCBR_IVAR_CAPS:
687 *(int *)result = sc->a10_host.caps;
688 break;
689 case MMCBR_IVAR_MAX_DATA:
690 *(int *)result = 65535;
691 break;
692 }
693
694 return (0);
695}
696
697static int
698a10_mmc_write_ivar(device_t bus, device_t child, int which,
699 uintptr_t value)
700{
701 struct a10_mmc_softc *sc;
702
703 sc = device_get_softc(bus);
704 switch (which) {
705 default:
706 return (EINVAL);
707 case MMCBR_IVAR_BUS_MODE:
708 sc->a10_host.ios.bus_mode = value;
709 break;
710 case MMCBR_IVAR_BUS_WIDTH:
711 sc->a10_host.ios.bus_width = value;
712 break;
713 case MMCBR_IVAR_CHIP_SELECT:
714 sc->a10_host.ios.chip_select = value;
715 break;
716 case MMCBR_IVAR_CLOCK:
717 sc->a10_host.ios.clock = value;
718 break;
719 case MMCBR_IVAR_MODE:
720 sc->a10_host.mode = value;
721 break;
722 case MMCBR_IVAR_OCR:
723 sc->a10_host.ocr = value;
724 break;
725 case MMCBR_IVAR_POWER_MODE:
726 sc->a10_host.ios.power_mode = value;
727 break;
728 case MMCBR_IVAR_VDD:
729 sc->a10_host.ios.vdd = value;
730 break;
731 /* These are read-only */
732 case MMCBR_IVAR_CAPS:
733 case MMCBR_IVAR_HOST_OCR:
734 case MMCBR_IVAR_F_MIN:
735 case MMCBR_IVAR_F_MAX:
736 case MMCBR_IVAR_MAX_DATA:
737 return (EINVAL);
738 }
739
740 return (0);
741}
742
/*
 * Gate the card clock on (clkon != 0) or off and latch the change into
 * the controller with a "program clock" command (CMDR_PRG_CLK: no data,
 * no response).  The LOAD bit self-clears when the controller has
 * accepted the update.  Returns 0 on success or ETIMEDOUT if the LOAD
 * bit never clears.
 */
static int
a10_mmc_update_clock(struct a10_mmc_softc *sc, uint32_t clkon)
{
	uint32_t cmdreg;
	int retry;
	uint32_t ckcr;

	/* Update only the clock-enable/control bits of CKCR. */
	ckcr = A10_MMC_READ_4(sc, A10_MMC_CKCR);
	ckcr &= ~(A10_MMC_CKCR_CCLK_ENB | A10_MMC_CKCR_CCLK_CTRL);

	if (clkon)
		ckcr |= A10_MMC_CKCR_CCLK_ENB;

	A10_MMC_WRITE_4(sc, A10_MMC_CKCR, ckcr);

	/* Issue the clock-programming command. */
	cmdreg = A10_MMC_CMDR_LOAD | A10_MMC_CMDR_PRG_CLK |
	    A10_MMC_CMDR_WAIT_PRE_OVER;
	A10_MMC_WRITE_4(sc, A10_MMC_CMDR, cmdreg);
	retry = 0xfffff;
	/* Busy-wait (10us steps) for the controller to accept it. */
	while (--retry > 0) {
		if ((A10_MMC_READ_4(sc, A10_MMC_CMDR) & A10_MMC_CMDR_LOAD) == 0) {
			/* Ack any raw interrupt bits the command raised. */
			A10_MMC_WRITE_4(sc, A10_MMC_RISR, 0xffffffff);
			return (0);
		}
		DELAY(10);
	}
	A10_MMC_WRITE_4(sc, A10_MMC_RISR, 0xffffffff);
	device_printf(sc->a10_dev, "timeout updating clock\n");

	return (ETIMEDOUT);
}
774
/*
 * mmcbr_update_ios method: apply the bus width and clock frequency
 * requested by the mmc layer.  Clock changes follow the controller's
 * sequence: gate the card clock off, clear the internal divider, set
 * the module clock frequency through the clk framework, then ungate.
 */
static int
a10_mmc_update_ios(device_t bus, device_t child)
{
	int error;
	struct a10_mmc_softc *sc;
	struct mmc_ios *ios;
	uint32_t ckcr;

	sc = device_get_softc(bus);

	ios = &sc->a10_host.ios;

	/* Set the bus width. */
	switch (ios->bus_width) {
	case bus_width_1:
		A10_MMC_WRITE_4(sc, A10_MMC_BWDR, A10_MMC_BWDR1);
		break;
	case bus_width_4:
		A10_MMC_WRITE_4(sc, A10_MMC_BWDR, A10_MMC_BWDR4);
		break;
	case bus_width_8:
		A10_MMC_WRITE_4(sc, A10_MMC_BWDR, A10_MMC_BWDR8);
		break;
	}

	/* ios->clock == 0 means "leave the clock alone". */
	if (ios->clock) {

		/* Disable clock */
		error = a10_mmc_update_clock(sc, 0);
		if (error != 0)
			return (error);

		/* Reset the divider. */
		ckcr = A10_MMC_READ_4(sc, A10_MMC_CKCR);
		ckcr &= ~A10_MMC_CKCR_CCLK_DIV;
		A10_MMC_WRITE_4(sc, A10_MMC_CKCR, ckcr);

		/* Set the MMC clock. */
		error = clk_set_freq(sc->a10_clk_mmc, ios->clock,
		    CLK_SET_ROUND_DOWN);
		if (error != 0) {
			device_printf(sc->a10_dev,
			    "failed to set frequency to %u Hz: %d\n",
			    ios->clock, error);
			return (error);
		}

		/* Enable clock. */
		error = a10_mmc_update_clock(sc, 1);
		if (error != 0)
			return (error);
	}


	return (0);
}
831
/*
 * mmcbr_get_ro method: write-protect detection is not implemented for
 * this controller, so the card is always reported as writable.
 */
static int
a10_mmc_get_ro(device_t bus, device_t child)
{

	return (0);
}
838
/*
 * mmcbr_acquire_host method: sleep until no other thread owns the bus,
 * then mark it busy.  msleep() is called with PCATCH, so a pending
 * signal aborts the wait and the error is returned without taking
 * ownership.
 */
static int
a10_mmc_acquire_host(device_t bus, device_t child)
{
	struct a10_mmc_softc *sc;
	int error;

	sc = device_get_softc(bus);
	A10_MMC_LOCK(sc);
	while (sc->a10_bus_busy) {
		error = msleep(sc, &sc->a10_mtx, PCATCH, "mmchw", 0);
		if (error != 0) {
			A10_MMC_UNLOCK(sc);
			return (error);
		}
	}
	sc->a10_bus_busy++;
	A10_MMC_UNLOCK(sc);

	return (0);
}
859
/*
 * mmcbr_release_host method: drop bus ownership and wake any thread
 * blocked in a10_mmc_acquire_host().
 */
static int
a10_mmc_release_host(device_t bus, device_t child)
{
	struct a10_mmc_softc *sc;

	sc = device_get_softc(bus);
	A10_MMC_LOCK(sc);
	sc->a10_bus_busy--;
	wakeup(sc);
	A10_MMC_UNLOCK(sc);

	return (0);
}
873
/*
 * newbus method table: device lifecycle hooks, bus ivar accessors and
 * the MMC bridge (mmcbr) interface consumed by the mmc(4) child.
 */
static device_method_t a10_mmc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, a10_mmc_probe),
	DEVMETHOD(device_attach, a10_mmc_attach),
	DEVMETHOD(device_detach, a10_mmc_detach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar, a10_mmc_read_ivar),
	DEVMETHOD(bus_write_ivar, a10_mmc_write_ivar),
	DEVMETHOD(bus_print_child, bus_generic_print_child),

	/* MMC bridge interface */
	DEVMETHOD(mmcbr_update_ios, a10_mmc_update_ios),
	DEVMETHOD(mmcbr_request, a10_mmc_request),
	DEVMETHOD(mmcbr_get_ro, a10_mmc_get_ro),
	DEVMETHOD(mmcbr_acquire_host, a10_mmc_acquire_host),
	DEVMETHOD(mmcbr_release_host, a10_mmc_release_host),

	DEVMETHOD_END
};
894
static devclass_t a10_mmc_devclass;

/* Driver description: name, method table, per-instance softc size. */
static driver_t a10_mmc_driver = {
	"a10_mmc",
	a10_mmc_methods,
	sizeof(struct a10_mmc_softc),
};

/* Attach under simplebus (FDT) and let the generic mmc(4) bus attach
 * beneath this bridge. */
DRIVER_MODULE(a10_mmc, simplebus, a10_mmc_driver, a10_mmc_devclass, 0, 0);
DRIVER_MODULE(mmc, a10_mmc, mmc_driver, mmc_devclass, NULL, NULL);
MODULE_DEPEND(a10_mmc, mmc, 1, 1, 1);
474 }
475
476 req = sc->a10_req;
477 callout_stop(&sc->a10_timeoutc);
478 sc->a10_req = NULL;
479 sc->a10_intr = 0;
480 sc->a10_resid = 0;
481 sc->a10_dma_map_err = 0;
482 sc->a10_intr_wait = 0;
483 req->done(req);
484}
485
/*
 * Success path for a completed request: wait for the card to deassert
 * its busy flag, harvest the response registers and verify that the
 * whole data transfer was accounted for before completing the request.
 * Called from the interrupt handler with the softc lock held.
 */
static void
a10_mmc_req_ok(struct a10_mmc_softc *sc)
{
	int timeout;
	struct mmc_command *cmd;
	uint32_t status;

	/* Poll up to ~1s (1000 x 1ms) for card-busy to clear. */
	timeout = 1000;
	while (--timeout > 0) {
		status = A10_MMC_READ_4(sc, A10_MMC_STAR);
		if ((status & A10_MMC_STAR_CARD_BUSY) == 0)
			break;
		DELAY(1000);
	}
	cmd = sc->a10_req->cmd;
	if (timeout == 0) {
		/* Card stayed busy; fail the request. */
		cmd->error = MMC_ERR_FAILED;
		a10_mmc_req_done(sc);
		return;
	}
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/*
			 * 136-bit response: resp[] is filled from RESP3
			 * down to RESP0 — presumably RESP3 holds the most
			 * significant word, per the mmc layer's MSW-first
			 * convention (confirm against the A10 manual).
			 */
			cmd->resp[0] = A10_MMC_READ_4(sc, A10_MMC_RESP3);
			cmd->resp[1] = A10_MMC_READ_4(sc, A10_MMC_RESP2);
			cmd->resp[2] = A10_MMC_READ_4(sc, A10_MMC_RESP1);
			cmd->resp[3] = A10_MMC_READ_4(sc, A10_MMC_RESP0);
		} else
			cmd->resp[0] = A10_MMC_READ_4(sc, A10_MMC_RESP0);
	}
	/* All data has been transferred ? (a10_resid counts 32-bit words) */
	if (cmd->data != NULL && (sc->a10_resid << 2) < cmd->data->len)
		cmd->error = MMC_ERR_FAILED;
	a10_mmc_req_done(sc);
}
520
521static void
522a10_mmc_timeout(void *arg)
523{
524 struct a10_mmc_softc *sc;
525
526 sc = (struct a10_mmc_softc *)arg;
527 if (sc->a10_req != NULL) {
528 device_printf(sc->a10_dev, "controller timeout\n");
529 sc->a10_req->cmd->error = MMC_ERR_TIMEOUT;
530 a10_mmc_req_done(sc);
531 } else
532 device_printf(sc->a10_dev,
533 "Spurious timeout - no active request\n");
534}
535
/*
 * Interrupt handler.  Reads both the raw controller interrupt status
 * (RISR) and the internal DMA controller status (IDST), dispatches
 * error conditions, syncs/unloads DMA maps when the data phase is
 * complete, and finishes the request once every awaited status bit
 * has been observed.  All status bits seen are acknowledged on exit.
 */
static void
a10_mmc_intr(void *arg)
{
	bus_dmasync_op_t sync_op;
	struct a10_mmc_softc *sc;
	struct mmc_data *data;
	uint32_t idst, imask, rint;

	sc = (struct a10_mmc_softc *)arg;
	A10_MMC_LOCK(sc);
	rint = A10_MMC_READ_4(sc, A10_MMC_RISR);
	idst = A10_MMC_READ_4(sc, A10_MMC_IDST);
	imask = A10_MMC_READ_4(sc, A10_MMC_IMKR);
	/* Nothing pending at all: interrupt was not for us. */
	if (idst == 0 && imask == 0 && rint == 0) {
		A10_MMC_UNLOCK(sc);
		return;
	}
#ifdef DEBUG
	device_printf(sc->a10_dev, "idst: %#x, imask: %#x, rint: %#x\n",
	    idst, imask, rint);
#endif
	if (sc->a10_req == NULL) {
		device_printf(sc->a10_dev,
		    "Spurious interrupt - no active request, rint: 0x%08X\n",
		    rint);
		goto end;
	}
	/* Command/data error reported by the controller. */
	if (rint & A10_MMC_INT_ERR_BIT) {
		device_printf(sc->a10_dev, "error rint: 0x%08X\n", rint);
		if (rint & A10_MMC_INT_RESP_TIMEOUT)
			sc->a10_req->cmd->error = MMC_ERR_TIMEOUT;
		else
			sc->a10_req->cmd->error = MMC_ERR_FAILED;
		a10_mmc_req_done(sc);
		goto end;
	}
	/* Error reported by the internal DMA engine. */
	if (idst & A10_MMC_IDST_ERROR) {
		device_printf(sc->a10_dev, "error idst: 0x%08x\n", idst);
		sc->a10_req->cmd->error = MMC_ERR_FAILED;
		a10_mmc_req_done(sc);
		goto end;
	}

	/* Accumulate status; a request may take several interrupts. */
	sc->a10_intr |= rint;
	data = sc->a10_req->cmd->data;
	if (data != NULL && (idst & A10_MMC_IDST_COMPLETE) != 0) {
		/* Data phase done: sync and unload the DMA buffers. */
		if (data->flags & MMC_DATA_WRITE)
			sync_op = BUS_DMASYNC_POSTWRITE;
		else
			sync_op = BUS_DMASYNC_POSTREAD;
		bus_dmamap_sync(sc->a10_dma_buf_tag, sc->a10_dma_buf_map,
		    sync_op);
		bus_dmamap_sync(sc->a10_dma_tag, sc->a10_dma_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->a10_dma_buf_tag, sc->a10_dma_buf_map);
		/* Record the full transfer length in 32-bit words. */
		sc->a10_resid = data->len >> 2;
	}
	/* Complete once every bit in a10_intr_wait has been seen. */
	if ((sc->a10_intr & sc->a10_intr_wait) == sc->a10_intr_wait)
		a10_mmc_req_ok(sc);

end:
	/* Acknowledge everything that was read above. */
	A10_MMC_WRITE_4(sc, A10_MMC_IDST, idst);
	A10_MMC_WRITE_4(sc, A10_MMC_RISR, rint);
	A10_MMC_UNLOCK(sc);
}
601
602static int
603a10_mmc_request(device_t bus, device_t child, struct mmc_request *req)
604{
605 int blksz;
606 struct a10_mmc_softc *sc;
607 struct mmc_command *cmd;
608 uint32_t cmdreg;
609 int err;
610
611 sc = device_get_softc(bus);
612 A10_MMC_LOCK(sc);
613 if (sc->a10_req) {
614 A10_MMC_UNLOCK(sc);
615 return (EBUSY);
616 }
617 sc->a10_req = req;
618 cmd = req->cmd;
619 cmdreg = A10_MMC_CMDR_LOAD;
620 if (cmd->opcode == MMC_GO_IDLE_STATE)
621 cmdreg |= A10_MMC_CMDR_SEND_INIT_SEQ;
622 if (cmd->flags & MMC_RSP_PRESENT)
623 cmdreg |= A10_MMC_CMDR_RESP_RCV;
624 if (cmd->flags & MMC_RSP_136)
625 cmdreg |= A10_MMC_CMDR_LONG_RESP;
626 if (cmd->flags & MMC_RSP_CRC)
627 cmdreg |= A10_MMC_CMDR_CHK_RESP_CRC;
628
629 sc->a10_intr = 0;
630 sc->a10_resid = 0;
631 sc->a10_intr_wait = A10_MMC_INT_CMD_DONE;
632 cmd->error = MMC_ERR_NONE;
633 if (cmd->data != NULL) {
634 sc->a10_intr_wait |= A10_MMC_INT_DATA_OVER;
635 cmdreg |= A10_MMC_CMDR_DATA_TRANS | A10_MMC_CMDR_WAIT_PRE_OVER;
636 if (cmd->data->flags & MMC_DATA_MULTI) {
637 cmdreg |= A10_MMC_CMDR_STOP_CMD_FLAG;
638 sc->a10_intr_wait |= A10_MMC_INT_AUTO_STOP_DONE;
639 }
640 if (cmd->data->flags & MMC_DATA_WRITE)
641 cmdreg |= A10_MMC_CMDR_DIR_WRITE;
642 blksz = min(cmd->data->len, MMC_SECTOR_SIZE);
643 A10_MMC_WRITE_4(sc, A10_MMC_BKSR, blksz);
644 A10_MMC_WRITE_4(sc, A10_MMC_BYCR, cmd->data->len);
645
646 err = a10_mmc_prepare_dma(sc);
647 if (err != 0)
648 device_printf(sc->a10_dev, "prepare_dma failed: %d\n", err);
649 }
650
651 A10_MMC_WRITE_4(sc, A10_MMC_CAGR, cmd->arg);
652 A10_MMC_WRITE_4(sc, A10_MMC_CMDR, cmdreg | cmd->opcode);
653 callout_reset(&sc->a10_timeoutc, sc->a10_timeout * hz,
654 a10_mmc_timeout, sc);
655 A10_MMC_UNLOCK(sc);
656
657 return (0);
658}
659
660static int
661a10_mmc_read_ivar(device_t bus, device_t child, int which,
662 uintptr_t *result)
663{
664 struct a10_mmc_softc *sc;
665
666 sc = device_get_softc(bus);
667 switch (which) {
668 default:
669 return (EINVAL);
670 case MMCBR_IVAR_BUS_MODE:
671 *(int *)result = sc->a10_host.ios.bus_mode;
672 break;
673 case MMCBR_IVAR_BUS_WIDTH:
674 *(int *)result = sc->a10_host.ios.bus_width;
675 break;
676 case MMCBR_IVAR_CHIP_SELECT:
677 *(int *)result = sc->a10_host.ios.chip_select;
678 break;
679 case MMCBR_IVAR_CLOCK:
680 *(int *)result = sc->a10_host.ios.clock;
681 break;
682 case MMCBR_IVAR_F_MIN:
683 *(int *)result = sc->a10_host.f_min;
684 break;
685 case MMCBR_IVAR_F_MAX:
686 *(int *)result = sc->a10_host.f_max;
687 break;
688 case MMCBR_IVAR_HOST_OCR:
689 *(int *)result = sc->a10_host.host_ocr;
690 break;
691 case MMCBR_IVAR_MODE:
692 *(int *)result = sc->a10_host.mode;
693 break;
694 case MMCBR_IVAR_OCR:
695 *(int *)result = sc->a10_host.ocr;
696 break;
697 case MMCBR_IVAR_POWER_MODE:
698 *(int *)result = sc->a10_host.ios.power_mode;
699 break;
700 case MMCBR_IVAR_VDD:
701 *(int *)result = sc->a10_host.ios.vdd;
702 break;
703 case MMCBR_IVAR_CAPS:
704 *(int *)result = sc->a10_host.caps;
705 break;
706 case MMCBR_IVAR_MAX_DATA:
707 *(int *)result = 65535;
708 break;
709 }
710
711 return (0);
712}
713
714static int
715a10_mmc_write_ivar(device_t bus, device_t child, int which,
716 uintptr_t value)
717{
718 struct a10_mmc_softc *sc;
719
720 sc = device_get_softc(bus);
721 switch (which) {
722 default:
723 return (EINVAL);
724 case MMCBR_IVAR_BUS_MODE:
725 sc->a10_host.ios.bus_mode = value;
726 break;
727 case MMCBR_IVAR_BUS_WIDTH:
728 sc->a10_host.ios.bus_width = value;
729 break;
730 case MMCBR_IVAR_CHIP_SELECT:
731 sc->a10_host.ios.chip_select = value;
732 break;
733 case MMCBR_IVAR_CLOCK:
734 sc->a10_host.ios.clock = value;
735 break;
736 case MMCBR_IVAR_MODE:
737 sc->a10_host.mode = value;
738 break;
739 case MMCBR_IVAR_OCR:
740 sc->a10_host.ocr = value;
741 break;
742 case MMCBR_IVAR_POWER_MODE:
743 sc->a10_host.ios.power_mode = value;
744 break;
745 case MMCBR_IVAR_VDD:
746 sc->a10_host.ios.vdd = value;
747 break;
748 /* These are read-only */
749 case MMCBR_IVAR_CAPS:
750 case MMCBR_IVAR_HOST_OCR:
751 case MMCBR_IVAR_F_MIN:
752 case MMCBR_IVAR_F_MAX:
753 case MMCBR_IVAR_MAX_DATA:
754 return (EINVAL);
755 }
756
757 return (0);
758}
759
/*
 * Gate the card clock on (clkon != 0) or off and latch the change into
 * the controller with a "program clock" command (CMDR_PRG_CLK: no data,
 * no response).  The LOAD bit self-clears when the controller has
 * accepted the update.  Returns 0 on success or ETIMEDOUT if the LOAD
 * bit never clears.
 */
static int
a10_mmc_update_clock(struct a10_mmc_softc *sc, uint32_t clkon)
{
	uint32_t cmdreg;
	int retry;
	uint32_t ckcr;

	/* Update only the clock-enable/control bits of CKCR. */
	ckcr = A10_MMC_READ_4(sc, A10_MMC_CKCR);
	ckcr &= ~(A10_MMC_CKCR_CCLK_ENB | A10_MMC_CKCR_CCLK_CTRL);

	if (clkon)
		ckcr |= A10_MMC_CKCR_CCLK_ENB;

	A10_MMC_WRITE_4(sc, A10_MMC_CKCR, ckcr);

	/* Issue the clock-programming command. */
	cmdreg = A10_MMC_CMDR_LOAD | A10_MMC_CMDR_PRG_CLK |
	    A10_MMC_CMDR_WAIT_PRE_OVER;
	A10_MMC_WRITE_4(sc, A10_MMC_CMDR, cmdreg);
	retry = 0xfffff;
	/* Busy-wait (10us steps) for the controller to accept it. */
	while (--retry > 0) {
		if ((A10_MMC_READ_4(sc, A10_MMC_CMDR) & A10_MMC_CMDR_LOAD) == 0) {
			/* Ack any raw interrupt bits the command raised. */
			A10_MMC_WRITE_4(sc, A10_MMC_RISR, 0xffffffff);
			return (0);
		}
		DELAY(10);
	}
	A10_MMC_WRITE_4(sc, A10_MMC_RISR, 0xffffffff);
	device_printf(sc->a10_dev, "timeout updating clock\n");

	return (ETIMEDOUT);
}
791
/*
 * mmcbr_update_ios method: apply the bus width and clock frequency
 * requested by the mmc layer.  Clock changes follow the controller's
 * sequence: gate the card clock off, clear the internal divider, set
 * the module clock frequency through the clk framework, then ungate.
 */
static int
a10_mmc_update_ios(device_t bus, device_t child)
{
	int error;
	struct a10_mmc_softc *sc;
	struct mmc_ios *ios;
	uint32_t ckcr;

	sc = device_get_softc(bus);

	ios = &sc->a10_host.ios;

	/* Set the bus width. */
	switch (ios->bus_width) {
	case bus_width_1:
		A10_MMC_WRITE_4(sc, A10_MMC_BWDR, A10_MMC_BWDR1);
		break;
	case bus_width_4:
		A10_MMC_WRITE_4(sc, A10_MMC_BWDR, A10_MMC_BWDR4);
		break;
	case bus_width_8:
		A10_MMC_WRITE_4(sc, A10_MMC_BWDR, A10_MMC_BWDR8);
		break;
	}

	/* ios->clock == 0 means "leave the clock alone". */
	if (ios->clock) {

		/* Disable clock */
		error = a10_mmc_update_clock(sc, 0);
		if (error != 0)
			return (error);

		/* Reset the divider. */
		ckcr = A10_MMC_READ_4(sc, A10_MMC_CKCR);
		ckcr &= ~A10_MMC_CKCR_CCLK_DIV;
		A10_MMC_WRITE_4(sc, A10_MMC_CKCR, ckcr);

		/* Set the MMC clock. */
		error = clk_set_freq(sc->a10_clk_mmc, ios->clock,
		    CLK_SET_ROUND_DOWN);
		if (error != 0) {
			device_printf(sc->a10_dev,
			    "failed to set frequency to %u Hz: %d\n",
			    ios->clock, error);
			return (error);
		}

		/* Enable clock. */
		error = a10_mmc_update_clock(sc, 1);
		if (error != 0)
			return (error);
	}


	return (0);
}
848
/*
 * mmcbr_get_ro method: write-protect detection is not implemented for
 * this controller, so the card is always reported as writable.
 */
static int
a10_mmc_get_ro(device_t bus, device_t child)
{

	return (0);
}
855
/*
 * mmcbr_acquire_host method: sleep until no other thread owns the bus,
 * then mark it busy.  msleep() is called with PCATCH, so a pending
 * signal aborts the wait and the error is returned without taking
 * ownership.
 */
static int
a10_mmc_acquire_host(device_t bus, device_t child)
{
	struct a10_mmc_softc *sc;
	int error;

	sc = device_get_softc(bus);
	A10_MMC_LOCK(sc);
	while (sc->a10_bus_busy) {
		error = msleep(sc, &sc->a10_mtx, PCATCH, "mmchw", 0);
		if (error != 0) {
			A10_MMC_UNLOCK(sc);
			return (error);
		}
	}
	sc->a10_bus_busy++;
	A10_MMC_UNLOCK(sc);

	return (0);
}
876
/*
 * mmcbr_release_host method: drop bus ownership and wake any thread
 * blocked in a10_mmc_acquire_host().
 */
static int
a10_mmc_release_host(device_t bus, device_t child)
{
	struct a10_mmc_softc *sc;

	sc = device_get_softc(bus);
	A10_MMC_LOCK(sc);
	sc->a10_bus_busy--;
	wakeup(sc);
	A10_MMC_UNLOCK(sc);

	return (0);
}
890
/*
 * newbus method table: device lifecycle hooks, bus ivar accessors and
 * the MMC bridge (mmcbr) interface consumed by the mmc(4) child.
 */
static device_method_t a10_mmc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, a10_mmc_probe),
	DEVMETHOD(device_attach, a10_mmc_attach),
	DEVMETHOD(device_detach, a10_mmc_detach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar, a10_mmc_read_ivar),
	DEVMETHOD(bus_write_ivar, a10_mmc_write_ivar),
	DEVMETHOD(bus_print_child, bus_generic_print_child),

	/* MMC bridge interface */
	DEVMETHOD(mmcbr_update_ios, a10_mmc_update_ios),
	DEVMETHOD(mmcbr_request, a10_mmc_request),
	DEVMETHOD(mmcbr_get_ro, a10_mmc_get_ro),
	DEVMETHOD(mmcbr_acquire_host, a10_mmc_acquire_host),
	DEVMETHOD(mmcbr_release_host, a10_mmc_release_host),

	DEVMETHOD_END
};
911
static devclass_t a10_mmc_devclass;

/* Driver description: name, method table, per-instance softc size. */
static driver_t a10_mmc_driver = {
	"a10_mmc",
	a10_mmc_methods,
	sizeof(struct a10_mmc_softc),
};

/* Attach under simplebus (FDT) and let the generic mmc(4) bus attach
 * beneath this bridge. */
DRIVER_MODULE(a10_mmc, simplebus, a10_mmc_driver, a10_mmc_devclass, 0, 0);
DRIVER_MODULE(mmc, a10_mmc, mmc_driver, mmc_devclass, NULL, NULL);
MODULE_DEPEND(a10_mmc, mmc, 1, 1, 1);