1/*- 2 * Copyright (c) 2006 Bernd Walter. All rights reserved. 3 * Copyright (c) 2006 M. Warner Losh. All rights reserved. 4 * Copyright (c) 2010 Greg Ansley. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/resource.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/watchdog.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/resource.h>
#include <machine/frame.h>
#include <machine/intr.h>

#include <arm/at91/at91var.h>
#include <arm/at91/at91_mcireg.h>
#include <arm/at91/at91_pdcreg.h>

#include <dev/mmc/bridge.h>
#include <dev/mmc/mmcreg.h>
#include <dev/mmc/mmcbrvar.h>

#include "mmcbr_if.h"

#include "opt_at91.h"

/* Bounce buffer size: one SD/MMC block (the PDC handles one segment). */
#define BBSZ	512

/*
 * Per-controller state for the AT91 MCI (MMC/SD) host bridge.
 * Protected by sc_mtx except for fields set once at attach time.
 */
struct at91_mci_softc {
	void *intrhand;			/* Interrupt handle */
	device_t dev;
	int sc_cap;			/* Capability flags, CAP_* below */
#define	CAP_HAS_4WIRE		1	/* Has 4 wire bus */
#define	CAP_NEEDS_BYTESWAP	2	/* broken hardware needing bounce */
	int flags;			/* Progress of current request, below */
	int has_4wire;			/* sysctl knob: 4-wire bus present */
#define CMD_STARTED	1		/* req->cmd has been issued */
#define STOP_STARTED	2		/* req->stop has been issued */
	struct resource *irq_res;	/* IRQ resource */
	struct resource	*mem_res;	/* Memory resource */
	struct mtx sc_mtx;		/* Serializes request/interrupt state */
	bus_dma_tag_t dmatag;		/* Single-segment tag for PDC DMA */
	bus_dmamap_t map;		/* The one map used for all transfers */
	int mapped;			/* Non-zero while 'map' is loaded */
	struct mmc_host host;		/* Host parameters shared with mmc(4) */
	int bus_busy;			/* Host acquired by an upper layer */
	struct mmc_request *req;	/* Request in progress, or NULL */
	struct mmc_command *curcmd;	/* Command (cmd or stop) in flight */
	char bounce_buffer[BBSZ];	/* Staging area for write data */
};

/* Read a 32-bit MCI/PDC register. */
static inline uint32_t
RD4(struct at91_mci_softc *sc, bus_size_t off)
{
	return (bus_read_4(sc->mem_res, off));
}

/* Write a 32-bit MCI/PDC register. */
static inline void
WR4(struct at91_mci_softc *sc, bus_size_t off, uint32_t val)
{
	bus_write_4(sc->mem_res, off, val);
}

/* bus entry points */
static int at91_mci_probe(device_t dev);
static int at91_mci_attach(device_t dev);
110static int at91_mci_detach(device_t dev); 111static void at91_mci_intr(void *); 112 113/* helper routines */ 114static int at91_mci_activate(device_t dev); 115static void at91_mci_deactivate(device_t dev); 116static int at91_mci_is_mci1rev2xx(void); 117 118#define AT91_MCI_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) 119#define AT91_MCI_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) 120#define AT91_MCI_LOCK_INIT(_sc) \ 121 mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \ 122 "mci", MTX_DEF) 123#define AT91_MCI_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx); 124#define AT91_MCI_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED); 125#define AT91_MCI_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED); 126 127static void 128at91_mci_pdc_disable(struct at91_mci_softc *sc) 129{ 130 WR4(sc, PDC_PTCR, PDC_PTCR_TXTDIS | PDC_PTCR_RXTDIS); 131 WR4(sc, PDC_RPR, 0); 132 WR4(sc, PDC_RCR, 0); 133 WR4(sc, PDC_RNPR, 0); 134 WR4(sc, PDC_RNCR, 0); 135 WR4(sc, PDC_TPR, 0); 136 WR4(sc, PDC_TCR, 0); 137 WR4(sc, PDC_TNPR, 0); 138 WR4(sc, PDC_TNCR, 0); 139} 140 141static void 142at91_mci_init(device_t dev) 143{ 144 struct at91_mci_softc *sc = device_get_softc(dev); 145 uint32_t val; 146 147 WR4(sc, MCI_CR, MCI_CR_MCIEN); /* Enable controller */ 148 WR4(sc, MCI_IDR, 0xffffffff); /* Turn off interrupts */ 149 WR4(sc, MCI_DTOR, MCI_DTOR_DTOMUL_1M | 1); 150 val = MCI_MR_PDCMODE; 151 val |= 0x34a; /* PWSDIV = 3; CLKDIV = 74 */ 152 if (at91_mci_is_mci1rev2xx()) 153 val |= MCI_MR_RDPROOF | MCI_MR_WRPROOF; 154 WR4(sc, MCI_MR, val); 155#ifndef AT91_MCI_SLOT_B 156 WR4(sc, MCI_SDCR, 0); /* SLOT A, 1 bit bus */ 157#else 158 /* XXX Really should add second "unit" but nobody using using 159 * a two slot card that we know of. 
XXX */ 160 WR4(sc, MCI_SDCR, 1); /* SLOT B, 1 bit bus */ 161#endif 162} 163 164static void 165at91_mci_fini(device_t dev) 166{ 167 struct at91_mci_softc *sc = device_get_softc(dev); 168 169 WR4(sc, MCI_IDR, 0xffffffff); /* Turn off interrupts */ 170 at91_mci_pdc_disable(sc); 171 WR4(sc, MCI_CR, MCI_CR_MCIDIS | MCI_CR_SWRST); /* Put the device into reset */ 172} 173 174static int 175at91_mci_probe(device_t dev) 176{ 177 178 device_set_desc(dev, "MCI mmc/sd host bridge"); 179 return (0); 180} 181 182static int 183at91_mci_attach(device_t dev) 184{ 185 struct at91_mci_softc *sc = device_get_softc(dev); 186 struct sysctl_ctx_list *sctx; 187 struct sysctl_oid *soid; 188 device_t child; 189 int err; 190 191 sc->dev = dev; 192 193 sc->sc_cap = 0; 194 if (at91_is_rm92()) 195 sc->sc_cap |= CAP_NEEDS_BYTESWAP; 196 err = at91_mci_activate(dev); 197 if (err) 198 goto out; 199 200 AT91_MCI_LOCK_INIT(sc); 201 202 /* 203 * Allocate DMA tags and maps 204 */ 205 err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, 206 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MAXPHYS, 1, 207 MAXPHYS, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->dmatag); 208 if (err != 0) 209 goto out; 210 211 err = bus_dmamap_create(sc->dmatag, 0, &sc->map); 212 if (err != 0) 213 goto out; 214 215 at91_mci_fini(dev); 216 at91_mci_init(dev); 217 218 /* 219 * Activate the interrupt 220 */ 221 err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, 222 NULL, at91_mci_intr, sc, &sc->intrhand); 223 if (err) { 224 AT91_MCI_LOCK_DESTROY(sc); 225 goto out; 226 } 227 228 sctx = device_get_sysctl_ctx(dev); 229 soid = device_get_sysctl_tree(dev); 230 SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "4wire", 231 CTLFLAG_RW, &sc->has_4wire, 0, "has 4 wire SD Card bus"); 232 233#ifdef AT91_MCI_HAS_4WIRE 234 sc->has_4wire = 1; 235#endif 236 if (sc->has_4wire) 237 sc->sc_cap |= CAP_HAS_4WIRE; 238 239 sc->host.f_min = at91_master_clock / 512; 240 sc->host.f_min = 375000; 241 sc->host.f_max = at91_master_clock 
/ 2; 242 if (sc->host.f_max > 50000000) 243 sc->host.f_max = 50000000; /* Limit to 50MHz */ 244 245 sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340; 246 sc->host.caps = 0; 247 if (sc->sc_cap & CAP_HAS_4WIRE) 248 sc->host.caps |= MMC_CAP_4_BIT_DATA; 249 child = device_add_child(dev, "mmc", 0); 250 device_set_ivars(dev, &sc->host); 251 err = bus_generic_attach(dev); 252out: 253 if (err) 254 at91_mci_deactivate(dev); 255 return (err); 256} 257 258static int 259at91_mci_detach(device_t dev) 260{ 261 at91_mci_fini(dev); 262 at91_mci_deactivate(dev); 263 return (EBUSY); /* XXX */ 264} 265 266static int 267at91_mci_activate(device_t dev) 268{ 269 struct at91_mci_softc *sc; 270 int rid; 271 272 sc = device_get_softc(dev); 273 rid = 0; 274 sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 275 RF_ACTIVE); 276 if (sc->mem_res == NULL) 277 goto errout; 278 279 rid = 0; 280 sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 281 RF_ACTIVE); 282 if (sc->irq_res == NULL) 283 goto errout; 284 285 return (0); 286errout: 287 at91_mci_deactivate(dev); 288 return (ENOMEM); 289} 290 291static void 292at91_mci_deactivate(device_t dev) 293{ 294 struct at91_mci_softc *sc; 295 296 sc = device_get_softc(dev); 297 if (sc->intrhand) 298 bus_teardown_intr(dev, sc->irq_res, sc->intrhand); 299 sc->intrhand = 0; 300 bus_generic_detach(sc->dev); 301 if (sc->mem_res) 302 bus_release_resource(dev, SYS_RES_IOPORT, 303 rman_get_rid(sc->mem_res), sc->mem_res); 304 sc->mem_res = 0; 305 if (sc->irq_res) 306 bus_release_resource(dev, SYS_RES_IRQ, 307 rman_get_rid(sc->irq_res), sc->irq_res); 308 sc->irq_res = 0; 309 return; 310} 311 312static int 313at91_mci_is_mci1rev2xx(void) 314{ 315 316 switch (AT91_CPU(at91_chip_id)) { 317 case AT91_CPU_SAM9260: 318 case AT91_CPU_SAM9263: 319#ifdef notyet 320 case AT91_CPU_CAP9: 321#endif 322 case AT91_CPU_SAM9G10: 323 case AT91_CPU_SAM9G20: 324#ifdef notyet 325 case AT91_CPU_SAM9RL: 326#endif 327 case AT91_CPU_SAM9XE128: 328 case 
AT91_CPU_SAM9XE256: 329 case AT91_CPU_SAM9XE512: 330 return(1); 331 } 332 return (0); 333} 334 335static void 336at91_mci_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 337{ 338 if (error != 0) 339 return; 340 *(bus_addr_t *)arg = segs[0].ds_addr; 341} 342 343static int 344at91_mci_update_ios(device_t brdev, device_t reqdev) 345{ 346 struct at91_mci_softc *sc; 347 struct mmc_host *host; 348 struct mmc_ios *ios; 349 uint32_t clkdiv; 350 351 sc = device_get_softc(brdev); 352 host = &sc->host; 353 ios = &host->ios; 354 // bus mode? 355 if (ios->clock == 0) { 356 WR4(sc, MCI_CR, MCI_CR_MCIDIS); 357 clkdiv = 0; 358 } else { 359 WR4(sc, MCI_CR, MCI_CR_MCIEN); 360 if ((at91_master_clock % (ios->clock * 2)) == 0) 361 clkdiv = ((at91_master_clock / ios->clock) / 2) - 1; 362 else 363 clkdiv = (at91_master_clock / ios->clock) / 2; 364 } 365 if (ios->bus_width == bus_width_4) 366 WR4(sc, MCI_SDCR, RD4(sc, MCI_SDCR) | MCI_SDCR_SDCBUS); 367 else 368 WR4(sc, MCI_SDCR, RD4(sc, MCI_SDCR) & ~MCI_SDCR_SDCBUS); 369 WR4(sc, MCI_MR, (RD4(sc, MCI_MR) & ~MCI_MR_CLKDIV) | clkdiv); 370 /* Do we need a settle time here? 
*/ 371 /* XXX We need to turn the device on/off here with a GPIO pin */ 372 return (0); 373} 374 375static void 376at91_mci_start_cmd(struct at91_mci_softc *sc, struct mmc_command *cmd) 377{ 378 size_t len; 379 uint32_t cmdr, ier = 0, mr; 380 uint32_t *src, *dst; 381 int i; 382 struct mmc_data *data; 383 void *vaddr; 384 bus_addr_t paddr; 385 386 sc->curcmd = cmd; 387 data = cmd->data; 388 cmdr = cmd->opcode; 389 390 /* XXX Upper layers don't always set this */ 391 cmd->mrq = sc->req; 392 393 if (MMC_RSP(cmd->flags) == MMC_RSP_NONE) 394 cmdr |= MCI_CMDR_RSPTYP_NO; 395 else { 396 /* Allow big timeout for responses */ 397 cmdr |= MCI_CMDR_MAXLAT; 398 if (cmd->flags & MMC_RSP_136) 399 cmdr |= MCI_CMDR_RSPTYP_136; 400 else 401 cmdr |= MCI_CMDR_RSPTYP_48; 402 } 403 if (cmd->opcode == MMC_STOP_TRANSMISSION) 404 cmdr |= MCI_CMDR_TRCMD_STOP; 405 if (sc->host.ios.bus_mode == opendrain) 406 cmdr |= MCI_CMDR_OPDCMD; 407 if (!data) { 408 // The no data case is fairly simple 409 at91_mci_pdc_disable(sc); 410// printf("CMDR %x ARGR %x\n", cmdr, cmd->arg); 411 WR4(sc, MCI_ARGR, cmd->arg); 412 WR4(sc, MCI_CMDR, cmdr); 413 WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_CMDRDY); 414 return; 415 } 416 if (data->flags & MMC_DATA_READ) 417 cmdr |= MCI_CMDR_TRDIR; 418 if (data->flags & (MMC_DATA_READ | MMC_DATA_WRITE)) 419 cmdr |= MCI_CMDR_TRCMD_START; 420 if (data->flags & MMC_DATA_STREAM) 421 cmdr |= MCI_CMDR_TRTYP_STREAM; 422 if (data->flags & MMC_DATA_MULTI) 423 cmdr |= MCI_CMDR_TRTYP_MULTIPLE; 424 // Set block size and turn on PDC mode for dma xfer and disable 425 // PDC until we're ready. 
426 mr = RD4(sc, MCI_MR) & ~MCI_MR_BLKLEN; 427 WR4(sc, MCI_MR, mr | (data->len << 16) | MCI_MR_PDCMODE); 428 WR4(sc, PDC_PTCR, PDC_PTCR_RXTDIS | PDC_PTCR_TXTDIS); 429 if (cmdr & MCI_CMDR_TRCMD_START) { 430 len = data->len; 431 if (cmdr & MCI_CMDR_TRDIR) 432 vaddr = cmd->data->data; 433 else { 434 /* Use bounce buffer even if we don't need 435 * byteswap, since buffer may straddle a page 436 * boundry, and we don't handle multi-segment 437 * transfers in hardware. 438 * (page issues seen from 'bsdlabel -w' which 439 * uses raw geom access to the volume). 440 * Greg Ansley (gja (at) ansley.com) 441 */ 442 vaddr = sc->bounce_buffer; 443 src = (uint32_t *)cmd->data->data; 444 dst = (uint32_t *)vaddr; 445 /* 446 * If this is MCI1 revision 2xx controller, apply 447 * a work-around for the "Data Write Operation and 448 * number of bytes" erratum. 449 */ 450 if (at91_mci_is_mci1rev2xx() && data->len < 12) { 451 len = 12; 452 memset(dst, 0, 12); 453 } 454 if (sc->sc_cap & CAP_NEEDS_BYTESWAP) { 455 for (i = 0; i < data->len / 4; i++) 456 dst[i] = bswap32(src[i]); 457 } else 458 memcpy(dst, src, data->len); 459 } 460 data->xfer_len = 0; 461 if (bus_dmamap_load(sc->dmatag, sc->map, vaddr, len, 462 at91_mci_getaddr, &paddr, 0) != 0) { 463 cmd->error = MMC_ERR_NO_MEMORY; 464 sc->req = NULL; 465 sc->curcmd = NULL; 466 cmd->mrq->done(cmd->mrq); 467 return; 468 } 469 sc->mapped++; 470 if (cmdr & MCI_CMDR_TRDIR) { 471 bus_dmamap_sync(sc->dmatag, sc->map, BUS_DMASYNC_PREREAD); 472 WR4(sc, PDC_RPR, paddr); 473 WR4(sc, PDC_RCR, len / 4); 474 ier = MCI_SR_ENDRX; 475 } else { 476 bus_dmamap_sync(sc->dmatag, sc->map, BUS_DMASYNC_PREWRITE); 477 WR4(sc, PDC_TPR, paddr); 478 WR4(sc, PDC_TCR, len / 4); 479 ier = MCI_SR_TXBUFE; 480 } 481 } 482// printf("CMDR %x ARGR %x with data\n", cmdr, cmd->arg); 483 WR4(sc, MCI_ARGR, cmd->arg); 484 if (cmdr & MCI_CMDR_TRCMD_START) { 485 if (cmdr & MCI_CMDR_TRDIR) { 486 WR4(sc, PDC_PTCR, PDC_PTCR_RXTEN); 487 WR4(sc, MCI_CMDR, cmdr); 488 } else { 489 WR4(sc, 
MCI_CMDR, cmdr); 490 WR4(sc, PDC_PTCR, PDC_PTCR_TXTEN); 491 } 492 } 493 WR4(sc, MCI_IER, MCI_SR_ERROR | ier); 494} 495 496static void 497at91_mci_start(struct at91_mci_softc *sc) 498{ 499 struct mmc_request *req; 500 501 req = sc->req; 502 if (req == NULL) 503 return; 504 // assert locked 505 if (!(sc->flags & CMD_STARTED)) { 506 sc->flags |= CMD_STARTED; 507// printf("Starting CMD\n"); 508 at91_mci_start_cmd(sc, req->cmd); 509 return; 510 } 511 if (!(sc->flags & STOP_STARTED) && req->stop) { 512// printf("Starting Stop\n"); 513 sc->flags |= STOP_STARTED; 514 at91_mci_start_cmd(sc, req->stop); 515 return; 516 } 517 /* We must be done -- bad idea to do this while locked? */ 518 sc->req = NULL; 519 sc->curcmd = NULL; 520 req->done(req); 521} 522 523static int 524at91_mci_request(device_t brdev, device_t reqdev, struct mmc_request *req) 525{ 526 struct at91_mci_softc *sc = device_get_softc(brdev); 527 528 AT91_MCI_LOCK(sc); 529 // XXX do we want to be able to queue up multiple commands? 530 // XXX sounds like a good idea, but all protocols are sync, so 531 // XXX maybe the idea is naive... 
532 if (sc->req != NULL) { 533 AT91_MCI_UNLOCK(sc); 534 return (EBUSY); 535 } 536 sc->req = req; 537 sc->flags = 0; 538 at91_mci_start(sc); 539 AT91_MCI_UNLOCK(sc); 540 return (0); 541} 542 543static int 544at91_mci_get_ro(device_t brdev, device_t reqdev) 545{ 546 return (0); 547} 548 549static int 550at91_mci_acquire_host(device_t brdev, device_t reqdev) 551{ 552 struct at91_mci_softc *sc = device_get_softc(brdev); 553 int err = 0; 554 555 AT91_MCI_LOCK(sc); 556 while (sc->bus_busy) 557 msleep(sc, &sc->sc_mtx, PZERO, "mciah", hz / 5); 558 sc->bus_busy++; 559 AT91_MCI_UNLOCK(sc); 560 return (err); 561} 562 563static int 564at91_mci_release_host(device_t brdev, device_t reqdev) 565{ 566 struct at91_mci_softc *sc = device_get_softc(brdev); 567 568 AT91_MCI_LOCK(sc); 569 sc->bus_busy--; 570 wakeup(sc); 571 AT91_MCI_UNLOCK(sc); 572 return (0); 573} 574 575static void 576at91_mci_read_done(struct at91_mci_softc *sc) 577{ 578 uint32_t *walker; 579 struct mmc_command *cmd; 580 int i, len; 581 582 cmd = sc->curcmd; 583 bus_dmamap_sync(sc->dmatag, sc->map, BUS_DMASYNC_POSTREAD); 584 bus_dmamap_unload(sc->dmatag, sc->map); 585 sc->mapped--; 586 if (sc->sc_cap & CAP_NEEDS_BYTESWAP) { 587 walker = (uint32_t *)cmd->data->data; 588 len = cmd->data->len / 4; 589 for (i = 0; i < len; i++) 590 walker[i] = bswap32(walker[i]); 591 } 592 // Finish up the sequence... 593 WR4(sc, MCI_IDR, MCI_SR_ENDRX); 594 WR4(sc, MCI_IER, MCI_SR_RXBUFF); 595 WR4(sc, PDC_PTCR, PDC_PTCR_RXTDIS | PDC_PTCR_TXTDIS); 596} 597 598static void 599at91_mci_xmit_done(struct at91_mci_softc *sc) 600{ 601 // Finish up the sequence... 
602 WR4(sc, PDC_PTCR, PDC_PTCR_RXTDIS | PDC_PTCR_TXTDIS); 603 WR4(sc, MCI_IDR, MCI_SR_TXBUFE); 604 WR4(sc, MCI_IER, MCI_SR_NOTBUSY); 605 bus_dmamap_sync(sc->dmatag, sc->map, BUS_DMASYNC_POSTWRITE); 606 bus_dmamap_unload(sc->dmatag, sc->map); 607 sc->mapped--; 608} 609 610static void 611at91_mci_intr(void *arg) 612{ 613 struct at91_mci_softc *sc = (struct at91_mci_softc*)arg; 614 uint32_t sr; 615 int i, done = 0; 616 struct mmc_command *cmd; 617 618 AT91_MCI_LOCK(sc); 619 sr = RD4(sc, MCI_SR) & RD4(sc, MCI_IMR); 620// printf("i 0x%x\n", sr); 621 cmd = sc->curcmd; 622 if (sr & MCI_SR_ERROR) { 623 // Ignore CRC errors on CMD2 and ACMD47, per relevant standards 624 if ((sr & MCI_SR_RCRCE) && (cmd->opcode == MMC_SEND_OP_COND || 625 cmd->opcode == ACMD_SD_SEND_OP_COND)) 626 cmd->error = MMC_ERR_NONE; 627 else if (sr & (MCI_SR_RTOE | MCI_SR_DTOE)) 628 cmd->error = MMC_ERR_TIMEOUT; 629 else if (sr & (MCI_SR_RCRCE | MCI_SR_DCRCE)) 630 cmd->error = MMC_ERR_BADCRC; 631 else if (sr & (MCI_SR_OVRE | MCI_SR_UNRE)) 632 cmd->error = MMC_ERR_FIFO; 633 else 634 cmd->error = MMC_ERR_FAILED; 635 done = 1; 636 if (sc->mapped && cmd->error) { 637 bus_dmamap_unload(sc->dmatag, sc->map); 638 sc->mapped--; 639 } 640 } else { 641 if (sr & MCI_SR_TXBUFE) { 642// printf("TXBUFE\n"); 643 at91_mci_xmit_done(sc); 644 } 645 if (sr & MCI_SR_RXBUFF) { 646// printf("RXBUFF\n"); 647 WR4(sc, MCI_IDR, MCI_SR_RXBUFF); 648 WR4(sc, MCI_IER, MCI_SR_CMDRDY); 649 } 650 if (sr & MCI_SR_ENDTX) { 651// printf("ENDTX\n"); 652 } 653 if (sr & MCI_SR_ENDRX) { 654// printf("ENDRX\n"); 655 at91_mci_read_done(sc); 656 } 657 if (sr & MCI_SR_NOTBUSY) { 658// printf("NOTBUSY\n"); 659 WR4(sc, MCI_IDR, MCI_SR_NOTBUSY); 660 WR4(sc, MCI_IER, MCI_SR_CMDRDY); 661 } 662 if (sr & MCI_SR_DTIP) { 663// printf("Data transfer in progress\n"); 664 } 665 if (sr & MCI_SR_BLKE) { 666// printf("Block transfer end\n"); 667 } 668 if (sr & MCI_SR_TXRDY) { 669// printf("Ready to transmit\n"); 670 } 671 if (sr & MCI_SR_RXRDY) { 672// 
printf("Ready to receive\n"); 673 } 674 if (sr & MCI_SR_CMDRDY) { 675// printf("Command ready\n"); 676 done = 1; 677 cmd->error = MMC_ERR_NONE; 678 } 679 } 680 if (done) { 681 WR4(sc, MCI_IDR, 0xffffffff); 682 if (cmd != NULL && (cmd->flags & MMC_RSP_PRESENT)) { 683 for (i = 0; i < ((cmd->flags & MMC_RSP_136) ? 4 : 1); 684 i++) { 685 cmd->resp[i] = RD4(sc, MCI_RSPR + i * 4); 686// printf("RSPR[%d] = %x\n", i, cmd->resp[i]); 687 } 688 } 689 at91_mci_start(sc); 690 } 691 AT91_MCI_UNLOCK(sc); 692} 693 694static int 695at91_mci_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) 696{ 697 struct at91_mci_softc *sc = device_get_softc(bus); 698 699 switch (which) { 700 default: 701 return (EINVAL); 702 case MMCBR_IVAR_BUS_MODE: 703 *(int *)result = sc->host.ios.bus_mode; 704 break; 705 case MMCBR_IVAR_BUS_WIDTH: 706 *(int *)result = sc->host.ios.bus_width; 707 break; 708 case MMCBR_IVAR_CHIP_SELECT: 709 *(int *)result = sc->host.ios.chip_select; 710 break; 711 case MMCBR_IVAR_CLOCK: 712 *(int *)result = sc->host.ios.clock; 713 break; 714 case MMCBR_IVAR_F_MIN: 715 *(int *)result = sc->host.f_min; 716 break; 717 case MMCBR_IVAR_F_MAX: 718 *(int *)result = sc->host.f_max; 719 break; 720 case MMCBR_IVAR_HOST_OCR: 721 *(int *)result = sc->host.host_ocr; 722 break; 723 case MMCBR_IVAR_MODE: 724 *(int *)result = sc->host.mode; 725 break; 726 case MMCBR_IVAR_OCR: 727 *(int *)result = sc->host.ocr; 728 break; 729 case MMCBR_IVAR_POWER_MODE: 730 *(int *)result = sc->host.ios.power_mode; 731 break; 732 case MMCBR_IVAR_VDD: 733 *(int *)result = sc->host.ios.vdd; 734 break; 735 case MMCBR_IVAR_CAPS: 736 if (sc->has_4wire) { 737 sc->sc_cap |= CAP_HAS_4WIRE; 738 sc->host.caps |= MMC_CAP_4_BIT_DATA; 739 } else { 740 sc->sc_cap &= ~CAP_HAS_4WIRE; 741 sc->host.caps &= ~MMC_CAP_4_BIT_DATA; 742 } 743 *(int *)result = sc->host.caps; 744 break; 745 case MMCBR_IVAR_MAX_DATA: 746 *(int *)result = 1; 747 break; 748 } 749 return (0); 750} 751 752static int 
753at91_mci_write_ivar(device_t bus, device_t child, int which, uintptr_t value) 754{ 755 struct at91_mci_softc *sc = device_get_softc(bus); 756 757 switch (which) { 758 default: 759 return (EINVAL); 760 case MMCBR_IVAR_BUS_MODE: 761 sc->host.ios.bus_mode = value; 762 break; 763 case MMCBR_IVAR_BUS_WIDTH: 764 sc->host.ios.bus_width = value; 765 break; 766 case MMCBR_IVAR_CHIP_SELECT: 767 sc->host.ios.chip_select = value; 768 break; 769 case MMCBR_IVAR_CLOCK: 770 sc->host.ios.clock = value; 771 break; 772 case MMCBR_IVAR_MODE: 773 sc->host.mode = value; 774 break; 775 case MMCBR_IVAR_OCR: 776 sc->host.ocr = value; 777 break; 778 case MMCBR_IVAR_POWER_MODE: 779 sc->host.ios.power_mode = value; 780 break; 781 case MMCBR_IVAR_VDD: 782 sc->host.ios.vdd = value; 783 break; 784 /* These are read-only */ 785 case MMCBR_IVAR_CAPS: 786 case MMCBR_IVAR_HOST_OCR: 787 case MMCBR_IVAR_F_MIN: 788 case MMCBR_IVAR_F_MAX: 789 case MMCBR_IVAR_MAX_DATA: 790 return (EINVAL); 791 } 792 return (0); 793} 794 795static device_method_t at91_mci_methods[] = { 796 /* device_if */ 797 DEVMETHOD(device_probe, at91_mci_probe), 798 DEVMETHOD(device_attach, at91_mci_attach), 799 DEVMETHOD(device_detach, at91_mci_detach), 800 801 /* Bus interface */ 802 DEVMETHOD(bus_read_ivar, at91_mci_read_ivar), 803 DEVMETHOD(bus_write_ivar, at91_mci_write_ivar), 804 805 /* mmcbr_if */ 806 DEVMETHOD(mmcbr_update_ios, at91_mci_update_ios), 807 DEVMETHOD(mmcbr_request, at91_mci_request), 808 DEVMETHOD(mmcbr_get_ro, at91_mci_get_ro), 809 DEVMETHOD(mmcbr_acquire_host, at91_mci_acquire_host), 810 DEVMETHOD(mmcbr_release_host, at91_mci_release_host), 811 812 DEVMETHOD_END 813}; 814 815static driver_t at91_mci_driver = { 816 "at91_mci", 817 at91_mci_methods, 818 sizeof(struct at91_mci_softc), 819}; 820 821static devclass_t at91_mci_devclass; 822 823DRIVER_MODULE(at91_mci, atmelarm, at91_mci_driver, at91_mci_devclass, NULL, 824 NULL); 825