/*
 *  linux/drivers/mmc/pxa.c - PXA MMCI driver
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This hardware is really sick:
 *   - No way to clear interrupts.
 *   - Have to turn off the clock whenever we touch the device.
 *   - Doesn't tell you how many data blocks were transferred.
 *  Yuck!
 *
 *	1 and 3 byte data transfers not supported
 *	max block length up to 1023
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>

#include <asm/dma.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
#include <asm/sizes.h>

#include <asm/arch/pxa-regs.h>
#include <asm/arch/mmc.h>

#include "pxamci.h"

#define DRIVER_NAME	"pxa2xx-mci"

/* Number of scatterlist entries we advertise per request (see probe). */
#define NR_SG	1

/*
 * Per-controller driver state.  One instance lives in the private area
 * of the mmc_host allocated in pxamci_probe().
 */
struct pxamci_host {
	struct mmc_host		*mmc;
	spinlock_t		lock;		/* protects imask read-modify-write */
	struct resource		*res;		/* MMIO region claimed in probe */
	void __iomem		*base;		/* ioremapped controller registers */
	int			irq;		/* controller interrupt line */
	int			dma;		/* PXA DMA channel, -1 if none */
	unsigned int		clkrt;		/* cached MMC_CLKRT divisor value */
	unsigned int		cmdat;		/* sticky CMDAT bits (e.g. CMDAT_INIT) */
	unsigned int		imask;		/* shadow of MMC_I_MASK (set bit = masked) */
	unsigned int		power_mode;	/* last ios->power_mode seen */
	struct pxamci_platform_data *pdata;

	struct mmc_request	*mrq;		/* request currently in flight */
	struct mmc_command	*cmd;		/* command currently in flight */
	struct mmc_data		*data;		/* data phase currently in flight */

	dma_addr_t		sg_dma;		/* bus address of sg_cpu[] */
	struct pxa_dma_desc	*sg_cpu;	/* DMA descriptor chain (one page) */
	unsigned int		dma_len;	/* #entries returned by dma_map_sg */

	unsigned int		dma_dir;	/* DMA_FROM_DEVICE or DMA_TO_DEVICE */
};

/*
 * Stop the MMC bus clock and busy-wait (up to ~10ms) for the controller
 * to report it stopped.  The hardware requires the clock to be off
 * before most registers are touched.
 */
static void pxamci_stop_clock(struct pxamci_host *host)
{
	if (readl(host->base + MMC_STAT) & STAT_CLK_EN) {
		unsigned long timeout = 10000;
		unsigned int v;

		writel(STOP_CLOCK, host->base + MMC_STRPCL);

		do {
			v = readl(host->base + MMC_STAT);
			if (!(v & STAT_CLK_EN))
				break;
			udelay(1);
		} while (timeout--);

		if (v & STAT_CLK_EN)
			dev_err(mmc_dev(host->mmc), "unable to stop clock\n");
	}
}

/*
 * Unmask the given interrupt source(s).  MMC_I_MASK is a mask register:
 * a set bit disables the interrupt, so enabling means clearing bits in
 * the shadow copy.  The spinlock serialises the read-modify-write of
 * host->imask against the other irq enable/disable path.
 */
static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->imask &= ~mask;
	writel(host->imask, host->base + MMC_I_MASK);
	spin_unlock_irqrestore(&host->lock, flags);
}

/* Mask the given interrupt source(s); counterpart of pxamci_enable_irq(). */
static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->imask |= mask;
	writel(host->imask, host->base + MMC_I_MASK);
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Program the data-phase registers (block count, block length, read
 * timeout), map the scatterlist and build the DMA descriptor chain,
 * then start the DMA channel so it is ready before the command goes out.
 */
static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	unsigned long long clks;
	unsigned int timeout;
	u32 dcmd;
	int i;

	host->data = data;

	/* Stream (open-ended) transfers: program the maximum block count. */
	if (data->flags & MMC_DATA_STREAM)
		nob = 0xffff;

	writel(nob, host->base + MMC_NOB);
	writel(data->blksz, host->base + MMC_BLKLEN);

	/*
	 * Convert the core's timeout (ns + clocks) into MMC clock ticks.
	 * MMC_RDTO counts in units of 256 clocks, hence the rounding-up
	 * divide by 256.
	 */
	clks = (unsigned long long)data->timeout_ns * CLOCKRATE;
	do_div(clks, 1000000000UL);
	timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt);
	writel((timeout + 255) / 256, host->base + MMC_RDTO);

	/*
	 * Route the DMA request lines for the chosen direction and pick
	 * the matching descriptor command flags (fixed FIFO address on
	 * the device side, incrementing address on the memory side).
	 */
	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		dcmd = DCMD_INCTRGADDR | DCMD_FLOWTRG;
		DRCMRTXMMC = 0;
		DRCMRRXMMC = host->dma | DRCMR_MAPVLD;
	} else {
		host->dma_dir = DMA_TO_DEVICE;
		dcmd = DCMD_INCSRCADDR | DCMD_FLOWSRC;
		DRCMRRXMMC = 0;
		DRCMRTXMMC = host->dma | DRCMR_MAPVLD;
	}

	dcmd |= DCMD_BURST32 | DCMD_WIDTH1;

	host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				   host->dma_dir);

	/*
	 * Build one descriptor per mapped sg entry, each linked to the
	 * next; the FIFO side uses the physical register address.
	 */
	for (i = 0; i < host->dma_len; i++) {
		if (data->flags & MMC_DATA_READ) {
			host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO;
			host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]);
		} else {
			host->sg_cpu[i].dsadr = sg_dma_address(&data->sg[i]);
			host->sg_cpu[i].dtadr = host->res->start + MMC_TXFIFO;
		}
		host->sg_cpu[i].dcmd = dcmd | sg_dma_len(&data->sg[i]);
		host->sg_cpu[i].ddadr = host->sg_dma + (i + 1) *
					sizeof(struct pxa_dma_desc);
	}
	/* Terminate the chain on the last descriptor. */
	host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP;
	/* Descriptors must be visible to the DMA engine before it runs. */
	wmb();

	DDADR(host->dma) = host->sg_dma;
	DCSR(host->dma) = DCSR_RUN;
}

/*
 * Issue a command: translate the response type into CMDAT bits, write
 * opcode and argument (the 32-bit argument is split across the ARGH/ARGL
 * register pair), restart the bus clock and unmask the end-of-command
 * interrupt.  Assumes the clock is currently stopped.
 */
static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
{
	WARN_ON(host->cmd != NULL);
	host->cmd = cmd;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= CMDAT_BUSY;

/* Response class only: ignore the busy and opcode-check qualifier bits. */
#define RSP_TYPE(x)	((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
	switch (RSP_TYPE(mmc_resp_type(cmd))) {
	case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6, r7 */
		cmdat |= CMDAT_RESP_SHORT;
		break;
	case RSP_TYPE(MMC_RSP_R3):
		cmdat |= CMDAT_RESP_R3;
		break;
	case RSP_TYPE(MMC_RSP_R2):
		cmdat |= CMDAT_RESP_R2;
		break;
	default:
		break;
	}

	writel(cmd->opcode, host->base + MMC_CMD);
	writel(cmd->arg >> 16, host->base + MMC_ARGH);
	writel(cmd->arg & 0xffff, host->base + MMC_ARGL);
	writel(cmdat, host->base + MMC_CMDAT);
	writel(host->clkrt, host->base + MMC_CLKRT);

	writel(START_CLOCK, host->base + MMC_STRPCL);

	pxamci_enable_irq(host, END_CMD_RES);
}

/* Clear the in-flight request state and hand the request back to the core. */
static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
{
	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;
	mmc_request_done(host->mmc, mrq);
}

/*
 * Handle END_CMD_RES: read back the response and record any command
 * error, then either move on to the data phase or complete the request.
 * Returns nonzero if there was a command to complete.
 */
static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i;
	u32 v;

	if (!cmd)
		return 0;

	host->cmd = NULL;

	/*
	 * Did I mention this is Sick.  We always need to
	 * discard the upper 8 bits of the first 16-bit word.
	 *
	 * The response FIFO delivers 16-bit words; reassemble them into
	 * the 32-bit resp[] entries the core expects, carrying the low
	 * byte of each word over into the next entry.
	 */
	v = readl(host->base + MMC_RES) & 0xffff;
	for (i = 0; i < 4; i++) {
		u32 w1 = readl(host->base + MMC_RES) & 0xffff;
		u32 w2 = readl(host->base + MMC_RES) & 0xffff;
		cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8;
		v = w2;
	}

	if (stat & STAT_TIME_OUT_RESPONSE) {
		cmd->error = MMC_ERR_TIMEOUT;
	} else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
#ifdef CONFIG_PXA27x
		/*
		 * workaround for erratum #42:
		 * Intel PXA27x Family Processor Specification Update Rev 001
		 */
		if (cmd->flags & MMC_RSP_136 && cmd->resp[0] & 0x80000000) {
			pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode);
		} else
#endif
		cmd->error = MMC_ERR_BADCRC;
	}

	pxamci_disable_irq(host, END_CMD_RES);
	if (host->data && cmd->error == MMC_ERR_NONE) {
		/* Data phase follows: wait for DATA_TRAN_DONE next. */
		pxamci_enable_irq(host, DATA_TRAN_DONE);
	} else {
		pxamci_finish_request(host, host->mrq);
	}

	return 1;
}

/*
 * Handle DATA_TRAN_DONE: stop DMA, unmap the scatterlist, record any
 * data error, then either send the stop command or complete the request.
 * Returns nonzero if there was a data phase to complete.
 */
static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_data *data = host->data;

	if (!data)
		return 0;

	DCSR(host->dma) = 0;
	dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
		     host->dma_dir);

	if (stat & STAT_READ_TIME_OUT)
		data->error = MMC_ERR_TIMEOUT;
	else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR))
		data->error = MMC_ERR_BADCRC;

	/*
	 * There appears to be a hardware design bug here.  There seems to
	 * be no way to find out how much data was transferred to the card.
	 * This means that if there was an error on any block, we mark all
	 * data blocks as being in error.
	 */
	if (data->error == MMC_ERR_NONE)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pxamci_disable_irq(host, DATA_TRAN_DONE);

	host->data = NULL;
	if (host->mrq->stop) {
		/* Clock must be off before issuing the stop command. */
		pxamci_stop_clock(host);
		pxamci_start_cmd(host, host->mrq->stop, 0);
	} else {
		pxamci_finish_request(host, host->mrq);
	}

	return 1;
}

/*
 * Controller interrupt handler: dispatch to the command and/or data
 * completion paths based on MMC_I_REG, using MMC_STAT for error bits.
 */
static irqreturn_t pxamci_irq(int irq, void *devid)
{
	struct pxamci_host *host = devid;
	unsigned int ireg;
	int handled = 0;

	ireg = readl(host->base + MMC_I_REG);

	if (ireg) {
		unsigned stat = readl(host->base + MMC_STAT);

		pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat);

		if (ireg & END_CMD_RES)
			handled |= pxamci_cmd_done(host, stat);
		if (ireg & DATA_TRAN_DONE)
			handled |= pxamci_data_done(host, stat);
	}

	return IRQ_RETVAL(handled);
}

/*
 * mmc_host_ops.request: start a new request.  The bus clock is stopped
 * first (required before touching the registers), the data phase is set
 * up if present, and the command is issued.  CMDAT_INIT (the 80-clock
 * initialisation sequence) is consumed here so it is sent only once
 * after power-on.
 */
static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct pxamci_host *host = mmc_priv(mmc);
	unsigned int cmdat;

	WARN_ON(host->mrq != NULL);

	host->mrq = mrq;

	pxamci_stop_clock(host);

	cmdat = host->cmdat;
	host->cmdat &= ~CMDAT_INIT;

	if (mrq->data) {
		pxamci_setup_data(host, mrq->data);

		cmdat &= ~CMDAT_BUSY;
		cmdat |= CMDAT_DATAEN | CMDAT_DMAEN;
		if (mrq->data->flags & MMC_DATA_WRITE)
			cmdat |= CMDAT_WRITE;

		if (mrq->data->flags & MMC_DATA_STREAM)
			cmdat |= CMDAT_STREAM;
	}

	pxamci_start_cmd(host, mrq->cmd, cmdat);
}

/*
 * mmc_host_ops.get_ro: query the write-protect switch via the platform
 * callback, if one was provided.
 */
static int pxamci_get_ro(struct mmc_host *mmc)
{
	struct pxamci_host *host = mmc_priv(mmc);

	if (host->pdata && host->pdata->get_ro)
		return host->pdata->get_ro(mmc_dev(mmc));
	/* Host doesn't support read only detection so assume writeable */
	return 0;
}

/*
 * mmc_host_ops.set_ios: apply clock rate and power settings.  The
 * divisor written to MMC_CLKRT is log2 of CLOCKRATE/ios->clock (rounded
 * so the resulting rate does not exceed the requested one); the register
 * itself is only written on the next command.
 */
static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct pxamci_host *host = mmc_priv(mmc);

	if (ios->clock) {
		unsigned int clk = CLOCKRATE / ios->clock;
		if (CLOCKRATE / clk > ios->clock)
			clk <<= 1;
		host->clkrt = fls(clk) - 1;
		pxa_set_cken(CKEN_MMC, 1);

		/*
		 * we write clkrt on the next command
		 */
	} else {
		pxamci_stop_clock(host);
		pxa_set_cken(CKEN_MMC, 0);
	}

	if (host->power_mode != ios->power_mode) {
		host->power_mode = ios->power_mode;

		if (host->pdata && host->pdata->setpower)
			host->pdata->setpower(mmc_dev(mmc), ios->vdd);

		/* First command after power-up must carry the init sequence. */
		if (ios->power_mode == MMC_POWER_ON)
			host->cmdat |= CMDAT_INIT;
	}

	pr_debug("PXAMCI: clkrt = %x cmdat = %x\n",
		 host->clkrt, host->cmdat);
}

static const struct mmc_host_ops pxamci_ops = {
	.request	= pxamci_request,
	.get_ro		= pxamci_get_ro,
	.set_ios	= pxamci_set_ios,
};

/*
 * DMA channel interrupt.  Completion is signalled through the MMC
 * controller's own interrupt, so a DMA interrupt here is unexpected;
 * log it and clear the status bits.
 */
static void pxamci_dma_irq(int dma, void *devid)
{
	printk(KERN_ERR "DMA%d: IRQ???\n", dma);
	DCSR(dma) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
}

/*
 * Card-detect interrupt (wired up by the platform's init callback):
 * tell the core to rescan after the platform-specified debounce delay.
 */
static irqreturn_t pxamci_detect_irq(int irq, void *devid)
{
	struct pxamci_host *host = mmc_priv(devid);

	mmc_detect_change(devid, host->pdata->detect_delay);
	return IRQ_HANDLED;
}

/*
 * Probe: claim MMIO and IRQ resources, allocate the mmc_host and DMA
 * descriptor page, quiesce the controller, grab a DMA channel, and
 * register with the MMC core.  All failure paths unwind through 'out'.
 */
static int pxamci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct pxamci_host *host = NULL;
	struct resource *r;
	int ret, irq;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!r || irq < 0)
		return -ENXIO;

	r = request_mem_region(r->start, SZ_4K, DRIVER_NAME);
	if (!r)
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto out;
	}

	mmc->ops = &pxamci_ops;
	mmc->f_min = CLOCKRATE_MIN;
	mmc->f_max = CLOCKRATE_MAX;

	/*
	 * We can do SG-DMA, but we don't because we never know how much
	 * data we successfully wrote to the card.
	 */
	mmc->max_phys_segs = NR_SG;

	/*
	 * Our hardware DMA can handle a maximum of one page per SG entry.
	 */
	mmc->max_seg_size = PAGE_SIZE;

	/*
	 * Block length register is 10 bits.
	 */
	mmc->max_blk_size = 1023;

	/*
	 * Block count register is 16 bits.
	 */
	mmc->max_blk_count = 65535;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->dma = -1;
	host->pdata = pdev->dev.platform_data;
	mmc->ocr_avail = host->pdata ?
			 host->pdata->ocr_mask :
			 MMC_VDD_32_33|MMC_VDD_33_34;

	/* One page of coherent memory holds the DMA descriptor chain. */
	host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		ret = -ENOMEM;
		goto out;
	}

	spin_lock_init(&host->lock);
	host->res = r;
	host->irq = irq;
	host->imask = MMC_I_MASK_ALL;

	host->base = ioremap(r->start, SZ_4K);
	if (!host->base) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Ensure that the host controller is shut down, and setup
	 * with our defaults.
	 */
	pxamci_stop_clock(host);
	writel(0, host->base + MMC_SPI);
	writel(64, host->base + MMC_RESTO);
	writel(host->imask, host->base + MMC_I_MASK);

	host->dma = pxa_request_dma(DRIVER_NAME, DMA_PRIO_LOW,
				    pxamci_dma_irq, host);
	if (host->dma < 0) {
		ret = -EBUSY;
		goto out;
	}

	ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
	if (ret)
		goto out;

	platform_set_drvdata(pdev, mmc);

	/* Platform hook: power/GPIO setup and card-detect irq wiring. */
	if (host->pdata && host->pdata->init)
		host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc);

	mmc_add_host(mmc);

	return 0;

 out:
	if (host) {
		if (host->dma >= 0)
			pxa_free_dma(host->dma);
		if (host->base)
			iounmap(host->base);
		if (host->sg_cpu)
			dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
	}
	if (mmc)
		mmc_free_host(mmc);
	release_resource(r);
	return ret;
}

/*
 * Remove: unregister from the core, mask all controller interrupts,
 * unroute the DMA request lines, and release every resource acquired
 * in probe (reverse order).
 */
static int pxamci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	if (mmc) {
		struct pxamci_host *host = mmc_priv(mmc);

		if (host->pdata && host->pdata->exit)
			host->pdata->exit(&pdev->dev, mmc);

		mmc_remove_host(mmc);

		pxamci_stop_clock(host);
		writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD|
		       END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
		       host->base + MMC_I_MASK);

		DRCMRRXMMC = 0;
		DRCMRTXMMC = 0;

		free_irq(host->irq, host);
		pxa_free_dma(host->dma);
		iounmap(host->base);
		dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);

		release_resource(host->res);

		mmc_free_host(mmc);
	}
	return 0;
}

#ifdef CONFIG_PM
/* Delegate suspend to the MMC core, which quiesces the card. */
static int pxamci_suspend(struct platform_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);
	int ret = 0;

	if (mmc)
		ret = mmc_suspend_host(mmc, state);

	return ret;
}

/* Delegate resume to the MMC core, which re-initialises the card. */
static int pxamci_resume(struct platform_device *dev)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);
	int ret = 0;

	if (mmc)
		ret = mmc_resume_host(mmc);

	return ret;
}
#else
#define pxamci_suspend	NULL
#define pxamci_resume	NULL
#endif

static struct platform_driver pxamci_driver = {
	.probe		= pxamci_probe,
	.remove		= pxamci_remove,
	.suspend	= pxamci_suspend,
	.resume		= pxamci_resume,
	.driver		= {
		.name	= DRIVER_NAME,
	},
};

static int __init pxamci_init(void)
{
	return platform_driver_register(&pxamci_driver);
}

static void __exit pxamci_exit(void)
{
	platform_driver_unregister(&pxamci_driver);
}

module_init(pxamci_init);
module_exit(pxamci_exit);

MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver");
MODULE_LICENSE("GPL");