/*
 *  linux/drivers/mmc/host/pxa.c - PXA MMCI driver
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This hardware is really sick:
 *   - No way to clear interrupts.
 *   - Have to turn off the clock whenever we touch the device.
 *   - Doesn't tell you how many data blocks were transferred.
 *  Yuck!
 *
 *	1 and 3 byte data transfers not supported
 *	max block length up to 1023
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mmc/host.h>
#include <linux/io.h>
#include <linux/regulator/consumer.h>
#include <linux/gpio.h>
#include <linux/gfp.h>

#include <asm/sizes.h>

#include <mach/hardware.h>
#include <mach/dma.h>
#include <mach/mmc.h>

#include "pxamci.h"

#define DRIVER_NAME	"pxa2xx-mci"

/* Only one scatter-gather segment is advertised to the core; see the
 * comment in pxamci_probe() for why SG-DMA is deliberately not used. */
#define NR_SG	1
/* Sentinel for host->clkrt meaning "controller clock is gated off". */
#define CLKRT_OFF	(~0)

#define mmc_has_26MHz()		(cpu_is_pxa300() || cpu_is_pxa310() \
				|| cpu_is_pxa935())

/*
 * Per-controller state.  One instance lives in the private area of the
 * mmc_host allocated in pxamci_probe().
 */
struct pxamci_host {
	struct mmc_host		*mmc;
	spinlock_t		lock;		/* protects imask updates */
	struct resource		*res;		/* MMIO register resource */
	void __iomem		*base;		/* mapped register base */
	struct clk		*clk;
	unsigned long		clkrate;	/* input clock rate in Hz */
	int			irq;
	int			dma;		/* PXA DMA channel, -1 if none */
	unsigned int		clkrt;		/* cached MMC_CLKRT divisor */
	unsigned int		cmdat;		/* sticky CMDAT bits for next cmd */
	unsigned int		imask;		/* cached MMC_I_MASK value */
	unsigned int		power_mode;
	struct pxamci_platform_data *pdata;

	struct mmc_request	*mrq;		/* request in flight, or NULL */
	struct mmc_command	*cmd;
	struct mmc_data		*data;

	dma_addr_t		sg_dma;		/* bus address of sg_cpu[] */
	struct pxa_dma_desc	*sg_cpu;	/* DMA descriptor chain */
	unsigned int		dma_len;	/* mapped sg entry count */

	unsigned int		dma_dir;	/* DMA_TO/FROM_DEVICE for unmap */
	unsigned int		dma_drcmrrx;	/* DRCMR index for RX requests */
	unsigned int		dma_drcmrtx;	/* DRCMR index for TX requests */

	struct regulator	*vcc;		/* vmmc supply, or NULL */
};

/*
 * Determine the available OCR (voltage) mask for the host: prefer a
 * "vmmc" regulator when CONFIG_REGULATOR is enabled, otherwise fall
 * back to the platform data mask, and finally to a 3.2V-3.4V default.
 */
static inline void pxamci_init_ocr(struct pxamci_host *host)
{
#ifdef CONFIG_REGULATOR
	host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");

	if (IS_ERR(host->vcc))
		host->vcc = NULL;
	else {
		host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
		if (host->pdata && host->pdata->ocr_mask)
			dev_warn(mmc_dev(host->mmc),
				"ocr_mask/setpower will not be used\n");
	}
#endif
	if (host->vcc == NULL) {
		/* fall-back to platform data */
		host->mmc->ocr_avail = host->pdata ?
			host->pdata->ocr_mask :
			MMC_VDD_32_33 | MMC_VDD_33_34;
	}
}

/*
 * Apply card power for the requested @vdd, using (in order of
 * precedence) the regulator, a power GPIO from platform data, or the
 * platform setpower() callback.
 */
static inline void pxamci_set_power(struct pxamci_host *host, unsigned int vdd)
{
	int on;

#ifdef CONFIG_REGULATOR
	if (host->vcc)
		mmc_regulator_set_ocr(host->vcc, vdd);
#endif
	if (!host->vcc && host->pdata &&
	    gpio_is_valid(host->pdata->gpio_power)) {
		/* power the card iff vdd falls within the supported mask */
		on = ((1 << vdd) & host->pdata->ocr_mask);
		gpio_set_value(host->pdata->gpio_power,
			       !!on ^ host->pdata->gpio_power_invert);
	}
	if (!host->vcc && host->pdata && host->pdata->setpower)
		host->pdata->setpower(mmc_dev(host->mmc), vdd);
}

/*
 * Ask the controller to stop the MMC bus clock and busy-wait (up to
 * ~10ms) for STAT_CLK_EN to clear.  The hardware requires the clock to
 * be stopped before most register accesses (see header comment).
 */
static void pxamci_stop_clock(struct pxamci_host *host)
{
	if (readl(host->base + MMC_STAT) & STAT_CLK_EN) {
		unsigned long timeout = 10000;
		unsigned int v;

		writel(STOP_CLOCK, host->base + MMC_STRPCL);

		do {
			v = readl(host->base + MMC_STAT);
			if (!(v & STAT_CLK_EN))
				break;
			udelay(1);
		} while (timeout--);

		if (v & STAT_CLK_EN)
			dev_err(mmc_dev(host->mmc), "unable to stop clock\n");
	}
}

/*
 * Unmask the interrupt bits in @mask, keeping the cached copy in
 * host->imask consistent with the MMC_I_MASK register under host->lock.
 */
static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->imask &= ~mask;
	writel(host->imask, host->base + MMC_I_MASK);
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Mask the interrupt bits in @mask; counterpart of pxamci_enable_irq().
 */
static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->imask |= mask;
	writel(host->imask, host->base + MMC_I_MASK);
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Program the data-transfer registers and build the PXA DMA descriptor
 * chain for @data.  For PXA27x writes, the DMA channel is NOT started
 * here but deferred to pxamci_cmd_done() (erratum workaround; see the
 * matching DCSR_RUN write there).
 */
static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	unsigned long long clks;
	unsigned int timeout;
	bool dalgn = 0;
	u32 dcmd;
	int i;

	host->data = data;

	/* stream transfers have no block count; use the register maximum */
	if (data->flags & MMC_DATA_STREAM)
		nob = 0xffff;

	writel(nob, host->base + MMC_NOB);
	writel(data->blksz, host->base + MMC_BLKLEN);

	/* convert the ns timeout to clock ticks; MMC_RDTO counts in
	 * units of 256 clocks, so round up */
	clks = (unsigned long long)data->timeout_ns * host->clkrate;
	do_div(clks, 1000000000UL);
	timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt);
	writel((timeout + 255) / 256, host->base + MMC_RDTO);

	/* route the DMA request line for the active direction only */
	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
		DRCMR(host->dma_drcmrtx) = 0;
		DRCMR(host->dma_drcmrrx) = host->dma | DRCMR_MAPVLD;
	} else {
		host->dma_dir = DMA_TO_DEVICE;
		dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
		DRCMR(host->dma_drcmrrx) = 0;
		DRCMR(host->dma_drcmrtx) = host->dma | DRCMR_MAPVLD;
	}

	dcmd |= DCMD_BURST32 | DCMD_WIDTH1;

	host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				   host->dma_dir);

	for (i = 0; i < host->dma_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		host->sg_cpu[i].dcmd = dcmd | length;
		/* request an end-of-descriptor interrupt for writes that are
		 * not a multiple of the 32-byte burst, so pxamci_dma_irq()
		 * can flag the partially-filled FIFO buffer */
		if (length & 31 && !(data->flags & MMC_DATA_READ))
			host->sg_cpu[i].dcmd |= DCMD_ENDIRQEN;
		/* Not aligned to 8-byte boundary? */
		if (sg_dma_address(&data->sg[i]) & 0x7)
			dalgn = 1;
		if (data->flags & MMC_DATA_READ) {
			host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO;
			host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]);
		} else {
			host->sg_cpu[i].dsadr = sg_dma_address(&data->sg[i]);
			host->sg_cpu[i].dtadr = host->res->start + MMC_TXFIFO;
		}
		host->sg_cpu[i].ddadr = host->sg_dma + (i + 1) *
					sizeof(struct pxa_dma_desc);
	}
	host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP;
	wmb();		/* descriptors must be visible before DMA start */

	/*
	 * The PXA27x DMA controller encounters overhead when working with
	 * unaligned (to 8-byte boundaries) data, so switch on byte alignment
	 * mode only if we have unaligned data.
	 */
	if (dalgn)
		DALGN |= (1 << host->dma);
	else
		DALGN &= ~(1 << host->dma);
	DDADR(host->dma) = host->sg_dma;

	/* PXA27x writes are started later, in pxamci_cmd_done() */
	if (!cpu_is_pxa27x() || data->flags & MMC_DATA_READ)
		DCSR(host->dma) = DCSR_RUN;
}

/*
 * Latch @cmd into the controller, start the bus clock and unmask the
 * end-of-command interrupt.  @cmdat carries the CMDAT flags accumulated
 * by the caller (data/DMA/bus-width bits etc.).
 */
static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
{
	WARN_ON(host->cmd != NULL);
	host->cmd = cmd;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= CMDAT_BUSY;

/* map the core response type onto the controller's three formats */
#define RSP_TYPE(x)	((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
	switch (RSP_TYPE(mmc_resp_type(cmd))) {
	case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6, r7 */
		cmdat |= CMDAT_RESP_SHORT;
		break;
	case RSP_TYPE(MMC_RSP_R3):
		cmdat |= CMDAT_RESP_R3;
		break;
	case RSP_TYPE(MMC_RSP_R2):
		cmdat |= CMDAT_RESP_R2;
		break;
	default:
		break;
	}

	writel(cmd->opcode, host->base + MMC_CMD);
	/* the 32-bit argument is split across two 16-bit registers */
	writel(cmd->arg >> 16, host->base + MMC_ARGH);
	writel(cmd->arg & 0xffff, host->base + MMC_ARGL);
	writel(cmdat, host->base + MMC_CMDAT);
	writel(host->clkrt, host->base + MMC_CLKRT);

	writel(START_CLOCK, host->base + MMC_STRPCL);

	pxamci_enable_irq(host, END_CMD_RES);
}

/*
 * Clear the in-flight request state and hand @mrq back to the core.
 */
static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
{
	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;
	mmc_request_done(host->mmc, mrq);
}

/*
 * Handle END_CMD_RES: read back the response, record any command error,
 * and either kick off the data phase or finish the request.
 * Returns 1 if there was a command to complete, 0 otherwise.
 */
static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i;
	u32 v;

	if (!cmd)
		return 0;

	host->cmd = NULL;

	/*
	 * Did I mention this is Sick.  We always need to
	 * discard the upper 8 bits of the first 16-bit word.
	 */
	v = readl(host->base + MMC_RES) & 0xffff;
	for (i = 0; i < 4; i++) {
		u32 w1 = readl(host->base + MMC_RES) & 0xffff;
		u32 w2 = readl(host->base + MMC_RES) & 0xffff;
		cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8;
		v = w2;
	}

	if (stat & STAT_TIME_OUT_RESPONSE) {
		cmd->error = -ETIMEDOUT;
	} else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
		/* PXA27x can miscompute the CRC of 136-bit responses whose
		 * top bit is set, so ignore the error in that case */
		if (cpu_is_pxa27x() &&
		    (cmd->flags & MMC_RSP_136 && cmd->resp[0] & 0x80000000))
			pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode);
		else
			cmd->error = -EILSEQ;
	}

	pxamci_disable_irq(host, END_CMD_RES);
	if (host->data && !cmd->error) {
		pxamci_enable_irq(host, DATA_TRAN_DONE);
		/* deferred DMA start for PXA27x writes; see
		 * pxamci_setup_data() */
		if (cpu_is_pxa27x() && host->data->flags & MMC_DATA_WRITE)
			DCSR(host->dma) = DCSR_RUN;
	} else {
		pxamci_finish_request(host, host->mrq);
	}

	return 1;
}

/*
 * Handle DATA_TRAN_DONE: stop DMA, unmap the buffers, record data
 * errors, then either send the stop command or finish the request.
 * Returns 1 if there was a data phase to complete, 0 otherwise.
 */
static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_data *data = host->data;

	if (!data)
		return 0;

	DCSR(host->dma) = 0;
	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
		     host->dma_dir);

	if (stat & STAT_READ_TIME_OUT)
		data->error = -ETIMEDOUT;
	else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR))
		data->error = -EILSEQ;

	/*
	 * There appears to be a hardware design bug here.  There seems to
	 * be no way to find out how much data was transferred to the card.
	 * This means that if there was an error on any block, we mark all
	 * data blocks as being in error.
	 */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pxamci_disable_irq(host, DATA_TRAN_DONE);

	host->data = NULL;
	if (host->mrq->stop) {
		pxamci_stop_clock(host);
		pxamci_start_cmd(host, host->mrq->stop, host->cmdat);
	} else {
		pxamci_finish_request(host, host->mrq);
	}

	return 1;
}

/*
 * Controller interrupt handler: dispatch command-done, data-done and
 * SDIO card interrupts according to the unmasked bits in MMC_I_REG.
 */
static irqreturn_t pxamci_irq(int irq, void *devid)
{
	struct pxamci_host *host = devid;
	unsigned int ireg;
	int handled = 0;

	ireg = readl(host->base + MMC_I_REG) & ~readl(host->base + MMC_I_MASK);

	if (ireg) {
		unsigned stat = readl(host->base + MMC_STAT);

		pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat);

		if (ireg & END_CMD_RES)
			handled |= pxamci_cmd_done(host, stat);
		if (ireg & DATA_TRAN_DONE)
			handled |= pxamci_data_done(host, stat);
		if (ireg & SDIO_INT) {
			mmc_signal_sdio_irq(host->mmc);
			handled = 1;
		}
	}

	return IRQ_RETVAL(handled);
}

/*
 * mmc_host_ops.request: start processing @mrq.  CMDAT_INIT (the 80-clock
 * initialisation stream) is consumed here so it is only sent once after
 * power-on.
 */
static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct pxamci_host *host = mmc_priv(mmc);
	unsigned int cmdat;

	WARN_ON(host->mrq != NULL);

	host->mrq = mrq;

	pxamci_stop_clock(host);

	cmdat = host->cmdat;
	host->cmdat &= ~CMDAT_INIT;

	if (mrq->data) {
		pxamci_setup_data(host, mrq->data);

		cmdat &= ~CMDAT_BUSY;
		cmdat |= CMDAT_DATAEN | CMDAT_DMAEN;
		if (mrq->data->flags & MMC_DATA_WRITE)
			cmdat |= CMDAT_WRITE;

		if (mrq->data->flags & MMC_DATA_STREAM)
			cmdat |= CMDAT_STREAM;
	}

	pxamci_start_cmd(host, mrq->cmd, cmdat);
}

/*
 * mmc_host_ops.get_ro: report write-protect state via the RO GPIO or
 * the platform get_ro() callback; -ENOSYS if neither is available.
 */
static int pxamci_get_ro(struct mmc_host *mmc)
{
	struct pxamci_host *host = mmc_priv(mmc);

	if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) {
		if (host->pdata->gpio_card_ro_invert)
			return !gpio_get_value(host->pdata->gpio_card_ro);
		else
			return gpio_get_value(host->pdata->gpio_card_ro);
	}
	if (host->pdata && host->pdata->get_ro)
		return !!host->pdata->get_ro(mmc_dev(mmc));
	/*
	 * Board doesn't support read only detection; let the mmc core
	 * decide what to do.
	 */
	return -ENOSYS;
}

/*
 * mmc_host_ops.set_ios: program clock divisor, power state and bus
 * width.  The divisor (host->clkrt) is only written to the hardware on
 * the next command, in pxamci_start_cmd().
 */
static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct pxamci_host *host = mmc_priv(mmc);

	if (ios->clock) {
		unsigned long rate = host->clkrate;
		unsigned int clk = rate / ios->clock;

		if (host->clkrt == CLKRT_OFF)
			clk_enable(host->clk);

		if (ios->clock == 26000000) {
			/* to support 26MHz */
			host->clkrt = 7;
		} else {
			/* to handle (19.5MHz, 26MHz) */
			if (!clk)
				clk = 1;

			/*
			 * clk might result in a lower divisor than we
			 * desire.  check for that condition and adjust
			 * as appropriate.
			 */
			if (rate / clk > ios->clock)
				clk <<= 1;
			host->clkrt = fls(clk) - 1;
		}

		/*
		 * we write clkrt on the next command
		 */
	} else {
		pxamci_stop_clock(host);
		if (host->clkrt != CLKRT_OFF) {
			host->clkrt = CLKRT_OFF;
			clk_disable(host->clk);
		}
	}

	if (host->power_mode != ios->power_mode) {
		host->power_mode = ios->power_mode;

		pxamci_set_power(host, ios->vdd);

		/* send the 80-clock init stream with the next command */
		if (ios->power_mode == MMC_POWER_ON)
			host->cmdat |= CMDAT_INIT;
	}

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		host->cmdat |= CMDAT_SD_4DAT;
	else
		host->cmdat &= ~CMDAT_SD_4DAT;

	pr_debug("PXAMCI: clkrt = %x cmdat = %x\n",
		 host->clkrt, host->cmdat);
}

/*
 * mmc_host_ops.enable_sdio_irq: gate the SDIO card interrupt through
 * the controller's interrupt mask.
 */
static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable)
{
	struct pxamci_host *pxa_host = mmc_priv(host);

	if (enable)
		pxamci_enable_irq(pxa_host, SDIO_INT);
	else
		pxamci_disable_irq(pxa_host, SDIO_INT);
}

static const struct mmc_host_ops pxamci_ops = {
	.request		= pxamci_request,
	.get_ro			= pxamci_get_ro,
	.set_ios		= pxamci_set_ios,
	.enable_sdio_irq	= pxamci_enable_sdio_irq,
};

/*
 * DMA channel interrupt: an end-of-descriptor interrupt (only requested
 * for short writes in pxamci_setup_data()) means a partially-filled
 * FIFO buffer must be flagged; anything else is a DMA error.
 */
static void pxamci_dma_irq(int dma, void *devid)
{
	struct pxamci_host *host = devid;
	int dcsr = DCSR(dma);
	DCSR(dma) = dcsr & ~DCSR_STOPIRQEN;

	if (dcsr & DCSR_ENDINTR) {
		writel(BUF_PART_FULL, host->base + MMC_PRTBUF);
	} else {
		printk(KERN_ERR "%s: DMA error on channel %d (DCSR=%#x)\n",
		       mmc_hostname(host->mmc), dma, dcsr);
		host->data->error = -EIO;
		pxamci_data_done(host, 0);
	}
}

/*
 * Card-detect GPIO interrupt: schedule a (debounced) rescan.
 * @devid is the mmc_host, as registered in pxamci_probe().
 */
static irqreturn_t pxamci_detect_irq(int irq, void *devid)
{
	struct pxamci_host *host = mmc_priv(devid);

	mmc_detect_change(devid, msecs_to_jiffies(host->pdata->detect_delay_ms));
	return IRQ_HANDLED;
}

/*
 * Platform device probe: map registers, claim clock/DMA/IRQ/GPIO
 * resources, initialise the controller to a quiescent state and
 * register the host with the MMC core.  All failure paths funnel
 * through the labels at the bottom.
 */
static int pxamci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct pxamci_host *host = NULL;
	struct resource *r, *dmarx, *dmatx;
	int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!r || irq < 0)
		return -ENXIO;

	r = request_mem_region(r->start, SZ_4K, DRIVER_NAME);
	if (!r)
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto out;
	}

	mmc->ops = &pxamci_ops;

	/*
	 * We can do SG-DMA, but we don't because we never know how much
	 * data we successfully wrote to the card.
	 */
	mmc->max_phys_segs = NR_SG;

	/*
	 * Our hardware DMA can handle a maximum of one page per SG entry.
	 */
	mmc->max_seg_size = PAGE_SIZE;

	/*
	 * Block length register is only 10 bits before PXA27x.
	 */
	mmc->max_blk_size = cpu_is_pxa25x() ? 1023 : 2048;

	/*
	 * Block count register is 16 bits.
	 */
	mmc->max_blk_count = 65535;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->dma = -1;
	host->pdata = pdev->dev.platform_data;
	host->clkrt = CLKRT_OFF;

	host->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto out;
	}

	host->clkrate = clk_get_rate(host->clk);

	/*
	 * Calculate minimum clock rate, rounding up.
	 */
	mmc->f_min = (host->clkrate + 63) / 64;
	mmc->f_max = (mmc_has_26MHz()) ? 26000000 : host->clkrate;

	pxamci_init_ocr(host);

	mmc->caps = 0;
	host->cmdat = 0;
	if (!cpu_is_pxa25x()) {
		mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
		host->cmdat |= CMDAT_SDIO_INT_EN;
		if (mmc_has_26MHz())
			mmc->caps |= MMC_CAP_MMC_HIGHSPEED |
				     MMC_CAP_SD_HIGHSPEED;
	}

	/* coherent page for the DMA descriptor chain */
	host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		ret = -ENOMEM;
		goto out;
	}

	spin_lock_init(&host->lock);
	host->res = r;
	host->irq = irq;
	host->imask = MMC_I_MASK_ALL;

	host->base = ioremap(r->start, SZ_4K);
	if (!host->base) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Ensure that the host controller is shut down, and setup
	 * with our defaults.
	 */
	pxamci_stop_clock(host);
	writel(0, host->base + MMC_SPI);
	writel(64, host->base + MMC_RESTO);
	writel(host->imask, host->base + MMC_I_MASK);

	host->dma = pxa_request_dma(DRIVER_NAME, DMA_PRIO_LOW,
				    pxamci_dma_irq, host);
	if (host->dma < 0) {
		ret = -EBUSY;
		goto out;
	}

	ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
	if (ret)
		goto out;

	platform_set_drvdata(pdev, mmc);

	dmarx = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!dmarx) {
		ret = -ENXIO;
		goto out;
	}
	host->dma_drcmrrx = dmarx->start;

	dmatx = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!dmatx) {
		ret = -ENXIO;
		goto out;
	}
	host->dma_drcmrtx = dmatx->start;

	if (host->pdata) {
		gpio_cd = host->pdata->gpio_card_detect;
		gpio_ro = host->pdata->gpio_card_ro;
		gpio_power = host->pdata->gpio_power;
	}
	if (gpio_is_valid(gpio_power)) {
		ret = gpio_request(gpio_power, "mmc card power");
		if (ret) {
			dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power);
			goto out;
		}
		gpio_direction_output(gpio_power,
				      host->pdata->gpio_power_invert);
	}
	if (gpio_is_valid(gpio_ro)) {
		ret = gpio_request(gpio_ro, "mmc card read only");
		if (ret) {
			dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
			goto err_gpio_ro;
		}
		gpio_direction_input(gpio_ro);
	}
	if (gpio_is_valid(gpio_cd)) {
		ret = gpio_request(gpio_cd, "mmc card detect");
		if (ret) {
			dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
			goto err_gpio_cd;
		}
		gpio_direction_input(gpio_cd);

		ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq,
				  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				  "mmc card detect", mmc);
		if (ret) {
			dev_err(&pdev->dev, "failed to request card detect IRQ\n");
			goto err_request_irq;
		}
	}

	if (host->pdata && host->pdata->init)
		host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc);

	if (gpio_is_valid(gpio_power) && host->pdata->setpower)
		dev_warn(&pdev->dev, "gpio_power and setpower() both defined\n");
	if (gpio_is_valid(gpio_ro) && host->pdata->get_ro)
		dev_warn(&pdev->dev, "gpio_ro and get_ro() both defined\n");

	mmc_add_host(mmc);

	return 0;

err_request_irq:
	gpio_free(gpio_cd);
err_gpio_cd:
	gpio_free(gpio_ro);
err_gpio_ro:
	gpio_free(gpio_power);
 out:
	if (host) {
		if (host->dma >= 0)
			pxa_free_dma(host->dma);
		if (host->base)
			iounmap(host->base);
		if (host->sg_cpu)
			dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
		if (host->clk)
			clk_put(host->clk);
	}
	if (mmc)
		mmc_free_host(mmc);
	release_resource(r);
	return ret;
}

/*
 * Platform device removal: unwind everything claimed in pxamci_probe()
 * in reverse order, masking interrupts and unrouting the DMA request
 * lines before releasing the channel.
 */
static int pxamci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	int gpio_cd = -1, gpio_ro = -1, gpio_power = -1;

	platform_set_drvdata(pdev, NULL);

	if (mmc) {
		struct pxamci_host *host = mmc_priv(mmc);

		mmc_remove_host(mmc);

		if (host->pdata) {
			gpio_cd = host->pdata->gpio_card_detect;
			gpio_ro = host->pdata->gpio_card_ro;
			gpio_power = host->pdata->gpio_power;
		}
		if (gpio_is_valid(gpio_cd)) {
			free_irq(gpio_to_irq(gpio_cd), mmc);
			gpio_free(gpio_cd);
		}
		if (gpio_is_valid(gpio_ro))
			gpio_free(gpio_ro);
		if (gpio_is_valid(gpio_power))
			gpio_free(gpio_power);
		if (host->vcc)
			regulator_put(host->vcc);

		if (host->pdata && host->pdata->exit)
			host->pdata->exit(&pdev->dev, mmc);

		pxamci_stop_clock(host);
		writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD|
		       END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
		       host->base + MMC_I_MASK);

		DRCMR(host->dma_drcmrrx) = 0;
		DRCMR(host->dma_drcmrtx) = 0;

		free_irq(host->irq, host);
		pxa_free_dma(host->dma);
		iounmap(host->base);
		dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);

		clk_put(host->clk);

		release_resource(host->res);

		mmc_free_host(mmc);
	}
	return 0;
}

#ifdef CONFIG_PM
/* System suspend: delegate to the MMC core. */
static int pxamci_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	int ret = 0;

	if (mmc)
		ret = mmc_suspend_host(mmc);

	return ret;
}

/* System resume: delegate to the MMC core. */
static int pxamci_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	int ret = 0;

	if (mmc)
		ret = mmc_resume_host(mmc);

	return ret;
}

static const struct dev_pm_ops pxamci_pm_ops = {
	.suspend	= pxamci_suspend,
	.resume		= pxamci_resume,
};
#endif

static struct platform_driver pxamci_driver = {
	.probe		= pxamci_probe,
	.remove		= pxamci_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
#ifdef CONFIG_PM
		.pm	= &pxamci_pm_ops,
#endif
	},
};

static int __init pxamci_init(void)
{
	return platform_driver_register(&pxamci_driver);
}

static void __exit pxamci_exit(void)
{
	platform_driver_unregister(&pxamci_driver);
}

module_init(pxamci_init);
module_exit(pxamci_exit);

MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-mci");