Lines matching refs: host (Renesas usdhi6rol0 SD/SDIO host driver)

16 #include <linux/mmc/host.h>
209 static void usdhi6_write(struct usdhi6_host *host, u32 reg, u32 data)
211 iowrite32(data, host->base + reg);
212 dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
213 host->base, reg, data);
216 static void usdhi6_write16(struct usdhi6_host *host, u32 reg, u16 data)
218 iowrite16(data, host->base + reg);
219 dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
220 host->base, reg, data);
223 static u32 usdhi6_read(struct usdhi6_host *host, u32 reg)
225 u32 data = ioread32(host->base + reg);
226 dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
227 host->base, reg, data);
231 static u16 usdhi6_read16(struct usdhi6_host *host, u32 reg)
233 u16 data = ioread16(host->base + reg);
234 dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
235 host->base, reg, data);
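
The four accessors above share one pattern: a raw 32- or 16-bit MMIO access at host->base + reg followed by a dev_vdbg() trace of the same address and value. A minimal user-space sketch of that wrap-then-trace pattern, with a mock register window standing in for the ioremapped base (all mock_* names here are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the mapped register window (host->base). */
static uint8_t mock_base[0x100];

/* Mirror of the usdhi6_write() pattern: raw store, then a trace line. */
static void mock_write32(uint8_t *base, uint32_t reg, uint32_t data)
{
	*(volatile uint32_t *)(base + reg) = data;	/* iowrite32(data, base + reg) */
	printf("%s(%p + 0x%x) = 0x%x\n", __func__, (void *)base, reg, data);
}

/* Mirror of usdhi6_read(): raw load, then the same trace format. */
static uint32_t mock_read32(uint8_t *base, uint32_t reg)
{
	uint32_t data = *(volatile uint32_t *)(base + reg);
	printf("%s(%p + 0x%x) = 0x%x\n", __func__, (void *)base, reg, data);
	return data;
}

int main(void)
{
	mock_write32(mock_base, 0x20, 0x1234);
	return mock_read32(mock_base, 0x20) == 0x1234 ? 0 : 1;
}
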
239 static void usdhi6_irq_enable(struct usdhi6_host *host, u32 info1, u32 info2)
241 host->status_mask = USDHI6_SD_INFO1_IRQ & ~info1;
242 host->status2_mask = USDHI6_SD_INFO2_IRQ & ~info2;
243 usdhi6_write(host, USDHI6_SD_INFO1_MASK, host->status_mask);
244 usdhi6_write(host, USDHI6_SD_INFO2_MASK, host->status2_mask);
247 static void usdhi6_wait_for_resp(struct usdhi6_host *host)
249 usdhi6_irq_enable(host, USDHI6_SD_INFO1_RSP_END |
254 static void usdhi6_wait_for_brwe(struct usdhi6_host *host, bool read)
256 usdhi6_irq_enable(host, USDHI6_SD_INFO1_ACCESS_END |
261 static void usdhi6_only_cd(struct usdhi6_host *host)
264 usdhi6_irq_enable(host, USDHI6_SD_INFO1_CARD_CD, 0);
267 static void usdhi6_mask_all(struct usdhi6_host *host)
269 usdhi6_irq_enable(host, 0, 0);
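
usdhi6_irq_enable() derives the software masks by clearing the requested bits out of the full interrupt sets (USDHI6_SD_INFO1_IRQ / USDHI6_SD_INFO2_IRQ) and writing the result into the mask registers; usdhi6_mask_all() is the degenerate call with no bits requested. A small sketch of that mask arithmetic, assuming a set mask bit disables the corresponding interrupt (the bit values below are made up):

#include <stdint.h>
#include <assert.h>

/* Hypothetical bit values; the real USDHI6_SD_INFO?_IRQ masks differ. */
#define MOCK_INFO1_IRQ_ALL	0x0000031dU
#define MOCK_INFO1_RSP_END	0x00000001U

/* usdhi6_irq_enable() pattern: bits requested in info1 are cleared in the
 * mask register, everything else stays masked. */
static uint32_t irq_mask_for(uint32_t info1)
{
	return MOCK_INFO1_IRQ_ALL & ~info1;
}

int main(void)
{
	/* usdhi6_mask_all(): request nothing -> full mask. */
	assert(irq_mask_for(0) == MOCK_INFO1_IRQ_ALL);
	/* usdhi6_wait_for_resp(): RSP_END becomes unmasked. */
	assert((irq_mask_for(MOCK_INFO1_RSP_END) & MOCK_INFO1_RSP_END) == 0);
	return 0;
}
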
272 static int usdhi6_error_code(struct usdhi6_host *host)
276 usdhi6_write(host, USDHI6_SD_STOP, USDHI6_SD_STOP_STP);
278 if (host->io_error &
280 u32 rsp54 = usdhi6_read(host, USDHI6_SD_RSP54);
281 int opc = host->mrq ? host->mrq->cmd->opcode : -1;
283 err = usdhi6_read(host, USDHI6_SD_ERR_STS2);
285 if (host->wait == USDHI6_WAIT_FOR_CMD)
286 dev_dbg(mmc_dev(host->mmc),
288 err, rsp54, host->wait, opc);
290 dev_warn(mmc_dev(host->mmc),
292 err, rsp54, host->wait, opc);
296 err = usdhi6_read(host, USDHI6_SD_ERR_STS1);
298 dev_warn(mmc_dev(host->mmc), "Err sts 0x%x, state %u, CMD%d\n",
299 err, host->wait, host->mrq ? host->mrq->cmd->opcode : -1);
300 if (host->io_error & USDHI6_SD_INFO2_ILA)
314 static void usdhi6_blk_bounce(struct usdhi6_host *host,
317 struct mmc_data *data = host->mrq->data;
318 size_t blk_head = host->head_len;
320 dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u of %u SG: %ux%u @ 0x%x\n",
321 __func__, host->mrq->cmd->opcode, data->sg_len,
324 host->head_pg.page = host->pg.page;
325 host->head_pg.mapped = host->pg.mapped;
326 host->pg.page = nth_page(host->pg.page, 1);
327 host->pg.mapped = kmap(host->pg.page);
329 host->blk_page = host->bounce_buf;
330 host->offset = 0;
335 memcpy(host->bounce_buf, host->head_pg.mapped + PAGE_SIZE - blk_head,
337 memcpy(host->bounce_buf + blk_head, host->pg.mapped,
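
usdhi6_blk_bounce() handles a block that straddles a page boundary: the first head_len bytes sit at the tail of the current page and the rest at the start of the next page, so the two memcpy() calls above assemble them into bounce_buf. A self-contained sketch of that assembly, assuming 4 KiB pages:

#include <string.h>
#include <assert.h>

#define PAGE_SZ 4096	/* stand-in for PAGE_SIZE */

/* Assemble one block that crosses a page boundary into a bounce buffer,
 * mirroring the two copies in usdhi6_blk_bounce(): blk_head bytes come from
 * the tail of the head page, the remainder from the start of the next page. */
static void bounce_assemble(char *bounce, size_t blksz, size_t blk_head,
			    const char *head_page, const char *next_page)
{
	memcpy(bounce, head_page + PAGE_SZ - blk_head, blk_head);
	memcpy(bounce + blk_head, next_page, blksz - blk_head);
}

int main(void)
{
	static char head[PAGE_SZ], next[PAGE_SZ], bounce[512];

	head[PAGE_SZ - 1] = 'A';	/* last byte of the head page */
	next[0] = 'B';			/* first byte of the next page */
	bounce_assemble(bounce, sizeof(bounce), 1, head, next);
	assert(bounce[0] == 'A' && bounce[1] == 'B');
	return 0;
}
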
342 static void usdhi6_sg_prep(struct usdhi6_host *host)
344 struct mmc_request *mrq = host->mrq;
347 usdhi6_write(host, USDHI6_SD_SECCNT, data->blocks);
349 host->sg = data->sg;
351 host->offset = host->sg->offset;
355 static void *usdhi6_sg_map(struct usdhi6_host *host)
357 struct mmc_data *data = host->mrq->data;
358 struct scatterlist *sg = data->sg_len > 1 ? host->sg : data->sg;
362 WARN(host->pg.page, "%p not properly unmapped!\n", host->pg.page);
368 host->pg.page = sg_page(sg);
369 host->pg.mapped = kmap(host->pg.page);
370 host->offset = sg->offset;
376 host->head_len = blk_head;
383 usdhi6_blk_bounce(host, sg);
385 host->blk_page = host->pg.mapped;
387 dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p + %u for CMD%u @ 0x%p\n",
388 host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
389 sg->offset, host->mrq->cmd->opcode, host->mrq);
391 return host->blk_page + host->offset;
395 static void usdhi6_sg_unmap(struct usdhi6_host *host, bool force)
397 struct mmc_data *data = host->mrq->data;
398 struct page *page = host->head_pg.page;
403 host->sg : data->sg;
404 size_t blk_head = host->head_len;
407 memcpy(host->head_pg.mapped + PAGE_SIZE - blk_head,
408 host->bounce_buf, blk_head);
409 memcpy(host->pg.mapped, host->bounce_buf + blk_head,
416 host->head_pg.page = NULL;
419 (host->page_idx << PAGE_SHIFT) + data->blksz - blk_head)
424 page = host->pg.page;
431 host->pg.page = NULL;
435 static void usdhi6_sg_advance(struct usdhi6_host *host)
437 struct mmc_data *data = host->mrq->data;
441 if (host->head_pg.page) {
443 host->page_idx++;
444 host->offset = data->blksz - host->head_len;
445 host->blk_page = host->pg.mapped;
446 usdhi6_sg_unmap(host, false);
448 host->offset += data->blksz;
450 if (host->offset == PAGE_SIZE) {
452 host->offset = 0;
453 host->page_idx++;
458 * Now host->blk_page + host->offset point at the end of our last block
459 * and host->page_idx is the index of the page, in which our new block
463 done = (host->page_idx << PAGE_SHIFT) + host->offset;
464 total = host->sg->offset + sg_dma_len(host->sg);
466 dev_dbg(mmc_dev(host->mmc), "%s(): %zu of %zu @ %zu\n", __func__,
467 done, total, host->offset);
469 if (done < total && host->offset) {
471 if (host->offset + data->blksz > PAGE_SIZE)
473 usdhi6_blk_bounce(host, host->sg);
479 usdhi6_sg_unmap(host, false);
487 struct scatterlist *next = sg_next(host->sg);
489 host->page_idx = 0;
492 host->wait = USDHI6_WAIT_FOR_DATA_END;
493 host->sg = next;
506 host->pg.page = nth_page(sg_page(host->sg), host->page_idx);
507 host->pg.mapped = kmap(host->pg.page);
508 host->blk_page = host->pg.mapped;
510 dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p for CMD%u @ 0x%p\n",
511 host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
512 host->mrq->cmd->opcode, host->mrq);
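
usdhi6_sg_advance() tracks progress through the current scatterlist entry as done = (page_idx << PAGE_SHIFT) + offset against total = sg->offset + length, and moves on to the next page or the next entry when a block boundary requires it. A sketch of just that progress arithmetic, with a hard-coded 4 KiB page size:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT_MOCK 12	/* 4 KiB pages assumed */

/* Progress check from usdhi6_sg_advance(): "done" counts bytes consumed in
 * the current scatterlist entry (whole pages plus the in-page offset),
 * "total" is the entry's end as measured from its first page. */
static int sg_entry_finished(size_t page_idx, size_t offset,
			     size_t sg_offset, size_t sg_len)
{
	size_t done = (page_idx << PAGE_SHIFT_MOCK) + offset;
	size_t total = sg_offset + sg_len;

	printf("%zu of %zu @ %zu\n", done, total, offset);
	return done >= total;
}

int main(void)
{
	/* e.g. an 8 KiB entry starting at offset 0: finished after 2 pages. */
	return sg_entry_finished(2, 0, 0, 8192) ? 0 : 1;
}
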
517 static void usdhi6_dma_release(struct usdhi6_host *host)
519 host->dma_active = false;
520 if (host->chan_tx) {
521 struct dma_chan *chan = host->chan_tx;
522 host->chan_tx = NULL;
525 if (host->chan_rx) {
526 struct dma_chan *chan = host->chan_rx;
527 host->chan_rx = NULL;
532 static void usdhi6_dma_stop_unmap(struct usdhi6_host *host)
534 struct mmc_data *data = host->mrq->data;
536 if (!host->dma_active)
539 usdhi6_write(host, USDHI6_CC_EXT_MODE, 0);
540 host->dma_active = false;
543 dma_unmap_sg(host->chan_rx->device->dev, data->sg,
546 dma_unmap_sg(host->chan_tx->device->dev, data->sg,
552 struct usdhi6_host *host = arg;
553 struct mmc_request *mrq = host->mrq;
556 dev_name(mmc_dev(host->mmc)), mrq))
559 dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u DMA completed\n", __func__,
562 usdhi6_dma_stop_unmap(host);
563 usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ);
566 static int usdhi6_dma_setup(struct usdhi6_host *host, struct dma_chan *chan,
569 struct mmc_data *data = host->mrq->data;
589 host->dma_active = true;
596 desc->callback_param = host;
600 dev_dbg(mmc_dev(host->mmc), "%s(): mapped %d -> %d, cookie %d @ %p\n",
607 usdhi6_dma_release(host);
608 dev_warn(mmc_dev(host->mmc),
615 static int usdhi6_dma_start(struct usdhi6_host *host)
617 if (!host->chan_rx || !host->chan_tx)
620 if (host->mrq->data->flags & MMC_DATA_READ)
621 return usdhi6_dma_setup(host, host->chan_rx, DMA_DEV_TO_MEM);
623 return usdhi6_dma_setup(host, host->chan_tx, DMA_MEM_TO_DEV);
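
usdhi6_dma_start() only uses DMA when both channels were obtained, and picks the RX channel (DMA_DEV_TO_MEM) for reads and the TX channel (DMA_MEM_TO_DEV) for writes. A user-space sketch of that selection; the enum values and the MMC_DATA_READ stand-in are hypothetical:

#include <stdio.h>

/* Hypothetical stand-ins for the dmaengine transfer directions. */
enum mock_dir { MOCK_DEV_TO_MEM, MOCK_MEM_TO_DEV };
#define MOCK_MMC_DATA_READ 0x1

/* usdhi6_dma_start() pattern: a read uses the RX channel (device-to-memory),
 * a write the TX channel (memory-to-device); without both channels the
 * driver falls back to PIO. */
static int pick_dma_dir(int have_rx, int have_tx, unsigned int data_flags,
			enum mock_dir *dir)
{
	if (!have_rx || !have_tx)
		return -1;	/* no DMA, caller uses PIO */
	*dir = (data_flags & MOCK_MMC_DATA_READ) ? MOCK_DEV_TO_MEM : MOCK_MEM_TO_DEV;
	return 0;
}

int main(void)
{
	enum mock_dir dir;

	if (pick_dma_dir(1, 1, MOCK_MMC_DATA_READ, &dir) == 0)
		printf("read -> %s\n",
		       dir == MOCK_DEV_TO_MEM ? "DEV_TO_MEM" : "MEM_TO_DEV");
	return 0;
}
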
626 static void usdhi6_dma_kill(struct usdhi6_host *host)
628 struct mmc_data *data = host->mrq->data;
630 dev_dbg(mmc_dev(host->mmc), "%s(): SG of %u: %ux%u\n",
634 dmaengine_terminate_sync(host->chan_rx);
636 dmaengine_terminate_sync(host->chan_tx);
639 static void usdhi6_dma_check_error(struct usdhi6_host *host)
641 struct mmc_data *data = host->mrq->data;
643 dev_dbg(mmc_dev(host->mmc), "%s(): IO error %d, status 0x%x\n",
644 __func__, host->io_error, usdhi6_read(host, USDHI6_SD_INFO1));
646 if (host->io_error) {
647 data->error = usdhi6_error_code(host);
649 usdhi6_dma_kill(host);
650 usdhi6_dma_release(host);
651 dev_warn(mmc_dev(host->mmc),
661 if (host->irq_status & USDHI6_SD_INFO1_RSP_END)
662 dev_warn(mmc_dev(host->mmc), "Unexpected response received!\n");
665 static void usdhi6_dma_kick(struct usdhi6_host *host)
667 if (host->mrq->data->flags & MMC_DATA_READ)
668 dma_async_issue_pending(host->chan_rx);
670 dma_async_issue_pending(host->chan_tx);
673 static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start)
681 host->chan_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
682 dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__,
683 host->chan_tx);
685 if (IS_ERR(host->chan_tx)) {
686 host->chan_tx = NULL;
694 ret = dmaengine_slave_config(host->chan_tx, &cfg);
698 host->chan_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
699 dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__,
700 host->chan_rx);
702 if (IS_ERR(host->chan_rx)) {
703 host->chan_rx = NULL;
711 ret = dmaengine_slave_config(host->chan_rx, &cfg);
718 dma_release_channel(host->chan_rx);
719 host->chan_rx = NULL;
721 dma_release_channel(host->chan_tx);
722 host->chan_tx = NULL;
727 static void usdhi6_clk_set(struct usdhi6_host *host, struct mmc_ios *ios)
734 if (usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_SCLKDIVEN)
740 dev_err(mmc_dev(host->mmc), "SD bus busy, clock set aborted\n");
744 val = usdhi6_read(host, USDHI6_SD_CLK_CTRL) & ~USDHI6_SD_CLK_CTRL_DIV_MASK;
749 if (host->imclk <= rate) {
752 new_rate = host->imclk;
755 new_rate = host->imclk / 2;
759 roundup_pow_of_two(DIV_ROUND_UP(host->imclk, rate));
761 new_rate = host->imclk / div;
764 if (host->rate == new_rate)
767 host->rate = new_rate;
769 dev_dbg(mmc_dev(host->mmc), "target %lu, div %u, set %lu\n",
777 if (host->imclk == rate || host->imclk == host->rate || !rate)
778 usdhi6_write(host, USDHI6_SD_CLK_CTRL,
782 host->rate = 0;
786 usdhi6_write(host, USDHI6_SD_CLK_CTRL, val);
788 if (host->imclk == rate || host->imclk == host->rate ||
790 usdhi6_write(host, USDHI6_SD_CLK_CTRL,
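
For a requested rate below the module clock, usdhi6_clk_set() picks the divider as roundup_pow_of_two(DIV_ROUND_UP(imclk, rate)) and programs imclk / div; when imclk is already at or below the request the clock runs undivided (the imclk / 2 branch shown above is omitted in this sketch). A standalone sketch of that calculation with local equivalents of the kernel helpers:

#include <stdio.h>

/* Local equivalents of DIV_ROUND_UP() and roundup_pow_of_two(). */
static unsigned long div_round_up(unsigned long n, unsigned long d)
{
	return (n + d - 1) / d;
}

static unsigned long roundup_pow_of_two_ul(unsigned long v)
{
	unsigned long p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

/* Divider selection from usdhi6_clk_set(): divide imclk by the next power of
 * two so that the resulting rate does not exceed the request. */
static unsigned long usdhi6_pick_rate(unsigned long imclk, unsigned long rate)
{
	unsigned long div;

	if (imclk <= rate)
		return imclk;	/* run at the module clock (simplified) */

	div = roundup_pow_of_two_ul(div_round_up(imclk, rate));
	return imclk / div;
}

int main(void)
{
	/* e.g. a 100 MHz module clock and a 25 MHz request -> divider 4. */
	printf("%lu\n", usdhi6_pick_rate(100000000UL, 25000000UL));
	return 0;
}
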
794 static void usdhi6_set_power(struct usdhi6_host *host, struct mmc_ios *ios)
796 struct mmc_host *mmc = host->mmc;
804 static int usdhi6_reset(struct usdhi6_host *host)
808 usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED);
810 usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED | USDHI6_SOFT_RST_RESET);
812 if (usdhi6_read(host, USDHI6_SOFT_RST) & USDHI6_SOFT_RST_RESET)
820 struct usdhi6_host *host = mmc_priv(mmc);
829 usdhi6_set_power(host, ios);
830 usdhi6_only_cd(host);
837 ret = usdhi6_reset(host);
841 usdhi6_set_power(host, ios);
842 usdhi6_only_cd(host);
846 option = usdhi6_read(host, USDHI6_SD_OPTION);
862 usdhi6_write(host, USDHI6_SD_OPTION, option);
863 usdhi6_write(host, USDHI6_SDIF_MODE, mode);
867 if (host->rate != ios->clock)
868 usdhi6_clk_set(host, ios);
872 static void usdhi6_timeout_set(struct usdhi6_host *host)
874 struct mmc_request *mrq = host->mrq;
879 ticks = host->rate / 1000 * mrq->cmd->busy_timeout;
881 ticks = host->rate / 1000000 * (mrq->data->timeout_ns / 1000) +
893 dev_dbg(mmc_dev(host->mmc), "Set %s timeout %lu ticks @ %lu Hz\n",
894 mrq->data ? "data" : "cmd", ticks, host->rate);
897 usdhi6_write(host, USDHI6_SD_OPTION, (val << USDHI6_SD_OPTION_TIMEOUT_SHIFT) |
898 (usdhi6_read(host, USDHI6_SD_OPTION) & ~USDHI6_SD_OPTION_TIMEOUT_MASK));
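
The command-only case above computes the timeout in SD-clock ticks as rate / 1000 * busy_timeout (busy_timeout being in milliseconds); the data case additionally folds in timeout_ns on a continuation line that does not appear in this listing. A trivial sketch of the command-path arithmetic:

#include <stdio.h>

/* ticks = (clock ticks per millisecond) * busy timeout in milliseconds. */
static unsigned long cmd_timeout_ticks(unsigned long rate_hz,
				       unsigned int busy_timeout_ms)
{
	return rate_hz / 1000 * busy_timeout_ms;
}

int main(void)
{
	/* 25 MHz SD clock, 500 ms busy timeout -> 12,500,000 ticks. */
	printf("%lu ticks\n", cmd_timeout_ticks(25000000UL, 500));
	return 0;
}
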
901 static void usdhi6_request_done(struct usdhi6_host *host)
903 struct mmc_request *mrq = host->mrq;
906 if (WARN(host->pg.page || host->head_pg.page,
908 host->pg.page, host->head_pg.page, host->wait, mrq->cmd->opcode,
910 data ? host->offset : 0, data ? data->blocks : 0,
912 usdhi6_sg_unmap(host, true);
917 dev_dbg(mmc_dev(host->mmc), "%s(CMD%d: %ux%u): err %d %d %d\n",
925 usdhi6_write(host, USDHI6_CC_EXT_MODE, 0);
926 host->wait = USDHI6_WAIT_FOR_REQUEST;
927 host->mrq = NULL;
929 mmc_request_done(host->mmc, mrq);
932 static int usdhi6_cmd_flags(struct usdhi6_host *host)
934 struct mmc_request *mrq = host->mrq;
938 if (host->app_cmd) {
939 host->app_cmd = false;
975 dev_warn(mmc_dev(host->mmc),
985 static int usdhi6_rq_start(struct usdhi6_host *host)
987 struct mmc_request *mrq = host->mrq;
990 int opc = usdhi6_cmd_flags(host);
997 if (!(usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_CBSY))
1003 dev_dbg(mmc_dev(host->mmc), "Command active, request aborted\n");
1011 host->page_idx = 0;
1034 dev_warn(mmc_dev(host->mmc), "%s(): %u blocks of %u bytes\n",
1043 usdhi6_sg_prep(host);
1045 usdhi6_write(host, USDHI6_SD_SIZE, data->blksz);
1051 dev_dbg(mmc_dev(host->mmc),
1058 usdhi6_dma_start(host) >= DMA_MIN_COOKIE;
1061 usdhi6_write(host, USDHI6_CC_EXT_MODE, USDHI6_CC_EXT_MODE_SDRW);
1063 dev_dbg(mmc_dev(host->mmc),
1070 dev_dbg(mmc_dev(host->mmc), "%s(): request opcode %u\n",
1075 usdhi6_wait_for_resp(host);
1077 host->wait = USDHI6_WAIT_FOR_CMD;
1078 schedule_delayed_work(&host->timeout_work, host->timeout);
1081 usdhi6_write(host, USDHI6_SD_STOP,
1083 usdhi6_write(host, USDHI6_SD_ARG, cmd->arg);
1086 usdhi6_write(host, USDHI6_SD_CMD, opc);
1093 struct usdhi6_host *host = mmc_priv(mmc);
1096 cancel_delayed_work_sync(&host->timeout_work);
1098 host->mrq = mrq;
1099 host->sg = NULL;
1101 usdhi6_timeout_set(host);
1102 ret = usdhi6_rq_start(host);
1105 usdhi6_request_done(host);
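
Before issuing a new command, usdhi6_rq_start() checks that the CBSY bit in SD_INFO2 is clear and otherwise aborts with "Command active, request aborted". A user-space sketch of that guard as a bounded poll; the retry bound, the bit value and the mock register read are assumptions:

#include <stdio.h>

#define MOCK_CBSY 0x4000	/* hypothetical "command busy" bit */

/* Hypothetical register read; in the driver this is
 * usdhi6_read(host, USDHI6_SD_INFO2). */
static unsigned int mock_read_info2(void)
{
	static int calls;

	return ++calls < 3 ? MOCK_CBSY : 0;	/* busy twice, then idle */
}

/* Busy guard sketched from usdhi6_rq_start(): a new command may only be
 * written once CBSY clears; after a bounded number of tries the request is
 * aborted (the bound is an assumption). */
static int wait_cmd_idle(int max_tries)
{
	while (max_tries--) {
		if (!(mock_read_info2() & MOCK_CBSY))
			return 0;	/* idle, safe to write SD_CMD */
	}
	printf("Command active, request aborted\n");
	return -1;
}

int main(void)
{
	return wait_cmd_idle(10);
}
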
1111 struct usdhi6_host *host = mmc_priv(mmc);
1113 u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_CD;
1127 struct usdhi6_host *host = mmc_priv(mmc);
1129 u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_WP;
1143 struct usdhi6_host *host = mmc_priv(mmc);
1148 host->sdio_mask = USDHI6_SDIO_INFO1_IRQ & ~USDHI6_SDIO_INFO1_IOIRQ;
1149 usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, host->sdio_mask);
1150 usdhi6_write(host, USDHI6_SDIO_MODE, 1);
1152 usdhi6_write(host, USDHI6_SDIO_MODE, 0);
1153 usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, USDHI6_SDIO_INFO1_IRQ);
1154 host->sdio_mask = USDHI6_SDIO_INFO1_IRQ;
1158 static int usdhi6_set_pinstates(struct usdhi6_host *host, int voltage)
1160 if (IS_ERR(host->pins_uhs))
1166 return pinctrl_select_state(host->pinctrl,
1167 host->pins_uhs);
1170 return pinctrl_select_default_state(mmc_dev(host->mmc));
1191 struct usdhi6_host *host = mmc_priv(mmc);
1192 u32 tmp = usdhi6_read(host, USDHI6_SD_INFO2);
1210 static void usdhi6_resp_cmd12(struct usdhi6_host *host)
1212 struct mmc_command *cmd = host->mrq->stop;
1213 cmd->resp[0] = usdhi6_read(host, USDHI6_SD_RSP10);
1216 static void usdhi6_resp_read(struct usdhi6_host *host)
1218 struct mmc_command *cmd = host->mrq->cmd;
1239 if (!(host->irq_status & USDHI6_SD_INFO1_RSP_END)) {
1240 dev_err(mmc_dev(host->mmc),
1249 tmp = usdhi6_read(host, USDHI6_SD_RSP10 + i * 8);
1255 rsp[0] = usdhi6_read(host, USDHI6_SD_RSP54);
1257 rsp[0] = usdhi6_read(host, USDHI6_SD_RSP10);
1259 dev_dbg(mmc_dev(host->mmc), "Response 0x%x\n", rsp[0]);
1262 static int usdhi6_blk_read(struct usdhi6_host *host)
1264 struct mmc_data *data = host->mrq->data;
1268 if (host->io_error) {
1269 data->error = usdhi6_error_code(host);
1273 if (host->pg.page) {
1274 p = host->blk_page + host->offset;
1276 p = usdhi6_sg_map(host);
1284 *p = usdhi6_read(host, USDHI6_SD_BUF0);
1288 u16 d = usdhi6_read16(host, USDHI6_SD_BUF0);
1297 dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error);
1298 host->wait = USDHI6_WAIT_FOR_REQUEST;
1302 static int usdhi6_blk_write(struct usdhi6_host *host)
1304 struct mmc_data *data = host->mrq->data;
1308 if (host->io_error) {
1309 data->error = usdhi6_error_code(host);
1313 if (host->pg.page) {
1314 p = host->blk_page + host->offset;
1316 p = usdhi6_sg_map(host);
1324 usdhi6_write(host, USDHI6_SD_BUF0, *p);
1334 usdhi6_write16(host, USDHI6_SD_BUF0, d);
1340 dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error);
1341 host->wait = USDHI6_WAIT_FOR_REQUEST;
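
The PIO paths in usdhi6_blk_read()/usdhi6_blk_write() move data through the SD_BUF0 register in 32-bit words, with a 16-bit access for a short tail. A sketch of the read direction under that assumption (only the two access widths appear in the listing; the loop bounds are inferred, and the FIFO is mocked):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical FIFO source standing in for the USDHI6_SD_BUF0 register. */
static uint32_t mock_fifo_pop32(void) { static uint32_t v; return v += 0x01010101; }
static uint16_t mock_fifo_pop16(void) { return 0xbeef; }

/* PIO read pattern suggested by usdhi6_blk_read(): whole 32-bit words from
 * the data FIFO, then one 16-bit access if a 2-byte tail remains. */
static void pio_read_block(uint8_t *dst, size_t blksz)
{
	size_t i;

	for (i = 0; i + 4 <= blksz; i += 4) {
		uint32_t w = mock_fifo_pop32();
		memcpy(dst + i, &w, 4);
	}
	if (blksz - i >= 2) {
		uint16_t h = mock_fifo_pop16();
		memcpy(dst + i, &h, 2);
	}
}

int main(void)
{
	uint8_t blk[6];

	pio_read_block(blk, sizeof(blk));
	printf("%02x %02x\n", (unsigned int)blk[0], (unsigned int)blk[4]);
	return 0;
}
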
1345 static int usdhi6_stop_cmd(struct usdhi6_host *host)
1347 struct mmc_request *mrq = host->mrq;
1353 host->wait = USDHI6_WAIT_FOR_STOP;
1358 dev_err(mmc_dev(host->mmc),
1367 static bool usdhi6_end_cmd(struct usdhi6_host *host)
1369 struct mmc_request *mrq = host->mrq;
1372 if (host->io_error) {
1373 cmd->error = usdhi6_error_code(host);
1377 usdhi6_resp_read(host);
1382 if (host->dma_active) {
1383 usdhi6_dma_kick(host);
1385 host->wait = USDHI6_WAIT_FOR_DMA;
1386 else if (usdhi6_stop_cmd(host) < 0)
1392 host->wait = USDHI6_WAIT_FOR_MREAD;
1394 host->wait = USDHI6_WAIT_FOR_READ;
1399 host->wait = USDHI6_WAIT_FOR_MWRITE;
1401 host->wait = USDHI6_WAIT_FOR_WRITE;
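
After the command phase, usdhi6_end_cmd() selects the next wait state: WAIT_FOR_DMA when a DMA descriptor is active, otherwise one of the single- or multi-block read/write PIO states. A small sketch of that choice; the single-versus-multi condition is inferred from the state names, and the enum values are arbitrary:

#include <stdio.h>

/* Wait states named on the lines above (values here are arbitrary). */
enum mock_wait {
	WAIT_FOR_READ, WAIT_FOR_MREAD, WAIT_FOR_WRITE, WAIT_FOR_MWRITE,
	WAIT_FOR_DMA,
};

/* Next-state choice sketched from usdhi6_end_cmd(): DMA transfers wait for
 * DMA completion, PIO transfers pick a single- or multi-block state
 * depending on the request and its direction. */
static enum mock_wait next_wait(int dma_active, int is_read, int multi_block)
{
	if (dma_active)
		return WAIT_FOR_DMA;
	if (is_read)
		return multi_block ? WAIT_FOR_MREAD : WAIT_FOR_READ;
	return multi_block ? WAIT_FOR_MWRITE : WAIT_FOR_WRITE;
}

int main(void)
{
	printf("%d\n", next_wait(0, 1, 1));	/* PIO multi-block read */
	return 0;
}
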
1407 static bool usdhi6_read_block(struct usdhi6_host *host)
1410 int ret = usdhi6_blk_read(host);
1414 * cross-page, in which case for single-block IO host->page_idx == 0.
1417 usdhi6_sg_unmap(host, true);
1422 host->wait = USDHI6_WAIT_FOR_DATA_END;
1426 static bool usdhi6_mread_block(struct usdhi6_host *host)
1428 int ret = usdhi6_blk_read(host);
1433 usdhi6_sg_advance(host);
1435 return !host->mrq->data->error &&
1436 (host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop);
1439 static bool usdhi6_write_block(struct usdhi6_host *host)
1441 int ret = usdhi6_blk_write(host);
1444 usdhi6_sg_unmap(host, true);
1449 host->wait = USDHI6_WAIT_FOR_DATA_END;
1453 static bool usdhi6_mwrite_block(struct usdhi6_host *host)
1455 int ret = usdhi6_blk_write(host);
1460 usdhi6_sg_advance(host);
1462 return !host->mrq->data->error &&
1463 (host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop);
1470 struct usdhi6_host *host = dev_id;
1476 cancel_delayed_work_sync(&host->timeout_work);
1478 mrq = host->mrq;
1485 switch (host->wait) {
1491 io_wait = usdhi6_end_cmd(host);
1495 io_wait = usdhi6_mread_block(host);
1499 io_wait = usdhi6_read_block(host);
1503 io_wait = usdhi6_mwrite_block(host);
1507 io_wait = usdhi6_write_block(host);
1510 usdhi6_dma_check_error(host);
1513 usdhi6_write(host, USDHI6_SD_STOP, 0);
1514 if (host->io_error) {
1515 int ret = usdhi6_error_code(host);
1520 dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__, ret);
1523 usdhi6_resp_cmd12(host);
1527 if (host->io_error) {
1528 mrq->data->error = usdhi6_error_code(host);
1529 dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__,
1535 dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
1536 usdhi6_request_done(host);
1541 schedule_delayed_work(&host->timeout_work, host->timeout);
1543 if (!host->dma_active)
1544 usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ);
1551 if (host->wait != USDHI6_WAIT_FOR_STOP &&
1552 host->mrq->stop &&
1553 !host->mrq->stop->error &&
1554 !usdhi6_stop_cmd(host)) {
1556 usdhi6_wait_for_resp(host);
1558 schedule_delayed_work(&host->timeout_work,
1559 host->timeout);
1567 dev_warn(mmc_dev(host->mmc), "%s(): data error %d\n",
1569 usdhi6_sg_unmap(host, true);
1572 host->app_cmd = true;
1576 usdhi6_request_done(host);
1583 struct usdhi6_host *host = dev_id;
1586 status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask &
1588 status2 = usdhi6_read(host, USDHI6_SD_INFO2) & ~host->status2_mask;
1590 usdhi6_only_cd(host);
1592 dev_dbg(mmc_dev(host->mmc),
1602 usdhi6_write(host, USDHI6_SD_INFO1,
1610 usdhi6_write(host, USDHI6_SD_INFO2,
1614 host->io_error = error;
1615 host->irq_status = status;
1619 if (host->wait != USDHI6_WAIT_FOR_CMD ||
1621 dev_warn(mmc_dev(host->mmc),
1625 dev_dbg(mmc_dev(host->mmc),
1635 struct usdhi6_host *host = dev_id;
1636 u32 status = usdhi6_read(host, USDHI6_SDIO_INFO1) & ~host->sdio_mask;
1638 dev_dbg(mmc_dev(host->mmc), "%s(): status 0x%x\n", __func__, status);
1643 usdhi6_write(host, USDHI6_SDIO_INFO1, ~status);
1645 mmc_signal_sdio_irq(host->mmc);
1652 struct usdhi6_host *host = dev_id;
1653 struct mmc_host *mmc = host->mmc;
1657 status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask &
1664 usdhi6_write(host, USDHI6_SD_INFO1, ~status);
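
The interrupt handlers above follow one bookkeeping pattern: the pending set is the raw status filtered by the software mask (status_mask/status2_mask/sdio_mask) and the valid-IRQ set, and handled bits are acknowledged by writing back their complement (the ~status writes). A sketch of that arithmetic, assuming write-0-to-clear acknowledge semantics and made-up bit values:

#include <stdint.h>
#include <stdio.h>

/* pending = raw status, minus software-masked bits, limited to valid bits. */
static uint32_t pending_irqs(uint32_t raw, uint32_t sw_mask, uint32_t valid)
{
	return raw & ~sw_mask & valid;
}

/* Acknowledge value as suggested by the ~status writes in the handlers. */
static uint32_t ack_value(uint32_t status)
{
	return ~status;
}

int main(void)
{
	uint32_t raw = 0x00000105, sw_mask = 0x00000100, valid = 0x0000031d;
	uint32_t st = pending_irqs(raw, sw_mask, valid);

	printf("pending 0x%x, ack 0x%x\n", st, ack_value(st));
	return 0;
}
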
1684 struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work);
1685 struct mmc_request *mrq = host->mrq;
1689 dev_warn(mmc_dev(host->mmc),
1691 host->dma_active ? "DMA" : "PIO",
1692 host->wait, mrq ? mrq->cmd->opcode : -1,
1693 usdhi6_read(host, USDHI6_SD_INFO1),
1694 usdhi6_read(host, USDHI6_SD_INFO2), host->irq_status);
1696 if (host->dma_active) {
1697 usdhi6_dma_kill(host);
1698 usdhi6_dma_stop_unmap(host);
1701 switch (host->wait) {
1703 dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
1706 usdhi6_error_code(host);
1711 usdhi6_error_code(host);
1719 sg = host->sg ?: data->sg;
1720 dev_dbg(mmc_dev(host->mmc),
1722 data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx,
1723 host->offset, data->blocks, data->blksz, data->sg_len,
1725 usdhi6_sg_unmap(host, true);
1728 usdhi6_error_code(host);
1733 usdhi6_request_done(host);
1748 struct usdhi6_host *host;
1777 host = mmc_priv(mmc);
1778 host->mmc = mmc;
1779 host->wait = USDHI6_WAIT_FOR_REQUEST;
1780 host->timeout = msecs_to_jiffies(USDHI6_REQ_TIMEOUT_MS);
1787 host->pinctrl = devm_pinctrl_get(&pdev->dev);
1788 if (IS_ERR(host->pinctrl)) {
1789 ret = PTR_ERR(host->pinctrl);
1793 host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");
1795 host->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
1796 if (IS_ERR(host->base)) {
1797 ret = PTR_ERR(host->base);
1801 host->clk = devm_clk_get(dev, NULL);
1802 if (IS_ERR(host->clk)) {
1803 ret = PTR_ERR(host->clk);
1807 host->imclk = clk_get_rate(host->clk);
1809 ret = clk_prepare_enable(host->clk);
1813 version = usdhi6_read(host, USDHI6_VERSION);
1820 dev_info(dev, "A USDHI6ROL0 SD host detected with %d ports\n",
1821 usdhi6_read(host, USDHI6_SD_PORT_SEL) >> USDHI6_SD_PORT_SEL_PORTS_SHIFT);
1823 usdhi6_mask_all(host);
1827 dev_name(dev), host);
1835 dev_name(dev), host);
1840 dev_name(dev), host);
1844 INIT_DELAYED_WORK(&host->timeout_work, usdhi6_timeout_work);
1846 usdhi6_dma_request(host, res->start);
1866 mmc->f_max = host->imclk;
1867 mmc->f_min = host->imclk / 512;
1869 platform_set_drvdata(pdev, host);
1878 usdhi6_dma_release(host);
1880 clk_disable_unprepare(host->clk);
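
probe() derives the MMC frequency window directly from the module clock: f_max = imclk and f_min = imclk / 512. A trivial check of that arithmetic; the clock value below is only an example:

#include <stdio.h>

int main(void)
{
	unsigned long imclk = 100000000UL;	/* example module clock */

	printf("f_max=%lu f_min=%lu\n", imclk, imclk / 512);
	return 0;
}
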
1889 struct usdhi6_host *host = platform_get_drvdata(pdev);
1891 mmc_remove_host(host->mmc);
1893 usdhi6_mask_all(host);
1894 cancel_delayed_work_sync(&host->timeout_work);
1895 usdhi6_dma_release(host);
1896 clk_disable_unprepare(host->clk);
1897 mmc_free_host(host->mmc);
1912 MODULE_DESCRIPTION("Renesas usdhi6rol0 SD/SDIO host driver");