1// Copyright 2018 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5// Standard Includes
6#include <assert.h>
7#include <fcntl.h>
8#include <inttypes.h>
9#include <stdio.h>
10#include <stdlib.h>
11#include <string.h>
12#include <threads.h>
13#include <unistd.h>
14
15// DDK Includes
16#include <ddk/binding.h>
17#include <ddk/device.h>
18#include <ddk/debug.h>
19#include <ddk/io-buffer.h>
20#include <ddk/mmio-buffer.h>
21#include <ddk/phys-iter.h>
22#include <ddk/protocol/platform-defs.h>
23#include <ddk/protocol/gpio.h>
24#include <ddk/protocol/platform-device.h>
25#include <ddk/protocol/platform-bus.h>
26#include <ddk/protocol/sdmmc.h>
27#include <ddk/protocol/sdhci.h>
28#include <hw/reg.h>
29#include <hw/sdmmc.h>
30#include <zircon/types.h>
31
32
33// Zircon Includes
34#include <zircon/threads.h>
35#include <zircon/assert.h>
36#include <lib/sync/completion.h>
37#include <pretty/hexdump.h>
38
39#include "imx-sdhci.h"
40
// Uncomment to poll the controller for completion instead of using interrupts
42// #define ENABLE_POLLING
43
// DMA mode is currently disabled; comment out DISABLE_DMA to enable it
45#define DISABLE_DMA
46
47// Uncomment to print logs at all levels
48// #define SDHCI_LOG_ALL 1
49
50#ifdef SDHCI_LOG_ALL
51#define SDHCI_ERROR(fmt, ...)       zxlogf(ERROR, "[%s %d]" fmt, __func__, __LINE__, ##__VA_ARGS__)
52#define SDHCI_INFO(fmt, ...)        zxlogf(ERROR, "[%s %d]" fmt, __func__, __LINE__, ##__VA_ARGS__)
53#define SDHCI_TRACE(fmt, ...)       zxlogf(ERROR, "[%s %d]" fmt, __func__, __LINE__, ##__VA_ARGS__)
54#define SDHCI_FUNC_ENTRY_LOG        zxlogf(ERROR, "[%s %d]\n", __func__, __LINE__)
55#else
56#define SDHCI_ERROR(fmt, ...)       zxlogf(ERROR, "[%s %d]" fmt, __func__, __LINE__, ##__VA_ARGS__)
57#define SDHCI_INFO(fmt, ...)        zxlogf(INFO, "[%s %d]" fmt, __func__, __LINE__, ##__VA_ARGS__)
58#define SDHCI_TRACE(fmt, ...)       zxlogf(TRACE, "[%s %d]" fmt, __func__, __LINE__, ##__VA_ARGS__)
59#define SDHCI_FUNC_ENTRY_LOG        zxlogf(TRACE, "[%s %d]\n", __func__, __LINE__)
60#endif
61
62#define PAGE_MASK   (PAGE_SIZE - 1ull)
63#define SD_FREQ_SETUP_HZ  400000
64#define MAX_TUNING_COUNT 40
65
// ADMA2 descriptor table entry. The layout is hardware-defined and must stay
// exactly 8 bytes (see the static_assert below).
// NOTE(review): despite the "64" in the name, the single 32-bit address field
// matches the ADMA2 32-bit descriptor format — confirm against the SD Host
// Controller spec / i.MX reference manual.
typedef struct sdhci_adma64_desc {
    union {
        struct {
            uint8_t valid : 1;  // descriptor is valid and may be executed
            uint8_t end   : 1;  // last descriptor of the table
            uint8_t intr  : 1;  // raise an interrupt when this descriptor completes
            uint8_t rsvd0 : 1;
            uint8_t act1  : 1;  // action bits: act2=1, act1=0 selects "transfer data"
            uint8_t act2  : 1;
            uint8_t rsvd1 : 2;
            uint8_t rsvd2;
        } __PACKED;
        uint16_t attr;          // raw access to all attribute bits at once
    } __PACKED;
    uint16_t length;            // transfer length in bytes (0 encodes 0x10000)
    uint32_t address;           // physical address of the data buffer
} __PACKED sdhci_adma64_desc_t;

static_assert(sizeof(sdhci_adma64_desc_t) == 8, "unexpected ADMA2 descriptor size");
85
86// 64k - 1 is max per descriptor, we operate on pages so we use 64K - PAGE_SIZE
87#define ADMA2_DESC_MAX_LENGTH   (0x10000 - PAGE_SIZE)
88// for 2M max transfer size for fully discontiguous
89// also see SDMMC_PAGES_COUNT in ddk/protocol/sdmmc.h
90#define DMA_DESC_COUNT          512
91
92
93
// TODO: Get base clock from hardware registers
95#define IMX8M_SDHCI_BASE_CLOCK  200000000
96
// Per-controller driver context; one instance per bound device.
typedef struct imx_sdhci_device {
    platform_device_protocol_t  pdev;               // Platform-device protocol (resources)
    platform_bus_protocol_t     pbus;               // Platform-bus protocol
    zx_device_t*                zxdev;              // Our device node
    mmio_buffer_t               mmios;              // Mapping backing |regs|
    zx_handle_t                 irq_handle;         // Controller interrupt
    gpio_protocol_t             gpio;               // GPIO protocol handle
    volatile imx_sdhci_regs_t*  regs;               // Mapped eSDHC register block
    zx_handle_t                 bti_handle;         // BTI used to pin DMA buffers

    // DMA descriptors
    io_buffer_t                 iobuf;              // Backing store for the ADMA2 table
    sdhci_adma64_desc_t*        descs;              // Virtual address of the ADMA2 table

    mtx_t                       mtx;                // Held when a command or action is in progress.
    sdmmc_req_t*                cmd_req;            // Current command request
    sdmmc_req_t*                data_req;           // Current data line request
    uint16_t                    data_blockid;       // Current block id to transfer (PIO)
    bool                        data_done;          // Set to true if the data stage completed
                                                    // before the cmd stage
    sync_completion_t                req_completion;     // used to signal request complete
    sdmmc_host_info_t           info;               // Controller info
    uint32_t                    base_clock;         // Base clock rate
    bool                        ddr_mode;           // DDR Mode enable flag
    bool                        dma_mode;           // Flag used to switch between dma and pio mode
} imx_sdhci_device_t;
123
// Interrupt status bits that indicate a failed transfer; any of these
// triggers imx_sdhci_error_recovery_locked().
static const uint32_t error_interrupts = (
    IMX_SDHC_INT_STAT_DMAE  |
    IMX_SDHC_INT_STAT_TNE   |
    IMX_SDHC_INT_STAT_AC12E |
    IMX_SDHC_INT_STAT_DEBE  |
    IMX_SDHC_INT_STAT_DCE   |
    IMX_SDHC_INT_STAT_DTOE  |
    IMX_SDHC_INT_STAT_CIE   |
    IMX_SDHC_INT_STAT_CEBE  |
    IMX_SDHC_INT_STAT_CCE   |
    IMX_SDHC_INT_STAT_CTOE
);

// Status bits used in PIO mode: buffer-ready events plus completions.
static const uint32_t normal_interrupts = (
    IMX_SDHC_INT_STAT_BRR   |
    IMX_SDHC_INT_STAT_BWR   |
    IMX_SDHC_INT_STAT_TC    |
    IMX_SDHC_INT_STAT_CC
);

// In DMA mode the controller moves the data itself, so only command/transfer
// completion events are needed (no BRR/BWR buffer-ready interrupts).
static const uint32_t dma_normal_interrupts = (
    IMX_SDHC_INT_STAT_TC    |
    IMX_SDHC_INT_STAT_CC
);
148
149static void esdhc_dump(imx_sdhci_device_t* dev)
150{
151    SDHCI_ERROR("#######################\n");
152    SDHCI_ERROR("Dumping Registers\n\n");
153    SDHCI_ERROR("    ds_addr = 0x%x\n", readl(&dev->regs->ds_addr));
154    SDHCI_ERROR("    blk_att = 0x%x\n", readl(&dev->regs->blk_att));
155    SDHCI_ERROR("    cmd_arg = 0x%x\n", readl(&dev->regs->cmd_arg));
156    SDHCI_ERROR("    cmd_xfr_typ = 0x%x\n", readl(&dev->regs->cmd_xfr_typ));
157    SDHCI_ERROR("    cmd_rsp0 = 0x%x\n", readl(&dev->regs->cmd_rsp0));
158    SDHCI_ERROR("    cmd_rsp1 = 0x%x\n", readl(&dev->regs->cmd_rsp1));
159    SDHCI_ERROR("    cmd_rsp2 = 0x%x\n", readl(&dev->regs->cmd_rsp2));
160    SDHCI_ERROR("    cmd_rsp3 = 0x%x\n", readl(&dev->regs->cmd_rsp3));
161    SDHCI_ERROR("    data_buff_acc_port = 0x%x\n", readl(&dev->regs->data_buff_acc_port));
162    SDHCI_ERROR("    pres_state = 0x%x\n", readl(&dev->regs->pres_state));
163    SDHCI_ERROR("    prot_ctrl = 0x%x\n", readl(&dev->regs->prot_ctrl));
164    SDHCI_ERROR("    sys_ctrl = 0x%x\n", readl(&dev->regs->sys_ctrl));
165    SDHCI_ERROR("    int_status = 0x%x\n", readl(&dev->regs->int_status));
166    SDHCI_ERROR("    int_status_en = 0x%x\n", readl(&dev->regs->int_status_en));
167    SDHCI_ERROR("    int_signal_en = 0x%x\n", readl(&dev->regs->int_signal_en));
168    SDHCI_ERROR("    autocmd12_err_status = 0x%x\n", readl(&dev->regs->autocmd12_err_status));
169    SDHCI_ERROR("    host_ctrl_cap = 0x%x\n", readl(&dev->regs->host_ctrl_cap));
170    SDHCI_ERROR("    wtmk_lvl = 0x%x\n", readl(&dev->regs->wtmk_lvl));
171    SDHCI_ERROR("    mix_ctrl = 0x%x\n", readl(&dev->regs->mix_ctrl));
172    SDHCI_ERROR("    force_event = 0x%x\n", readl(&dev->regs->force_event));
173    SDHCI_ERROR("    adma_err_status = 0x%x\n", readl(&dev->regs->adma_err_status));
174    SDHCI_ERROR("    adma_sys_addr = 0x%x\n", readl(&dev->regs->adma_sys_addr));
175    SDHCI_ERROR("    dll_ctrl = 0x%x\n", readl(&dev->regs->dll_ctrl));
176    SDHCI_ERROR("    dll_status = 0x%x\n", readl(&dev->regs->dll_status));
177    SDHCI_ERROR("    clk_tune_ctrl_status = 0x%x\n", readl(&dev->regs->clk_tune_ctrl_status));
178    SDHCI_ERROR("    strobe_dll_ctrl = 0x%x\n", readl(&dev->regs->strobe_dll_ctrl));
179    SDHCI_ERROR("    strobe_dll_status = 0x%x\n", readl(&dev->regs->strobe_dll_status));
180    SDHCI_ERROR("    vend_spec = 0x%x\n", readl(&dev->regs->vend_spec));
181    SDHCI_ERROR("    mmc_boot = 0x%x\n", readl(&dev->regs->mmc_boot));
182    SDHCI_ERROR("    vend_spec2 = 0x%x\n", readl(&dev->regs->vend_spec2));
183    SDHCI_ERROR("    tuning_ctrl = 0x%x\n", readl(&dev->regs->tuning_ctrl));
184    SDHCI_ERROR("\n\n");
185}
186
187static void imx_decode_irq_error(uint32_t err) {
188
189    if(err & IMX_SDHC_INT_EN_DMAEN) {
190        SDHCI_ERROR("    Error:DMAEN...\n");
191    }
192
193    if(err & IMX_SDHC_INT_EN_TNE) {
194        SDHCI_ERROR("    Error:TNE...\n");
195    }
196
197    if(err & IMX_SDHC_INT_EN_AC12E) {
198        SDHCI_ERROR("    Error:AC12E...\n");
199    }
200
201    if(err & IMX_SDHC_INT_EN_DEBE) {
202        SDHCI_ERROR("    Error:DEBE...\n");
203    }
204
205    if(err & IMX_SDHC_INT_EN_DCE) {
206        SDHCI_ERROR("    Error:DCE...\n");
207    }
208
209    if(err & IMX_SDHC_INT_EN_DTOE) {
210        SDHCI_ERROR("    Error:DTOE...\n");
211    }
212
213    if(err & IMX_SDHC_INT_EN_CIE) {
214        SDHCI_ERROR("    Error:CIE...\n");
215    }
216
217    if(err & IMX_SDHC_INT_EN_CEBE) {
218        SDHCI_ERROR("    Error:CEBE...\n");
219    }
220
221    if(err & IMX_SDHC_INT_EN_CCE) {
222        SDHCI_ERROR("    Error:CCE...\n");
223    }
224
225    if(err & IMX_SDHC_INT_EN_CTOE) {
226        SDHCI_ERROR("    Error:CTOE...\n");
227    }
228
229}
230
231static bool imx_sdmmc_cmd_rsp_busy(uint32_t cmd_flags) {
232    return cmd_flags & SDMMC_RESP_LEN_48B;
233}
234
235static bool imx_sdmmc_has_data(uint32_t cmd_flags) {
236    return cmd_flags & SDMMC_RESP_DATA_PRESENT;
237}
238
239static uint32_t imx_sdhci_prepare_cmd(sdmmc_req_t* req) {
240    uint32_t cmd = SDHCI_CMD_IDX(req->cmd_idx);
241    uint32_t cmd_flags = req->cmd_flags;
242    uint32_t sdmmc_sdhci_map[][2] = { {SDMMC_RESP_CRC_CHECK, SDHCI_CMD_RESP_CRC_CHECK},
243                                      {SDMMC_RESP_CMD_IDX_CHECK, SDHCI_CMD_RESP_CMD_IDX_CHECK},
244                                      {SDMMC_RESP_DATA_PRESENT, SDHCI_CMD_RESP_DATA_PRESENT},
245                                      {SDMMC_CMD_DMA_EN, SDHCI_CMD_DMA_EN},
246                                      {SDMMC_CMD_BLKCNT_EN, SDHCI_CMD_BLKCNT_EN},
247                                      {SDMMC_CMD_AUTO12, SDHCI_CMD_AUTO12},
248                                      {SDMMC_CMD_AUTO23, SDHCI_CMD_AUTO23},
249                                      {SDMMC_CMD_READ, SDHCI_CMD_READ},
250                                      {SDMMC_CMD_MULTI_BLK, SDHCI_CMD_MULTI_BLK}
251                                    };
252    if (cmd_flags & SDMMC_RESP_LEN_EMPTY) {
253        cmd |= SDHCI_CMD_RESP_LEN_EMPTY;
254    } else if (cmd_flags & SDMMC_RESP_LEN_136) {
255        cmd |= SDHCI_CMD_RESP_LEN_136;
256    } else if (cmd_flags & SDMMC_RESP_LEN_48) {
257        cmd |= SDHCI_CMD_RESP_LEN_48;
258    } else if (cmd_flags & SDMMC_RESP_LEN_48B) {
259        cmd |= SDHCI_CMD_RESP_LEN_48B;
260    }
261    if (cmd_flags & SDMMC_CMD_TYPE_NORMAL) {
262        cmd |= SDHCI_CMD_TYPE_NORMAL;
263    } else if (cmd_flags & SDMMC_CMD_TYPE_SUSPEND) {
264        cmd |= SDHCI_CMD_TYPE_SUSPEND;
265    } else if (cmd_flags & SDMMC_CMD_TYPE_RESUME) {
266        cmd |= SDHCI_CMD_TYPE_RESUME;
267    } else if (cmd_flags & SDMMC_CMD_TYPE_ABORT) {
268        cmd |= SDHCI_CMD_TYPE_ABORT;
269    }
270    for (unsigned i = 0; i < sizeof(sdmmc_sdhci_map)/sizeof(*sdmmc_sdhci_map); i++) {
271        if (cmd_flags & sdmmc_sdhci_map[i][0]) {
272            cmd |= sdmmc_sdhci_map[i][1];
273        }
274    }
275    return cmd;
276}
277
278static zx_status_t imx_sdhci_wait_for_reset(imx_sdhci_device_t* dev,
279                                            const uint32_t mask, zx_time_t timeout) {
280    zx_time_t deadline = zx_clock_get_monotonic() + timeout;
281    while (true) {
282        if (!(readl(&dev->regs->sys_ctrl) & mask)) {
283            break;
284        }
285        if (zx_clock_get_monotonic() > deadline) {
286            SDHCI_ERROR("time out while waiting for reset\n");
287            return ZX_ERR_TIMED_OUT;
288        }
289    }
290    return ZX_OK;
291}
292
293static void imx_sdhci_complete_request_locked(imx_sdhci_device_t* dev, sdmmc_req_t* req,
294                                                zx_status_t status) {
295    SDHCI_TRACE("complete cmd 0x%08x status %d\n", req->cmd_idx, status);
296
297    // Disable interrupts when no pending transfer
298    writel(0, &dev->regs->int_signal_en);
299
300    dev->cmd_req = NULL;
301    dev->data_req = NULL;
302    dev->data_blockid = 0;
303    dev->data_done = false;
304
305    req->status = status;
306    sync_completion_signal(&dev->req_completion);
307}
308
// CC (command complete) interrupt handler. Called with dev->mtx held.
// Latches the controller's response registers into the request, then
// completes the request if no data stage remains outstanding.
static void imx_sdhci_cmd_stage_complete_locked(imx_sdhci_device_t* dev) {
    SDHCI_TRACE("Got CC interrupt\n");

    if (!dev->cmd_req) {
        SDHCI_TRACE("Spurious CC interupt\n");
        return;
    }

    sdmmc_req_t* req = dev->cmd_req;
    volatile struct imx_sdhci_regs* regs = dev->regs;
    // Re-derive the SDHCI encoding to learn which response length was used.
    uint32_t cmd = imx_sdhci_prepare_cmd(req);

    // Read the response data
    if (cmd & SDHCI_CMD_RESP_LEN_136) {
        // 136-bit responses are reassembled across the four response words,
        // each shifted left 8 bits with the carry pulled in from the previous
        // register. NOTE(review): presumably this compensates for the
        // controller stripping the CRC byte — confirm against the eSDHC
        // reference manual.
        req->response[0] = (regs->cmd_rsp0 << 8);
        req->response[1] = (regs->cmd_rsp1 << 8) | ((regs->cmd_rsp0 >> 24) & 0xFF);
        req->response[2] = (regs->cmd_rsp2 << 8) | ((regs->cmd_rsp1 >> 24) & 0xFF);
        req->response[3] = (regs->cmd_rsp3 << 8) | ((regs->cmd_rsp2 >> 24) & 0xFF);
    } else if (cmd & (SDHCI_CMD_RESP_LEN_48 | SDHCI_CMD_RESP_LEN_48B)) {
        req->response[0] = regs->cmd_rsp0;
        req->response[1] = regs->cmd_rsp1;
    }

    // We're done if the command has no data stage or if the data stage completed early
    if (!dev->data_req || dev->data_done) {
        imx_sdhci_complete_request_locked(dev, dev->cmd_req, ZX_OK);
    } else {
        // Data stage still pending; the TC handler will complete the request.
        dev->cmd_req = NULL;
    }
}
339
// BRR (buffer read ready) interrupt handler. Called with dev->mtx held.
// Copies one block out of the data-port FIFO into the request buffer (PIO).
static void imx_sdhci_data_stage_read_ready_locked(imx_sdhci_device_t* dev) {
    SDHCI_TRACE("Got BRR Interrupts\n");

    if (!dev->data_req || !imx_sdmmc_has_data(dev->data_req->cmd_flags)) {
        SDHCI_ERROR("Spurious BRR Interrupt. %p\n", dev->data_req);
        return;
    }

    if (dev->data_req->cmd_idx == MMC_SEND_TUNING_BLOCK) {
        // tuning command is done here; the tuning block data is not copied out
        imx_sdhci_complete_request_locked(dev, dev->data_req, ZX_OK);
        return;
    }

    sdmmc_req_t* req = dev->data_req;

    // Sequentially read each block, one 32-bit word at a time.
    for (size_t byteid = 0; byteid < req->blocksize; byteid += 4) {
        const size_t offset = dev->data_blockid * req->blocksize + byteid;
        uint32_t* wrd = req->virt + offset;
        *wrd = readl(&dev->regs->data_buff_acc_port); //TODO: Can't read this if DMA is enabled!
    }
    dev->data_blockid += 1;
}
364
365static void imx_sdhci_data_stage_write_ready_locked(imx_sdhci_device_t* dev) {
366    SDHCI_TRACE("Got BWR Interrupt\n");
367
368    if (!dev->data_req || !imx_sdmmc_has_data(dev->data_req->cmd_flags)) {
369        SDHCI_TRACE("Spurious BWR Interrupt\n");
370        return;
371    }
372
373    sdmmc_req_t* req = dev->data_req;
374
375    // Sequentially write each block
376    for (size_t byteid = 0; byteid < req->blocksize; byteid += 4) {
377        const size_t offset = dev->data_blockid * req->blocksize + byteid;
378        uint32_t* wrd = req->virt + offset;
379        writel(*wrd, &dev->regs->data_buff_acc_port); //TODO: Can't write if DMA is enabled
380    }
381    dev->data_blockid += 1;
382}
383
384static void imx_sdhci_transfer_complete_locked(imx_sdhci_device_t* dev) {
385    SDHCI_TRACE("Got TC Interrupt\n");
386    if (!dev->data_req) {
387        SDHCI_TRACE("Spurious TC Interrupt\n");
388        return;
389    }
390
391    if (dev->cmd_req) {
392        dev->data_done = true;
393    } else {
394        imx_sdhci_complete_request_locked(dev, dev->data_req, ZX_OK);
395    }
396}
397
// Recover from an error interrupt: reset the command then the data state
// machine, then fail any in-flight request with ZX_ERR_IO. Called with
// dev->mtx held.
static void imx_sdhci_error_recovery_locked(imx_sdhci_device_t* dev) {
    // Reset internal state machines
    set_bitsl(IMX_SDHC_SYS_CTRL_RSTC, &dev->regs->sys_ctrl);
    imx_sdhci_wait_for_reset(dev, IMX_SDHC_SYS_CTRL_RSTC, ZX_SEC(1));
    set_bitsl( IMX_SDHC_SYS_CTRL_RSTD, &dev->regs->sys_ctrl);
    imx_sdhci_wait_for_reset(dev, IMX_SDHC_SYS_CTRL_RSTD, ZX_SEC(1));

    // Complete any pending txn with error status
    // (cmd_req checked first; complete_request clears both request pointers.)
    if (dev->cmd_req != NULL) {
        imx_sdhci_complete_request_locked(dev, dev->cmd_req, ZX_ERR_IO);
    } else if (dev->data_req != NULL) {
        imx_sdhci_complete_request_locked(dev, dev->data_req, ZX_ERR_IO);
    }
}
412
// Compute the sys_ctrl clock-divider encoding needed to derive |target_rate|
// from |base_clock|. Returns ((pre_div & 0xFF) << 16) | (div & 0xF), or 0
// to use the base clock undivided.
static uint32_t get_clock_divider(imx_sdhci_device_t* dev,
                                    const uint32_t base_clock, const uint32_t target_rate) {
    uint32_t pre_div = 1;
    uint32_t div = 1;

    if (target_rate >= base_clock) {
        // A clock divider of 0 means "don't divide the clock"
        // If the base clock is already slow enough to use as the SD clock then
        // we don't need to divide it any further.
        return 0;
    }

    // In DDR mode start with an extra factor of two on the prescaler.
    if (dev->ddr_mode) {
        pre_div = 2;
    }

    SDHCI_TRACE("base %d, pre_div %d, div = %d, target_rate %d\n",
        base_clock, pre_div, div, target_rate);
    // Grow the power-of-two prescaler until the residual ratio fits within
    // the 4-bit divider (max 16). Prescaler caps at 256.
    while (base_clock / pre_div / 16 > target_rate && pre_div < 256) {
        SDHCI_TRACE("base %d, pre_div %d, div = %d, target_rate %d\n",
            base_clock, pre_div, div, target_rate);
        pre_div *= 2;
    }

    // Then grow the linear divider until the target rate is met (max 16).
    while (base_clock / pre_div / div > target_rate && div < 16) {
        SDHCI_TRACE("base %d, pre_div %d, div = %d, target_rate %d\n",
            base_clock, pre_div, div, target_rate);
        div++;
    }

    SDHCI_TRACE("base %d, pre_div %d, div = %d, target_rate %d\n",
        base_clock, pre_div, div, target_rate);

    // Convert to the register encoding: the prescaler field holds pre_div/2
    // (pre_div/4 in DDR mode) and the divider field holds div-1.
    // NOTE(review): presumably this matches the eSDHC SDCLKFS/DVS encoding
    // where DDR mode implicitly doubles the clock — confirm against the
    // i.MX reference manual.
    if(dev->ddr_mode) {
        pre_div >>= 2;
    } else {
        pre_div >>= 1;
    }
    div -= 1;

    return (((pre_div & 0xFF) << 16)| (div & 0xF));
}
455
#ifndef ENABLE_POLLING
// Interrupt service thread: waits on the controller IRQ, stashes and acks
// the status bits, then dispatches each bit to its stage handler under
// dev->mtx. Runs until zx_interrupt_wait() fails.
static int imx_sdhci_irq_thread(void *args) {
    zx_status_t wait_res;
    imx_sdhci_device_t* dev = (imx_sdhci_device_t*)args;
    volatile struct imx_sdhci_regs* regs = dev->regs;
    zx_handle_t irq_handle = dev->irq_handle;
    while(true) {
        // Re-arm interrupt generation before blocking.
        regs->int_signal_en = normal_interrupts | error_interrupts;
        wait_res = zx_interrupt_wait(irq_handle, NULL);
        if (wait_res != ZX_OK) {
            SDHCI_ERROR("sdhci: interrupt wait failed with retcode = %d\n", wait_res);
            break;
        }

        // Stash the status bits so we process a consistent snapshot.
        const uint32_t irq = regs->int_status;
        SDHCI_TRACE("got irq 0x%08x[stat 0x%08x en 0x%08x sig 0x%08x\n",irq, regs->int_status,
                                                    regs->int_status_en, regs->int_signal_en);


        // disable interrupts generation since we only process one at a time
        // int_status_en is still enabled, so we won't lose any interrupt info
        regs->int_signal_en = 0; // disable for now

        // Acknowledge the IRQs that we stashed.
        regs->int_status = irq;

        mtx_lock(&dev->mtx);
        if (irq & error_interrupts) {
            SDHCI_ERROR("IRQ ERROR: 0x%x\n", irq);
            imx_decode_irq_error(irq);
            esdhc_dump(dev);
            if (irq & IMX_SDHC_INT_STAT_DMAE) {
                SDHCI_TRACE("ADMA error 0x%x ADMAADDR0 0x%x\n",
                regs->adma_err_status, regs->adma_sys_addr);
            }
            imx_sdhci_error_recovery_locked(dev);
        }
        // Dispatch each stashed status bit to its handler.
        if (irq & IMX_SDHC_INT_STAT_CC) {
            imx_sdhci_cmd_stage_complete_locked(dev);
        }
        if (irq & IMX_SDHC_INT_STAT_BRR) {
            imx_sdhci_data_stage_read_ready_locked(dev);
        }
        if (irq & IMX_SDHC_INT_STAT_BWR) {
            imx_sdhci_data_stage_write_ready_locked(dev);
        }
        if (irq & IMX_SDHC_INT_STAT_TC) {
            imx_sdhci_transfer_complete_locked(dev);
        }
        mtx_unlock(&dev->mtx);
    }
    return ZX_OK;
}
#endif
510
511static zx_status_t imx_sdhci_build_dma_desc(imx_sdhci_device_t* dev, sdmmc_req_t* req) {
512    SDHCI_FUNC_ENTRY_LOG;
513    uint64_t req_len = req->blockcount * req->blocksize;
514    bool is_read = req->cmd_flags & SDMMC_CMD_READ;
515
516    uint64_t pagecount = ((req->buf_offset & PAGE_MASK) + req_len + PAGE_MASK) /
517                           PAGE_SIZE;
518    if (pagecount > SDMMC_PAGES_COUNT) {
519        SDHCI_ERROR("too many pages %lu vs %lu\n", pagecount, SDMMC_PAGES_COUNT);
520        return ZX_ERR_INVALID_ARGS;
521    }
522
523    // pin the vmo
524    zx_paddr_t phys[SDMMC_PAGES_COUNT];
525    zx_handle_t pmt;
526    // offset_vmo is converted to bytes by the sdmmc layer
527    uint32_t options = is_read ? ZX_BTI_PERM_WRITE : ZX_BTI_PERM_READ;
528    zx_status_t st = zx_bti_pin(dev->bti_handle, options, req->dma_vmo,
529                                req->buf_offset & ~PAGE_MASK,
530                                pagecount * PAGE_SIZE, phys, pagecount, &pmt);
531    if (st != ZX_OK) {
532        SDHCI_ERROR("error %d bti_pin\n", st);
533        return st;
534    }
535    // cache this for zx_pmt_unpin() later
536    req->pmt = pmt;
537
538    if (is_read) {
539        st = zx_vmo_op_range(req->dma_vmo, ZX_VMO_OP_CACHE_CLEAN_INVALIDATE,
540                             req->buf_offset, req_len, NULL, 0);
541    } else {
542        st = zx_vmo_op_range(req->dma_vmo, ZX_VMO_OP_CACHE_CLEAN,
543                             req->buf_offset, req_len, NULL, 0);
544    }
545    if (st != ZX_OK) {
546        zxlogf(ERROR, "imx-emmc: cache clean failed with error %d\n", st);
547    }
548
549    phys_iter_buffer_t buf = {
550        .phys = phys,
551        .phys_count = pagecount,
552        .length = req_len,
553        .vmo_offset = req->buf_offset,
554    };
555    phys_iter_t iter;
556    phys_iter_init(&iter, &buf, ADMA2_DESC_MAX_LENGTH);
557
558    int count = 0;
559    size_t length;
560    zx_paddr_t paddr;
561    sdhci_adma64_desc_t* desc = dev->descs;
562    for (;;) {
563        length = phys_iter_next(&iter, &paddr);
564        if (length == 0) {
565            if (desc != dev->descs) {
566                desc -= 1;
567                desc->end = 1; // set end bit on the last descriptor
568                break;
569            } else {
570                SDHCI_TRACE("empty descriptor list!\n");
571                return ZX_ERR_NOT_SUPPORTED;
572            }
573        } else if (length > ADMA2_DESC_MAX_LENGTH) {
574            SDHCI_TRACE("chunk size > %zu is unsupported\n", length);
575            return ZX_ERR_NOT_SUPPORTED;
576        } else if ((++count) > DMA_DESC_COUNT) {
577            SDHCI_TRACE("request with more than %zd chunks is unsupported\n",
578                    length);
579            return ZX_ERR_NOT_SUPPORTED;
580        }
581        desc->length = length & 0xffff; // 0 = 0x10000 bytes
582        desc->address = paddr;
583        desc->attr = 0;
584        desc->valid = 1;
585        desc->act2 = 1; // transfer data
586        desc += 1;
587    }
588
589    if (driver_get_log_flags() & DDK_LOG_SPEW) {
590        desc = dev->descs;
591        do {
592            SDHCI_TRACE("desc: addr=0x%" PRIx32 " length=0x%04x attr=0x%04x\n",
593                         desc->address, desc->length, desc->attr);
594        } while (!(desc++)->end);
595    }
596    return ZX_OK;
597}
598
599static zx_status_t imx_sdhci_start_req_locked(imx_sdhci_device_t* dev, sdmmc_req_t* req) {
600    volatile struct imx_sdhci_regs* regs = dev->regs;
601    const uint32_t arg = req->arg;
602    const uint16_t blkcnt = req->blockcount;
603    const uint16_t blksiz = req->blocksize;
604    uint32_t cmd = imx_sdhci_prepare_cmd(req);
605    bool has_data = imx_sdmmc_has_data(req->cmd_flags);
606
607    if (req->use_dma && !dev->dma_mode) {
608        SDHCI_INFO("we don't support dma yet\t");
609        return ZX_ERR_NOT_SUPPORTED;
610    }
611
612    SDHCI_TRACE("start_req cmd=0x%08x (data %d dma %d bsy %d) blkcnt %u blksiz %u\n",
613                  cmd, has_data, req->use_dma, imx_sdmmc_cmd_rsp_busy(cmd), blkcnt, blksiz);
614
615    // Every command requires that the Commnad Inhibit bit is unset
616    uint32_t inhibit_mask = IMX_SDHC_PRES_STATE_CIHB;
617
618    // Busy type commands must also wait for the DATA Inhibit to be 0 unless it's an abort
619    // command which can be issued with the data lines active
620    if (((cmd & SDMMC_RESP_LEN_48B) == SDMMC_RESP_LEN_48B) &&
621        ((cmd & SDMMC_CMD_TYPE_ABORT) == 0)) {
622        inhibit_mask |= IMX_SDHC_PRES_STATE_CDIHB;
623    }
624
625    // Wait for the inhibit masks from above to become 0 before issuing the command
626    while(readl(&regs->pres_state) & inhibit_mask) {
627        zx_nanosleep(zx_deadline_after(ZX_MSEC(1)));
628    }
629
630    zx_status_t st = ZX_OK;
631    if (has_data) {
632        if (req->use_dma) {
633            st = imx_sdhci_build_dma_desc(dev, req);
634            if (st != ZX_OK) {
635                SDHCI_ERROR("Could not build DMA Descriptor\n");
636                return st;
637            }
638            zx_paddr_t desc_phys = io_buffer_phys(&dev->iobuf);
639            io_buffer_cache_flush(&dev->iobuf, 0,
640                          DMA_DESC_COUNT * sizeof(sdhci_adma64_desc_t));
641            writel((uint32_t)desc_phys, &regs->adma_sys_addr);
642            clr_bitsl(IMX_SDHC_PROT_CTRL_DMASEL_MASK, &dev->regs->prot_ctrl);
643            set_bitsl(IMX_SDHC_PROT_CTRL_DMASEL_ADMA2, &dev->regs->prot_ctrl);
644            writel(0, &regs->adma_err_status);
645            set_bitsl(IMX_SDHC_MIX_CTRL_DMAEN, &regs->mix_ctrl);
646        } else {
647            clr_bitsl(IMX_SDHC_PROT_CTRL_DMASEL_MASK, &dev->regs->prot_ctrl);
648        }
649        if (cmd & SDHCI_CMD_MULTI_BLK) {
650            cmd |= SDHCI_CMD_AUTO12;
651        }
652    }
653
654    writel(blksiz | (blkcnt << 16), &regs->blk_att);
655    writel((blksiz/4) | (blksiz/4) << 16, &dev->regs->wtmk_lvl);
656
657    writel(arg, &regs->cmd_arg);
658
659    // Clear any pending interrupts before starting the transaction
660    writel(0xFFFFFFFF, &regs->int_status);
661
662    if (req->use_dma) {
663        // Unmask and enable interrupts
664        writel(error_interrupts | dma_normal_interrupts, &regs->int_signal_en);
665        writel(error_interrupts | dma_normal_interrupts, &regs->int_status_en);
666    } else {
667        // Unmask and enable interrupts
668        writel(error_interrupts | normal_interrupts, &regs->int_signal_en);
669        writel(error_interrupts | normal_interrupts, &regs->int_status_en);
670    }
671
672    dev->cmd_req = req;
673
674    if (has_data || imx_sdmmc_cmd_rsp_busy(cmd)) {
675        dev->data_req = req;
676    } else {
677        dev->data_req = NULL;
678    }
679    dev->data_blockid = 0;
680    dev->data_done = false;
681
682    // Start command
683    clr_bitsl(IMX_SDHC_MIX_CTRL_CMD_MASK, &regs->mix_ctrl);
684    set_bitsl(cmd & IMX_SDHC_MIX_CTRL_CMD_MASK, &regs->mix_ctrl);
685    writel(cmd & IMX_SDHC_CMD_XFER_TYPE_CMD_MASK, &regs->cmd_xfr_typ);
686
687#ifdef ENABLE_POLLING
688    bool pio_done = false;
689
690    while (!pio_done) {
691        // wait for interrupt to occur
692        while((readl(&regs->int_status) & readl(&regs->int_status_en)) == 0) {
693            usleep(1);
694        }
695
696        // we got an interrupt. process it
697        const uint32_t irq = readl(&regs->int_status);
698        SDHCI_TRACE("(PIO MODE) got irq 0x%08x 0x%08x en 0x%08x sig 0x%08x, data_req %p\n",
699            readl(&regs->int_status), irq, readl(&regs->int_status_en), readl(&regs->int_signal_en),
700            dev->data_req);
701
702        // Acknowledge the IRQs that we stashed.
703        writel(irq, &regs->int_status);
704
705        if (irq & error_interrupts) {
706            SDHCI_ERROR("IRQ ERROR: 0x%x\n", irq);
707            imx_decode_irq_error(irq);
708            esdhc_dump(dev);
709            if (irq & IMX_SDHC_INT_STAT_DMAE) {
710                SDHCI_TRACE("ADMA error 0x%x ADMAADDR0 0x%x\n",
711                readl(&regs->adma_err_status), readl(&regs->adma_sys_addr));
712            }
713            imx_sdhci_error_recovery_locked(dev);
714        }
715
716        if (irq & IMX_SDHC_INT_STAT_CC) {
717            imx_sdhci_cmd_stage_complete_locked(dev);
718            if (!has_data) {
719                pio_done = true;
720            }
721        }
722        if (irq & IMX_SDHC_INT_STAT_BRR) {
723            if (dev->data_req->cmd_idx == MMC_SEND_TUNING_BLOCK) {
724                pio_done = true;
725            }
726            imx_sdhci_data_stage_read_ready_locked(dev);
727        }
728        if (irq & IMX_SDHC_INT_STAT_BWR) {
729            imx_sdhci_data_stage_write_ready_locked(dev);
730        }
731        if (irq & IMX_SDHC_INT_STAT_TC) {
732            imx_sdhci_transfer_complete_locked(dev);
733            pio_done = true;
734        }
735    }
736#endif
737    return ZX_OK;
738}
739
740static zx_status_t imx_sdhci_finish_req(imx_sdhci_device_t* dev, sdmmc_req_t* req) {
741    zx_status_t status = ZX_OK;
742
743    if (req->use_dma && req->pmt != ZX_HANDLE_INVALID) {
744        /*
745         * Clean the cache one more time after the DMA operation because there
746         * might be a possibility of cpu prefetching while the DMA operation is
747         * going on.
748         */
749        uint64_t req_len = req->blockcount * req->blocksize;
750        if ((req->cmd_flags & SDMMC_CMD_READ) && req->use_dma) {
751            status = zx_vmo_op_range(req->dma_vmo, ZX_VMO_OP_CACHE_CLEAN_INVALIDATE,
752                                             req->buf_offset, req_len, NULL, 0);
753            if (status != ZX_OK) {
754                zxlogf(ERROR, "aml-sd-emmc: cache clean failed with error  %d\n", status);
755            }
756        }
757
758        status = zx_pmt_unpin(req->pmt);
759        if (status != ZX_OK) {
760            SDHCI_ERROR("error %d in pmt_unpin\n", status);
761        }
762        req->pmt = ZX_HANDLE_INVALID;
763    }
764    return status;
765}
766
767/* SDMMC PROTOCOL Implementations: host_info */
768static zx_status_t imx_sdhci_host_info(void* ctx, sdmmc_host_info_t* info) {
769    SDHCI_FUNC_ENTRY_LOG;
770    imx_sdhci_device_t* dev = ctx;
771    memcpy(info, &dev->info, sizeof(dev->info));
772    return ZX_OK;
773}
774
775/* SDMMC PROTOCOL Implementations: set_signal_voltage */
// Stub: accepts any requested signalling voltage without touching hardware
// (voltage switching is not implemented yet — see TODO).
static zx_status_t imx_sdhci_set_signal_voltage(void* ctx, sdmmc_voltage_t voltage) {
    SDHCI_FUNC_ENTRY_LOG;
    return ZX_OK; // TODO: Figure out how to change voltage using the regulator
}
780
781/* SDMMC PROTOCOL Implementations: set_bus_width */
782static zx_status_t imx_sdhci_set_bus_width(void* ctx, uint32_t bus_width) {
783    SDHCI_FUNC_ENTRY_LOG;
784    if (bus_width >= SDMMC_BUS_WIDTH_MAX) {
785        return ZX_ERR_INVALID_ARGS;
786    }
787    zx_status_t status = ZX_OK;
788    imx_sdhci_device_t* dev = ctx;
789
790    mtx_lock(&dev->mtx);
791
792    if ((bus_width == SDMMC_BUS_WIDTH_8) && !(dev->info.caps & SDMMC_HOST_CAP_BUS_WIDTH_8)) {
793        SDHCI_ERROR("8-bit bus width not supported\n");
794        status = ZX_ERR_NOT_SUPPORTED;
795        goto unlock;
796    }
797
798    switch (bus_width) {
799        case SDMMC_BUS_WIDTH_1:
800            clr_bitsl(IMX_SDHC_PROT_CTRL_DTW_MASK, &dev->regs->prot_ctrl);
801            set_bitsl(IMX_SDHC_PROT_CTRL_DTW_1, &dev->regs->prot_ctrl);
802            break;
803        case SDMMC_BUS_WIDTH_4:
804            clr_bitsl(IMX_SDHC_PROT_CTRL_DTW_MASK, &dev->regs->prot_ctrl);
805            set_bitsl(IMX_SDHC_PROT_CTRL_DTW_4, &dev->regs->prot_ctrl);
806            break;
807        case SDMMC_BUS_WIDTH_8:
808            clr_bitsl(IMX_SDHC_PROT_CTRL_DTW_MASK, &dev->regs->prot_ctrl);
809            set_bitsl(IMX_SDHC_PROT_CTRL_DTW_8, &dev->regs->prot_ctrl);
810            break;
811        default:
812            break;
813    }
814
815    SDHCI_ERROR("set bus width to %d\n", bus_width);
816
817unlock:
818    mtx_unlock(&dev->mtx);
819    return status;
820}
821
/* SDMMC PROTOCOL Implementations: set_bus_freq */
// Reprograms the SD clock dividers to approximate bus_freq from the base
// clock. Returns ZX_ERR_TIMED_OUT if the controller's command/data lines
// never go idle, ZX_OK otherwise.
static zx_status_t imx_sdhci_set_bus_freq(void* ctx, uint32_t bus_freq) {
    SDHCI_FUNC_ENTRY_LOG;
    zx_status_t status = ZX_OK;
    imx_sdhci_device_t* dev = ctx;

    mtx_lock(&dev->mtx);

    // NOTE(review): assumes get_clock_divider() packs the prescaler in bits
    // [23:16] and the 4-bit divider in bits [3:0] -- confirm against its
    // definition.
    const uint32_t divider = get_clock_divider(dev, dev->base_clock, bus_freq);
    const uint8_t pre_div = (divider >> 16) & 0xFF;
    const uint8_t div = (divider & 0xF);

    SDHCI_TRACE("divider %d, pre_div %d, div = %d, ddr_mode %s\n",
        divider, pre_div, div, dev->ddr_mode? "ON" : "OFF");

    volatile struct imx_sdhci_regs* regs = dev->regs;

    // Wait (up to ~1s) for both command and data lines to go idle before
    // touching the clock configuration.
    uint32_t iterations = 0;
    while (readl(&regs->pres_state) & (IMX_SDHC_PRES_STATE_CIHB | IMX_SDHC_PRES_STATE_CDIHB)) {
        if (++iterations > 1000) {
            status = ZX_ERR_TIMED_OUT;
            goto unlock;
        }
        zx_nanosleep(zx_deadline_after(ZX_MSEC(1)));
    }

    if(dev->ddr_mode) {
        set_bitsl(IMX_SDHC_MIX_CTRL_DDR_EN, &regs->mix_ctrl);
    }

    // Gate the card clock while the dividers are being reprogrammed.
    clr_bitsl(IMX_SDHC_VEND_SPEC_CARD_CLK_SOFT_EN, &regs->vend_spec);

    clr_bitsl(IMX_SDHC_SYS_CTRL_CLOCK_MASK, &regs->sys_ctrl);

    set_bitsl((pre_div << IMX_SDHC_SYS_CTRL_PREDIV_SHIFT) |
                  (div << IMX_SDHC_SYS_CTRL_DIVIDER_SHIFT),
              &regs->sys_ctrl);

    // Add delay to make sure clocks are stable
    zx_nanosleep(zx_deadline_after(ZX_MSEC(2)));

    // Ungate the peripheral and card clocks with the new dividers in place.
    set_bitsl(IMX_SDHC_VEND_SPEC_IPG_PERCLK_SOFT_EN | IMX_SDHC_VEND_SPEC_CARD_CLK_SOFT_EN,
              &regs->vend_spec);

    zx_nanosleep(zx_deadline_after(ZX_MSEC(2)));

    SDHCI_INFO("desired freq = %d, actual = %d, (%d, %d. %d)\n",
        bus_freq, dev->base_clock / (pre_div? dev->ddr_mode? pre_div<<2 : pre_div<<1 : dev->ddr_mode? 2: 1) / (div+1), dev->base_clock,
        pre_div, div);

unlock:
    mtx_unlock(&dev->mtx);
    return status;
}
876
877static void imx_sdhci_set_strobe_dll(imx_sdhci_device_t* dev) {
878
879    clr_bitsl(IMX_SDHC_VEND_SPEC_FRC_SDCLK_ON, &dev->regs->vend_spec);
880    writel(IMX_SDHC_DLLCTRL_RESET, &dev->regs->dll_ctrl);
881
882    writel((IMX_SDHC_DLLCTRL_ENABLE | IMX_SDHC_DLLCTRL_SLV_DLY_TARGET), &dev->regs->dll_ctrl);
883    usleep(10);
884    if(!(readl(&dev->regs->dll_status) & IMX_SDHC_DLLSTS_REF_LOCK)) {
885        SDHCI_ERROR("HS400 Strobe DLL status REF not locked!!\n");
886    }
887    if(!(readl(&dev->regs->dll_status) & IMX_SDHC_DLLSTS_SLV_LOCK)) {
888        SDHCI_ERROR("HS400 Strobe DLL status SLV not locked!!\n");
889    }
890
891}
892
/* SDMMC PROTOCOL Implementations: set_timing */
// Configures MIX_CTRL and the bus clock for the requested timing mode.
static zx_status_t imx_sdhci_set_timing(void* ctx, sdmmc_timing_t timing) {
    SDHCI_FUNC_ENTRY_LOG;
    if (timing >= SDMMC_TIMING_MAX) {
        return ZX_ERR_INVALID_ARGS;
    }

    zx_status_t status = ZX_OK;
    imx_sdhci_device_t* dev = ctx;

    mtx_lock(&dev->mtx);

    // Start from the current MIX_CTRL value with the HS400/DDR bits cleared;
    // each case decides whether to write the modified value back.
    uint32_t regVal = readl(&dev->regs->mix_ctrl);
    regVal &= ~(IMX_SDHC_MIX_CTRL_HS400 | IMX_SDHC_MIX_CTRL_DDR_EN);
    dev->ddr_mode = false;
    switch(timing) {
        case SDMMC_TIMING_LEGACY:
            // imx_sdhci_set_bus_freq takes dev->mtx itself, so drop the lock
            // around the call to avoid self-deadlock.
            mtx_unlock(&dev->mtx);
            imx_sdhci_set_bus_freq(dev, 25000000);
            mtx_lock(&dev->mtx);
            // NOTE(review): regVal (with HS400/DDR_EN cleared) is never
            // written back to mix_ctrl on this path -- verify whether a
            // previously-set DDR mode can leak into legacy timing.
            clr_bitsl(IMX_SDHC_AUTOCMD12_ERRSTS_SMP_CLK_SEL | IMX_SDHC_AUTOCMD12_ERRSTS_EXE_TUNING,
                      &dev->regs->autocmd12_err_status);
            break;
        case SDMMC_TIMING_HS400:
            regVal |= (IMX_SDHC_MIX_CTRL_HS400 | IMX_SDHC_MIX_CTRL_DDR_EN);
            writel(regVal, &dev->regs->mix_ctrl);
            // make sure we are running at 200MHz already
            mtx_unlock(&dev->mtx);
            // NOTE(review): ddr_mode is written while the mutex is NOT held;
            // confirm no concurrent reader races with this store.
            dev->ddr_mode = true;
            imx_sdhci_set_bus_freq(dev, 200000000);
            mtx_lock(&dev->mtx);
            imx_sdhci_set_strobe_dll(dev);
            break;
        case SDMMC_TIMING_HSDDR:
            dev->ddr_mode = true;
            regVal |= (IMX_SDHC_MIX_CTRL_DDR_EN);
            //fall through
        default:
            mtx_unlock(&dev->mtx);
            imx_sdhci_set_bus_freq(dev, 52000000);
            mtx_lock(&dev->mtx);
            writel(regVal, &dev->regs->mix_ctrl);
            break;
    }

    // need to update pin state
    mtx_unlock(&dev->mtx);
    return status;
}
942
/* SDMMC PROTOCOL Implementations: hw_reset */
// Power-cycles the card via GPIO, soft-resets the host controller, and
// re-programs it to the default configuration (1-bit bus, setup frequency).
static void imx_sdhci_hw_reset(void* ctx) {
    SDHCI_FUNC_ENTRY_LOG;
    imx_sdhci_device_t* dev = ctx;

    mtx_lock(&dev->mtx);

    // Toggle the card reset GPIO (held low for 10ms).
    gpio_write(&dev->gpio, 0);
    usleep(10000);
    gpio_write(&dev->gpio, 1);

    dev->info.caps |= SDMMC_HOST_CAP_AUTO_CMD12;

    // Reset host controller
    set_bitsl(IMX_SDHC_SYS_CTRL_RSTA, &dev->regs->sys_ctrl);
    if (imx_sdhci_wait_for_reset(dev, IMX_SDHC_SYS_CTRL_RSTA, ZX_SEC(1)) != ZX_OK) {
        SDHCI_ERROR("Did not recover from reset 0x%x\n", readl(&dev->regs->sys_ctrl));
        mtx_unlock(&dev->mtx);
        return;
    }

    // Restore controller registers to known defaults after the reset.
    writel(0, &dev->regs->mmc_boot);
    writel(0, &dev->regs->mix_ctrl);
    writel(0, &dev->regs->clk_tune_ctrl_status);
    writel(0, &dev->regs->dll_ctrl);
    writel(0, &dev->regs->autocmd12_err_status);
    writel(IMX_SDHC_VEND_SPEC_INIT, &dev->regs->vend_spec);
    set_bitsl(IMX_SDHC_VEND_SPEC_HCLK_SOFT_EN | IMX_SDHC_VEND_SPEC_IPG_CLK_SOFT_EN,
              &dev->regs->vend_spec);
    // Data timeout counter: clear the field, then program the value 0xe.
    clr_bitsl(IMX_SDHC_SYS_CTRL_DTOCV_MASK, &dev->regs->sys_ctrl);
    set_bitsl(IMX_SDHC_SYS_CTRL_DTOCV(0xe), &dev->regs->sys_ctrl);
    writel(IMX_SDHC_PROT_CTRL_INIT, &dev->regs->prot_ctrl);

    // Standard tuning setup: start at tap 20, step 2, standard tuning on.
    uint32_t regVal = readl(&dev->regs->tuning_ctrl);
    regVal &= ~(IMX_SDHC_TUNING_CTRL_START_TAP_MASK);
    regVal &= ~(IMX_SDHC_TUNING_CTRL_STEP_MASK);
    regVal &= ~(IMX_SDHC_TUNING_CTRL_STD_TUN_EN);
    regVal |=   (IMX_SDHC_TUNING_CTRL_START_TAP(20)) |
                (IMX_SDHC_TUNING_CTRL_STEP(2)) |
                (IMX_SDHC_TUNING_CTRL_STD_TUN_EN);
    writel(regVal, &dev->regs->tuning_ctrl);

    // NOTE(review): magic bit -- presumably a VEND_SPEC clock enable; replace
    // with a named IMX_SDHC_VEND_SPEC_* constant once confirmed.
    set_bitsl(1 << 1, &dev->regs->vend_spec);
    usleep(100);

    // enable clocks
    // Drop the lock first: both helpers below acquire dev->mtx themselves.
    mtx_unlock(&dev->mtx);
    imx_sdhci_set_bus_freq(dev, SD_FREQ_SETUP_HZ);
    imx_sdhci_set_bus_width(dev, SDMMC_BUS_WIDTH_1);
}
993
994/* SDMMC PROTOCOL Implementations: request */
995static zx_status_t imx_sdhci_request(void* ctx, sdmmc_req_t* req) {
996    SDHCI_FUNC_ENTRY_LOG;
997    zx_status_t status = ZX_OK;
998    imx_sdhci_device_t* dev = ctx;
999
1000    mtx_lock(&dev->mtx);
1001    // one command at a time
1002    if ((dev->cmd_req != NULL) || (dev->data_req != NULL)) {
1003        status = ZX_ERR_SHOULD_WAIT;
1004        goto unlock_out;
1005    }
1006
1007
1008    status = imx_sdhci_start_req_locked(dev, req);
1009    if (status != ZX_OK) {
1010        goto unlock_out;
1011    }
1012
1013    mtx_unlock(&dev->mtx);
1014
1015    sync_completion_wait(&dev->req_completion, ZX_TIME_INFINITE);
1016
1017    imx_sdhci_finish_req(dev, req);
1018
1019    sync_completion_reset(&dev->req_completion);
1020
1021    return req->status;
1022
1023unlock_out:
1024    mtx_unlock(&dev->mtx);
1025    imx_sdhci_finish_req(dev, req);
1026    return status;
1027}
1028
/* SDMMC PROTOCOL Implementations: perform_tuning */
// Runs the i.MX standard tuning sequence: repeatedly issues the tuning
// command until the controller clears EXE_TUNING or MAX_TUNING_COUNT
// attempts have been made. Returns ZX_ERR_IO if tuning did not converge.
static zx_status_t imx_sdhci_perform_tuning(void* ctx, uint32_t tuning_cmd_idx) {
    SDHCI_FUNC_ENTRY_LOG;
    imx_sdhci_device_t* dev = ctx;
    uint32_t regVal;

    mtx_lock(&dev->mtx);

    // Tuning block size depends on bus width: 128 bytes in 8-bit mode,
    // 64 bytes otherwise.
    sdmmc_req_t req = {
        .cmd_idx = tuning_cmd_idx,
        .cmd_flags = MMC_SEND_TUNING_BLOCK_FLAGS,
        .arg = 0,
        .blockcount = 0,
        .blocksize = (readl(&dev->regs->prot_ctrl) & IMX_SDHC_PROT_CTRL_DTW_8) ? 128 : 64,
    };

    // Setup Standard Tuning
    regVal = readl(&dev->regs->autocmd12_err_status);
    regVal &= ~(IMX_SDHC_AUTOCMD12_ERRSTS_SMP_CLK_SEL);
    regVal |= IMX_SDHC_AUTOCMD12_ERRSTS_EXE_TUNING;
    writel(regVal, &dev->regs->autocmd12_err_status);

    // Select the feedback clock and enable automatic tuning.
    regVal = readl(&dev->regs->mix_ctrl);
    regVal &= ~(IMX_SDHC_MIX_CTRL_FBCLK_SEL | IMX_SDHC_MIX_CTRL_AUTO_TUNE);
    regVal |= (IMX_SDHC_MIX_CTRL_FBCLK_SEL | IMX_SDHC_MIX_CTRL_AUTO_TUNE);
    writel(regVal, &dev->regs->mix_ctrl);

    // The mutex is dropped around imx_sdhci_request() (which takes it
    // itself) and re-acquired before the register read in the condition.
    int count = 0;
    do {
        mtx_unlock(&dev->mtx);
        usleep(1000);
        zx_status_t st = imx_sdhci_request(dev, &req);
        if (st != ZX_OK) {
            // The lock is not held here, so returning directly is safe.
            SDHCI_ERROR("sdhci: MMC_SEND_TUNING_BLOCK error, retcode = %d\n", req.status);
            return st;
        }
        mtx_lock(&dev->mtx);
    } while (((readl(&dev->regs->autocmd12_err_status) & IMX_SDHC_AUTOCMD12_ERRSTS_EXE_TUNING)) &&
             count++ < (MAX_TUNING_COUNT));

    // Tuning succeeded iff the controller finished (EXE_TUNING cleared)
    // and selected a sample clock (SMP_CLK_SEL set).
    bool fail = (readl(&dev->regs->autocmd12_err_status) & IMX_SDHC_AUTOCMD12_ERRSTS_EXE_TUNING) ||
                !(readl(&dev->regs->autocmd12_err_status) & IMX_SDHC_AUTOCMD12_ERRSTS_SMP_CLK_SEL);

    // Give the card some time to finish up
    usleep(1000);
    mtx_unlock(&dev->mtx);

    SDHCI_ERROR("sdhci: tuning %s\n", fail? "failed!":"successful!");

    if (fail) {
        esdhc_dump(dev);
        return ZX_ERR_IO;
    }
   return ZX_OK;
}
1084
// SDMMC protocol ops table published to the devhost via device_add_args_t.
static sdmmc_protocol_ops_t sdmmc_proto = {
    .host_info = imx_sdhci_host_info,
    .set_signal_voltage = imx_sdhci_set_signal_voltage,
    .set_bus_width = imx_sdhci_set_bus_width,
    .set_bus_freq = imx_sdhci_set_bus_freq,
    .set_timing = imx_sdhci_set_timing,
    .hw_reset = imx_sdhci_hw_reset,
    .perform_tuning = imx_sdhci_perform_tuning,
    .request = imx_sdhci_request,
};
1095
1096static void imx_sdhci_unbind(void* ctx) {
1097    imx_sdhci_device_t* dev = ctx;
1098    device_remove(dev->zxdev);
1099}
1100
1101static void imx_sdhci_release(void* ctx) {
1102    imx_sdhci_device_t* dev = ctx;
1103    mmio_buffer_release(&dev->mmios);
1104    zx_handle_close(dev->bti_handle);
1105    free(dev);
1106}
1107
// Device lifecycle hooks: unbind detaches from the tree, release frees state.
static zx_protocol_device_t imx_sdhci_device_proto = {
    .version = DEVICE_OPS_VERSION,
    .unbind = imx_sdhci_unbind,
    .release = imx_sdhci_release,

};
1114
1115static zx_status_t imx_sdhci_bind(void* ctx, zx_device_t* parent) {
1116    zx_status_t status;
1117
1118    imx_sdhci_device_t* dev = calloc(1, sizeof(imx_sdhci_device_t));
1119    if (!dev) {
1120        return ZX_ERR_NO_MEMORY;
1121    }
1122
1123    status = device_get_protocol(parent, ZX_PROTOCOL_PLATFORM_DEV, &dev->pdev);
1124    if (status != ZX_OK) {
1125        SDHCI_ERROR("ZX_PROTOCOL_PLATFORM_DEV not available %d \n", status);
1126        goto fail;
1127    }
1128
1129    status = device_get_protocol(parent, ZX_PROTOCOL_GPIO, &dev->gpio);
1130    if (status != ZX_OK) {
1131        SDHCI_ERROR("ZX_PROTOCOL_GPIO not available %d\n", status);
1132        goto fail;
1133    }
1134
1135    status = pdev_map_mmio_buffer2(&dev->pdev, 0, ZX_CACHE_POLICY_UNCACHED_DEVICE,
1136                                    &dev->mmios);
1137    if (status != ZX_OK) {
1138        SDHCI_ERROR("pdev_map_mmio_buffer failed %d\n", status);
1139        goto fail;
1140    }
1141    dev->regs = dev->mmios.vaddr;
1142
1143    status = pdev_get_bti(&dev->pdev, 0, &dev->bti_handle);
1144    if (status != ZX_OK) {
1145        SDHCI_ERROR("Could not get BTI handle %d\n", status);
1146        goto fail;
1147    }
1148
1149    status = pdev_map_interrupt(&dev->pdev, 0, &dev->irq_handle);
1150    if (status != ZX_OK) {
1151        SDHCI_ERROR("pdev_map_interrupt failed %d\n", status);
1152        goto fail;
1153    }
1154
1155#ifndef ENABLE_POLLING
1156    thrd_t irq_thread;
1157    if (thrd_create_with_name(&irq_thread, imx_sdhci_irq_thread,
1158                                        dev, "imx_sdhci_irq_thread") != thrd_success) {
1159        SDHCI_ERROR("Failed to create irq thread\n");
1160    }
1161    thrd_detach(irq_thread);
1162#endif
1163
1164    dev->base_clock = IMX8M_SDHCI_BASE_CLOCK; // TODO: Better way of obtaining this info
1165
1166    // Toggle the reset button
1167    if (gpio_config_out(&dev->gpio, 0) != ZX_OK) {
1168        SDHCI_ERROR("Could not configure RESET pin as output\n");
1169        goto fail;
1170    }
1171
1172    uint32_t caps0 = readl(&dev->regs->host_ctrl_cap);
1173
1174    //TODO: Turn off 8-bit mode for now since it doesn't work
1175    dev->info.caps |= SDMMC_HOST_CAP_BUS_WIDTH_8;
1176#ifndef DISABLE_DMA
1177    dev->info.caps |= SDMMC_HOST_CAP_ADMA2;
1178#endif
1179    if (caps0 & SDHCI_CORECFG_3P3_VOLT_SUPPORT) {
1180        dev->info.caps |= SDMMC_HOST_CAP_VOLTAGE_330;
1181    }
1182
1183    dev->info.caps |= SDMMC_HOST_CAP_AUTO_CMD12;
1184
1185    // TODO: Disable HS400 for now
1186    dev->info.prefs |= SDMMC_HOST_PREFS_DISABLE_HS400;
1187#ifndef DISABLE_DMA
1188    status = io_buffer_init(&dev->iobuf, dev->bti_handle,
1189                            DMA_DESC_COUNT * sizeof(sdhci_adma64_desc_t),
1190                            IO_BUFFER_RW | IO_BUFFER_CONTIG);
1191    if (status != ZX_OK) {
1192        SDHCI_ERROR("Could not allocate DMA buffer. Falling to PIO Mode\n");
1193        dev->dma_mode = false;
1194        dev->info.max_transfer_size = BLOCK_MAX_TRANSFER_UNBOUNDED;
1195    } else {
1196        SDHCI_ERROR("0x%lx %p\n", io_buffer_phys(&dev->iobuf), io_buffer_virt(&dev->iobuf));
1197        dev->descs = io_buffer_virt(&dev->iobuf);
1198        dev->info.max_transfer_size = DMA_DESC_COUNT * PAGE_SIZE;
1199        clr_bitsl(IMX_SDHC_PROT_CTRL_DMASEL_MASK, &dev->regs->prot_ctrl);
1200        set_bitsl(IMX_SDHC_PROT_CTRL_DMASEL_ADMA2, &dev->regs->prot_ctrl);
1201        dev->dma_mode = true;
1202        SDHCI_ERROR("Enabling DMA Mode\n");
1203    }
1204#else
1205        SDHCI_ERROR("DMA Mode Disabled. Using PIO Mode\n");
1206        dev->dma_mode = false;
1207        dev->info.max_transfer_size = BLOCK_MAX_TRANSFER_UNBOUNDED;
1208#endif
1209    dev->info.max_transfer_size_non_dma = BLOCK_MAX_TRANSFER_UNBOUNDED;
1210
1211    // Disable all interrupts
1212    writel(0,  &dev->regs->int_signal_en);
1213    writel(0xffffffff, & dev->regs->int_status);
1214
1215#ifdef ENABLE_POLLING
1216    SDHCI_INFO("Interrupts Disabled! Polling Mode Active\n");
1217#else
1218    SDHCI_INFO("Interrupts Enabled\n");
1219#endif
1220
1221    device_add_args_t args = {
1222        .version = DEVICE_ADD_ARGS_VERSION,
1223        .name = "imx-sdhci",
1224        .ctx = dev,
1225        .ops = &imx_sdhci_device_proto,
1226        .proto_id = ZX_PROTOCOL_SDMMC,
1227        .proto_ops = &sdmmc_proto,
1228    };
1229
1230    status = device_add(parent, &args, &dev->zxdev);
1231    if (status != ZX_OK) {
1232        SDHCI_ERROR("device_add failed %d\n", status);
1233        goto fail;
1234    }
1235
1236    return ZX_OK;
1237
1238fail:
1239    imx_sdhci_release(dev);
1240    return status;
1241}
1242
// Driver entry points: only bind is implemented.
static zx_driver_ops_t imx_sdhci_driver_ops = {
    .version = DRIVER_OPS_VERSION,
    .bind = imx_sdhci_bind,
};
1247
// Bind rules: match the NXP i.MX SDHCI platform device on the i.MX8M EVK.
ZIRCON_DRIVER_BEGIN(imx_sdhci, imx_sdhci_driver_ops, "zircon", "0.1", 4)
    BI_ABORT_IF(NE, BIND_PROTOCOL, ZX_PROTOCOL_PLATFORM_DEV),
    BI_ABORT_IF(NE, BIND_PLATFORM_DEV_VID, PDEV_VID_NXP),
    BI_ABORT_IF(NE, BIND_PLATFORM_DEV_DID, PDEV_DID_IMX_SDHCI),
    BI_MATCH_IF(EQ, BIND_PLATFORM_DEV_PID, PDEV_PID_IMX8MEVK),
ZIRCON_DRIVER_END(imx_sdhci)
1254