// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <bits/limits.h>
#include <ddk/binding.h>
#include <ddk/debug.h>
#include <ddk/device.h>
#include <ddk/io-buffer.h>
#include <ddk/metadata.h>
#include <ddk/phys-iter.h>
#include <ddk/protocol/gpio.h>
#include <ddk/protocol/platform-bus.h>
#include <ddk/protocol/platform-defs.h>
#include <ddk/protocol/platform-device.h>
#include <ddk/protocol/sdmmc.h>
#include <hw/reg.h>
#include <hw/sdmmc.h>
#include <lib/sync/completion.h>
#include <soc/aml-common/aml-sd-emmc.h>

#include <zircon/assert.h>
#include <zircon/threads.h>
#include <zircon/types.h>

// Limit maximum number of descriptors to 512 for now
#define AML_DMA_DESC_MAX_COUNT 512
#define AML_SD_EMMC_TRACE(fmt, ...) zxlogf(TRACE, "%s: " fmt, __func__, ##__VA_ARGS__)
#define AML_SD_EMMC_INFO(fmt, ...) zxlogf(INFO, "%s: " fmt, __func__, ##__VA_ARGS__)
#define AML_SD_EMMC_ERROR(fmt, ...) zxlogf(ERROR, "%s: " fmt, __func__, ##__VA_ARGS__)
#define AML_SD_EMMC_COMMAND(c) ((0x80) | (c))
#define PAGE_MASK (PAGE_SIZE - 1ull)

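// Returns ceil(log2(blk_sz)); e.g. log2_ceil(512) == 9 and log2_ceil(513) == 10.
// Used by aml_sd_emmc_setup_data_descs() below to program the CFG_BL_LEN
// field, which encodes the block length as a power of two. Note that
// __builtin_clz() operates on a 32-bit int, so the subtraction is from 32.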
static inline uint8_t log2_ceil(uint16_t blk_sz) {
    if (blk_sz == 1) {
        return 0;
    }
    return (32 - __builtin_clz(blk_sz - 1));
}

typedef struct aml_sd_emmc_t {
    platform_device_protocol_t pdev;
    zx_device_t* zxdev;
    gpio_protocol_t gpio;
    uint32_t gpio_count;
    io_buffer_t mmio;
    // Virtual address of the MMIO region
    aml_sd_emmc_regs_t* regs;
    zx_handle_t irq_handle;
    thrd_t irq_thread;
    zx_handle_t bti;
    io_buffer_t descs_buffer;
    // Held while I/O submit/complete is in progress.
    mtx_t mtx;
    // Controller info
    sdmmc_host_info_t info;
    uint32_t max_freq;
    uint32_t min_freq;
    // Currently pending request
    sdmmc_req_t* cur_req;
    // Signaled when the current request completes
    sync_completion_t req_completion;
} aml_sd_emmc_t;

zx_status_t aml_sd_emmc_request(void* ctx, sdmmc_req_t* req);
static void aml_sd_emmc_dump_clock(uint32_t clock);
static void aml_sd_emmc_dump_cfg(uint32_t cfg);

static void aml_sd_emmc_dump_regs(aml_sd_emmc_t* dev) {
    aml_sd_emmc_regs_t* regs = dev->regs;
    AML_SD_EMMC_TRACE("sd_emmc_clock : 0x%x\n", regs->sd_emmc_clock);
    aml_sd_emmc_dump_clock(regs->sd_emmc_clock);
    AML_SD_EMMC_TRACE("sd_emmc_delay1 : 0x%x\n", regs->sd_emmc_delay1);
    AML_SD_EMMC_TRACE("sd_emmc_delay2 : 0x%x\n", regs->sd_emmc_delay2);
    AML_SD_EMMC_TRACE("sd_emmc_adjust : 0x%x\n", regs->sd_emmc_adjust);
    AML_SD_EMMC_TRACE("sd_emmc_calout : 0x%x\n", regs->sd_emmc_calout);
    AML_SD_EMMC_TRACE("sd_emmc_start : 0x%x\n", regs->sd_emmc_start);
    AML_SD_EMMC_TRACE("sd_emmc_cfg : 0x%x\n", regs->sd_emmc_cfg);
    aml_sd_emmc_dump_cfg(regs->sd_emmc_cfg);
    AML_SD_EMMC_TRACE("sd_emmc_status : 0x%x\n", regs->sd_emmc_status);
    AML_SD_EMMC_TRACE("sd_emmc_irq_en : 0x%x\n", regs->sd_emmc_irq_en);
    AML_SD_EMMC_TRACE("sd_emmc_cmd_cfg : 0x%x\n", regs->sd_emmc_cmd_cfg);
    AML_SD_EMMC_TRACE("sd_emmc_cmd_arg : 0x%x\n", regs->sd_emmc_cmd_arg);
    AML_SD_EMMC_TRACE("sd_emmc_cmd_dat : 0x%x\n", regs->sd_emmc_cmd_dat);
    AML_SD_EMMC_TRACE("sd_emmc_cmd_rsp : 0x%x\n", regs->sd_emmc_cmd_rsp);
    AML_SD_EMMC_TRACE("sd_emmc_cmd_rsp1 : 0x%x\n", regs->sd_emmc_cmd_rsp1);
    AML_SD_EMMC_TRACE("sd_emmc_cmd_rsp2 : 0x%x\n", regs->sd_emmc_cmd_rsp2);
    AML_SD_EMMC_TRACE("sd_emmc_cmd_rsp3 : 0x%x\n", regs->sd_emmc_cmd_rsp3);
    AML_SD_EMMC_TRACE("bus_err : 0x%x\n", regs->bus_err);
    AML_SD_EMMC_TRACE("sd_emmc_curr_cfg: 0x%x\n", regs->sd_emmc_curr_cfg);
    AML_SD_EMMC_TRACE("sd_emmc_curr_arg: 0x%x\n", regs->sd_emmc_curr_arg);
    AML_SD_EMMC_TRACE("sd_emmc_curr_dat: 0x%x\n", regs->sd_emmc_curr_dat);
    AML_SD_EMMC_TRACE("sd_emmc_curr_rsp: 0x%x\n", regs->sd_emmc_curr_rsp);
    AML_SD_EMMC_TRACE("sd_emmc_next_cfg: 0x%x\n", regs->sd_emmc_next_cfg);
    AML_SD_EMMC_TRACE("sd_emmc_next_arg: 0x%x\n", regs->sd_emmc_next_arg);
    AML_SD_EMMC_TRACE("sd_emmc_next_dat: 0x%x\n", regs->sd_emmc_next_dat);
    AML_SD_EMMC_TRACE("sd_emmc_next_rsp: 0x%x\n", regs->sd_emmc_next_rsp);
    AML_SD_EMMC_TRACE("sd_emmc_rxd : 0x%x\n", regs->sd_emmc_rxd);
    AML_SD_EMMC_TRACE("sd_emmc_txd : 0x%x\n", regs->sd_emmc_txd);
    AML_SD_EMMC_TRACE("sramDesc : %p\n", regs->sramDesc);
    AML_SD_EMMC_TRACE("ping : %p\n", regs->ping);
    AML_SD_EMMC_TRACE("pong : %p\n", regs->pong);
}

static void aml_sd_emmc_dump_status(uint32_t status) {
    uint32_t rxd_err = get_bits(status, AML_SD_EMMC_STATUS_RXD_ERR_MASK,
                                AML_SD_EMMC_STATUS_RXD_ERR_LOC);
    AML_SD_EMMC_TRACE("Dumping sd_emmc_status 0x%0x\n", status);
    AML_SD_EMMC_TRACE("    RXD_ERR: %d\n", rxd_err);
    AML_SD_EMMC_TRACE("    TXD_ERR: %d\n", get_bit(status, AML_SD_EMMC_STATUS_TXD_ERR));
    AML_SD_EMMC_TRACE("    DESC_ERR: %d\n", get_bit(status, AML_SD_EMMC_STATUS_DESC_ERR));
    AML_SD_EMMC_TRACE("    RESP_ERR: %d\n", get_bit(status, AML_SD_EMMC_STATUS_RESP_ERR));
    AML_SD_EMMC_TRACE("    RESP_TIMEOUT: %d\n", get_bit(status, AML_SD_EMMC_STATUS_RESP_TIMEOUT));
    AML_SD_EMMC_TRACE("    DESC_TIMEOUT: %d\n", get_bit(status, AML_SD_EMMC_STATUS_DESC_TIMEOUT));
    AML_SD_EMMC_TRACE("    END_OF_CHAIN: %d\n", get_bit(status, AML_SD_EMMC_STATUS_END_OF_CHAIN));
    AML_SD_EMMC_TRACE("    RESP_STATUS: %d\n", get_bit(status, AML_SD_EMMC_STATUS_RESP_STATUS));
    AML_SD_EMMC_TRACE("    IRQ_SDIO: %d\n", get_bit(status, AML_SD_EMMC_STATUS_IRQ_SDIO));
    AML_SD_EMMC_TRACE("    DAT_I: %d\n", get_bits(status, AML_SD_EMMC_STATUS_DAT_I_MASK,
                                                  AML_SD_EMMC_STATUS_DAT_I_LOC));
    AML_SD_EMMC_TRACE("    CMD_I: %d\n", get_bit(status, AML_SD_EMMC_STATUS_CMD_I));
    AML_SD_EMMC_TRACE("    DS: %d\n", get_bit(status, AML_SD_EMMC_STATUS_DS));
    AML_SD_EMMC_TRACE("    BUS_FSM: %d\n", get_bits(status, AML_SD_EMMC_STATUS_BUS_FSM_MASK,
                                                    AML_SD_EMMC_STATUS_BUS_FSM_LOC));
    AML_SD_EMMC_TRACE("    BUS_DESC_BUSY: %d\n", get_bit(status, AML_SD_EMMC_STATUS_BUS_DESC_BUSY));
    AML_SD_EMMC_TRACE("    BUS_CORE_BUSY: %d\n", get_bit(status, AML_SD_EMMC_STATUS_BUS_CORE_BUSY));
}

static void aml_sd_emmc_dump_cfg(uint32_t config) {
    AML_SD_EMMC_TRACE("Dumping sd_emmc_cfg 0x%0x\n", config);
    AML_SD_EMMC_TRACE("    BUS_WIDTH: %d\n", get_bits(config, AML_SD_EMMC_CFG_BUS_WIDTH_MASK,
                                                      AML_SD_EMMC_CFG_BUS_WIDTH_LOC));
    AML_SD_EMMC_TRACE("    DDR: %d\n", get_bit(config, AML_SD_EMMC_CFG_DDR));
    AML_SD_EMMC_TRACE("    DC_UGT: %d\n", get_bit(config, AML_SD_EMMC_CFG_DC_UGT));
    AML_SD_EMMC_TRACE("    BLOCK LEN: %d\n", get_bits(config, AML_SD_EMMC_CFG_BL_LEN_MASK,
                                                      AML_SD_EMMC_CFG_BL_LEN_LOC));
}

static void aml_sd_emmc_dump_clock(uint32_t clock) {
    AML_SD_EMMC_TRACE("Dumping clock 0x%0x\n", clock);
    AML_SD_EMMC_TRACE("   DIV: %d\n", get_bits(clock, AML_SD_EMMC_CLOCK_CFG_DIV_MASK,
                                               AML_SD_EMMC_CLOCK_CFG_DIV_LOC));
    AML_SD_EMMC_TRACE("   SRC: %d\n", get_bits(clock, AML_SD_EMMC_CLOCK_CFG_SRC_MASK,
                                               AML_SD_EMMC_CLOCK_CFG_SRC_LOC));
    AML_SD_EMMC_TRACE("   CORE_PHASE: %d\n", get_bits(clock, AML_SD_EMMC_CLOCK_CFG_CO_PHASE_MASK,
                                                      AML_SD_EMMC_CLOCK_CFG_CO_PHASE_LOC));
    AML_SD_EMMC_TRACE("   TX_PHASE: %d\n", get_bits(clock, AML_SD_EMMC_CLOCK_CFG_TX_PHASE_MASK,
                                                    AML_SD_EMMC_CLOCK_CFG_TX_PHASE_LOC));
    AML_SD_EMMC_TRACE("   RX_PHASE: %d\n", get_bits(clock, AML_SD_EMMC_CLOCK_CFG_RX_PHASE_MASK,
                                                    AML_SD_EMMC_CLOCK_CFG_RX_PHASE_LOC));
    AML_SD_EMMC_TRACE("   TX_DELAY: %d\n", get_bits(clock, AML_SD_EMMC_CLOCK_CFG_TX_DELAY_MASK,
                                                    AML_SD_EMMC_CLOCK_CFG_TX_DELAY_LOC));
    AML_SD_EMMC_TRACE("   RX_DELAY: %d\n", get_bits(clock, AML_SD_EMMC_CLOCK_CFG_RX_DELAY_MASK,
                                                    AML_SD_EMMC_CLOCK_CFG_RX_DELAY_LOC));
    AML_SD_EMMC_TRACE("   ALWAYS_ON: %d\n", get_bit(clock, AML_SD_EMMC_CLOCK_CFG_ALWAYS_ON));
}

static void aml_sd_emmc_dump_desc_cmd_cfg(uint32_t cmd_desc) {
    AML_SD_EMMC_TRACE("Dumping cmd_cfg 0x%0x\n", cmd_desc);
    AML_SD_EMMC_TRACE("   REQ_LEN: %d\n", get_bits(cmd_desc, AML_SD_EMMC_CMD_INFO_LEN_MASK,
                                                   AML_SD_EMMC_CMD_INFO_LEN_LOC));
    AML_SD_EMMC_TRACE("   BLOCK_MODE: %d\n", get_bit(cmd_desc, AML_SD_EMMC_CMD_INFO_BLOCK_MODE));
    AML_SD_EMMC_TRACE("   R1B: %d\n", get_bit(cmd_desc, AML_SD_EMMC_CMD_INFO_R1B));
    AML_SD_EMMC_TRACE("   END_OF_CHAIN: %d\n", get_bit(cmd_desc,
                                                       AML_SD_EMMC_CMD_INFO_END_OF_CHAIN));
    AML_SD_EMMC_TRACE("   TIMEOUT: %d\n", get_bits(cmd_desc, AML_SD_EMMC_CMD_INFO_TIMEOUT_MASK,
                                                   AML_SD_EMMC_CMD_INFO_TIMEOUT_LOC));
    AML_SD_EMMC_TRACE("   NO_RESP: %d\n", get_bit(cmd_desc, AML_SD_EMMC_CMD_INFO_NO_RESP));
    AML_SD_EMMC_TRACE("   NO_CMD: %d\n", get_bit(cmd_desc, AML_SD_EMMC_CMD_INFO_NO_CMD));
    AML_SD_EMMC_TRACE("   DATA_IO: %d\n", get_bit(cmd_desc, AML_SD_EMMC_CMD_INFO_DATA_IO));
    AML_SD_EMMC_TRACE("   DATA_WR: %d\n", get_bit(cmd_desc, AML_SD_EMMC_CMD_INFO_DATA_WR));
    AML_SD_EMMC_TRACE("   RESP_NO_CRC: %d\n", get_bit(cmd_desc, AML_SD_EMMC_CMD_INFO_RESP_NO_CRC));
    AML_SD_EMMC_TRACE("   RESP_128: %d\n", get_bit(cmd_desc, AML_SD_EMMC_CMD_INFO_RESP_128));
    AML_SD_EMMC_TRACE("   RESP_NUM: %d\n", get_bit(cmd_desc, AML_SD_EMMC_CMD_INFO_RESP_NUM));
    AML_SD_EMMC_TRACE("   DATA_NUM: %d\n", get_bit(cmd_desc, AML_SD_EMMC_CMD_INFO_DATA_NUM));
    AML_SD_EMMC_TRACE("   CMD_IDX: %d\n", get_bits(cmd_desc, AML_SD_EMMC_CMD_INFO_CMD_IDX_MASK,
                                                   AML_SD_EMMC_CMD_INFO_CMD_IDX_LOC));
    AML_SD_EMMC_TRACE("   ERROR: %d\n", get_bit(cmd_desc, AML_SD_EMMC_CMD_INFO_ERROR));
    AML_SD_EMMC_TRACE("   OWNER: %d\n", get_bit(cmd_desc, AML_SD_EMMC_CMD_INFO_OWNER));
}

uint32_t get_clk_freq(uint32_t clk_src) {
    if (clk_src == AML_SD_EMMC_FCLK_DIV2_SRC) {
        return AML_SD_EMMC_FCLK_DIV2_FREQ;
    }
    return AML_SD_EMMC_CTS_OSCIN_CLK_FREQ;
}

static void aml_sd_emmc_release(void* ctx) {
    aml_sd_emmc_t* dev = ctx;
    if (dev->irq_handle != ZX_HANDLE_INVALID)
        zx_interrupt_destroy(dev->irq_handle);
    if (dev->irq_thread)
        thrd_join(dev->irq_thread, NULL);
    io_buffer_release(&dev->mmio);
    io_buffer_release(&dev->descs_buffer);
    zx_handle_close(dev->irq_handle);
    zx_handle_close(dev->bti);
    free(dev);
}

static zx_status_t aml_sd_emmc_host_info(void* ctx, sdmmc_host_info_t* info) {
    aml_sd_emmc_t* dev = (aml_sd_emmc_t*)ctx;
    mtx_lock(&dev->mtx);
    memcpy(info, &dev->info, sizeof(dev->info));
    mtx_unlock(&dev->mtx);
    return ZX_OK;
}

static zx_status_t aml_sd_emmc_set_bus_width(void* ctx, uint32_t bw) {
    aml_sd_emmc_t* dev = (aml_sd_emmc_t*)ctx;

    mtx_lock(&dev->mtx);
    aml_sd_emmc_regs_t* regs = dev->regs;
    uint32_t config = regs->sd_emmc_cfg;

    switch (bw) {
    case SDMMC_BUS_WIDTH_1:
        update_bits(&config, AML_SD_EMMC_CFG_BUS_WIDTH_MASK, AML_SD_EMMC_CFG_BUS_WIDTH_LOC,
                    AML_SD_EMMC_CFG_BUS_WIDTH_1BIT);
        break;
    case SDMMC_BUS_WIDTH_4:
        update_bits(&config, AML_SD_EMMC_CFG_BUS_WIDTH_MASK, AML_SD_EMMC_CFG_BUS_WIDTH_LOC,
                    AML_SD_EMMC_CFG_BUS_WIDTH_4BIT);
        break;
    case SDMMC_BUS_WIDTH_8:
        update_bits(&config, AML_SD_EMMC_CFG_BUS_WIDTH_MASK, AML_SD_EMMC_CFG_BUS_WIDTH_LOC,
                    AML_SD_EMMC_CFG_BUS_WIDTH_8BIT);
        break;
    default:
        mtx_unlock(&dev->mtx);
        return ZX_ERR_OUT_OF_RANGE;
    }

    regs->sd_emmc_cfg = config;
    mtx_unlock(&dev->mtx);
    return ZX_OK;
}

static zx_status_t aml_sd_emmc_do_tuning_transfer(aml_sd_emmc_t* dev, uint8_t* tuning_res,
                                                  size_t blk_pattern_size, uint32_t tuning_cmd_idx) {
    sdmmc_req_t tuning_req = {
        .cmd_idx = tuning_cmd_idx,
        .cmd_flags = MMC_SEND_TUNING_BLOCK_FLAGS,
        .arg = 0,
        .blockcount = 1,
        .blocksize = blk_pattern_size,
        .use_dma = false,
        .virt = tuning_res,
    };
    return aml_sd_emmc_request(dev, &tuning_req);
}

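// Programs a candidate adj_delay into the adjust register, then issues
// AML_SD_EMMC_ADJ_DELAY_TEST_ATTEMPTS tuning-block reads. The delay is only
// considered good if every attempt returns the expected pattern.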
static bool aml_sd_emmc_tuning_test_delay(aml_sd_emmc_t* dev, const uint8_t* blk_pattern,
                                          size_t blk_pattern_size, uint32_t adj_delay,
                                          uint32_t tuning_cmd_idx) {
    mtx_lock(&dev->mtx);
    aml_sd_emmc_regs_t* regs = dev->regs;
    uint32_t adjust_reg = regs->sd_emmc_adjust;
    update_bits(&adjust_reg, AML_SD_EMMC_ADJUST_ADJ_DELAY_MASK,
                AML_SD_EMMC_ADJUST_ADJ_DELAY_LOC, adj_delay);
    adjust_reg |= AML_SD_EMMC_ADJUST_ADJ_FIXED;
    adjust_reg &= ~AML_SD_EMMC_ADJUST_CALI_RISE;
    adjust_reg &= ~AML_SD_EMMC_ADJUST_CALI_ENABLE;
    regs->sd_emmc_adjust = adjust_reg;
    mtx_unlock(&dev->mtx);

    zx_status_t status = ZX_OK;
    size_t n;
    for (n = 0; n < AML_SD_EMMC_ADJ_DELAY_TEST_ATTEMPTS; n++) {
        uint8_t tuning_res[512] = {0};
        status = aml_sd_emmc_do_tuning_transfer(dev, tuning_res, blk_pattern_size, tuning_cmd_idx);
        if (status != ZX_OK || memcmp(blk_pattern, tuning_res, blk_pattern_size)) {
            break;
        }
    }
    return (n == AML_SD_EMMC_ADJ_DELAY_TEST_ATTEMPTS);
}

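// Scans every adj_delay value in [0, cur_clk_div) and tracks the largest
// contiguous window of passing delays. Since adj_delay wraps around at
// cur_clk_div, a passing window that runs to the end of the range is merged
// with one that starts at delay 0.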
static zx_status_t aml_sd_emmc_tuning_calculate_best_window(aml_sd_emmc_t* dev,
                                                            const uint8_t* tuning_blk,
                                                            size_t tuning_blk_size,
                                                            uint32_t cur_clk_div, int* best_start,
                                                            uint32_t* best_size,
                                                            uint32_t tuning_cmd_idx) {
    int cur_win_start = -1, best_win_start = -1;
    uint32_t cycle_begin_win_size = 0, cur_win_size = 0, best_win_size = 0;

    for (uint32_t adj_delay = 0; adj_delay < cur_clk_div; adj_delay++) {
        if (aml_sd_emmc_tuning_test_delay(dev, tuning_blk, tuning_blk_size, adj_delay,
                                          tuning_cmd_idx)) {
            if (cur_win_start < 0) {
                cur_win_start = adj_delay;
            }
            cur_win_size++;
        } else {
            if (cur_win_start >= 0) {
                if (best_win_start < 0) {
                    best_win_start = cur_win_start;
                    best_win_size = cur_win_size;
                } else if (best_win_size < cur_win_size) {
                    best_win_start = cur_win_start;
                    best_win_size = cur_win_size;
                }
                if (cur_win_start == 0) {
                    cycle_begin_win_size = cur_win_size;
                }
                cur_win_start = -1;
                cur_win_size = 0;
            }
        }
    }
    // The scan ended inside a passing window, so close out that window here.
    if (cur_win_start >= 0) {
        if (best_win_start < 0) {
            best_win_start = cur_win_start;
            best_win_size = cur_win_size;
        } else if (cycle_begin_win_size > 0) {
            // Combine the cur window with the window starting next cycle
            if (cur_win_size + cycle_begin_win_size > best_win_size) {
                best_win_start = cur_win_start;
                best_win_size = cur_win_size + cycle_begin_win_size;
            }
        } else if (best_win_size < cur_win_size) {
            best_win_start = cur_win_start;
            best_win_size = cur_win_size;
        }
    }

    *best_start = best_win_start;
    *best_size = best_win_size;
    return ZX_OK;
}

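// Tuning flow: select the tuning-block pattern for the current bus width,
// scan for the widest passing adj_delay window at the current clock divider,
// and, if no window is found, raise the divider (lowering the clock) and
// retry up to AML_SD_EMMC_MAX_TUNING_TRIES times. The final adj_delay is the
// midpoint of the best window, modulo the divider.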
static zx_status_t aml_sd_emmc_perform_tuning(void* ctx, uint32_t tuning_cmd_idx) {
    aml_sd_emmc_t* dev = (aml_sd_emmc_t*)ctx;
    mtx_lock(&dev->mtx);

    aml_sd_emmc_regs_t* regs = dev->regs;
    const uint8_t* tuning_blk;
    size_t tuning_blk_size;
    int best_win_start = -1;
    uint32_t best_win_size = 0;
    uint32_t tries = 0;

    uint32_t config = regs->sd_emmc_cfg;
    uint32_t bw = get_bits(config, AML_SD_EMMC_CFG_BUS_WIDTH_MASK, AML_SD_EMMC_CFG_BUS_WIDTH_LOC);
    if (bw == AML_SD_EMMC_CFG_BUS_WIDTH_4BIT) {
        tuning_blk = aml_sd_emmc_tuning_blk_pattern_4bit;
        tuning_blk_size = sizeof(aml_sd_emmc_tuning_blk_pattern_4bit);
    } else if (bw == AML_SD_EMMC_CFG_BUS_WIDTH_8BIT) {
        tuning_blk = aml_sd_emmc_tuning_blk_pattern_8bit;
        tuning_blk_size = sizeof(aml_sd_emmc_tuning_blk_pattern_8bit);
    } else {
        zxlogf(ERROR, "aml_sd_emmc_perform_tuning: Tuning attempted at unsupported bus width %d\n",
               bw);
        mtx_unlock(&dev->mtx);
        return ZX_ERR_INTERNAL;
    }

    uint32_t clk_val, clk_div;
    clk_val = regs->sd_emmc_clock;
    clk_div = get_bits(clk_val, AML_SD_EMMC_CLOCK_CFG_DIV_MASK, AML_SD_EMMC_CLOCK_CFG_DIV_LOC);
    mtx_unlock(&dev->mtx);

    do {
        aml_sd_emmc_tuning_calculate_best_window(dev, tuning_blk, tuning_blk_size,
                                                 clk_div, &best_win_start, &best_win_size,
                                                 tuning_cmd_idx);
        if (best_win_size == 0) {
            // Lower the frequency and try again
            zxlogf(INFO, "Tuning failed. Reducing the frequency and trying again\n");
            mtx_lock(&dev->mtx);
            clk_val = regs->sd_emmc_clock;
            clk_div = get_bits(clk_val, AML_SD_EMMC_CLOCK_CFG_DIV_MASK,
                               AML_SD_EMMC_CLOCK_CFG_DIV_LOC);
            clk_div += 2;
            if (clk_div > (AML_SD_EMMC_CLOCK_CFG_DIV_MASK >> AML_SD_EMMC_CLOCK_CFG_DIV_LOC)) {
                clk_div = AML_SD_EMMC_CLOCK_CFG_DIV_MASK >> AML_SD_EMMC_CLOCK_CFG_DIV_LOC;
            }
            update_bits(&clk_val, AML_SD_EMMC_CLOCK_CFG_DIV_MASK, AML_SD_EMMC_CLOCK_CFG_DIV_LOC,
                        clk_div);
            regs->sd_emmc_clock = clk_val;
            uint32_t clk_src = get_bits(clk_val, AML_SD_EMMC_CLOCK_CFG_SRC_MASK,
                                        AML_SD_EMMC_CLOCK_CFG_SRC_LOC);
            uint32_t cur_freq = (get_clk_freq(clk_src)) / clk_div;
            if (dev->max_freq > cur_freq) {
                // Update max freq accordingly
                dev->max_freq = cur_freq;
            }
            mtx_unlock(&dev->mtx);
        }
    } while (best_win_size == 0 && ++tries < AML_SD_EMMC_MAX_TUNING_TRIES);

    if (best_win_size == 0) {
        zxlogf(ERROR, "aml_sd_emmc_perform_tuning: Tuning failed\n");
        return ZX_ERR_IO;
    }

    mtx_lock(&dev->mtx);
    uint32_t best_adj_delay = 0;
    uint32_t adjust_reg = regs->sd_emmc_adjust;

    clk_val = regs->sd_emmc_clock;
    clk_div = get_bits(clk_val, AML_SD_EMMC_CLOCK_CFG_DIV_MASK, AML_SD_EMMC_CLOCK_CFG_DIV_LOC);
    if (best_win_size != clk_div) {
        best_adj_delay = best_win_start + ((best_win_size - 1) / 2) + ((best_win_size - 1) % 2);
        best_adj_delay = best_adj_delay % clk_div;
    }
    update_bits(&adjust_reg, AML_SD_EMMC_ADJUST_ADJ_DELAY_MASK, AML_SD_EMMC_ADJUST_ADJ_DELAY_LOC,
                best_adj_delay);
    adjust_reg |= AML_SD_EMMC_ADJUST_ADJ_FIXED;
    adjust_reg &= ~AML_SD_EMMC_ADJUST_CALI_RISE;
    adjust_reg &= ~AML_SD_EMMC_ADJUST_CALI_ENABLE;
    regs->sd_emmc_adjust = adjust_reg;

    mtx_unlock(&dev->mtx);
    return ZX_OK;
}

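// Selects a clock source and integer divider for the requested frequency.
// The divider is source_freq / freq rounded down, so the programmed clock
// lands at or slightly above the request; e.g. a 24 MHz OSCIN source and a
// 400 kHz request yield a divider of 60 (exactly 400 kHz).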
static zx_status_t aml_sd_emmc_set_bus_freq(void* ctx, uint32_t freq) {
    aml_sd_emmc_t* dev = (aml_sd_emmc_t*)ctx;

    mtx_lock(&dev->mtx);
    aml_sd_emmc_regs_t* regs = dev->regs;
    uint32_t clk = 0, clk_src = 0, clk_div = 0;
    uint32_t clk_val = regs->sd_emmc_clock;

    if (freq == 0) {
        //TODO: Disable clock here
        // Bail out early so the divider computation below cannot divide by zero.
        mtx_unlock(&dev->mtx);
        return ZX_OK;
    } else if (freq > dev->max_freq) {
        freq = dev->max_freq;
    } else if (freq < dev->min_freq) {
        freq = dev->min_freq;
    }
    if (freq < AML_SD_EMMC_FCLK_DIV2_MIN_FREQ) {
        clk_src = AML_SD_EMMC_CTS_OSCIN_CLK_SRC;
        clk = AML_SD_EMMC_CTS_OSCIN_CLK_FREQ;
    } else {
        clk_src = AML_SD_EMMC_FCLK_DIV2_SRC;
        clk = AML_SD_EMMC_FCLK_DIV2_FREQ;
    }
    clk_div = clk / freq;
    update_bits(&clk_val, AML_SD_EMMC_CLOCK_CFG_DIV_MASK, AML_SD_EMMC_CLOCK_CFG_DIV_LOC, clk_div);
    update_bits(&clk_val, AML_SD_EMMC_CLOCK_CFG_SRC_MASK, AML_SD_EMMC_CLOCK_CFG_SRC_LOC, clk_src);
    regs->sd_emmc_clock = clk_val;

    mtx_unlock(&dev->mtx);
    return ZX_OK;
}

static void aml_sd_emmc_init_regs(aml_sd_emmc_t* dev) {
    aml_sd_emmc_regs_t* regs = dev->regs;
    uint32_t config = 0;
    uint32_t clk_val = 0;
    update_bits(&clk_val, AML_SD_EMMC_CLOCK_CFG_CO_PHASE_MASK,
                AML_SD_EMMC_CLOCK_CFG_CO_PHASE_LOC, AML_SD_EMMC_DEFAULT_CLK_CORE_PHASE);
    update_bits(&clk_val, AML_SD_EMMC_CLOCK_CFG_SRC_MASK, AML_SD_EMMC_CLOCK_CFG_SRC_LOC,
                AML_SD_EMMC_DEFAULT_CLK_SRC);
    update_bits(&clk_val, AML_SD_EMMC_CLOCK_CFG_DIV_MASK, AML_SD_EMMC_CLOCK_CFG_DIV_LOC,
                AML_SD_EMMC_DEFAULT_CLK_DIV);
    clk_val |= AML_SD_EMMC_CLOCK_CFG_ALWAYS_ON;
    regs->sd_emmc_clock = clk_val;

    update_bits(&config, AML_SD_EMMC_CFG_BL_LEN_MASK, AML_SD_EMMC_CFG_BL_LEN_LOC,
                AML_SD_EMMC_DEFAULT_BL_LEN);
    update_bits(&config, AML_SD_EMMC_CFG_RESP_TIMEOUT_MASK, AML_SD_EMMC_CFG_RESP_TIMEOUT_LOC,
                AML_SD_EMMC_DEFAULT_RESP_TIMEOUT);
    update_bits(&config, AML_SD_EMMC_CFG_RC_CC_MASK, AML_SD_EMMC_CFG_RC_CC_LOC,
                AML_SD_EMMC_DEFAULT_RC_CC);
    update_bits(&config, AML_SD_EMMC_CFG_BUS_WIDTH_MASK, AML_SD_EMMC_CFG_BUS_WIDTH_LOC,
                AML_SD_EMMC_CFG_BUS_WIDTH_1BIT);

    regs->sd_emmc_cfg = config;
    regs->sd_emmc_status = AML_SD_EMMC_IRQ_ALL_CLEAR;
    regs->sd_emmc_irq_en = AML_SD_EMMC_IRQ_ALL_CLEAR;
}

static void aml_sd_emmc_hw_reset(void* ctx) {
    aml_sd_emmc_t* dev = (aml_sd_emmc_t*)ctx;
    mtx_lock(&dev->mtx);
    gpio_config_out(&dev->gpio, 0);
    usleep(10 * 1000);
    gpio_write(&dev->gpio, 1);
    usleep(10 * 1000);
    aml_sd_emmc_init_regs(dev);
    mtx_unlock(&dev->mtx);
}

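// For the DDR timings (HS400/HSDDR/DDR50) the divider is halved (after being
// rounded up to an even value); presumably the controller divides its clock
// by two internally in DDR mode so that the card clock is unchanged. HS400
// additionally enables the data-strobe check (CHK_DS).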
static zx_status_t aml_sd_emmc_set_bus_timing(void* ctx, sdmmc_timing_t timing) {
    aml_sd_emmc_t* dev = ctx;

    mtx_lock(&dev->mtx);
    aml_sd_emmc_regs_t* regs = dev->regs;
    uint32_t config = regs->sd_emmc_cfg;
    uint32_t clk_val = regs->sd_emmc_clock;

    if (timing == SDMMC_TIMING_HS400 || timing == SDMMC_TIMING_HSDDR ||
        timing == SDMMC_TIMING_DDR50) {
        if (timing == SDMMC_TIMING_HS400) {
            config |= AML_SD_EMMC_CFG_CHK_DS;
        } else {
            config &= ~AML_SD_EMMC_CFG_CHK_DS;
        }
        config |= AML_SD_EMMC_CFG_DDR;
        uint32_t clk_div = get_bits(clk_val, AML_SD_EMMC_CLOCK_CFG_DIV_MASK,
                                    AML_SD_EMMC_CLOCK_CFG_DIV_LOC);
        if (clk_div & 0x01) {
            clk_div++;
        }
        clk_div /= 2;
        update_bits(&clk_val, AML_SD_EMMC_CLOCK_CFG_DIV_MASK, AML_SD_EMMC_CLOCK_CFG_DIV_LOC,
                    clk_div);
    } else {
        config &= ~AML_SD_EMMC_CFG_DDR;
    }

    regs->sd_emmc_cfg = config;
    regs->sd_emmc_clock = clk_val;
    mtx_unlock(&dev->mtx);
    return ZX_OK;
}

static zx_status_t aml_sd_emmc_set_signal_voltage(void* ctx, sdmmc_voltage_t voltage) {
    // The Amlogic controller does not support switching the signal voltage.
    // We do not return an error here since things work fine without switching
    // the voltage.
    return ZX_OK;
}

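// Completion path for aml_sd_emmc_request(): the submit path blocks on
// req_completion, and this thread wakes on the controller interrupt,
// translates the status-register error bits into a zx_status_t, copies out
// the command response (and, for PIO reads, the ping-buffer data), and then
// signals the waiter.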
static int aml_sd_emmc_irq_thread(void* ctx) {
    aml_sd_emmc_t* dev = ctx;
    uint32_t status_irq;

    while (1) {
        zx_status_t status = ZX_OK;
        status = zx_interrupt_wait(dev->irq_handle, NULL);
        if (status != ZX_OK) {
            zxlogf(ERROR, "aml_sd_emmc_irq_thread: zx_interrupt_wait got %d\n", status);
            break;
        }
        mtx_lock(&dev->mtx);
        aml_sd_emmc_regs_t* regs = dev->regs;
        sdmmc_req_t* req = dev->cur_req;

        if (req == NULL) {
            status = ZX_ERR_IO_INVALID;
            zxlogf(ERROR, "aml_sd_emmc_irq_thread: Got a spurious interrupt\n");
            //TODO(ravoorir): Do some error recovery here and continue instead
            // of breaking.
            mtx_unlock(&dev->mtx);
            break;
        }

        status_irq = regs->sd_emmc_status;
        if (!(status_irq & AML_SD_EMMC_STATUS_END_OF_CHAIN)) {
            status = ZX_ERR_IO_INVALID;
            zxlogf(ERROR, "aml_sd_emmc_irq_thread: END OF CHAIN bit is not set\n");
            goto complete;
        }

        uint32_t rxd_err = get_bits(status_irq, AML_SD_EMMC_STATUS_RXD_ERR_MASK,
                                    AML_SD_EMMC_STATUS_RXD_ERR_LOC);
        if (rxd_err) {
            AML_SD_EMMC_ERROR("RX Data CRC Error cmd%d, status=0x%x, RXD_ERR:%d\n", req->cmd_idx,
                              status_irq, rxd_err);
            status = ZX_ERR_IO_DATA_INTEGRITY;
            goto complete;
        }
        if (status_irq & AML_SD_EMMC_STATUS_TXD_ERR) {
            AML_SD_EMMC_ERROR("TX Data CRC Error, cmd%d, status=0x%x TXD_ERR\n", req->cmd_idx,
                              status_irq);
            status = ZX_ERR_IO_DATA_INTEGRITY;
            goto complete;
        }
        if (status_irq & AML_SD_EMMC_STATUS_DESC_ERR) {
            AML_SD_EMMC_ERROR("Controller does not own the descriptor, cmd%d, status=0x%x\n",
                              req->cmd_idx, status_irq);
            status = ZX_ERR_IO_INVALID;
            goto complete;
        }
        if (status_irq & AML_SD_EMMC_STATUS_RESP_ERR) {
            AML_SD_EMMC_ERROR("Response CRC Error, cmd%d, status=0x%x\n", req->cmd_idx, status_irq);
            status = ZX_ERR_IO_DATA_INTEGRITY;
            goto complete;
        }
        if (status_irq & AML_SD_EMMC_STATUS_RESP_TIMEOUT) {
            AML_SD_EMMC_ERROR("No response received before time limit, cmd%d, status=0x%x\n",
                              req->cmd_idx, status_irq);
            status = ZX_ERR_TIMED_OUT;
            goto complete;
        }
        if (status_irq & AML_SD_EMMC_STATUS_DESC_TIMEOUT) {
            AML_SD_EMMC_ERROR("Descriptor execution timed out, cmd%d, status=0x%x\n", req->cmd_idx,
                              status_irq);
            status = ZX_ERR_TIMED_OUT;
            goto complete;
        }

        if (req->cmd_flags & SDMMC_RESP_LEN_136) {
            req->response[0] = regs->sd_emmc_cmd_rsp;
            req->response[1] = regs->sd_emmc_cmd_rsp1;
            req->response[2] = regs->sd_emmc_cmd_rsp2;
            req->response[3] = regs->sd_emmc_cmd_rsp3;
        } else {
            req->response[0] = regs->sd_emmc_cmd_rsp;
        }
        if ((!req->use_dma) && (req->cmd_flags & SDMMC_CMD_READ)) {
            uint32_t length = req->blockcount * req->blocksize;
            if (length == 0 || ((length % 4) != 0)) {
                status = ZX_ERR_INTERNAL;
                goto complete;
            }
            uint32_t data_copied = 0;
            uint32_t* dest = (uint32_t*)req->virt;
            volatile uint32_t* src = (volatile uint32_t*)(io_buffer_virt(&dev->mmio) +
                                                          AML_SD_EMMC_PING_BUFFER_BASE);
            while (length) {
                *dest++ = *src++;
                length -= 4;
                data_copied += 4;
            }
        }

    complete:
        req->status = status;
        regs->sd_emmc_status = AML_SD_EMMC_IRQ_ALL_CLEAR;
        dev->cur_req = NULL;
        sync_completion_signal(&dev->req_completion);
        mtx_unlock(&dev->mtx);
    }
    return 0;
}

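// Builds the leading command descriptor for a request. For DMA requests the
// descriptor chain lives in the contiguous descs_buffer; for PIO it is
// written directly into the controller's SRAM descriptor area.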
static void aml_sd_emmc_setup_cmd_desc(aml_sd_emmc_t* dev, sdmmc_req_t* req,
                                       aml_sd_emmc_desc_t** out_desc) {
    aml_sd_emmc_desc_t* desc;
    if (req->use_dma) {
        ZX_DEBUG_ASSERT((dev->info.caps & SDMMC_HOST_CAP_ADMA2));
        desc = (aml_sd_emmc_desc_t*)io_buffer_virt(&dev->descs_buffer);
        memset(desc, 0, dev->descs_buffer.size);
    } else {
        desc = (aml_sd_emmc_desc_t*)(io_buffer_virt(&dev->mmio) + AML_SD_EMMC_SRAM_MEMORY_BASE);
    }
    uint32_t cmd_info = 0;
    if (req->cmd_flags == 0) {
        cmd_info |= AML_SD_EMMC_CMD_INFO_NO_RESP;
    } else {
        if (req->cmd_flags & SDMMC_RESP_LEN_136) {
            cmd_info |= AML_SD_EMMC_CMD_INFO_RESP_128;
        }

        if (!(req->cmd_flags & SDMMC_RESP_CRC_CHECK)) {
            cmd_info |= AML_SD_EMMC_CMD_INFO_RESP_NO_CRC;
        }

        if (req->cmd_flags & SDMMC_RESP_LEN_48B) {
            cmd_info |= AML_SD_EMMC_CMD_INFO_R1B;
        }

        cmd_info |= AML_SD_EMMC_CMD_INFO_RESP_NUM;
    }
    update_bits(&cmd_info, AML_SD_EMMC_CMD_INFO_CMD_IDX_MASK, AML_SD_EMMC_CMD_INFO_CMD_IDX_LOC,
                AML_SD_EMMC_COMMAND(req->cmd_idx));
    update_bits(&cmd_info, AML_SD_EMMC_CMD_INFO_TIMEOUT_MASK, AML_SD_EMMC_CMD_INFO_TIMEOUT_LOC,
                AML_SD_EMMC_DEFAULT_CMD_TIMEOUT);
    cmd_info &= ~AML_SD_EMMC_CMD_INFO_ERROR;
    cmd_info |= AML_SD_EMMC_CMD_INFO_OWNER;
    cmd_info &= ~AML_SD_EMMC_CMD_INFO_END_OF_CHAIN;
    desc->cmd_info = cmd_info;
    desc->cmd_arg = req->arg;
    desc->data_addr = 0;
    desc->resp_addr = 0;
    *out_desc = desc;
}

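// Pins the request's VMO and walks its physical pages with phys_iter,
// emitting one descriptor per contiguous chunk (at most PAGE_SIZE each).
// Descriptors after the first carry NO_RESP | NO_CMD so that the command
// itself is only issued once for the whole chain.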
static zx_status_t aml_sd_emmc_setup_data_descs_dma(aml_sd_emmc_t* dev, sdmmc_req_t* req,
                                                    aml_sd_emmc_desc_t* cur_desc,
                                                    aml_sd_emmc_desc_t** last_desc) {
    uint64_t req_len = req->blockcount * req->blocksize;
    bool is_read = req->cmd_flags & SDMMC_CMD_READ;
    uint64_t pagecount = ((req->buf_offset & PAGE_MASK) + req_len + PAGE_MASK) /
                         PAGE_SIZE;
    if (pagecount > SDMMC_PAGES_COUNT) {
        zxlogf(ERROR, "aml-sd-emmc.c: too many pages %lu vs %lu\n", pagecount, SDMMC_PAGES_COUNT);
        return ZX_ERR_INVALID_ARGS;
    }

    // pin the vmo
    zx_paddr_t phys[SDMMC_PAGES_COUNT];
    zx_handle_t pmt;
    // offset_vmo is converted to bytes by the sdmmc layer
    uint32_t options = is_read ? ZX_BTI_PERM_WRITE : ZX_BTI_PERM_READ;
    zx_status_t st = zx_bti_pin(dev->bti, options, req->dma_vmo,
                                req->buf_offset & ~PAGE_MASK,
                                pagecount * PAGE_SIZE, phys, pagecount, &pmt);
    if (st != ZX_OK) {
        zxlogf(ERROR, "aml-sd-emmc: bti-pin failed with error %d\n", st);
        return st;
    }
    if (is_read) {
        st = zx_vmo_op_range(req->dma_vmo, ZX_VMO_OP_CACHE_CLEAN_INVALIDATE,
                             req->buf_offset, req_len, NULL, 0);
    } else {
        st = zx_vmo_op_range(req->dma_vmo, ZX_VMO_OP_CACHE_CLEAN,
                             req->buf_offset, req_len, NULL, 0);
    }
    if (st != ZX_OK) {
        zxlogf(ERROR, "aml-sd-emmc: cache clean failed with error %d\n", st);
        return st;
    }

    // cache this for zx_pmt_unpin() later
    req->pmt = pmt;

    phys_iter_buffer_t buf = {
        .phys = phys,
        .phys_count = pagecount,
        .length = req_len,
        .vmo_offset = req->buf_offset,
    };
    phys_iter_t iter;
    phys_iter_init(&iter, &buf, PAGE_SIZE);

    int count = 0;
    size_t length;
    zx_paddr_t paddr;
    uint32_t blockcount;
    aml_sd_emmc_desc_t* desc = cur_desc;
    for (;;) {
        length = phys_iter_next(&iter, &paddr);
        if (length == 0) {
            if (desc != io_buffer_virt(&dev->descs_buffer)) {
                desc -= 1;
                *last_desc = desc;
                break;
            } else {
                zxlogf(TRACE, "aml-sd-emmc: empty descriptor list!\n");
                return ZX_ERR_NOT_SUPPORTED;
            }
        } else if (length > PAGE_SIZE) {
            zxlogf(TRACE, "aml-sd-emmc: chunk size %zu exceeds PAGE_SIZE; unsupported\n", length);
            return ZX_ERR_NOT_SUPPORTED;
        } else if ((++count) > AML_DMA_DESC_MAX_COUNT) {
            zxlogf(TRACE, "aml-sd-emmc: request with more than %d chunks is unsupported\n",
                   AML_DMA_DESC_MAX_COUNT);
            return ZX_ERR_NOT_SUPPORTED;
        }
        if (count > 1) {
            desc->cmd_info |= AML_SD_EMMC_CMD_INFO_NO_RESP;
            desc->cmd_info |= AML_SD_EMMC_CMD_INFO_NO_CMD;
        }

        desc->cmd_info |= AML_SD_EMMC_CMD_INFO_DATA_IO;
        if (!(req->cmd_flags & SDMMC_CMD_READ)) {
            desc->cmd_info |= AML_SD_EMMC_CMD_INFO_DATA_WR;
        }
        desc->cmd_info |= AML_SD_EMMC_CMD_INFO_OWNER;
        update_bits(&desc->cmd_info, AML_SD_EMMC_CMD_INFO_TIMEOUT_MASK,
                    AML_SD_EMMC_CMD_INFO_TIMEOUT_LOC, AML_SD_EMMC_DEFAULT_CMD_TIMEOUT);
        desc->cmd_info &= ~AML_SD_EMMC_CMD_INFO_ERROR;

        uint32_t blocksize = req->blocksize;
        blockcount = length / blocksize;
        ZX_DEBUG_ASSERT(((length % blocksize) == 0));

        if (blockcount > 1) {
            desc->cmd_info |= AML_SD_EMMC_CMD_INFO_BLOCK_MODE;
            update_bits(&desc->cmd_info, AML_SD_EMMC_CMD_INFO_LEN_MASK,
                        AML_SD_EMMC_CMD_INFO_LEN_LOC, blockcount);
        } else {
            update_bits(&desc->cmd_info, AML_SD_EMMC_CMD_INFO_LEN_MASK,
                        AML_SD_EMMC_CMD_INFO_LEN_LOC, req->blocksize);
        }

        desc->data_addr = (uint32_t)paddr;
        desc += 1;
    }
    return ZX_OK;
}

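// PIO transfers use a single descriptor that targets the controller's SRAM
// ping buffer; bit 0 of data_addr tells the controller that the address is
// in SRAM rather than DDR. Writes are staged into the ping buffer here,
// while reads are copied out of it in the IRQ thread.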
static zx_status_t aml_sd_emmc_setup_data_descs_pio(aml_sd_emmc_t* dev, sdmmc_req_t* req,
                                                    aml_sd_emmc_desc_t* desc,
                                                    aml_sd_emmc_desc_t** last_desc) {
    zx_status_t status = ZX_OK;
    uint32_t length = req->blockcount * req->blocksize;

    if (length > AML_SD_EMMC_MAX_PIO_DATA_SIZE) {
        zxlogf(ERROR, "Request transfer size is greater than the maximum transfer size\n");
        return ZX_ERR_NOT_SUPPORTED;
    }

    if (length == 0 || ((length % 4) != 0)) {
        // Per the Amlogic documentation, the Ping and Pong buffers in SRAM can
        // only be accessed 4 bytes at a time.
        zxlogf(ERROR, "Request sizes that are not a multiple of 4 are not supported in PIO mode\n");
        return ZX_ERR_NOT_SUPPORTED;
    }

    desc->cmd_info |= AML_SD_EMMC_CMD_INFO_DATA_IO;
    if (!(req->cmd_flags & SDMMC_CMD_READ)) {
        desc->cmd_info |= AML_SD_EMMC_CMD_INFO_DATA_WR;
        uint32_t data_copied = 0;
        uint32_t data_remaining = length;
        uint32_t* src = (uint32_t*)req->virt;
        volatile uint32_t* dest = (volatile uint32_t*)(io_buffer_virt(&dev->mmio) +
                                                       AML_SD_EMMC_PING_BUFFER_BASE);
        while (data_remaining) {
            *dest++ = *src++;
            data_remaining -= 4;
            data_copied += 4;
        }
    }

    if (req->blockcount > 1) {
        desc->cmd_info |= AML_SD_EMMC_CMD_INFO_BLOCK_MODE;
        update_bits(&desc->cmd_info, AML_SD_EMMC_CMD_INFO_LEN_MASK,
                    AML_SD_EMMC_CMD_INFO_LEN_LOC, req->blockcount);
    } else {
        update_bits(&desc->cmd_info, AML_SD_EMMC_CMD_INFO_LEN_MASK,
                    AML_SD_EMMC_CMD_INFO_LEN_LOC, req->blocksize);
    }

    // data_addr[0] = 0 for DDR. data_addr[0] = 1 if address is from SRAM
    zx_paddr_t buffer_phys = io_buffer_phys(&dev->mmio) + AML_SD_EMMC_PING_BUFFER_BASE;
    desc->data_addr = (uint32_t)buffer_phys | 1;
    *last_desc = desc;
    return status;
}


static zx_status_t aml_sd_emmc_setup_data_descs(aml_sd_emmc_t* dev, sdmmc_req_t* req,
                                                aml_sd_emmc_desc_t* desc,
                                                aml_sd_emmc_desc_t** last_desc) {
    zx_status_t st = ZX_OK;

    if (!req->blocksize || req->blocksize > AML_SD_EMMC_MAX_BLK_SIZE) {
        return ZX_ERR_NOT_SUPPORTED;
    }

    if (req->use_dma) {
        st = aml_sd_emmc_setup_data_descs_dma(dev, req, desc, last_desc);
        if (st != ZX_OK) {
            return st;
        }
    } else {
        st = aml_sd_emmc_setup_data_descs_pio(dev, req, desc, last_desc);
        if (st != ZX_OK) {
            return st;
        }
    }

    // update config
    uint32_t config = dev->regs->sd_emmc_cfg;
    uint8_t cur_blk_len = get_bits(config, AML_SD_EMMC_CFG_BL_LEN_MASK,
                                   AML_SD_EMMC_CFG_BL_LEN_LOC);
    uint8_t req_blk_len = log2_ceil(req->blocksize);
    if (cur_blk_len != req_blk_len) {
        update_bits(&config, AML_SD_EMMC_CFG_BL_LEN_MASK, AML_SD_EMMC_CFG_BL_LEN_LOC,
                    req_blk_len);
        dev->regs->sd_emmc_cfg = config;
    }
    return ZX_OK;
}

static zx_status_t aml_sd_emmc_finish_req(aml_sd_emmc_t* dev, sdmmc_req_t* req) {
    zx_status_t st = ZX_OK;
    if (req->use_dma && req->pmt != ZX_HANDLE_INVALID) {
        /*
         * Clean and invalidate the cache one more time after the DMA
         * operation because the CPU may have prefetched data while the DMA
         * operation was in progress.
         */
        uint64_t req_len = req->blockcount * req->blocksize;
        if (req->cmd_flags & SDMMC_CMD_READ) {
            st = zx_vmo_op_range(req->dma_vmo, ZX_VMO_OP_CACHE_CLEAN_INVALIDATE,
                                 req->buf_offset, req_len, NULL, 0);
            if (st != ZX_OK) {
                zxlogf(ERROR, "aml-sd-emmc: cache clean failed with error %d\n", st);
            }
        }

        st = zx_pmt_unpin(req->pmt);
        if (st != ZX_OK) {
            zxlogf(ERROR, "aml-sd-emmc: error %d in pmt_unpin\n", st);
        }
        req->pmt = ZX_HANDLE_INVALID;
    }
    return st;
}

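// Submit path: builds the descriptor chain under the lock, marks the last
// descriptor END_OF_CHAIN, programs the start register with the descriptor
// base address (stored shifted right by 2, i.e. in 32-bit-word units) and
// the DESC_BUSY bit, then blocks until the IRQ thread signals completion.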
zx_status_t aml_sd_emmc_request(void* ctx, sdmmc_req_t* req) {
    aml_sd_emmc_t* dev = (aml_sd_emmc_t*)ctx;
    zx_status_t status = ZX_OK;

    mtx_lock(&dev->mtx);
    aml_sd_emmc_regs_t* regs = dev->regs;

    // stop executing
    uint32_t start_reg = regs->sd_emmc_start;
    start_reg &= ~AML_SD_EMMC_START_DESC_BUSY;
    regs->sd_emmc_start = start_reg;
    aml_sd_emmc_desc_t *desc, *last_desc;

    aml_sd_emmc_setup_cmd_desc(dev, req, &desc);
    last_desc = desc;
    if (req->cmd_flags & SDMMC_RESP_DATA_PRESENT) {
        status = aml_sd_emmc_setup_data_descs(dev, req, desc, &last_desc);
        if (status != ZX_OK) {
            zxlogf(ERROR, "aml_sd_emmc_request: Failed to setup data descriptors\n");
            mtx_unlock(&dev->mtx);
            return status;
        }
    }

    last_desc->cmd_info |= AML_SD_EMMC_CMD_INFO_END_OF_CHAIN;
    AML_SD_EMMC_TRACE("SUBMIT req:%p cmd_idx: %d cmd_cfg: 0x%x cmd_dat: 0x%x cmd_arg: 0x%x\n", req,
                      req->cmd_idx, desc->cmd_info, desc->data_addr, desc->cmd_arg);

    dev->cur_req = req;
    zx_paddr_t desc_phys;

    start_reg = regs->sd_emmc_start;
    if (req->use_dma) {
        desc_phys = io_buffer_phys(&dev->descs_buffer);
        io_buffer_cache_flush(&dev->descs_buffer, 0,
                              AML_DMA_DESC_MAX_COUNT * sizeof(aml_sd_emmc_desc_t));
        // Read desc from external DDR
        start_reg &= ~AML_SD_EMMC_START_DESC_INT;
    } else {
        desc_phys = (io_buffer_phys(&dev->mmio)) + AML_SD_EMMC_SRAM_MEMORY_BASE;
        start_reg |= AML_SD_EMMC_START_DESC_INT;
    }

    start_reg |= AML_SD_EMMC_START_DESC_BUSY;
    update_bits(&start_reg, AML_SD_EMMC_START_DESC_ADDR_MASK, AML_SD_EMMC_START_DESC_ADDR_LOC,
                (((uint32_t)desc_phys) >> 2));
    mtx_unlock(&dev->mtx);
    regs->sd_emmc_start = start_reg;

    sync_completion_wait(&dev->req_completion, ZX_TIME_INFINITE);
    aml_sd_emmc_finish_req(dev, req);
    sync_completion_reset(&dev->req_completion);
    return req->status;
}

static zx_protocol_device_t aml_sd_emmc_device_proto = {
    .version = DEVICE_OPS_VERSION,
    .release = aml_sd_emmc_release,
};

static sdmmc_protocol_ops_t aml_sdmmc_proto = {
    .host_info = aml_sd_emmc_host_info,
    .set_signal_voltage = aml_sd_emmc_set_signal_voltage,
    .set_bus_width = aml_sd_emmc_set_bus_width,
    .set_bus_freq = aml_sd_emmc_set_bus_freq,
    .set_timing = aml_sd_emmc_set_bus_timing,
    .hw_reset = aml_sd_emmc_hw_reset,
    .perform_tuning = aml_sd_emmc_perform_tuning,
    .request = aml_sd_emmc_request,
};

static zx_status_t aml_sd_emmc_bind(void* ctx, zx_device_t* parent) {
    aml_sd_emmc_t* dev = calloc(1, sizeof(aml_sd_emmc_t));
    if (!dev) {
        zxlogf(ERROR, "aml_sd_emmc_bind: out of memory\n");
        return ZX_ERR_NO_MEMORY;
    }
    dev->req_completion = SYNC_COMPLETION_INIT;

    zx_status_t status = ZX_OK;
    if ((status = device_get_protocol(parent, ZX_PROTOCOL_PLATFORM_DEV, &dev->pdev)) != ZX_OK) {
        zxlogf(ERROR, "aml_sd_emmc_bind: ZX_PROTOCOL_PLATFORM_DEV not available\n");
        goto fail;
    }

    if ((status = device_get_protocol(parent, ZX_PROTOCOL_GPIO, &dev->gpio)) != ZX_OK) {
        zxlogf(ERROR, "aml_sd_emmc_bind: ZX_PROTOCOL_GPIO not available\n");
        goto fail;
    }

    pdev_device_info_t info;
    status = pdev_get_device_info(&dev->pdev, &info);
    if (status != ZX_OK) {
        zxlogf(ERROR, "aml_sd_emmc_bind: pdev_get_device_info failed\n");
        goto fail;
    }

    if (info.mmio_count != info.irq_count) {
        zxlogf(ERROR, "aml_sd_emmc_bind: mmio_count %u does not match irq_count %u\n",
               info.mmio_count, info.irq_count);
        status = ZX_ERR_INVALID_ARGS;
        goto fail;
    }
    dev->gpio_count = info.gpio_count;

    status = pdev_get_bti(&dev->pdev, 0, &dev->bti);
    if (status != ZX_OK) {
        zxlogf(ERROR, "aml_sd_emmc_bind: pdev_get_bti failed\n");
        goto fail;
    }

    status = pdev_map_mmio_buffer(&dev->pdev, 0, ZX_CACHE_POLICY_UNCACHED_DEVICE, &dev->mmio);
    if (status != ZX_OK) {
        zxlogf(ERROR, "aml_sd_emmc_bind: pdev_map_mmio_buffer failed %d\n", status);
        goto fail;
    }

    status = pdev_map_interrupt(&dev->pdev, 0, &dev->irq_handle);
    if (status != ZX_OK) {
        zxlogf(ERROR, "aml_sd_emmc_bind: pdev_map_interrupt failed %d\n", status);
        goto fail;
    }

    int rc = thrd_create_with_name(&dev->irq_thread, aml_sd_emmc_irq_thread, dev,
                                   "aml_sd_emmc_irq_thread");
    if (rc != thrd_success) {
        zx_handle_close(dev->irq_handle);
        dev->irq_handle = ZX_HANDLE_INVALID;
        status = thrd_status_to_zx_status(rc);
        goto fail;
    }

    dev->info.caps = SDMMC_HOST_CAP_BUS_WIDTH_8 | SDMMC_HOST_CAP_VOLTAGE_330;
    // Populate board specific information
    aml_sd_emmc_config_t dev_config;
    size_t actual;
    status = device_get_metadata(parent, DEVICE_METADATA_PRIVATE,
                                 &dev_config, sizeof(aml_sd_emmc_config_t), &actual);
    if (status != ZX_OK || actual != sizeof(aml_sd_emmc_config_t)) {
        zxlogf(ERROR, "aml_sd_emmc_bind: device_get_metadata failed\n");
        goto fail;
    }
    if (dev_config.supports_dma) {
        dev->info.caps |= SDMMC_HOST_CAP_ADMA2;
    }

    dev->regs = (aml_sd_emmc_regs_t*)io_buffer_virt(&dev->mmio);

    if (dev->info.caps & SDMMC_HOST_CAP_ADMA2) {
        status = io_buffer_init(&dev->descs_buffer, dev->bti,
                                AML_DMA_DESC_MAX_COUNT * sizeof(aml_sd_emmc_desc_t),
                                IO_BUFFER_RW | IO_BUFFER_CONTIG);
        if (status != ZX_OK) {
            zxlogf(ERROR, "aml_sd_emmc_bind: Failed to allocate dma descriptors\n");
            goto fail;
        }
        dev->info.max_transfer_size = AML_DMA_DESC_MAX_COUNT * PAGE_SIZE;
    } else {
        dev->info.max_transfer_size = AML_SD_EMMC_MAX_PIO_DATA_SIZE;
    }
    dev->info.max_transfer_size_non_dma = AML_SD_EMMC_MAX_PIO_DATA_SIZE;

    dev->max_freq = dev_config.max_freq;
    dev->min_freq = dev_config.min_freq;
    // Create the device.
    device_add_args_t args = {
        .version = DEVICE_ADD_ARGS_VERSION,
        .name = "aml-sd-emmc",
        .ctx = dev,
        .ops = &aml_sd_emmc_device_proto,
        .proto_id = ZX_PROTOCOL_SDMMC,
        .proto_ops = &aml_sdmmc_proto,
    };

    // Try pdev_device_add() first, but fall back to device_add()
    // if we weren't configured for platform device children.
    status = pdev_device_add(&dev->pdev, 0, &args, &dev->zxdev);
    if (status != ZX_OK) {
        status = device_add(parent, &args, &dev->zxdev);
    }
    if (status != ZX_OK) {
        goto fail;
    }
    return ZX_OK;
fail:
    aml_sd_emmc_release(dev);
    return status;
}

static zx_driver_ops_t aml_sd_emmc_driver_ops = {
    .version = DRIVER_OPS_VERSION,
    .bind = aml_sd_emmc_bind,
};

ZIRCON_DRIVER_BEGIN(aml_sd_emmc, aml_sd_emmc_driver_ops, "zircon", "0.1", 3)
    BI_ABORT_IF(NE, BIND_PROTOCOL, ZX_PROTOCOL_PLATFORM_DEV),
    BI_ABORT_IF(NE, BIND_PLATFORM_DEV_VID, PDEV_VID_AMLOGIC),
    BI_MATCH_IF(EQ, BIND_PLATFORM_DEV_DID, PDEV_DID_AMLOGIC_SD_EMMC),
ZIRCON_DRIVER_END(aml_sd_emmc)