1// Copyright 2017 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5// Standard Includes
6#include <endian.h>
7#include <inttypes.h>
8#include <stdio.h>
9#include <stdlib.h>
10#include <string.h>
11
12#include <ddk/protocol/sdmmc.h>
13#include <ddk/debug.h>
14#include <hw/sdio.h>
15
16#include <pretty/hexdump.h>
17
18#include "sdmmc.h"
19
// Builds a command argument carrying the card's relative card address (RCA)
// in bits [31:16]. The cast avoids undefined behavior: uint16_t promotes to
// signed int, and left-shifting a value with bit 15 set by 16 overflows int.
#define RCA_ARG(dev) ((uint32_t)((dev)->rca) << 16)
21
22zx_status_t sdmmc_request_helper(sdmmc_device_t* dev, sdmmc_req_t* req,
23                                 uint8_t retries, uint32_t wait_time) {
24    zx_status_t st;
25    while (((st = sdmmc_request(&dev->host, req)) != ZX_OK) && retries > 0) {
26        retries--;
27        zx_nanosleep(zx_deadline_after(ZX_MSEC(wait_time)));
28    }
29    return st;
30}
31
32// SD/MMC shared ops
33
34zx_status_t sdmmc_go_idle(sdmmc_device_t* dev) {
35    sdmmc_req_t req = {
36        .cmd_idx = SDMMC_GO_IDLE_STATE,
37        .arg = 0,
38        .cmd_flags = SDMMC_GO_IDLE_STATE_FLAGS,
39        .use_dma = sdmmc_use_dma(dev),
40    };
41    return sdmmc_request(&dev->host, &req);
42}
43
44zx_status_t sdmmc_send_status(sdmmc_device_t* dev, uint32_t* response) {
45    sdmmc_req_t req = {
46        .cmd_idx = SDMMC_SEND_STATUS,
47        .arg = RCA_ARG(dev),
48        .cmd_flags = SDMMC_SEND_STATUS_FLAGS,
49        .use_dma = sdmmc_use_dma(dev),
50    };
51    zx_status_t st = sdmmc_request(&dev->host, &req);
52    if (st == ZX_OK) {
53        *response = req.response[0];
54    }
55    return st;
56}
57
58zx_status_t sdmmc_stop_transmission(sdmmc_device_t* dev) {
59    sdmmc_req_t req = {
60        .cmd_idx = SDMMC_STOP_TRANSMISSION,
61        .arg = 0,
62        .cmd_flags = SDMMC_STOP_TRANSMISSION_FLAGS,
63        .use_dma = sdmmc_use_dma(dev),
64    };
65    return sdmmc_request(&dev->host, &req);
66}
67
68// SD ops
69
70zx_status_t sd_send_if_cond(sdmmc_device_t* dev) {
71    // TODO what is this parameter?
72    uint32_t arg = 0x1aa;
73    sdmmc_req_t req = {
74        .cmd_idx = SD_SEND_IF_COND,
75        .arg = arg,
76        .cmd_flags = SD_SEND_IF_COND_FLAGS,
77        .use_dma = sdmmc_use_dma(dev),
78    };
79    zx_status_t st = sdmmc_request(&dev->host, &req);
80    if (st != ZX_OK) {
81        zxlogf(TRACE, "sd: SD_SEND_IF_COND failed, retcode = %d\n", st);
82        return st;
83    }
84    if ((req.response[0] & 0xfff) != arg) {
85        // The card should have replied with the pattern that we sent.
86        zxlogf(TRACE, "sd: SDMMC_SEND_IF_COND got bad reply = %"PRIu32"\n",
87               req.response[0]);
88        return ZX_ERR_BAD_STATE;
89    } else {
90        return ZX_OK;
91    }
92}
93
94zx_status_t sd_send_relative_addr(sdmmc_device_t* dev, uint16_t *rca) {
95    sdmmc_req_t req = {
96        .cmd_idx = SD_SEND_RELATIVE_ADDR,
97        .arg = 0,
98        .cmd_flags = SD_SEND_RELATIVE_ADDR_FLAGS,
99        .use_dma = sdmmc_use_dma(dev),
100    };
101
102    zx_status_t st = sdmmc_request(&dev->host, &req);
103    if (st != ZX_OK) {
104        zxlogf(TRACE, "sd: SD_SEND_RELATIVE_ADDR failed, retcode = %d\n", st);
105        return st;
106    }
107
108    if (rca != NULL) {
109        *rca = (req.response[0]) >> 16;
110    }
111    return st;
112}
113
114zx_status_t sd_switch_uhs_voltage(sdmmc_device_t *dev, uint32_t ocr) {
115    zx_status_t st = ZX_OK;
116    sdmmc_req_t req = {
117        .cmd_idx = SD_VOLTAGE_SWITCH,
118        .arg = ocr,
119        .cmd_flags = SD_VOLTAGE_SWITCH_FLAGS,
120        .use_dma = sdmmc_use_dma(dev),
121    };
122
123    if (dev->signal_voltage == SDMMC_VOLTAGE_180) {
124        return ZX_OK;
125    }
126
127    st = sdmmc_request(&dev->host, &req);
128    if (st != ZX_OK) {
129        zxlogf(TRACE, "sd: SD_VOLTAGE_SWITCH failed, retcode = %d\n", st);
130        return st;
131    }
132    zx_nanosleep(zx_deadline_after(ZX_MSEC(20)));
133    //TODO: clock gating while switching voltage
134    st = sdmmc_set_signal_voltage(&dev->host, SDMMC_VOLTAGE_180);
135    if (st != ZX_OK) {
136        zxlogf(TRACE, "sd: SD_VOLTAGE_SWITCH failed, retcode = %d\n", st);
137        return st;
138    }
139    return ZX_OK;
140}
141
142// SDIO specific ops
143
144zx_status_t sdio_send_op_cond(sdmmc_device_t* dev, uint32_t ocr, uint32_t* rocr) {
145    zx_status_t st = ZX_OK;
146    sdmmc_req_t req = {
147        .cmd_idx = SDIO_SEND_OP_COND,
148        .arg = ocr,
149        .cmd_flags = SDIO_SEND_OP_COND_FLAGS,
150        .use_dma = sdmmc_use_dma(dev),
151    };
152    for (size_t i = 0; i < 100; i++) {
153        if ((st = sdmmc_request_helper(dev, &req, 3, 10)) != ZX_OK) {
154            // fail on request error
155            break;
156        }
157        // No need to wait for busy clear if probing
158        if ((ocr == 0) || (req.response[0] & MMC_OCR_BUSY)) {
159            *rocr = req.response[0];
160            break;
161        }
162        zx_nanosleep(zx_deadline_after(ZX_MSEC(10)));
163    }
164    return st;
165}
166
// Executes CMD52 (IO_RW_DIRECT): a single-byte register access on SDIO
// function |fn_idx|.
//
// |write|      - true to write |write_byte| to the register, false to read.
// |reg_addr|   - register address within the function's register space.
// |read_byte|  - optional out parameter; on success receives the data byte
//                from the response. For a write, passing a non-NULL
//                |read_byte| also sets the RAW (read-after-write) flag, so
//                the returned byte reflects the register's post-write value.
zx_status_t sdio_io_rw_direct(sdmmc_device_t* dev, bool write, uint32_t fn_idx,
                              uint32_t reg_addr, uint8_t write_byte, uint8_t *read_byte) {
    uint32_t cmd_arg = 0;
    if (write) {
        cmd_arg |= SDIO_IO_RW_DIRECT_RW_FLAG;
        if (read_byte) {
            // Caller wants the register value back after the write (RAW).
            cmd_arg |= SDIO_IO_RW_DIRECT_RAW_FLAG;
        }
    }
    // Pack the function number, register address, and write data into the
    // 32-bit command argument.
    update_bits(&cmd_arg, SDIO_IO_RW_DIRECT_FN_IDX_MASK, SDIO_IO_RW_DIRECT_FN_IDX_LOC,
                fn_idx);
    update_bits(&cmd_arg, SDIO_IO_RW_DIRECT_REG_ADDR_MASK, SDIO_IO_RW_DIRECT_REG_ADDR_LOC,
                reg_addr);
    update_bits(&cmd_arg, SDIO_IO_RW_DIRECT_WRITE_BYTE_MASK, SDIO_IO_RW_DIRECT_WRITE_BYTE_LOC,
                write_byte);
    sdmmc_req_t req = {
        .cmd_idx = SDIO_IO_RW_DIRECT,
        .arg = cmd_arg,
        .cmd_flags = SDIO_IO_RW_DIRECT_FLAGS,
        .use_dma = sdmmc_use_dma(dev),
    };
    zx_status_t st = sdmmc_request(&dev->host, &req);
    if (st != ZX_OK) {
        zxlogf(ERROR, "sdio: SDIO_IO_RW_DIRECT failed, retcode = %d\n", st);
        return st;
    }
    if (read_byte) {
        // Extract the data byte from the R5 response.
        *read_byte = get_bits(req.response[0], SDIO_IO_RW_DIRECT_RESP_READ_BYTE_MASK,
                              SDIO_IO_RW_DIRECT_RESP_READ_BYTE_LOC);
    }
    return ZX_OK;
}
199
// Executes CMD53 (IO_RW_EXTENDED): a multi-byte or multi-block data transfer
// to/from SDIO function |fn_idx|.
//
// |incr|       - if true, the card auto-increments the register address
//                (OP_CODE bit); otherwise all bytes target |reg_addr|.
// |blk_count|  - number of blocks; > 1 selects block mode (requires the
//                card's SDIO_CARD_MULTI_BLOCK capability), otherwise a
//                single byte-mode transfer of |blk_size| bytes is issued.
// |use_dma|    - if true, data is described by |dma_vmo| + |buf_offset|;
//                otherwise it is read/written at |buf| + |buf_offset|.
zx_status_t sdio_io_rw_extended(sdmmc_device_t *dev, bool write, uint32_t fn_idx,
                                uint32_t reg_addr, bool incr, uint32_t blk_count,
                                uint32_t blk_size,  bool use_dma, uint8_t *buf,
                                zx_handle_t dma_vmo, uint64_t buf_offset) {

    uint32_t cmd_arg = 0;
    if (write) {
        cmd_arg |= SDIO_IO_RW_EXTD_RW_FLAG;
    }
    // Pack the function number and register address into the command arg.
    update_bits(&cmd_arg, SDIO_IO_RW_EXTD_FN_IDX_MASK, SDIO_IO_RW_EXTD_FN_IDX_LOC,
                fn_idx);
    update_bits(&cmd_arg, SDIO_IO_RW_EXTD_REG_ADDR_MASK, SDIO_IO_RW_EXTD_REG_ADDR_LOC,
                reg_addr);
    if (incr) {
        cmd_arg |= SDIO_IO_RW_EXTD_OP_CODE_INCR;
    }

    if (blk_count > 1) {
        if (dev->sdio_dev.hw_info.caps & SDIO_CARD_MULTI_BLOCK) {
            // Block mode: the count field holds the number of blocks.
            cmd_arg |= SDIO_IO_RW_EXTD_BLOCK_MODE;
            update_bits(&cmd_arg, SDIO_IO_RW_EXTD_BYTE_BLK_COUNT_MASK,
                        SDIO_IO_RW_EXTD_BYTE_BLK_COUNT_LOC, blk_count);
        } else {
            // TODO: convert the request into multiple byte-mode transfers?
            return ZX_ERR_NOT_SUPPORTED;
        }
    } else {
        // Byte mode: per SDIO Spec Table 5-3, a count of 0 encodes 512 bytes.
        uint32_t arg_blk_size = (blk_size == 512) ? 0 : blk_size;
        update_bits(&cmd_arg, SDIO_IO_RW_EXTD_BYTE_BLK_COUNT_MASK,
                    SDIO_IO_RW_EXTD_BYTE_BLK_COUNT_LOC, arg_blk_size);
    }
    sdmmc_req_t req = {
        .cmd_idx = SDIO_IO_RW_DIRECT_EXTENDED,
        .arg = cmd_arg,
        // Reads additionally carry the host-side READ flag.
        .cmd_flags = write ? (SDIO_IO_RW_DIRECT_EXTENDED_FLAGS) :
                    (SDIO_IO_RW_DIRECT_EXTENDED_FLAGS | SDMMC_CMD_READ),
        .blockcount = blk_count,
        .blocksize = blk_size,
    };

    // Attach the data buffer: either a VMO for DMA or a virtual address
    // for PIO. Unset designated-initializer fields above are zero.
    if (use_dma) {
        req.virt = NULL;
        req.dma_vmo = dma_vmo;
        req.buf_offset = buf_offset;
    } else {
        req.virt = buf + buf_offset;
    }
    req.use_dma = use_dma;

    zx_status_t st = sdmmc_request(&dev->host, &req);
    if (st != ZX_OK) {
        zxlogf(ERROR, "sdio: SDIO_IO_RW_DIRECT_EXTENDED failed, retcode = %d\n", st);
        return st;
    }
    return ZX_OK;
}
257
258// MMC ops
259
260zx_status_t mmc_send_op_cond(sdmmc_device_t* dev, uint32_t ocr, uint32_t* rocr) {
261    // Request sector addressing if not probing
262    uint32_t arg = (ocr == 0) ? ocr : ((1 << 30) | ocr);
263    sdmmc_req_t req = {
264        .cmd_idx = MMC_SEND_OP_COND,
265        .arg = arg,
266        .cmd_flags = MMC_SEND_OP_COND_FLAGS,
267        .use_dma = sdmmc_use_dma(dev),
268    };
269    zx_status_t st;
270    for (int i = 100; i; i--) {
271        if ((st = sdmmc_request(&dev->host, &req)) != ZX_OK) {
272            // fail on request error
273            break;
274        }
275        // No need to wait for busy clear if probing
276        if ((arg == 0) || (req.response[0] & MMC_OCR_BUSY)) {
277            *rocr = req.response[0];
278            break;
279        }
280        zx_nanosleep(zx_deadline_after(ZX_MSEC(10)));
281    }
282    return st;
283}
284
285zx_status_t mmc_all_send_cid(sdmmc_device_t* dev, uint32_t cid[4]) {
286    sdmmc_req_t req = {
287        .cmd_idx = SDMMC_ALL_SEND_CID,
288        .arg = 0,
289        .cmd_flags = SDMMC_ALL_SEND_CID_FLAGS,
290        .use_dma = sdmmc_use_dma(dev),
291    };
292    zx_status_t st = sdmmc_request(&dev->host, &req);
293    if (st == ZX_OK) {
294        cid[0] = req.response[0];
295        cid[1] = req.response[1];
296        cid[2] = req.response[2];
297        cid[3] = req.response[3];
298    }
299    return st;
300}
301
302zx_status_t mmc_set_relative_addr(sdmmc_device_t* dev, uint16_t rca) {
303    sdmmc_req_t req = {
304        .cmd_idx = MMC_SET_RELATIVE_ADDR,
305        .arg = (rca << 16),
306        .cmd_flags = MMC_SET_RELATIVE_ADDR_FLAGS,
307        .use_dma = sdmmc_use_dma(dev),
308    };
309    return sdmmc_request(&dev->host, &req);
310}
311
312zx_status_t mmc_send_csd(sdmmc_device_t* dev, uint32_t csd[4]) {
313    sdmmc_req_t req = {
314        .cmd_idx = SDMMC_SEND_CSD,
315        .arg = RCA_ARG(dev),
316        .cmd_flags = SDMMC_SEND_CSD_FLAGS,
317        .use_dma = sdmmc_use_dma(dev),
318    };
319    zx_status_t st = sdmmc_request(&dev->host, &req);
320    if (st == ZX_OK) {
321        csd[0] = req.response[0];
322        csd[1] = req.response[1];
323        csd[2] = req.response[2];
324        csd[3] = req.response[3];
325    }
326    return st;
327}
328
329zx_status_t mmc_send_ext_csd(sdmmc_device_t* dev, uint8_t ext_csd[512]) {
330    // EXT_CSD is send in a data stage
331    sdmmc_req_t req = {
332        .cmd_idx = MMC_SEND_EXT_CSD,
333        .arg = 0,
334        .blockcount = 1,
335        .blocksize = 512,
336        .use_dma = false,
337        .virt = ext_csd,
338        .cmd_flags = MMC_SEND_EXT_CSD_FLAGS,
339    };
340    zx_status_t st = sdmmc_request(&dev->host, &req);
341    if ((st == ZX_OK) && (driver_get_log_flags() & DDK_LOG_SPEW)) {
342        zxlogf(SPEW, "EXT_CSD:\n");
343        hexdump8_ex(ext_csd, 512, 0);
344    }
345    return st;
346}
347
348zx_status_t mmc_select_card(sdmmc_device_t* dev) {
349    sdmmc_req_t req = {
350        .cmd_idx = MMC_SELECT_CARD,
351        .arg = RCA_ARG(dev),
352        .cmd_flags = MMC_SELECT_CARD_FLAGS,
353        .use_dma = sdmmc_use_dma(dev),
354    };
355    return sdmmc_request(&dev->host, &req);
356}
357
358zx_status_t mmc_switch(sdmmc_device_t* dev, uint8_t index, uint8_t value) {
359    // Send the MMC_SWITCH command
360    uint32_t arg = (3 << 24) |  // write byte
361                   (index << 16) | (value << 8);
362    sdmmc_req_t req = {
363        .cmd_idx = MMC_SWITCH,
364        .arg = arg,
365        .cmd_flags = MMC_SWITCH_FLAGS,
366        .use_dma = sdmmc_use_dma(dev),
367    };
368    return sdmmc_request(&dev->host, &req);
369}
370