// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>

#include <bits/limits.h>
#include <ddk/binding.h>
#include <ddk/debug.h>
#include <ddk/device.h>
#include <ddk/mmio-buffer.h>
#include <ddk/io-buffer.h>
#include <ddk/protocol/platform-defs.h>
#include <ddk/protocol/platform-device.h>
#include <ddk/protocol/rawnand.h>
#include <hw/reg.h>

#include <lib/sync/completion.h>
#include <zircon/assert.h>
#include <zircon/status.h>
#include <zircon/threads.h>
#include <zircon/types.h>

#include <string.h>

#include "onfi.h"
#include <soc/aml-common/aml-rawnand.h>
#include "aml-rawnand.h"

static const uint32_t chipsel[2] = {NAND_CE0, NAND_CE1};

struct aml_controller_params aml_params = {
    8, /* Overwritten using BCH setting from page0 */
    2,
    /* The 2 following values are overwritten by page0 contents */
    1,                /* rand-mode is 1 for page0 */
    AML_ECC_BCH60_1K, /* This is the BCH setting for page0 */
};

static void aml_cmd_ctrl(void* ctx,
                         int32_t cmd, uint32_t ctrl);
static uint8_t aml_read_byte(void* ctx);
static zx_status_t aml_nand_init(aml_raw_nand_t* raw_nand);
static const char* aml_ecc_string(uint32_t ecc_mode) {
    const char* s;

    switch (ecc_mode) {
    case AML_ECC_BCH8:
        s = "AML_ECC_BCH8";
        break;
    case AML_ECC_BCH8_1K:
        s = "AML_ECC_BCH8_1K";
        break;
    case AML_ECC_BCH24_1K:
        s = "AML_ECC_BCH24_1K";
        break;
    case AML_ECC_BCH30_1K:
        s = "AML_ECC_BCH30_1K";
        break;
    case AML_ECC_BCH40_1K:
        s = "AML_ECC_BCH40_1K";
        break;
    case AML_ECC_BCH50_1K:
        s = "AML_ECC_BCH50_1K";
        break;
    case AML_ECC_BCH60_1K:
        s = "AML_ECC_BCH60_1K";
        break;
    default:
        s = "BAD ECC Algorithm";
        break;
    }
    return s;
}

uint32_t aml_get_ecc_pagesize(aml_raw_nand_t* raw_nand, uint32_t ecc_mode) {
    uint32_t ecc_page;

    switch (ecc_mode) {
    case AML_ECC_BCH8:
        ecc_page = 512;
        break;
    case AML_ECC_BCH8_1K:
    case AML_ECC_BCH24_1K:
    case AML_ECC_BCH30_1K:
    case AML_ECC_BCH40_1K:
    case AML_ECC_BCH50_1K:
    case AML_ECC_BCH60_1K:
        ecc_page = 1024;
        break;
    default:
        ecc_page = 0;
        break;
    }
    return ecc_page;
}

int aml_get_ecc_strength(uint32_t ecc_mode) {
    int ecc_strength;

    switch (ecc_mode) {
    case AML_ECC_BCH8:
    case AML_ECC_BCH8_1K:
        ecc_strength = 8;
        break;
    case AML_ECC_BCH24_1K:
        ecc_strength = 24;
        break;
    case AML_ECC_BCH30_1K:
        ecc_strength = 30;
        break;
    case AML_ECC_BCH40_1K:
        ecc_strength = 40;
        break;
    case AML_ECC_BCH50_1K:
        ecc_strength = 50;
        break;
    case AML_ECC_BCH60_1K:
        ecc_strength = 60;
        break;
    default:
        ecc_strength = -1;
        break;
    }
    return ecc_strength;
}
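
/*
 * Worked example (a sketch, using the mappings above): a NAND device
 * with a 4096-byte page configured for AML_ECC_BCH8_1K uses 1024-byte
 * ECC pages, so the controller splits each NAND page into
 * 4096 / 1024 = 4 ECC pages, each independently correctable up to the
 * ECC strength of 8 bitflips.
 */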

static void aml_cmd_idle(aml_raw_nand_t* raw_nand, uint32_t time) {
    uint32_t cmd = 0;
    volatile uint8_t* reg = (volatile uint8_t*)
        raw_nand->mmio[NANDREG_WINDOW].vaddr;

    cmd = raw_nand->chip_select | AML_CMD_IDLE | (time & 0x3ff);
    writel(cmd, reg + P_NAND_CMD);
}

static zx_status_t aml_wait_cmd_finish(aml_raw_nand_t* raw_nand,
                                       unsigned int timeout_ms) {
    uint32_t cmd_size = 0;
    zx_status_t ret = ZX_OK;
    uint64_t total_time = 0;
    uint32_t numcmds;
    volatile uint8_t* reg = (volatile uint8_t*)
        raw_nand->mmio[NANDREG_WINDOW].vaddr;

    /* Wait until the cmd FIFO is empty */
    while (true) {
        cmd_size = readl(reg + P_NAND_CMD);
        numcmds = (cmd_size >> 22) & 0x1f;
        if (numcmds == 0)
            break;
        usleep(10);
        total_time += 10;
        if (total_time > (timeout_ms * 1000)) {
            ret = ZX_ERR_TIMED_OUT;
            break;
        }
    }
    if (ret == ZX_ERR_TIMED_OUT)
        zxlogf(ERROR, "timed out waiting for the cmd FIFO to empty\n");
    return ret;
}

static void aml_cmd_seed(aml_raw_nand_t* raw_nand, uint32_t seed) {
    uint32_t cmd;
    volatile uint8_t* reg = (volatile uint8_t*)
        raw_nand->mmio[NANDREG_WINDOW].vaddr;

    cmd = AML_CMD_SEED | (0xc2 + (seed & 0x7fff));
    writel(cmd, reg + P_NAND_CMD);
}

static void aml_cmd_n2m(aml_raw_nand_t* raw_nand, uint32_t ecc_pages,
                        uint32_t ecc_pagesize) {
    uint32_t cmd;
    volatile uint8_t* reg = (volatile uint8_t*)
        raw_nand->mmio[NANDREG_WINDOW].vaddr;

    cmd = CMDRWGEN(AML_CMD_N2M,
                   raw_nand->controller_params.rand_mode,
                   raw_nand->controller_params.bch_mode,
                   0,
                   ecc_pagesize,
                   ecc_pages);
    writel(cmd, reg + P_NAND_CMD);
}

static void aml_cmd_m2n_page0(aml_raw_nand_t* raw_nand) {
    /* TODO */
}

static void aml_cmd_m2n(aml_raw_nand_t* raw_nand, uint32_t ecc_pages,
                        uint32_t ecc_pagesize) {
    uint32_t cmd;
    volatile uint8_t* reg = (volatile uint8_t*)
        raw_nand->mmio[NANDREG_WINDOW].vaddr;

    cmd = CMDRWGEN(AML_CMD_M2N,
                   raw_nand->controller_params.rand_mode,
                   raw_nand->controller_params.bch_mode,
                   0, ecc_pagesize,
                   ecc_pages);
    writel(cmd, reg + P_NAND_CMD);
}

static void aml_cmd_n2m_page0(aml_raw_nand_t* raw_nand) {
    uint32_t cmd;
    volatile uint8_t* reg = (volatile uint8_t*)
        raw_nand->mmio[NANDREG_WINDOW].vaddr;

    /*
     * For page0 reads, we must use AML_ECC_BCH60_1K,
     * and rand-mode == 1.
     */
    cmd = CMDRWGEN(AML_CMD_N2M,
                   1,                /* force rand_mode */
                   AML_ECC_BCH60_1K, /* force bch_mode  */
                   1,                /* shortm == 1     */
                   384 >> 3,
                   1);
    writel(cmd, reg + P_NAND_CMD);
}

static zx_status_t aml_wait_dma_finish(aml_raw_nand_t* raw_nand) {
    aml_cmd_idle(raw_nand, 0);
    aml_cmd_idle(raw_nand, 0);
    return aml_wait_cmd_finish(raw_nand, DMA_BUSY_TIMEOUT);
}

/*
 * Return the aml_info_format struct corresponding to the i'th
 * ECC page. THIS ASSUMES user_mode == 2 (2 OOB bytes per ECC page).
 */
static struct aml_info_format* aml_info_ptr(aml_raw_nand_t* raw_nand,
                                            int i) {
    struct aml_info_format* p;

    p = (struct aml_info_format*)raw_nand->info_buf;
    return &p[i];
}
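
/*
 * For reference, a sketch of the 8-byte per-ECC-page layout the code
 * below relies on (the authoritative definition of aml_info_format
 * lives in aml-rawnand.h; only the fields used in this file are shown):
 *
 *     struct aml_info_format {
 *         uint16_t info_bytes;        // the 2 user/OOB bytes
 *         uint8_t zero_bits;          // zero-bit count, for blank detection
 *         struct {
 *             uint8_t eccerr_cnt : 6; // bitflips corrected in this ECC page
 *             uint8_t notused : 1;
 *             uint8_t completed : 1;  // DMA/ECC done for this ECC page
 *         } ecc;
 *         uint32_t reserved;
 *     };
 */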

/*
 * In the case where user_mode == 2, info_buf contains one aml_info_format
 * struct per ECC page on completion of a read. This 8-byte structure holds
 * the 2 OOB bytes and the ECC/error status.
 */
static zx_status_t aml_get_oob_byte(aml_raw_nand_t* raw_nand,
                                    uint8_t* oob_buf) {
    struct aml_info_format* info;
    int count = 0;
    uint32_t ecc_pagesize, ecc_pages;

    ecc_pagesize = aml_get_ecc_pagesize(raw_nand,
                                        raw_nand->controller_params.bch_mode);
    ecc_pages = raw_nand->writesize / ecc_pagesize;
    /*
     * user_mode is 2 in our case - 2 bytes of OOB for every
     * ECC page.
     */
    if (raw_nand->controller_params.user_mode != 2)
        return ZX_ERR_NOT_SUPPORTED;
    for (uint32_t i = 0; i < ecc_pages; i++) {
        info = aml_info_ptr(raw_nand, i);
        oob_buf[count++] = info->info_bytes & 0xff;
        oob_buf[count++] = (info->info_bytes >> 8) & 0xff;
    }
    return ZX_OK;
}

static zx_status_t aml_set_oob_byte(aml_raw_nand_t* raw_nand,
                                    const uint8_t* oob_buf,
                                    uint32_t ecc_pages) {
    struct aml_info_format* info;
    int count = 0;

    /*
     * user_mode is 2 in our case - 2 bytes of OOB for every
     * ECC page.
     */
    if (raw_nand->controller_params.user_mode != 2)
        return ZX_ERR_NOT_SUPPORTED;
    for (uint32_t i = 0; i < ecc_pages; i++) {
        info = aml_info_ptr(raw_nand, i);
        info->info_bytes = oob_buf[count] | (oob_buf[count + 1] << 8);
        count += 2;
    }
    return ZX_OK;
}
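
/*
 * Example of the resulting OOB layout (a sketch, assuming user_mode == 2
 * and 4 ECC pages per NAND page): oob_buf[0]/oob_buf[1] map to the low
 * and high bytes of info[0].info_bytes, oob_buf[2]/oob_buf[3] to
 * info[1].info_bytes, and so on, giving 4 * 2 = 8 usable OOB bytes.
 */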

/*
 * Returns the maximum bitflips corrected on this NAND page
 * (the maximum bitflips across all of the ECC pages in this page).
 */
static int aml_get_ecc_corrections(aml_raw_nand_t* raw_nand, int ecc_pages,
                                   uint32_t nand_page) {
    struct aml_info_format* info;
    int bitflips = 0;
    uint8_t zero_bits;

    for (int i = 0; i < ecc_pages; i++) {
        info = aml_info_ptr(raw_nand, i);
        if (info->ecc.eccerr_cnt == AML_ECC_UNCORRECTABLE_CNT) {
            if (!raw_nand->controller_params.rand_mode) {
                zxlogf(ERROR, "%s: ECC failure (non-randomized)@%u\n", __func__, nand_page);
                raw_nand->stats.failed++;
                return ECC_CHECK_RETURN_FF;
            }
            /*
             * Why are we checking for zero_bits here?
             * To deal with blank NAND pages. A blank page is entirely 0xff.
             * When read with the scrambler, the page will be ECC uncorrectable.
             * In theory, if there is a single zero-bit in the page, then that
             * page is not a blank page. But in practice, even fresh NAND chips
             * report a few errors on the read of a page (including blank pages),
             * so we make allowance for a few bitflips. The threshold against
             * which we test the zero-bits is one under which we can correct
             * the bitflips when the page is written to. One option is to set
             * this threshold to be exactly the ECC strength (this is aggressive).
             * TODO(srmohan): What should the correct threshold be? We could
             * conservatively set this to a small value, or we could have this
             * depend on the quality of the NAND, the wear of the NAND etc.
             */
            zero_bits = info->zero_bits & AML_ECC_UNCORRECTABLE_CNT;
            if (zero_bits >= raw_nand->controller_params.ecc_strength) {
                zxlogf(ERROR, "%s: ECC failure (randomized)@%u zero_bits=%u\n",
                       __func__, nand_page, zero_bits);
                raw_nand->stats.failed++;
                return ECC_CHECK_RETURN_FF;
            }
            zxlogf(INFO, "%s: Blank Page@%u\n", __func__, nand_page);
            continue;
        }
        if (info->ecc.eccerr_cnt != 0) {
            zxlogf(INFO, "%s: Corrected %u ECC errors@%u\n",
                   __func__, info->ecc.eccerr_cnt, nand_page);
        }
        raw_nand->stats.ecc_corrected += info->ecc.eccerr_cnt;
        bitflips = MAX(bitflips, info->ecc.eccerr_cnt);
    }
    return bitflips;
}
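
/*
 * Worked example of the blank-page heuristic above (a sketch): with
 * AML_ECC_BCH8_1K (ecc_strength == 8), an uncorrectable randomized read
 * reporting zero_bits == 3 is treated as an erased page and skipped,
 * while zero_bits == 10 (>= 8) is reported as a genuine ECC failure.
 */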

static zx_status_t aml_check_ecc_pages(aml_raw_nand_t* raw_nand, int ecc_pages) {
    struct aml_info_format* info;

    for (int i = 0; i < ecc_pages; i++) {
        info = aml_info_ptr(raw_nand, i);
        if (info->ecc.completed == 0)
            return ZX_ERR_IO;
    }
    return ZX_OK;
}

static zx_status_t aml_queue_rb(aml_raw_nand_t* raw_nand) {
    uint32_t cmd, cfg;
    zx_status_t status;
    volatile uint8_t* reg = (volatile uint8_t*)
        raw_nand->mmio[NANDREG_WINDOW].vaddr;

    raw_nand->req_completion = SYNC_COMPLETION_INIT;
    cfg = readl(reg + P_NAND_CFG);
    cfg |= (1 << 21);
    writel(cfg, reg + P_NAND_CFG);
    aml_cmd_idle(raw_nand, NAND_TWB_TIME_CYCLE);
    cmd = raw_nand->chip_select | AML_CMD_CLE | (NAND_CMD_STATUS & 0xff);
    writel(cmd, reg + P_NAND_CMD);
    aml_cmd_idle(raw_nand, NAND_TWB_TIME_CYCLE);
    cmd = AML_CMD_RB | AML_CMD_IO6 | (1 << 16) | (0x18 & 0x1f);
    writel(cmd, reg + P_NAND_CMD);
    aml_cmd_idle(raw_nand, 2);
    status = sync_completion_wait(&raw_nand->req_completion, ZX_SEC(1));
    if (status == ZX_ERR_TIMED_OUT) {
        zxlogf(ERROR, "%s: Request timed out, not woken up from irq\n",
               __func__);
    }
    return status;
}

static void aml_cmd_ctrl(void* ctx,
                         int32_t cmd, uint32_t ctrl) {
    aml_raw_nand_t* raw_nand = (aml_raw_nand_t*)ctx;

    volatile uint8_t* reg = (volatile uint8_t*)
        raw_nand->mmio[NANDREG_WINDOW].vaddr;

    if (cmd == NAND_CMD_NONE)
        return;
    if (ctrl & NAND_CLE)
        cmd = raw_nand->chip_select | AML_CMD_CLE | (cmd & 0xff);
    else
        cmd = raw_nand->chip_select | AML_CMD_ALE | (cmd & 0xff);
    writel(cmd, reg + P_NAND_CMD);
}

/* Read status byte */
static uint8_t aml_read_byte(void* ctx) {
    aml_raw_nand_t* raw_nand = (aml_raw_nand_t*)ctx;
    uint32_t cmd;
    volatile uint8_t* reg = (volatile uint8_t*)
        raw_nand->mmio[NANDREG_WINDOW].vaddr;

    cmd = raw_nand->chip_select | AML_CMD_DRD | 0;
    nandctrl_send_cmd(raw_nand, cmd);

    aml_cmd_idle(raw_nand, NAND_TWB_TIME_CYCLE);

    aml_cmd_idle(raw_nand, 0);
    aml_cmd_idle(raw_nand, 0);
    aml_wait_cmd_finish(raw_nand, CMD_FINISH_TIMEOUT_MS);
    return readb(reg + P_NAND_BUF);
}

static void aml_set_clock_rate(aml_raw_nand_t* raw_nand,
                               uint32_t clk_freq) {
    /* The always-on bit is bit 28 on the Amlogic AXG (bit 24 on earlier SoCs) */
    uint32_t always_on = 0x1 << 28;
    uint32_t clk;
    volatile uint8_t* reg = (volatile uint8_t*)
        raw_nand->mmio[CLOCKREG_WINDOW].vaddr;

    switch (clk_freq) {
    case 24:
        clk = 0x80000201;
        break;
    case 112:
        clk = 0x80000249;
        break;
    case 200:
        clk = 0x80000245;
        break;
    case 250:
        clk = 0x80000244;
        break;
    default:
        clk = 0x80000245;
        break;
    }
    clk |= always_on;
    writel(clk, reg);
}

static void aml_clock_init(aml_raw_nand_t* raw_nand) {
    uint32_t sys_clk_rate, bus_cycle, bus_timing;

    sys_clk_rate = 200;
    aml_set_clock_rate(raw_nand, sys_clk_rate);
    bus_cycle = 6;
    bus_timing = bus_cycle + 1;
    nandctrl_set_cfg(raw_nand, 0);
    nandctrl_set_timing_async(raw_nand, bus_timing, (bus_cycle - 1));
    nandctrl_send_cmd(raw_nand, 1 << 31);
}

static void aml_adjust_timings(aml_raw_nand_t* raw_nand,
                               uint32_t tRC_min, uint32_t tREA_max,
                               uint32_t RHOH_min) {
    int sys_clk_rate, bus_cycle, bus_timing;

    if (!tREA_max)
        tREA_max = TREA_MAX_DEFAULT;
    if (!RHOH_min)
        RHOH_min = RHOH_MIN_DEFAULT;
    if (tREA_max > 30)
        sys_clk_rate = 112;
    else if (tREA_max > 16)
        sys_clk_rate = 200;
    else
        sys_clk_rate = 250;
    aml_set_clock_rate(raw_nand, sys_clk_rate);
    bus_cycle = 6;
    bus_timing = bus_cycle + 1;
    nandctrl_set_cfg(raw_nand, 0);
    nandctrl_set_timing_async(raw_nand, bus_timing, (bus_cycle - 1));
    nandctrl_send_cmd(raw_nand, 1 << 31);
}

static bool is_page0_nand_page(uint32_t nand_page) {
    return ((nand_page <= AML_PAGE0_MAX_ADDR) &&
            ((nand_page % AML_PAGE0_STEP) == 0));
}
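
/*
 * Example (a sketch, assuming AML_PAGE0_STEP == 128, per the copy spacing
 * described in aml_nand_init_from_page0() below): pages 0, 128, 256, ...
 * up to AML_PAGE0_MAX_ADDR are treated as page0 copies; page 129 is not.
 */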

static zx_status_t aml_read_page_hwecc(void* ctx,
                                       void* data,
                                       void* oob,
                                       uint32_t nand_page,
                                       int* ecc_correct) {
    aml_raw_nand_t* raw_nand = (aml_raw_nand_t*)ctx;
    uint32_t cmd;
    zx_status_t status;
    uint64_t daddr = raw_nand->data_buf_paddr;
    uint64_t iaddr = raw_nand->info_buf_paddr;
    int ecc_c;
    volatile uint8_t* reg = (volatile uint8_t*)
        raw_nand->mmio[NANDREG_WINDOW].vaddr;
    uint32_t ecc_pagesize = 0; /* initialize to silence compiler */
    uint32_t ecc_pages;
    bool page0 = is_page0_nand_page(nand_page);

    if (!page0) {
        ecc_pagesize = aml_get_ecc_pagesize(raw_nand,
                                            raw_nand->controller_params.bch_mode);
        ecc_pages = raw_nand->writesize / ecc_pagesize;
    } else
        ecc_pages = 1;
    /* Send the page address into the controller */
    onfi_command(&raw_nand->raw_nand_proto, NAND_CMD_READ0, 0x00,
                 nand_page, raw_nand->chipsize, raw_nand->chip_delay,
                 (raw_nand->controller_params.options & NAND_BUSWIDTH_16));
    cmd = GENCMDDADDRL(AML_CMD_ADL, daddr);
    writel(cmd, reg + P_NAND_CMD);
    cmd = GENCMDDADDRH(AML_CMD_ADH, daddr);
    writel(cmd, reg + P_NAND_CMD);
    cmd = GENCMDIADDRL(AML_CMD_AIL, iaddr);
    writel(cmd, reg + P_NAND_CMD);
    cmd = GENCMDIADDRH(AML_CMD_AIH, iaddr);
    writel(cmd, reg + P_NAND_CMD);
    /* Page0 needs randomization, so force the seed for page0 */
    if (page0 || raw_nand->controller_params.rand_mode)
        /*
         * Only need to set the seed if randomizing
         * is enabled.
         */
        aml_cmd_seed(raw_nand, nand_page);
    if (!page0)
        aml_cmd_n2m(raw_nand, ecc_pages, ecc_pagesize);
    else
        aml_cmd_n2m_page0(raw_nand);
    status = aml_wait_dma_finish(raw_nand);
    if (status != ZX_OK) {
        zxlogf(ERROR, "%s: aml_wait_dma_finish failed %d\n",
               __func__, status);
        return status;
    }
    status = aml_queue_rb(raw_nand);
    if (status != ZX_OK) {
        zxlogf(ERROR, "%s: aml_queue_rb failed %d\n", __func__, status);
        return ZX_ERR_IO;
    }
    status = aml_check_ecc_pages(raw_nand, ecc_pages);
    if (status != ZX_OK) {
        zxlogf(ERROR, "%s: aml_check_ecc_pages failed %d\n",
               __func__, status);
        return status;
    }
    /*
     * Finally copy out the data and oob as needed
     */
    if (data != NULL) {
        if (!page0)
            memcpy(data, raw_nand->data_buf, raw_nand->writesize);
        else
            memcpy(data, raw_nand->data_buf, AML_PAGE0_LEN);
    }
    if (oob != NULL)
        status = aml_get_oob_byte(raw_nand, oob);
    ecc_c = aml_get_ecc_corrections(raw_nand, ecc_pages, nand_page);
    if (ecc_c < 0) {
        zxlogf(ERROR, "%s: Uncorrectable ECC error on read\n",
               __func__);
        status = ZX_ERR_IO;
    }
    *ecc_correct = ecc_c;
    return status;
}
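
/*
 * Usage sketch for the read path above (hypothetical caller; buffer
 * sizes come from raw_nand->writesize and the OOB size computed in
 * aml_get_nand_info()):
 *
 *     int ecc_corrected;
 *     zx_status_t st = aml_read_page_hwecc(raw_nand, data_buf, oob_buf,
 *                                          nand_page, &ecc_corrected);
 *     // On ZX_OK, ecc_corrected holds the worst-case bitflip count
 *     // across the ECC pages; callers can use it to trigger scrubbing.
 */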

/*
 * TODO: Right now, the driver uses a buffer for DMA, which
 * is not needed. We should initiate DMA to/from pages passed in.
 */
static zx_status_t aml_write_page_hwecc(void* ctx,
                                        const void* data,
                                        const void* oob,
                                        uint32_t nand_page) {
    aml_raw_nand_t* raw_nand = (aml_raw_nand_t*)ctx;
    uint32_t cmd;
    uint64_t daddr = raw_nand->data_buf_paddr;
    uint64_t iaddr = raw_nand->info_buf_paddr;
    zx_status_t status;
    volatile uint8_t* reg = (volatile uint8_t*)
        raw_nand->mmio[NANDREG_WINDOW].vaddr;
    uint32_t ecc_pagesize = 0; /* initialize to silence compiler */
    uint32_t ecc_pages;
    bool page0 = is_page0_nand_page(nand_page);

    if (!page0) {
        ecc_pagesize = aml_get_ecc_pagesize(raw_nand,
                                            raw_nand->controller_params.bch_mode);
        ecc_pages = raw_nand->writesize / ecc_pagesize;
    } else
        ecc_pages = 1;
    if (data != NULL) {
        memcpy(raw_nand->data_buf, data, raw_nand->writesize);
    }
    if (oob != NULL) {
        aml_set_oob_byte(raw_nand, oob, ecc_pages);
    }

    onfi_command(&raw_nand->raw_nand_proto, NAND_CMD_SEQIN, 0x00, nand_page,
                 raw_nand->chipsize, raw_nand->chip_delay,
                 (raw_nand->controller_params.options & NAND_BUSWIDTH_16));
    cmd = GENCMDDADDRL(AML_CMD_ADL, daddr);
    writel(cmd, reg + P_NAND_CMD);
    cmd = GENCMDDADDRH(AML_CMD_ADH, daddr);
    writel(cmd, reg + P_NAND_CMD);
    cmd = GENCMDIADDRL(AML_CMD_AIL, iaddr);
    writel(cmd, reg + P_NAND_CMD);
    cmd = GENCMDIADDRH(AML_CMD_AIH, iaddr);
    writel(cmd, reg + P_NAND_CMD);
    /* Page0 needs randomization, so force the seed for page0 */
    if (page0 || raw_nand->controller_params.rand_mode)
        /*
         * Only need to set the seed if randomizing
         * is enabled.
         */
        aml_cmd_seed(raw_nand, nand_page);
    if (!page0)
        aml_cmd_m2n(raw_nand, ecc_pages, ecc_pagesize);
    else
        aml_cmd_m2n_page0(raw_nand);
    status = aml_wait_dma_finish(raw_nand);
    if (status != ZX_OK) {
        zxlogf(ERROR, "%s: error from wait_dma_finish\n",
               __func__);
        return status;
    }
    onfi_command(&raw_nand->raw_nand_proto, NAND_CMD_PAGEPROG, -1, -1,
                 raw_nand->chipsize, raw_nand->chip_delay,
                 (raw_nand->controller_params.options & NAND_BUSWIDTH_16));
    status = onfi_wait(&raw_nand->raw_nand_proto, AML_WRITE_PAGE_TIMEOUT);

    return status;
}

/*
 * Erase entry point into the Amlogic driver.
 * nand_page : NAND page address at the start of the erase block.
 */
static zx_status_t aml_erase_block(void* ctx, uint32_t nand_page) {
    aml_raw_nand_t* raw_nand = (aml_raw_nand_t*)ctx;
    zx_status_t status;

    /* nand_page must be aligned to erasesize_pages */
    if (nand_page % raw_nand->erasesize_pages) {
        zxlogf(ERROR, "%s: NAND page %u must be a multiple of erasesize_pages (%u)\n",
               __func__, nand_page, raw_nand->erasesize_pages);
        return ZX_ERR_INVALID_ARGS;
    }
    onfi_command(&raw_nand->raw_nand_proto, NAND_CMD_ERASE1, -1, nand_page,
                 raw_nand->chipsize, raw_nand->chip_delay,
                 (raw_nand->controller_params.options & NAND_BUSWIDTH_16));
    onfi_command(&raw_nand->raw_nand_proto, NAND_CMD_ERASE2, -1, -1,
                 raw_nand->chipsize, raw_nand->chip_delay,
                 (raw_nand->controller_params.options & NAND_BUSWIDTH_16));
    status = onfi_wait(&raw_nand->raw_nand_proto, AML_ERASE_BLOCK_TIMEOUT);
    return status;
}
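
/*
 * Example (a sketch): to erase eraseblock 5 on a device with
 * erasesize_pages == 64, pass nand_page = 5 * 64 = 320. Any page
 * address that is not a multiple of erasesize_pages is rejected with
 * ZX_ERR_INVALID_ARGS before touching the device.
 */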

static zx_status_t aml_get_flash_type(aml_raw_nand_t* raw_nand) {
    uint8_t nand_maf_id, nand_dev_id;
    uint8_t id_data[8];
    struct nand_chip_table* nand_chip;

    onfi_command(&raw_nand->raw_nand_proto, NAND_CMD_RESET, -1, -1,
                 raw_nand->chipsize, raw_nand->chip_delay,
                 (raw_nand->controller_params.options & NAND_BUSWIDTH_16));
    onfi_command(&raw_nand->raw_nand_proto, NAND_CMD_READID, 0x00, -1,
                 raw_nand->chipsize, raw_nand->chip_delay,
                 (raw_nand->controller_params.options & NAND_BUSWIDTH_16));
    /* Read manufacturer and device IDs */
    nand_maf_id = aml_read_byte(&raw_nand->raw_nand_proto);
    nand_dev_id = aml_read_byte(&raw_nand->raw_nand_proto);
    /* Read again */
    onfi_command(&raw_nand->raw_nand_proto, NAND_CMD_READID, 0x00, -1,
                 raw_nand->chipsize, raw_nand->chip_delay,
                 (raw_nand->controller_params.options & NAND_BUSWIDTH_16));
    /* Read entire ID string */
    for (uint32_t i = 0; i < sizeof(id_data); i++)
        id_data[i] = aml_read_byte(&raw_nand->raw_nand_proto);
    if (id_data[0] != nand_maf_id || id_data[1] != nand_dev_id) {
        zxlogf(ERROR, "second ID read (%02x,%02x) did not match first (%02x,%02x)\n",
               id_data[0], id_data[1], nand_maf_id, nand_dev_id);
    }

    zxlogf(INFO, "%s: manufacturer_id = %x, device_id = %x\n",
           __func__, nand_maf_id, nand_dev_id);

    nand_chip = find_nand_chip_table(nand_maf_id, nand_dev_id);
    if (nand_chip == NULL) {
        zxlogf(ERROR, "%s: Could not find matching NAND chip. NAND chip unsupported."
                      " This is FATAL\n",
               __func__);
        return ZX_ERR_UNAVAILABLE;
    }
    if (nand_chip->extended_id_nand) {
        /*
         * Initialize pagesize, eraseblk size, oobsize and
         * buswidth from extended parameters queried just now.
         */
        uint8_t extid = id_data[3];

        raw_nand->writesize = 1024 << (extid & 0x03);
        extid >>= 2;
        /* Calculate oobsize */
        raw_nand->oobsize = (8 << (extid & 0x01)) *
                            (raw_nand->writesize >> 9);
        extid >>= 2;
        /* Calculate blocksize; blocksize is a multiple of 64 KiB */
        raw_nand->erasesize = (64 * 1024) << (extid & 0x03);
        extid >>= 2;
        /* Get buswidth information */
        raw_nand->bus_width = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
    } else {
        /*
         * Initialize pagesize, eraseblk size, oobsize and
         * buswidth from values in table.
         */
        raw_nand->writesize = nand_chip->page_size;
        raw_nand->oobsize = nand_chip->oobsize;
        raw_nand->erasesize = nand_chip->erase_block_size;
        raw_nand->bus_width = nand_chip->bus_width;
    }
    raw_nand->erasesize_pages =
        raw_nand->erasesize / raw_nand->writesize;
    raw_nand->chipsize = nand_chip->chipsize;
    raw_nand->page_shift = ffs(raw_nand->writesize) - 1;

    /*
     * We found a matching device in our database, use it to
     * initialize. Adjust timings and set various parameters.
     */
    aml_adjust_timings(raw_nand,
                       nand_chip->timings.tRC_min,
                       nand_chip->timings.tREA_max,
                       nand_chip->timings.RHOH_min);
    /*
     * chip_delay is used by onfi_command(), after sending down some commands
     * to the NAND chip.
     */
    raw_nand->chip_delay = nand_chip->chip_delay_us;
    zxlogf(INFO, "NAND %s %s: chip size = %lu(MB), page size = %u, oob size = %u\n"
           "eraseblock size = %u, chip delay (us) = %u\n",
           nand_chip->manufacturer_name, nand_chip->device_name,
           raw_nand->chipsize, raw_nand->writesize, raw_nand->oobsize, raw_nand->erasesize,
           raw_nand->chip_delay);
    return ZX_OK;
}
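
/*
 * Worked example of the extended-ID decode above (a sketch, using a
 * hypothetical id_data[3] of 0x95):
 *
 *     writesize = 1024 << (0x95 & 0x03)                    = 2048 bytes
 *     extid >>= 2 (0x25): oobsize = (8 << 1) * (2048 >> 9) = 64 bytes
 *     extid >>= 2 (0x09): erasesize = 64 KiB << 1          = 128 KiB
 *     extid >>= 2 (0x02): bus_width = (0x02 & 0x01) ? 16-bit : 8-bit -> 8-bit
 */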

static int aml_raw_nand_irq_thread(void* arg) {
    zxlogf(INFO, "aml_raw_nand_irq_thread start\n");

    aml_raw_nand_t* raw_nand = arg;

    while (1) {
        zx_time_t slots;

        zx_status_t result = zx_interrupt_wait(raw_nand->irq_handle, &slots);
        if (result != ZX_OK) {
            zxlogf(ERROR,
                   "aml_raw_nand_irq_thread: zx_interrupt_wait got %d\n",
                   result);
            break;
        }
        /*
         * Wake up the requester blocked on
         * sync_completion_wait(&raw_nand->req_completion, ...) in
         * aml_queue_rb().
         */
        sync_completion_signal(&raw_nand->req_completion);
    }

    return 0;
}

static zx_status_t aml_get_nand_info(void* ctx, struct nand_info* nand_info) {
    aml_raw_nand_t* raw_nand = (aml_raw_nand_t*)ctx;
    uint64_t capacity;
    zx_status_t status = ZX_OK;

    nand_info->page_size = raw_nand->writesize;
    nand_info->pages_per_block = raw_nand->erasesize_pages;
    capacity = raw_nand->chipsize * (1024 * 1024); /* chipsize is in MB */
    capacity /= raw_nand->erasesize;
    nand_info->num_blocks = (uint32_t)capacity;
    nand_info->ecc_bits = raw_nand->controller_params.ecc_strength;

    nand_info->nand_class = NAND_CLASS_PARTMAP;
    memset(&nand_info->partition_guid, 0, sizeof(nand_info->partition_guid));

    if (raw_nand->controller_params.user_mode == 2)
        nand_info->oob_size =
            (raw_nand->writesize /
             aml_get_ecc_pagesize(raw_nand, raw_nand->controller_params.bch_mode)) *
            2;
    else
        status = ZX_ERR_NOT_SUPPORTED;
    return status;
}
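
/*
 * Worked example (a sketch): for a hypothetical 4 GiB chip
 * (chipsize == 4096, in MB) with a 128 KiB eraseblock, num_blocks is
 * (4096 * 1024 * 1024) / (128 * 1024) = 32768. With a 2048-byte page
 * and AML_ECC_BCH8_1K (1024-byte ECC pages), oob_size is
 * (2048 / 1024) * 2 = 4 bytes.
 */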

static raw_nand_protocol_ops_t aml_raw_nand_ops = {
    .read_page_hwecc = aml_read_page_hwecc,
    .write_page_hwecc = aml_write_page_hwecc,
    .erase_block = aml_erase_block,
    .get_nand_info = aml_get_nand_info,
    .cmd_ctrl = aml_cmd_ctrl,
    .read_byte = aml_read_byte,
};

static void aml_raw_nand_release(void* ctx) {
    aml_raw_nand_t* raw_nand = ctx;

    for (raw_nand_addr_window_t wnd = 0; wnd < ADDR_WINDOW_COUNT; wnd++)
        mmio_buffer_release(&raw_nand->mmio[wnd]);
    io_buffer_release(&raw_nand->data_buffer);
    io_buffer_release(&raw_nand->info_buffer);
    zx_handle_close(raw_nand->bti_handle);
    free(raw_nand);
}

static void aml_set_encryption(aml_raw_nand_t* raw_nand) {
    uint32_t cfg;
    volatile uint8_t* reg = (volatile uint8_t*)
        raw_nand->mmio[NANDREG_WINDOW].vaddr;

    cfg = readl(reg + P_NAND_CFG);
    cfg |= (1 << 17);
    writel(cfg, reg + P_NAND_CFG);
}

static zx_status_t aml_read_page0(aml_raw_nand_t* raw_nand,
                                  void* data,
                                  void* oob,
                                  uint32_t nand_page,
                                  int* ecc_correct,
                                  int retries) {
    zx_status_t status;

    retries++; /* one initial attempt plus the requested number of retries */
    do {
        status = aml_read_page_hwecc(raw_nand, data, oob,
                                     nand_page, ecc_correct);
    } while (status != ZX_OK && --retries > 0);
    if (status != ZX_OK)
        zxlogf(ERROR, "%s: Read error\n", __func__);
    return status;
}

/*
 * Read one of the page0 pages, and use the result to init
 * ECC algorithm and rand-mode.
 */
static zx_status_t aml_nand_init_from_page0(aml_raw_nand_t* raw_nand) {
    zx_status_t status;
    char* data;
    nand_page0_t* page0;
    int ecc_correct;

    data = malloc(raw_nand->writesize);
    if (data == NULL) {
        zxlogf(ERROR, "%s: Cannot allocate memory to read in Page0\n", __func__);
        return ZX_ERR_NO_MEMORY;
    }
    /*
     * There are 8 copies of page0 spaced apart by 128 pages
     * starting at Page 0. Read the first one we can.
     */
    for (uint32_t i = 0; i < 8; i++) {
        status = aml_read_page0(raw_nand, data, NULL, i * 128,
                                &ecc_correct, 3);
        if (status == ZX_OK)
            break;
    }
    if (status != ZX_OK) {
        /*
         * Could not read any of the page0 copies. This is a fatal
         * error.
         */
        free(data);
        zxlogf(ERROR, "%s: Page0 Read (all copies) failed\n", __func__);
        return status;
    }

    page0 = (nand_page0_t*)data;
    raw_nand->controller_params.rand_mode =
        (page0->nand_setup.cfg.d32 >> 19) & 0x1;
    raw_nand->controller_params.bch_mode =
        (page0->nand_setup.cfg.d32 >> 14) & 0x7;

    raw_nand->controller_params.ecc_strength =
        aml_get_ecc_strength(raw_nand->controller_params.bch_mode);
    if (raw_nand->controller_params.ecc_strength < 0) {
        zxlogf(INFO, "%s: BAD ECC strength computed from BCH Mode\n", __func__);
        free(data);
        return ZX_ERR_BAD_STATE;
    }

    zxlogf(INFO, "%s: NAND BCH Mode is %s\n", __func__,
           aml_ecc_string(raw_nand->controller_params.bch_mode));
    free(data);
    return ZX_OK;
}
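
/*
 * Example of the page0 config decode above (a sketch): bit 19 of
 * nand_setup.cfg.d32 holds rand_mode and bits [16:14] hold bch_mode.
 * If those three bits decode to AML_ECC_BCH8_1K, aml_get_ecc_strength()
 * maps that to an ecc_strength of 8 and aml_get_ecc_pagesize() to
 * 1024-byte ECC pages.
 */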

static zx_status_t aml_raw_nand_allocbufs(aml_raw_nand_t* raw_nand) {
    zx_status_t status;

    status = pdev_get_bti(&raw_nand->pdev, 0, &raw_nand->bti_handle);
    if (status != ZX_OK) {
        zxlogf(ERROR, "aml_raw_nand_allocbufs: pdev_get_bti failed (%d)\n",
               status);
        return status;
    }
    /*
     * The iobuffers MUST be uncacheable. Making these cacheable, with
     * cache flush/invalidate at the right places in the code, does not
     * work. We see data corruptions caused by speculative cache prefetching
     * done by ARM. Note also that these corruptions are not easily reproducible.
     */
    ZX_DEBUG_ASSERT(raw_nand->writesize > 0);
    status = io_buffer_init(&raw_nand->data_buffer,
                            raw_nand->bti_handle,
                            raw_nand->writesize,
                            IO_BUFFER_UNCACHED | IO_BUFFER_RW | IO_BUFFER_CONTIG);
    if (status != ZX_OK) {
        zxlogf(ERROR,
               "aml_raw_nand_allocbufs: io_buffer_init(data_buffer) failed\n");
        zx_handle_close(raw_nand->bti_handle);
        return status;
    }
    status = io_buffer_init(&raw_nand->info_buffer,
                            raw_nand->bti_handle,
                            raw_nand->writesize,
                            IO_BUFFER_UNCACHED | IO_BUFFER_RW | IO_BUFFER_CONTIG);
    if (status != ZX_OK) {
        zxlogf(ERROR,
               "aml_raw_nand_allocbufs: io_buffer_init(info_buffer) failed\n");
        io_buffer_release(&raw_nand->data_buffer);
        zx_handle_close(raw_nand->bti_handle);
        return status;
    }
    raw_nand->data_buf = io_buffer_virt(&raw_nand->data_buffer);
    raw_nand->info_buf = io_buffer_virt(&raw_nand->info_buffer);
    raw_nand->data_buf_paddr = io_buffer_phys(&raw_nand->data_buffer);
    raw_nand->info_buf_paddr = io_buffer_phys(&raw_nand->info_buffer);
    return ZX_OK;
}

static zx_status_t aml_nand_init(aml_raw_nand_t* raw_nand) {
    zx_status_t status;

    /*
     * Do a NAND scan to get the manufacturer and other info
     */
    status = aml_get_flash_type(raw_nand);
    if (status != ZX_OK)
        return status;
    raw_nand->controller_params.ecc_strength = aml_params.ecc_strength;
    raw_nand->controller_params.user_mode = aml_params.user_mode;
    raw_nand->controller_params.rand_mode = aml_params.rand_mode;
    raw_nand->controller_params.options = NAND_USE_BOUNCE_BUFFER;
    raw_nand->controller_params.bch_mode = aml_params.bch_mode;

    /*
     * Note on OOB byte settings.
     * The default config for OOB is 2 bytes per OOB page. This is the
     * setting we use, so nothing needs to be done for OOB. If we ever
     * need to switch to 16 bytes of OOB per NAND page, we need to set
     * the right bits in the CFG register.
     */

    status = aml_raw_nand_allocbufs(raw_nand);
    if (status != ZX_OK)
        return status;

    /*
     * Read one of the copies of page0, and use that to initialize
     * ECC algorithm and rand-mode.
     */
    status = aml_nand_init_from_page0(raw_nand);

    /* Force chip_select to 0 */
    raw_nand->chip_select = chipsel[0];

    return status;
}

static void aml_raw_nand_unbind(void* ctx) {
    aml_raw_nand_t* raw_nand = ctx;

    zx_interrupt_destroy(raw_nand->irq_handle);
    thrd_join(raw_nand->irq_thread, NULL);
    zx_handle_close(raw_nand->irq_handle);
    device_remove(raw_nand->zxdev);
}

static zx_protocol_device_t raw_nand_device_proto = {
    .version = DEVICE_OPS_VERSION,
    .unbind = aml_raw_nand_unbind,
    .release = aml_raw_nand_release,
};

static zx_status_t aml_raw_nand_bind(void* ctx, zx_device_t* parent) {
    zx_status_t status;

    aml_raw_nand_t* raw_nand = calloc(1, sizeof(aml_raw_nand_t));

    if (!raw_nand) {
        return ZX_ERR_NO_MEMORY;
    }

    raw_nand->req_completion = SYNC_COMPLETION_INIT;

    if ((status = device_get_protocol(parent,
                                      ZX_PROTOCOL_PLATFORM_DEV,
                                      &raw_nand->pdev)) != ZX_OK) {
        zxlogf(ERROR,
               "aml_raw_nand_bind: ZX_PROTOCOL_PLATFORM_DEV not available\n");
        free(raw_nand);
        return status;
    }

    pdev_device_info_t info;
    status = pdev_get_device_info(&raw_nand->pdev, &info);
    if (status != ZX_OK) {
        zxlogf(ERROR, "aml_raw_nand_bind: pdev_get_device_info failed\n");
        free(raw_nand);
        return status;
    }

    /* Map all of the mmio windows that we need */
    for (raw_nand_addr_window_t wnd = 0; wnd < ADDR_WINDOW_COUNT; wnd++) {
        status = pdev_map_mmio_buffer2(&raw_nand->pdev,
                                       wnd,
                                       ZX_CACHE_POLICY_UNCACHED_DEVICE,
                                       &raw_nand->mmio[wnd]);
        if (status != ZX_OK) {
            zxlogf(ERROR, "aml_raw_nand_bind: pdev_map_mmio_buffer failed %d\n",
                   status);
            for (raw_nand_addr_window_t j = 0; j < wnd; j++)
                mmio_buffer_release(&raw_nand->mmio[j]);
            free(raw_nand);
            return status;
        }
    }

    status = pdev_map_interrupt(&raw_nand->pdev, 0, &raw_nand->irq_handle);
    if (status != ZX_OK) {
        zxlogf(ERROR, "aml_raw_nand_bind: pdev_map_interrupt failed %d\n",
               status);
        goto fail;
    }

    raw_nand->raw_nand_proto.ops = &aml_raw_nand_ops;
    raw_nand->raw_nand_proto.ctx = raw_nand;
    /*
     * This creates a device that a top level (controller independent)
     * raw_nand driver can bind to.
     */
    device_add_args_t args = {
        .version = DEVICE_ADD_ARGS_VERSION,
        .name = "aml-raw_nand",
        .ctx = raw_nand,
        .ops = &raw_nand_device_proto,
        .proto_id = ZX_PROTOCOL_RAW_NAND,
        .proto_ops = &aml_raw_nand_ops,
        .flags = DEVICE_ADD_INVISIBLE,
    };

    status = device_add(parent, &args, &raw_nand->zxdev);
    if (status != ZX_OK) {
        zxlogf(ERROR, "aml_raw_nand_bind: device_add failed\n");
        zx_handle_close(raw_nand->irq_handle);
        goto fail;
    }

    int rc = thrd_create_with_name(&raw_nand->irq_thread,
                                   aml_raw_nand_irq_thread,
                                   raw_nand, "aml_raw_nand_irq_thread");
    if (rc != thrd_success) {
        zx_handle_close(raw_nand->irq_handle);
        status = thrd_status_to_zx_status(rc);
        goto fail;
    }

    /*
     * Do the rest of the init here, after the irq thread has been
     * created, because the init needs interrupts to be working.
     */
    aml_clock_init(raw_nand);
    status = aml_nand_init(raw_nand);
    if (status != ZX_OK) {
        zxlogf(ERROR,
               "aml_raw_nand_bind: aml_nand_init() failed - This is FATAL\n");
        zx_interrupt_destroy(raw_nand->irq_handle);
        thrd_join(raw_nand->irq_thread, NULL);
        device_remove(raw_nand->zxdev);
        goto fail;
    }

    zxlogf(INFO, "aml_raw_nand_bind: Making device visible\n");

    /*
     * The device was added invisible; now that init has completed,
     * flip the switch, allowing the upper layer nand driver to
     * bind to us.
     */
    device_make_visible(raw_nand->zxdev);

    return status;

fail:
    for (raw_nand_addr_window_t wnd = 0; wnd < ADDR_WINDOW_COUNT; wnd++)
        mmio_buffer_release(&raw_nand->mmio[wnd]);
    free(raw_nand);
    return status;
}

static zx_driver_ops_t aml_raw_nand_driver_ops = {
    .version = DRIVER_OPS_VERSION,
    .bind = aml_raw_nand_bind,
};

ZIRCON_DRIVER_BEGIN(aml_raw_nand, aml_raw_nand_driver_ops, "zircon", "0.1", 3)
BI_ABORT_IF(NE, BIND_PROTOCOL, ZX_PROTOCOL_PLATFORM_DEV),
    BI_ABORT_IF(NE, BIND_PLATFORM_DEV_VID, PDEV_VID_AMLOGIC),
    BI_MATCH_IF(EQ, BIND_PLATFORM_DEV_DID, PDEV_DID_AMLOGIC_RAW_NAND),
    ZIRCON_DRIVER_END(aml_raw_nand)