1/***********************license start***************
2 * Copyright (c) 2003-2010  Cavium Networks (support@cavium.com). All rights
3 * reserved.
4 *
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 *   * Redistributions of source code must retain the above copyright
11 *     notice, this list of conditions and the following disclaimer.
12 *
13 *   * Redistributions in binary form must reproduce the above
14 *     copyright notice, this list of conditions and the following
15 *     disclaimer in the documentation and/or other materials provided
16 *     with the distribution.
17
18 *   * Neither the name of Cavium Networks nor the names of
19 *     its contributors may be used to endorse or promote products
20 *     derived from this software without specific prior written
21 *     permission.
22
23 * This Software, including technical data, may be subject to U.S. export  control
24 * laws, including the U.S. Export Administration Act and its  associated
25 * regulations, and may be subject to export or import  regulations in other
26 * countries.
27
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM  NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
39
40
41
42/**
43 * @file
44 *
45 * Interface to the NAND flash controller.
46 * See cvmx-nand.h for usage documentation and notes.
47 *
48 * <hr>$Revision: 35726 $<hr>
49 */
50
51#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
52#include <linux/module.h>
53
54#include <asm/octeon/cvmx.h>
55#include <asm/octeon/cvmx-clock.h>
56#include <asm/octeon/cvmx-nand.h>
57#include <asm/octeon/cvmx-ndf-defs.h>
58#include <asm/octeon/cvmx-swap.h>
59#include <asm/octeon/cvmx-bootmem.h>
60#else
61#include "cvmx.h"
62#include "cvmx-nand.h"
63#include "cvmx-swap.h"
64#include "cvmx-bootmem.h"
65#endif
66
/* NAND flash command opcodes (sent on the command latch cycle) */
#define NAND_COMMAND_READ_ID            0x90    /* Read chip ID bytes */
#define NAND_COMMAND_READ_PARAM_PAGE    0xec    /* Read ONFI parameter page */
#define NAND_COMMAND_RESET              0xff    /* Reset the flash device */
#define NAND_COMMAND_STATUS             0x70    /* Read status register */
#define NAND_COMMAND_READ               0x00    /* Page read, first cycle */
#define NAND_COMMAND_READ_FIN           0x30    /* Page read, confirm cycle */
#define NAND_COMMAND_ERASE              0x60    /* Block erase, first cycle */
#define NAND_COMMAND_ERASE_FIN          0xd0    /* Block erase, confirm cycle */
#define NAND_COMMAND_PROGRAM            0x80    /* Page program, first cycle */
#define NAND_COMMAND_PROGRAM_FIN        0x10    /* Page program, confirm cycle */
#define NAND_TIMEOUT_USECS              1000000 /* Timeout for NAND operations */

/* Integer division rounding up.  Both arguments are fully parenthesized so
** operator arguments (e.g. "a + b") expand correctly. */
#define CVMX_NAND_ROUNDUP(_Dividend, _Divisor) (((_Dividend)+((_Divisor)-1))/(_Divisor))

/* min/max implemented as GCC statement expressions so each argument is
** evaluated exactly once (no double-evaluation hazard).  __typeof__ is used
** instead of typeof so these also compile in strict ISO (-std=c11) modes. */
#undef min
#define min(X, Y)                               \
        ({ __typeof__ (X) __x = (X), __y = (Y); \
                (__x < __y) ? __x : __y; })

#undef max
#define max(X, Y)                               \
        ({ __typeof__ (X) __x = (X), __y = (Y); \
                (__x > __y) ? __x : __y; })
90
/* Structure to store the parameters that we care about that
** describe the ONFI speed modes.  This is used to configure
** the flash timing to match what is reported in the
** parameter page of the ONFI flash chip. */
typedef struct
{
    int twp;    /* WE# pulse width, ns */
    int twh;    /* WE# high hold time, ns */
    int twc;    /* Write cycle time, ns */
    int tclh;   /* CLE hold time, ns */
    int tals;   /* ALE setup time, ns */
} onfi_speed_mode_desc_t;
/* Timing requirements for ONFI modes 0-5, indexed by mode number.
** Values are the per-mode minimums from the ONFI timing tables. */
static const onfi_speed_mode_desc_t onfi_speed_modes[] =
{

    {50,30,100,20,50},  /* Mode 0 */
    {25,15, 45,10,25},  /* Mode 1 */
    {17,15, 35,10,15},  /* Mode 2 */
    {15,10, 30, 5,10},  /* Mode 3 */
    {12,10, 25, 5,10},  /* Mode 4, requires EDO timings */
    {10, 7, 20, 5,10},  /* Mode 5, requires EDO timings */
};
113
114
115
/* Per-chip state flags (bitmask) */
typedef enum
{
    CVMX_NAND_STATE_16BIT = 1<<0,   /* Chip is connected with a 16 bit data bus */
} cvmx_nand_state_flags_t;

/**
 * Structure used to store data about the NAND devices hooked
 * to the bootbus.
 */
typedef struct
{
    int page_size;          /* NAND page size in bytes */
    int oob_size;           /* Out-of-band (spare) bytes per page */
    int pages_per_block;    /* Pages in each erase block */
    int blocks;             /* Total erase blocks on the device */
    int tim_mult;           /* Timing multiplier */
    int tim_par[8];         /* Hardware timing parameters; index 0 is fixed by hardware */
    int clen[4];            /* tim_par indexes used for command latch phases */
    int alen[4];            /* tim_par indexes used for address latch phases */
    int rdn[4];             /* tim_par indexes used for read strobe phases */
    int wrn[2];             /* tim_par indexes used for write strobe phases */
    int onfi_timing;        /* ONFI timing mode in use (0-5) */
    cvmx_nand_state_flags_t flags;  /* Chip state flags (e.g. 16 bit mode) */
} cvmx_nand_state_t;
140
141/**
142 * Array indexed by bootbus chip select with information
143 * about NAND devices.
144 */
145#if defined(CVMX_BUILD_FOR_UBOOT) && CONFIG_OCTEON_NAND_STAGE2
146/* For u-boot nand boot we need to play some tricks to be able
147** to use this early in boot.  We put them in a special section that is merged
148** with the text segment.  (Using the text segment directly results in an assembler warning.)
149*/
150#define USE_DATA_IN_TEXT
151#endif
152
153#ifdef USE_DATA_IN_TEXT
154static uint8_t cvmx_nand_buffer[CVMX_NAND_MAX_PAGE_AND_OOB_SIZE] __attribute__((aligned(8)))  __attribute__ ((section (".data_in_text")));
155static cvmx_nand_state_t cvmx_nand_state[8] __attribute__ ((section (".data_in_text")));
156static cvmx_nand_state_t cvmx_nand_default __attribute__ ((section (".data_in_text")));
157static cvmx_nand_initialize_flags_t cvmx_nand_flags __attribute__ ((section (".data_in_text")));
158static int debug_indent __attribute__ ((section (".data_in_text")));
159#else
160static CVMX_SHARED cvmx_nand_state_t cvmx_nand_state[8];
161static CVMX_SHARED cvmx_nand_state_t cvmx_nand_default;
162static CVMX_SHARED cvmx_nand_initialize_flags_t cvmx_nand_flags;
163static CVMX_SHARED uint8_t *cvmx_nand_buffer = NULL;
164static int debug_indent = 0;
165#endif
166
167static CVMX_SHARED const char *cvmx_nand_opcode_labels[] =
168{
169    "NOP",                      /* 0 */
170    "Timing",                   /* 1 */
171    "Wait",                     /* 2 */
172    "Chip Enable / Disable",    /* 3 */
173    "CLE",                      /* 4 */
174    "ALE",                      /* 5 */
175    "6 - Unknown",              /* 6 */
176    "7 - Unknown",              /* 7 */
177    "Write",                    /* 8 */
178    "Read",                     /* 9 */
179    "Read EDO",                 /* 10 */
180    "Wait Status",              /* 11 */
181    "12 - Unknown",             /* 12 */
182    "13 - Unknown",             /* 13 */
183    "14 - Unknown",             /* 14 */
184    "Bus Aquire / Release"      /* 15 */
185};
186
#define ULL unsigned long long
/* This macro logs out whenever a function is called if debugging is on.
** Wrapped in do/while(0) so it behaves as a single statement inside
** unbraced if/else bodies (CERT PRE10-C). */
#define CVMX_NAND_LOG_CALLED()                                          \
    do {                                                                \
        if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG)) \
            cvmx_dprintf("%*s%s: called\n", 2*debug_indent++, "", __FUNCTION__); \
    } while (0)

/* This macro logs out each function parameter if debugging is on */
#define CVMX_NAND_LOG_PARAM(format, param)                              \
    do {                                                                \
        if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG)) \
            cvmx_dprintf("%*s%s: param %s = " format "\n", 2*debug_indent, "", __FUNCTION__, #param, param); \
    } while (0)

/* This macro logs out when a function returns a value.  No trailing
** semicolon after while(0): call sites supply it, keeping the macro safe
** inside unbraced if/else constructs. */
#define CVMX_NAND_RETURN(v)                                             \
    do {                                                                \
        __typeof__(v) r = v;                                            \
        if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))         \
            cvmx_dprintf("%*s%s: returned %s(%d)\n", 2*--debug_indent, "", __FUNCTION__, #v, r); \
        return r;                                                       \
    } while (0)

/* This macro logs out when a function doesn't return a value */
#define CVMX_NAND_RETURN_NOTHING()                                      \
    do {                                                                \
        if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))         \
            cvmx_dprintf("%*s%s: returned\n", 2*--debug_indent, "", __FUNCTION__); \
        return;                                                         \
    } while (0)
214
215
216
217
218
219
/* Compute the CRC over an ONFI parameter page copy.
**
** This is CRC-16 with polynomial 0x8005 (x^16 + x^15 + x^2 + 1), shift
** register seeded with 0x4F4E, processed MSB-first over the first 254
** bytes of the 256 byte structure (the last two bytes hold the stored CRC).
** Matches the reference algorithm in the ONFI specification. */
static uint16_t __onfi_parameter_crc_compute(uint8_t *data)
{
    unsigned long crc = 0x4F4E;     /* Initial shift register value per ONFI */
    int byte;

    for (byte = 0; byte < 254; byte++)
    {
        unsigned long c = data[byte];
        int bit;

        /* Feed the byte in MSB-first, one bit per shift */
        for (bit = 7; bit >= 0; bit--)
        {
            unsigned long feedback = (crc & 0x8000) ^ (((c >> bit) & 1UL) << 15);
            crc <<= 1;
            if (feedback)
                crc ^= 0x8005;
        }
        /* Trim the register back to 16 bits once per byte */
        crc &= 0xFFFF;
    }
    return crc;
}
246
247
248/**
249 * Validate the ONFI parameter page and return a pointer to
250 * the config values.
251 *
252 * @param param_page Pointer to the raw NAND data returned after a parameter page read. It will
253 *                   contain at least 4 copies of the parameter structure.
254 *
255 * @return Pointer to a validated paramter page, or NULL if one couldn't be found.
256 */
257static cvmx_nand_onfi_param_page_t *__cvmx_nand_onfi_process(cvmx_nand_onfi_param_page_t param_page[4])
258{
259    int index;
260
261    for (index=0; index<4; index++)
262    {
263        uint16_t crc = __onfi_parameter_crc_compute((void *)&param_page[index]);
264        if (crc == cvmx_le16_to_cpu(param_page[index].crc))
265            break;
266        if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
267            cvmx_dprintf("%s: Paramter page %d is corrupt. (Expected CRC: 0x%04x, computed: 0x%04x)\n",
268                          __FUNCTION__, index, cvmx_le16_to_cpu(param_page[index].crc), crc);
269    }
270
271    if (index == 4)
272    {
273        if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
274            cvmx_dprintf("%s: All parameter pages fail CRC check.  Checking to see if any look sane.\n", __FUNCTION__);
275
276        if (!memcmp(param_page, param_page + 1, 256))
277        {
278            /* First and second copies match, now check some values */
279            if (param_page[0].pages_per_block != 0 && param_page[0].pages_per_block != 0xFFFFFFFF
280                && param_page[0].page_data_bytes != 0 && param_page[0].page_data_bytes != 0xFFFFFFFF
281                && param_page[0].page_spare_bytes != 0 && param_page[0].page_spare_bytes != 0xFFFF
282                && param_page[0].blocks_per_lun != 0 && param_page[0].blocks_per_lun != 0xFFFFFFFF
283                && param_page[0].timing_mode != 0 && param_page[0].timing_mode != 0xFFFF)
284            {
285                /* Looks like we have enough values to use */
286                if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
287                    cvmx_dprintf("%s: Page 0 looks sane, using even though CRC fails.\n", __FUNCTION__);
288                index = 0;
289            }
290        }
291    }
292
293    if (index == 4)
294    {
295        cvmx_dprintf("%s: WARNING: ONFI part but no valid ONFI parameter pages found.\n", __FUNCTION__);
296        return NULL;
297    }
298
299    if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
300    {
301        cvmx_dprintf("%*sONFI Information (from copy %d in param page)\n", 2*debug_indent, "", index);
302        debug_indent++;
303        cvmx_dprintf("%*sonfi = %c%c%c%c\n", 2*debug_indent, "", param_page[index].onfi[0], param_page[index].onfi[1],
304            param_page[index].onfi[2], param_page[index].onfi[3]);
305        cvmx_dprintf("%*srevision_number = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].revision_number));
306        cvmx_dprintf("%*sfeatures = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].features));
307        cvmx_dprintf("%*soptional_commands = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].optional_commands));
308
309        cvmx_dprintf("%*smanufacturer = %12.12s\n", 2*debug_indent, "", param_page[index].manufacturer);
310        cvmx_dprintf("%*smodel = %20.20s\n", 2*debug_indent, "", param_page[index].model);
311        cvmx_dprintf("%*sjedec_id = 0x%x\n", 2*debug_indent, "", param_page[index].jedec_id);
312        cvmx_dprintf("%*sdate_code = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].date_code));
313
314        cvmx_dprintf("%*spage_data_bytes = %u\n", 2*debug_indent, "", (int)cvmx_le32_to_cpu(param_page[index].page_data_bytes));
315        cvmx_dprintf("%*spage_spare_bytes = %u\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].page_spare_bytes));
316        cvmx_dprintf("%*spartial_page_data_bytes = %u\n", 2*debug_indent, "", (int)cvmx_le32_to_cpu(param_page[index].partial_page_data_bytes));
317        cvmx_dprintf("%*spartial_page_spare_bytes = %u\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].partial_page_spare_bytes));
318        cvmx_dprintf("%*spages_per_block = %u\n", 2*debug_indent, "", (int)cvmx_le32_to_cpu(param_page[index].pages_per_block));
319        cvmx_dprintf("%*sblocks_per_lun = %u\n", 2*debug_indent, "", (int)cvmx_le32_to_cpu(param_page[index].blocks_per_lun));
320        cvmx_dprintf("%*snumber_lun = %u\n", 2*debug_indent, "", param_page[index].number_lun);
321        cvmx_dprintf("%*saddress_cycles = 0x%x\n", 2*debug_indent, "", param_page[index].address_cycles);
322        cvmx_dprintf("%*sbits_per_cell = %u\n", 2*debug_indent, "", param_page[index].bits_per_cell);
323        cvmx_dprintf("%*sbad_block_per_lun = %u\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].bad_block_per_lun));
324        cvmx_dprintf("%*sblock_endurance = %u\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].block_endurance));
325        cvmx_dprintf("%*sgood_blocks = %u\n", 2*debug_indent, "", param_page[index].good_blocks);
326        cvmx_dprintf("%*sgood_block_endurance = %u\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].good_block_endurance));
327        cvmx_dprintf("%*sprograms_per_page = %u\n", 2*debug_indent, "", param_page[index].programs_per_page);
328        cvmx_dprintf("%*spartial_program_attrib = 0x%x\n", 2*debug_indent, "", param_page[index].partial_program_attrib);
329        cvmx_dprintf("%*sbits_ecc = %u\n", 2*debug_indent, "", param_page[index].bits_ecc);
330        cvmx_dprintf("%*sinterleaved_address_bits = 0x%x\n", 2*debug_indent, "", param_page[index].interleaved_address_bits);
331        cvmx_dprintf("%*sinterleaved_attrib = 0x%x\n", 2*debug_indent, "", param_page[index].interleaved_attrib);
332
333        cvmx_dprintf("%*spin_capacitance = %u\n", 2*debug_indent, "", param_page[index].pin_capacitance);
334        cvmx_dprintf("%*stiming_mode = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].timing_mode));
335        cvmx_dprintf("%*scache_timing_mode = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].cache_timing_mode));
336        cvmx_dprintf("%*st_prog = %d us\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].t_prog));
337        cvmx_dprintf("%*st_bers = %u us\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].t_bers));
338        cvmx_dprintf("%*st_r = %u us\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].t_r));
339        cvmx_dprintf("%*st_ccs = %u ns\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].t_ccs));
340        cvmx_dprintf("%*svendor_revision = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].vendor_revision));
341        //uint8_t vendor_specific[88];    /**< Byte 166-253: Vendor specific */
342        cvmx_dprintf("%*scrc = 0x%x\n", 2*debug_indent, "", param_page[index].crc);
343        debug_indent--;
344    }
345    return param_page + index;
346}
347
348void __set_onfi_timing_mode(int *tim_par, int clocks_us, int mode)
349{
350    const onfi_speed_mode_desc_t *mp = &onfi_speed_modes[mode];  /* use shorter name to fill in timing array */
351    int margin;
352    int pulse_adjust;
353
354    if (mode > 5)
355    {
356        cvmx_dprintf("%s: invalid ONFI timing mode: %d\n", __FUNCTION__, mode);
357        return;
358    }
359
360    /* Adjust the read/write pulse duty cycle to make it more even.  The cycle time
361    ** requirement is longer than the sum of the high low times, so we exend both the high
362    ** and low times to meet the cycle time requirement.
363    */
364    pulse_adjust = ((mp->twc - mp->twh - mp->twp)/2 + 1) * clocks_us;
365
366    /* Add a small margin to all timings. */
367    margin = 2 * clocks_us;
368    /* Update timing parameters based on supported mode */
369    tim_par[1] = CVMX_NAND_ROUNDUP(mp->twp * clocks_us + margin + pulse_adjust, 1000); /* Twp, WE# pulse width */
370    tim_par[2] = CVMX_NAND_ROUNDUP(max(mp->twh, mp->twc - mp->twp) * clocks_us + margin + pulse_adjust, 1000); /* Tw, WE# pulse width high */
371    tim_par[3] = CVMX_NAND_ROUNDUP(mp->tclh * clocks_us + margin, 1000); /* Tclh, CLE hold time */
372    tim_par[4] = CVMX_NAND_ROUNDUP(mp->tals * clocks_us + margin, 1000); /* Tals, ALE setup time */
373    tim_par[5] = tim_par[3]; /* Talh, ALE hold time */
374    tim_par[6] = tim_par[1]; /* Trp, RE# pulse width*/
375    tim_par[7] = tim_par[2]; /* Treh, RE# high hold time */
376
377}
378
379
380/* Internal helper function to set chip configuration to use default values */
381static void __set_chip_defaults(int chip, int clocks_us)
382{
383    if (!cvmx_nand_default.page_size)
384        return;
385    cvmx_nand_state[chip].page_size = cvmx_nand_default.page_size;  /* NAND page size in bytes */
386    cvmx_nand_state[chip].oob_size = cvmx_nand_default.oob_size;     /* NAND OOB (spare) size in bytes (per page) */
387    cvmx_nand_state[chip].pages_per_block = cvmx_nand_default.pages_per_block;
388    cvmx_nand_state[chip].blocks = cvmx_nand_default.blocks;
389    cvmx_nand_state[chip].onfi_timing = cvmx_nand_default.onfi_timing;
390    __set_onfi_timing_mode(cvmx_nand_state[chip].tim_par, clocks_us, cvmx_nand_state[chip].onfi_timing);
391    if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
392    {
393
394        cvmx_dprintf("%s: Using default NAND parameters.\n", __FUNCTION__);
395        cvmx_dprintf("%s: Defaults: page size: %d, OOB size: %d, pages per block %d, blocks: %d, timing mode: %d\n",
396                     __FUNCTION__, cvmx_nand_state[chip].page_size, cvmx_nand_state[chip].oob_size, cvmx_nand_state[chip].pages_per_block,
397                     cvmx_nand_state[chip].blocks, cvmx_nand_state[chip].onfi_timing);
398    }
399}
400/* Do the proper wait for the ready/busy signal.  First wait
401** for busy to be valid, then wait for busy to de-assert.
402*/
403static int __wait_for_busy_done(int chip)
404{
405    cvmx_nand_cmd_t cmd;
406
407    CVMX_NAND_LOG_CALLED();
408    CVMX_NAND_LOG_PARAM("%d", chip);
409
410    memset(&cmd,  0,  sizeof(cmd));
411    cmd.wait.two = 2;
412    cmd.wait.r_b=0;
413    cmd.wait.n = 2;
414
415    /* Wait for RB to be valied (tWB).
416    ** Use 5 * tWC as proxy.  In some modes this is
417    ** much longer than required, but does not affect performance
418    ** since we will wait much longer for busy to de-assert.
419    */
420    if (cvmx_nand_submit(cmd))
421        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
422    if (cvmx_nand_submit(cmd))
423        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
424    if (cvmx_nand_submit(cmd))
425        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
426    if (cvmx_nand_submit(cmd))
427        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
428    cmd.wait.r_b=1; /* Now wait for busy to be de-asserted */
429    if (cvmx_nand_submit(cmd))
430        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
431
432    CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
433}
434/**
435 * Called to initialize the NAND controller for use. Note that
436 * you must be running out of L2 or memory and not NAND before
437 * calling this function.
438 * When probing for NAND chips, this function attempts to autoconfigure based on the NAND parts detected.
439 * It currently supports autodetection for ONFI parts (with valid parameter pages), and some Samsung NAND
440 * parts (decoding ID bits.)  If autoconfiguration fails, the defaults set with __set_chip_defaults()
441 * prior to calling cvmx_nand_initialize() are used.
442 * If defaults are set and the CVMX_NAND_INITIALIZE_FLAGS_DONT_PROBE flag is provided, the defaults are used
443 * for all chips in the active_chips mask.
444 *
445 * @param flags  Optional initialization flags
446 *               If the CVMX_NAND_INITIALIZE_FLAGS_DONT_PROBE flag is passed, chips are not probed,
447 *               and the default parameters (if set with cvmx_nand_set_defaults) are used for all chips
448 *               in the active_chips mask.
449 * @param active_chips
450 *               Each bit in this parameter represents a chip select that might
451 *               contain NAND flash. Any chip select present in this bitmask may
452 *               be connected to NAND. It is normally safe to pass 0xff here and
453 *               let the API probe all 8 chip selects.
454 *
455 * @return Zero on success, a negative cvmx_nand_status error code on failure
456 */
457cvmx_nand_status_t cvmx_nand_initialize(cvmx_nand_initialize_flags_t flags, int active_chips)
458{
459    int chip;
460    int start_chip;
461    int stop_chip;
462    uint64_t clocks_us;
463    union cvmx_ndf_misc ndf_misc;
464    uint8_t nand_id_buffer[16];
465
466    cvmx_nand_flags = flags;
467    CVMX_NAND_LOG_CALLED();
468    CVMX_NAND_LOG_PARAM("0x%x", flags);
469
470    memset(&cvmx_nand_state,  0,  sizeof(cvmx_nand_state));
471
472#ifndef USE_DATA_IN_TEXT
473    /* cvmx_nand_buffer is statically allocated in the TEXT_IN_DATA case */
474    if (!cvmx_nand_buffer)
475        cvmx_nand_buffer = cvmx_bootmem_alloc(CVMX_NAND_MAX_PAGE_AND_OOB_SIZE, 128);
476    if (!cvmx_nand_buffer)
477        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
478#endif
479
480    /* Disable boot mode and reset the fifo */
481    ndf_misc.u64 = cvmx_read_csr(CVMX_NDF_MISC);
482    ndf_misc.s.rd_cmd = 0;
483    ndf_misc.s.bt_dma = 0;
484    ndf_misc.s.bt_dis = 1;
485    ndf_misc.s.ex_dis = 0;
486    ndf_misc.s.rst_ff = 1;
487    cvmx_write_csr(CVMX_NDF_MISC, ndf_misc.u64);
488    cvmx_read_csr(CVMX_NDF_MISC);
489
490    /* Bring the fifo out of reset */
491    cvmx_wait_usec(1);
492    ndf_misc.s.rst_ff = 0;
493    cvmx_write_csr(CVMX_NDF_MISC, ndf_misc.u64);
494    cvmx_read_csr(CVMX_NDF_MISC);
495    cvmx_wait_usec(1);
496
497    /* Clear the ECC counter */
498    //cvmx_write_csr(CVMX_NDF_ECC_CNT, cvmx_read_csr(CVMX_NDF_ECC_CNT));
499
500    /* Clear the interrupt state */
501    cvmx_write_csr(CVMX_NDF_INT, cvmx_read_csr(CVMX_NDF_INT));
502    cvmx_write_csr(CVMX_NDF_INT_EN, 0);
503    cvmx_write_csr(CVMX_MIO_NDF_DMA_INT, cvmx_read_csr(CVMX_MIO_NDF_DMA_INT));
504    cvmx_write_csr(CVMX_MIO_NDF_DMA_INT_EN, 0);
505
506
507    /* The simulator crashes if you access non existant devices. Assume
508        only chip select 1 is connected to NAND */
509    if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
510    {
511        start_chip = 1;
512        stop_chip = 2;
513    }
514    else
515    {
516        start_chip = 0;
517        stop_chip = 8;
518    }
519
520    /* Figure out how many clocks are in one microsecond, rounding up */
521    clocks_us = CVMX_NAND_ROUNDUP(cvmx_clock_get_rate(CVMX_CLOCK_SCLK), 1000000);
522
523    /* If the CVMX_NAND_INITIALIZE_FLAGS_DONT_PROBE flag is set, then
524    ** use the supplied default values to configured the chips in the
525    ** active_chips mask */
526    if (cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DONT_PROBE)
527    {
528        if (cvmx_nand_default.page_size)
529        {
530            for (chip=start_chip; chip<stop_chip; chip++)
531            {
532                /* Skip chip selects that the caller didn't supply in the active chip bits */
533                if (((1<<chip) & active_chips) == 0)
534                    continue;
535                __set_chip_defaults(chip, clocks_us);
536            }
537        }
538        CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
539    }
540
541    /* Probe and see what NAND flash we can find */
542    for (chip=start_chip; chip<stop_chip; chip++)
543    {
544        union cvmx_mio_boot_reg_cfgx mio_boot_reg_cfg;
545        cvmx_nand_onfi_param_page_t *onfi_param_page;
546        int probe_failed;
547        int width_16;
548
549        /* Skip chip selects that the caller didn't supply in the active chip bits */
550        if (((1<<chip) & active_chips) == 0)
551            continue;
552
553        mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(chip));
554        /* Enabled regions can't be connected to NAND flash */
555        if (mio_boot_reg_cfg.s.en)
556            continue;
557
558        /* Start out with some sane, but slow, defaults */
559        cvmx_nand_state[chip].page_size = 0;
560        cvmx_nand_state[chip].oob_size = 64;
561        cvmx_nand_state[chip].pages_per_block = 64;
562        cvmx_nand_state[chip].blocks = 100;
563
564
565        /* Set timing mode to ONFI mode 0 for initial accesses */
566        __set_onfi_timing_mode(cvmx_nand_state[chip].tim_par, clocks_us, 0);
567
568        /* Put the index of which timing parameter to use.  The indexes are into the tim_par
569        ** which match the indexes of the 8 timing parameters that the hardware supports.
570        ** Index 0 is not software controlled, and is fixed by hardware. */
571        cvmx_nand_state[chip].clen[0] = 0; /* Command doesn't need to be held before WE */
572        cvmx_nand_state[chip].clen[1] = 1; /* Twp, WE# pulse width */
573        cvmx_nand_state[chip].clen[2] = 3; /* Tclh, CLE hold time */
574        cvmx_nand_state[chip].clen[3] = 1;
575
576        cvmx_nand_state[chip].alen[0] = 4; /* Tals, ALE setup time */
577        cvmx_nand_state[chip].alen[1] = 1; /* Twp, WE# pulse width */
578        cvmx_nand_state[chip].alen[2] = 2; /* Twh, WE# pulse width high */
579        cvmx_nand_state[chip].alen[3] = 5; /* Talh, ALE hold time */
580
581        cvmx_nand_state[chip].rdn[0] = 0;
582        cvmx_nand_state[chip].rdn[1] = 6; /* Trp, RE# pulse width*/
583        cvmx_nand_state[chip].rdn[2] = 7; /* Treh, RE# high hold time */
584        cvmx_nand_state[chip].rdn[3] = 0;
585
586        cvmx_nand_state[chip].wrn[0] = 1; /* Twp, WE# pulse width */
587        cvmx_nand_state[chip].wrn[1] = 2; /* Twh, WE# pulse width high */
588
589        /* Probe and see if we get an answer.  Read more than required, as in
590        ** 16 bit mode only every other byte is valid.
591        ** Here we probe twice, once in 8 bit mode, and once in 16 bit mode to autodetect
592        ** the width.
593        */
594        probe_failed = 1;
595        for (width_16 = 0; width_16 <= 1 && probe_failed; width_16++)
596        {
597            probe_failed = 0;
598
599            if (width_16)
600                cvmx_nand_state[chip].flags |= CVMX_NAND_STATE_16BIT;
601            memset(cvmx_nand_buffer, 0xff, 16);
602            if (cvmx_nand_read_id(chip, 0x0, cvmx_ptr_to_phys(cvmx_nand_buffer), 16) < 16)
603            {
604                if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
605                    cvmx_dprintf("%s: Failed to probe chip %d\n", __FUNCTION__, chip);
606                probe_failed = 1;
607
608            }
609            if (*(uint32_t*)cvmx_nand_buffer == 0xffffffff || *(uint32_t*)cvmx_nand_buffer == 0x0)
610            {
611                if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
612                    cvmx_dprintf("%s: Probe returned nothing for chip %d\n", __FUNCTION__, chip);
613                probe_failed = 1;
614            }
615        }
616        /* Neither 8 or 16 bit mode worked, so go on to next chip select */
617        if (probe_failed)
618            continue;
619
620        /* Save copy of ID for later use */
621        memcpy(nand_id_buffer, cvmx_nand_buffer, sizeof(nand_id_buffer));
622
623        if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
624            cvmx_dprintf("%s: NAND chip %d has ID 0x%08llx\n", __FUNCTION__, chip, (unsigned long long int)*(uint64_t*)cvmx_nand_buffer);
625        /* Read more than required, as in 16 bit mode only every other byte is valid. */
626        if (cvmx_nand_read_id(chip, 0x20, cvmx_ptr_to_phys(cvmx_nand_buffer), 8) < 8)
627        {
628            if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
629                cvmx_dprintf("%s: Failed to probe chip %d\n", __FUNCTION__, chip);
630            continue;
631        }
632
633        if (((cvmx_nand_buffer[0] == 'O') && (cvmx_nand_buffer[1] == 'N') &&
634            (cvmx_nand_buffer[2] == 'F') && (cvmx_nand_buffer[3] == 'I')))
635        {
636            /* We have an ONFI part, so read the parameter page */
637
638            cvmx_nand_read_param_page(chip, cvmx_ptr_to_phys(cvmx_nand_buffer), 2048);
639            onfi_param_page = __cvmx_nand_onfi_process((cvmx_nand_onfi_param_page_t *)cvmx_nand_buffer);
640            if (onfi_param_page)
641            {
642                /* ONFI NAND parts are described by a parameter page.  Here we extract the configuration values
643                ** from the parameter page that we need to access the chip. */
644                cvmx_nand_state[chip].page_size = cvmx_le32_to_cpu(onfi_param_page->page_data_bytes);
645                cvmx_nand_state[chip].oob_size = cvmx_le16_to_cpu(onfi_param_page->page_spare_bytes);
646                cvmx_nand_state[chip].pages_per_block = cvmx_le32_to_cpu(onfi_param_page->pages_per_block);
647                cvmx_nand_state[chip].blocks = cvmx_le32_to_cpu(onfi_param_page->blocks_per_lun) * onfi_param_page->number_lun;
648
649                if (cvmx_le16_to_cpu(onfi_param_page->timing_mode) <= 0x3f)
650                {
651                    int mode_mask = cvmx_le16_to_cpu(onfi_param_page->timing_mode);
652                    int mode = 0;
653                    int i;
654                    for (i = 0; i < 6;i++)
655                    {
656                        if (mode_mask & (1 << i))
657                            mode = i;
658                    }
659                    cvmx_nand_state[chip].onfi_timing = mode;
660                }
661                else
662                {
663                    cvmx_dprintf("%s: Invalid timing mode (%d) in ONFI parameter page, ignoring\n", __FUNCTION__, cvmx_nand_state[chip].onfi_timing);
664                    cvmx_nand_state[chip].onfi_timing = 0;
665
666                }
667                if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
668                    cvmx_dprintf("%s: Using ONFI timing mode: %d\n", __FUNCTION__, cvmx_nand_state[chip].onfi_timing);
669                __set_onfi_timing_mode(cvmx_nand_state[chip].tim_par, clocks_us, cvmx_nand_state[chip].onfi_timing);
670                if (cvmx_nand_state[chip].page_size + cvmx_nand_state[chip].oob_size > CVMX_NAND_MAX_PAGE_AND_OOB_SIZE)
671                {
672                    cvmx_dprintf("%s: ERROR: Page size (%d) + OOB size (%d) is greater than max size (%d)\n",
673                                 __FUNCTION__, cvmx_nand_state[chip].page_size, cvmx_nand_state[chip].oob_size, CVMX_NAND_MAX_PAGE_AND_OOB_SIZE);
674                    return(CVMX_NAND_ERROR);
675                }
676                /* We have completed setup for this ONFI chip, so go on to next chip. */
677                continue;
678            }
679            else
680            {
681                /* Parameter page is not valid */
682                if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
683                    cvmx_dprintf("%s: ONFI paramater page missing or invalid.\n", __FUNCTION__);
684
685            }
686
687
688        }
689        else
690        {
691            /* We have a non-ONFI part. */
692            if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
693                cvmx_dprintf("%s: Chip %d doesn't support ONFI.\n", __FUNCTION__, chip);
694
695
696            if (nand_id_buffer[0] == 0xEC)
697            {
698                /* We have a Samsung part, so decode part info from ID bytes */
699                uint64_t nand_size_bits = (64*1024*1024ULL) << ((nand_id_buffer[4] & 0x70) >> 4); /* Plane size */
700                cvmx_nand_state[chip].page_size = 1024 << (nand_id_buffer[3] & 0x3);  /* NAND page size in bytes */
701                cvmx_nand_state[chip].oob_size = 128;     /* NAND OOB (spare) size in bytes (per page) */
702                cvmx_nand_state[chip].pages_per_block = (0x10000 << ((nand_id_buffer[3] & 0x30) >> 4))/cvmx_nand_state[chip].page_size;
703
704                nand_size_bits *= 1 << ((nand_id_buffer[4] & 0xc) >> 2);
705
706                cvmx_nand_state[chip].oob_size = cvmx_nand_state[chip].page_size/64;
707                if (nand_id_buffer[3] & 0x4)
708                    cvmx_nand_state[chip].oob_size *= 2;
709
710                cvmx_nand_state[chip].blocks = nand_size_bits/(8ULL*cvmx_nand_state[chip].page_size*cvmx_nand_state[chip].pages_per_block);
711                cvmx_nand_state[chip].onfi_timing = 2;
712
713                if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
714                {
715                    cvmx_dprintf("%s: Samsung NAND chip detected, using parameters decoded from ID bytes.\n", __FUNCTION__);
716                    cvmx_dprintf("%s: Defaults: page size: %d, OOB size: %d, pages per block %d, part size: %d MBytes, timing mode: %d\n",
717                                 __FUNCTION__, cvmx_nand_state[chip].page_size, cvmx_nand_state[chip].oob_size, cvmx_nand_state[chip].pages_per_block,
718                                 (int)(nand_size_bits/(8*1024*1024)), cvmx_nand_state[chip].onfi_timing);
719                }
720
721                __set_onfi_timing_mode(cvmx_nand_state[chip].tim_par, clocks_us, cvmx_nand_state[chip].onfi_timing);
722                if (cvmx_nand_state[chip].page_size + cvmx_nand_state[chip].oob_size > CVMX_NAND_MAX_PAGE_AND_OOB_SIZE)
723                {
724                    cvmx_dprintf("%s: ERROR: Page size (%d) + OOB size (%d) is greater than max size (%d)\n",
725                                 __FUNCTION__, cvmx_nand_state[chip].page_size, cvmx_nand_state[chip].oob_size, CVMX_NAND_MAX_PAGE_AND_OOB_SIZE);
726                    return(CVMX_NAND_ERROR);
727                }
728
729                /* We have completed setup for this Samsung chip, so go on to next chip. */
730                continue;
731
732
733            }
734
735        }
736
737
738
739        /*  We were not able to automatically identify the NAND chip parameters.  If default values were configured,
740        ** use them. */
741        if (cvmx_nand_default.page_size)
742        {
743            __set_chip_defaults(chip, clocks_us);
744        }
745        else
746        {
747
748            if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
749                cvmx_dprintf("%s: Unable to determine NAND parameters, and no defaults supplied.\n", __FUNCTION__);
750        }
751    }
752    CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
753}
754#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
755EXPORT_SYMBOL(cvmx_nand_initialize);
756#endif
757
758
759/**
760 * Call to shutdown the NAND controller after all transactions
761 * are done. In most setups this will never be called.
762 *
763 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
764 */
765cvmx_nand_status_t cvmx_nand_shutdown(void)
766{
767    CVMX_NAND_LOG_CALLED();
768    memset(&cvmx_nand_state,  0,  sizeof(cvmx_nand_state));
769    CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
770}
771
772
773/**
774 * Returns a bitmask representing the chip selects that are
775 * connected to NAND chips. This can be called after the
776 * initialize to determine the actual number of NAND chips
 * found. Each bit in the response corresponds to a chip select.
778 *
779 * @return Zero if no NAND chips were found. Otherwise a bit is set for
780 *         each chip select (1<<chip).
781 */
782int cvmx_nand_get_active_chips(void)
783{
784    int chip;
785    int result = 0;
786    for (chip=0; chip<8; chip++)
787    {
788        if (cvmx_nand_state[chip].page_size)
789            result |= 1<<chip;
790    }
791    return result;
792}
793#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
794EXPORT_SYMBOL(cvmx_nand_get_active_chips);
795#endif
796
797
798/**
799 * Override the timing parameters for a NAND chip
800 *
801 * @param chip     Chip select to override
802 * @param tim_mult
803 * @param tim_par
804 * @param clen
805 * @param alen
806 * @param rdn
807 * @param wrn
808 *
809 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
810 */
811cvmx_nand_status_t cvmx_nand_set_timing(int chip, int tim_mult, int tim_par[8], int clen[4], int alen[4], int rdn[4], int wrn[2])
812{
813    int i;
814    CVMX_NAND_LOG_CALLED();
815
816    if ((chip < 0) || (chip > 7))
817        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
818    if (!cvmx_nand_state[chip].page_size)
819        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
820
821    cvmx_nand_state[chip].tim_mult = tim_mult;
822    for (i=0;i<8;i++)
823        cvmx_nand_state[chip].tim_par[i] = tim_par[i];
824    for (i=0;i<4;i++)
825        cvmx_nand_state[chip].clen[i] = clen[i];
826    for (i=0;i<4;i++)
827        cvmx_nand_state[chip].alen[i] = alen[i];
828    for (i=0;i<4;i++)
829        cvmx_nand_state[chip].rdn[i] = rdn[i];
830    for (i=0;i<2;i++)
831        cvmx_nand_state[chip].wrn[i] = wrn[i];
832
833    CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
834}
835
836
837/**
838 * @INTERNAL
839 * Get the number of free bytes in the NAND command queue
840 *
841 * @return Number of bytes in queue
842 */
843static inline int __cvmx_nand_get_free_cmd_bytes(void)
844{
845    union cvmx_ndf_misc ndf_misc;
846    CVMX_NAND_LOG_CALLED();
847    ndf_misc.u64 = cvmx_read_csr(CVMX_NDF_MISC);
848    CVMX_NAND_RETURN((int)ndf_misc.s.fr_byt);
849}
850
851
852/**
853 * Submit a command to the NAND command queue. Generally this
854 * will not be used directly. Instead most programs will use the other
855 * higher level NAND functions.
856 *
857 * @param cmd    Command to submit
858 *
859 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
860 */
861cvmx_nand_status_t cvmx_nand_submit(cvmx_nand_cmd_t cmd)
862{
863    CVMX_NAND_LOG_CALLED();
864    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)cmd.u64[0]);
865    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)cmd.u64[1]);
866    CVMX_NAND_LOG_PARAM("%s", cvmx_nand_opcode_labels[cmd.s.op_code]);
867    switch (cmd.s.op_code)
868    {
869        /* All these commands fit in one 64bit word */
870        case 0: /* NOP */
871        case 1: /* Timing */
872        case 2: /* WAIT */
873        case 3: /* Chip Enable/Disable */
874        case 4: /* CLE */
875        case 8: /* Write */
876        case 9: /* Read */
877        case 10: /* Read EDO */
878        case 15: /* Bus Aquire/Release */
879            if (__cvmx_nand_get_free_cmd_bytes() < 8)
880                CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
881            cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[1]);
882            CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
883
884        case 5: /* ALE commands take either one or two 64bit words */
885            if (cmd.ale.adr_byte_num < 5)
886            {
887                if (__cvmx_nand_get_free_cmd_bytes() < 8)
888                    CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
889                cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[1]);
890                CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
891            }
892            else
893            {
894                if (__cvmx_nand_get_free_cmd_bytes() < 16)
895                    CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
896                cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[1]);
897                cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[0]);
898                CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
899            }
900
901        case 11: /* Wait status commands take two 64bit words */
902            if (__cvmx_nand_get_free_cmd_bytes() < 16)
903                CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
904            cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[1]);
905            cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[0]);
906            CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
907
908        default:
909            CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
910    }
911}
912
913
914/**
915 * @INTERNAL
916 * Get the number of bits required to encode the column bits. This
917 * does not include padding to align on a byte boundary.
918 *
919 * @param chip   NAND chip to get data for
920 *
921 * @return Number of column bits
922 */
923static inline int __cvmx_nand_get_column_bits(int chip)
924{
925    return cvmx_pop(cvmx_nand_state[chip].page_size - 1);
926}
927
928
929/**
930 * @INTERNAL
931 * Get the number of bits required to encode the row bits. This
932 * does not include padding to align on a byte boundary.
933 *
934 * @param chip   NAND chip to get data for
935 *
936 * @return Number of row bits
937 */
938static inline int __cvmx_nand_get_row_bits(int chip)
939{
940    return cvmx_pop(cvmx_nand_state[chip].blocks-1) + cvmx_pop(cvmx_nand_state[chip].pages_per_block-1);
941}
942
943
944/**
945 * @INTERNAL
946 * Get the number of address cycles required for this NAND part.
947 * This include column bits, padding, page bits, and block bits.
948 *
949 * @param chip   NAND chip to get data for
950 *
951 * @return Number of address cycles on the bus
952 */
static inline int __cvmx_nand_get_address_cycles(int chip)
{
    /* Column and row fields are each padded up to a whole number of bytes,
    ** and every byte costs one address cycle on the 8 bit wide bus. */
    int column_bytes = (__cvmx_nand_get_column_bits(chip) + 7) >> 3;
    int row_bytes = (__cvmx_nand_get_row_bits(chip) + 7) >> 3;
    return column_bytes + row_bytes;
}
959
960
961/**
962 * @INTERNAL
963 * Build the set of command common to most transactions
964 * @param chip      NAND chip to program
 * @param cmd_data  NAND command for CLE cycle 1
966 * @param num_address_cycles
967 *                  Number of address cycles to put on the bus
968 * @param nand_address
969 *                  Data to be put on the bus. It is translated according to
970 *                  the rules in the file information section.
971 *
972 * @param cmd_data2 If non zero, adds a second CLE cycle used by a number of NAND
973 *                  transactions.
974 *
975 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
976 */
static inline cvmx_nand_status_t __cvmx_nand_build_pre_cmd(int chip, int cmd_data, int num_address_cycles, uint64_t nand_address, int cmd_data2)
{
    cvmx_nand_status_t result;
    cvmx_nand_cmd_t cmd;

    CVMX_NAND_LOG_CALLED();

    /* Send timing parameters first so every following bus cycle is driven
    ** with this chip's timings. */
    memset(&cmd,  0,  sizeof(cmd));
    cmd.set_tm_par.one = 1;
    cmd.set_tm_par.tim_mult = cvmx_nand_state[chip].tim_mult;
    /* tim_par[0] unused */
    cmd.set_tm_par.tim_par1 = cvmx_nand_state[chip].tim_par[1];
    cmd.set_tm_par.tim_par2 = cvmx_nand_state[chip].tim_par[2];
    cmd.set_tm_par.tim_par3 = cvmx_nand_state[chip].tim_par[3];
    cmd.set_tm_par.tim_par4 = cvmx_nand_state[chip].tim_par[4];
    cmd.set_tm_par.tim_par5 = cvmx_nand_state[chip].tim_par[5];
    cmd.set_tm_par.tim_par6 = cvmx_nand_state[chip].tim_par[6];
    cmd.set_tm_par.tim_par7 = cvmx_nand_state[chip].tim_par[7];
    result = cvmx_nand_submit(cmd);
    if (result)
        CVMX_NAND_RETURN(result);

    /* Send bus select (bus acquire) */
    memset(&cmd,  0,  sizeof(cmd));
    cmd.bus_acq.fifteen = 15;
    cmd.bus_acq.one = 1;
    result = cvmx_nand_submit(cmd);
    if (result)
        CVMX_NAND_RETURN(result);

    /* Send chip select; width selects an 8 or 16 bit wide data bus */
    memset(&cmd,  0,  sizeof(cmd));
    cmd.chip_en.chip = chip;
    cmd.chip_en.one = 1;
    cmd.chip_en.three = 3;
    cmd.chip_en.width = (cvmx_nand_state[chip].flags & CVMX_NAND_STATE_16BIT) ? 2 : 1;
    result = cvmx_nand_submit(cmd);
    if (result)
        CVMX_NAND_RETURN(result);

    /* Send wait, fixed time
    ** This meets chip enable to command latch enable timing.
    ** This is tCS - tCLS from the ONFI spec.
    ** Use tWP as a proxy, as this is adequate for
    ** all ONFI 1.0 timing modes. */
    memset(&cmd,  0,  sizeof(cmd));
    cmd.wait.two = 2;
    cmd.wait.n = 1;
    if (cvmx_nand_submit(cmd))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    /* Send CLE (the first command cycle, cmd_data) */
    memset(&cmd,  0,  sizeof(cmd));
    cmd.cle.cmd_data = cmd_data;
    cmd.cle.clen1 = cvmx_nand_state[chip].clen[0];
    cmd.cle.clen2 = cvmx_nand_state[chip].clen[1];
    cmd.cle.clen3 = cvmx_nand_state[chip].clen[2];
    cmd.cle.four = 4;
    result = cvmx_nand_submit(cmd);
    if (result)
        CVMX_NAND_RETURN(result);

    /* Send ALE (address cycles), if any were requested */
    if (num_address_cycles)
    {
        memset(&cmd,  0,  sizeof(cmd));
        cmd.ale.adr_byte_num = num_address_cycles;
        if (num_address_cycles < __cvmx_nand_get_address_cycles(chip))
        {
            /* Fewer cycles than a full chip address: the caller's address
            ** goes on the bus unmodified (used for e.g. read ID and erase
            ** style addressing). */
            cmd.ale.adr_bytes_l = nand_address;
            cmd.ale.adr_bytes_h = nand_address >> 32;
        }
        else
        {
            /* Full address: split into column (offset within page) and row,
            ** then pack the row immediately above the byte aligned column
            ** field. */
            int column_bits = __cvmx_nand_get_column_bits(chip);
            int column_shift = ((column_bits + 7) >> 3) << 3;
            int column = nand_address & (cvmx_nand_state[chip].page_size-1);
            int row = nand_address >> column_bits;
            cmd.ale.adr_bytes_l = column + (row << column_shift);
            cmd.ale.adr_bytes_h = row >> (32 - column_shift);
        }
        cmd.ale.alen1 = cvmx_nand_state[chip].alen[0];
        cmd.ale.alen2 = cvmx_nand_state[chip].alen[1];
        cmd.ale.alen3 = cvmx_nand_state[chip].alen[2];
        cmd.ale.alen4 = cvmx_nand_state[chip].alen[3];
        cmd.ale.five = 5;
        result = cvmx_nand_submit(cmd);
        if (result)
            CVMX_NAND_RETURN(result);
    }

    /* Send CLE 2 (optional second command cycle) */
    if (cmd_data2)
    {
        memset(&cmd,  0,  sizeof(cmd));
        cmd.cle.cmd_data = cmd_data2;
        cmd.cle.clen1 = cvmx_nand_state[chip].clen[0];
        cmd.cle.clen2 = cvmx_nand_state[chip].clen[1];
        cmd.cle.clen3 = cvmx_nand_state[chip].clen[2];
        cmd.cle.four = 4;
        result = cvmx_nand_submit(cmd);
        if (result)
            CVMX_NAND_RETURN(result);
    }

    CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
}
1085
1086
1087/**
1088 * @INTERNAL
1089 * Build the set of command common to most transactions
1090 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
1091 */
1092static inline cvmx_nand_status_t __cvmx_nand_build_post_cmd(void)
1093{
1094    cvmx_nand_status_t result;
1095    cvmx_nand_cmd_t cmd;
1096
1097    CVMX_NAND_LOG_CALLED();
1098
1099    /* Send chip deselect */
1100    memset(&cmd,  0,  sizeof(cmd));
1101    cmd.chip_dis.three = 3;
1102    result = cvmx_nand_submit(cmd);
1103    if (result)
1104        CVMX_NAND_RETURN(result);
1105
1106    /* Send bus release */
1107    memset(&cmd,  0,  sizeof(cmd));
1108    cmd.bus_rel.fifteen = 15;
1109    result = cvmx_nand_submit(cmd);
1110    if (result)
1111        CVMX_NAND_RETURN(result);
1112
1113    /* Ring the doorbell */
1114    cvmx_write_csr(CVMX_NDF_DRBELL, 1);
1115    CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
1116}
1117
1118
1119/**
1120 * @INTERNAL
1121 * Setup the NAND DMA engine for a transfer
1122 *
1123 * @param chip     Chip select for NAND flash
1124 * @param is_write Non zero if this is a write
1125 * @param buffer_address
1126 *                 Physical memory address to DMA to/from
1127 * @param buffer_length
1128 *                 Length of the DMA in bytes
1129 */
static inline void __cvmx_nand_setup_dma(int chip, int is_write, uint64_t buffer_address, int buffer_length)
{
    union cvmx_mio_ndf_dma_cfg ndf_dma_cfg;
    CVMX_NAND_LOG_CALLED();
    CVMX_NAND_LOG_PARAM("%d", chip);
    CVMX_NAND_LOG_PARAM("%d", is_write);
    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
    CVMX_NAND_LOG_PARAM("%d", buffer_length);
    ndf_dma_cfg.u64 = 0;
    ndf_dma_cfg.s.en = 1;
    ndf_dma_cfg.s.rw = is_write; /* One means DMA reads from memory and writes to flash */
    ndf_dma_cfg.s.clr = 0;
    /* Size is encoded as the number of 8 byte words minus one, rounded up */
    ndf_dma_cfg.s.size = ((buffer_length + 7) >> 3) - 1;
    ndf_dma_cfg.s.adr = buffer_address;
    /* Make all prior memory writes visible before enabling the DMA engine */
    CVMX_SYNCWS;
    cvmx_write_csr(CVMX_MIO_NDF_DMA_CFG, ndf_dma_cfg.u64);
    CVMX_NAND_RETURN_NOTHING();
}
1148
1149
1150/**
1151 * Dump a buffer out in hex for debug
1152 *
1153 * @param buffer_address
1154 *               Starting physical address
1155 * @param buffer_length
1156 *               Number of bytes to display
1157 */
1158static void __cvmx_nand_hex_dump(uint64_t buffer_address, int buffer_length)
1159{
1160    uint8_t *buffer = cvmx_phys_to_ptr(buffer_address);
1161    int offset = 0;
1162    while (offset < buffer_length)
1163    {
1164        int i;
1165        cvmx_dprintf("%*s%04x:",  2*debug_indent, "", offset);
1166        for (i=0; i<32; i++)
1167        {
1168            if ((i&3) == 0)
1169                cvmx_dprintf(" ");
1170            if (offset+i < buffer_length)
1171                cvmx_dprintf("%02x", 0xff & buffer[offset+i]);
1172            else
1173                cvmx_dprintf("  ");
1174        }
1175        cvmx_dprintf("\n");
1176        offset += 32;
1177    }
1178}
1179
1180/**
1181 * @INTERNAL
1182 * Perform a low level NAND read command
1183 *
1184 * @param chip   Chip to read from
1185 * @param nand_command1
1186 *               First command cycle value
1187 * @param address_cycles
 *               Number of address cycles after command 1
1189 * @param nand_address
1190 *               NAND address to use for address cycles
1191 * @param nand_command2
 *               NAND command cycle 2 if not zero
1193 * @param buffer_address
1194 *               Physical address to DMA into
1195 * @param buffer_length
1196 *               Length of the transfer in bytes
1197 *
 * @return Number of bytes transferred or a negative error code
1199 */
static inline int __cvmx_nand_low_level_read(int chip, int nand_command1, int address_cycles, uint64_t nand_address, int nand_command2, uint64_t buffer_address, int buffer_length)
{
    cvmx_nand_cmd_t cmd;
    union cvmx_mio_ndf_dma_cfg ndf_dma_cfg;
    int bytes;

    CVMX_NAND_LOG_CALLED();
    CVMX_NAND_LOG_PARAM("%d", chip);
    CVMX_NAND_LOG_PARAM("0x%x", nand_command1);
    CVMX_NAND_LOG_PARAM("%d", address_cycles);
    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)nand_address);
    CVMX_NAND_LOG_PARAM("0x%x", nand_command2);
    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
    CVMX_NAND_LOG_PARAM("%d", buffer_length);

    /* The DMA engine needs an 8 byte aligned buffer and a non zero length
    ** that is a multiple of 8 bytes. */
    if ((chip < 0) || (chip > 7))
        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
    if (!buffer_address)
        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
    if (buffer_address & 7)
        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
    if (buffer_length & 7)
        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
    if (!buffer_length)
        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);

    /* Build the command and address cycles */
    if (__cvmx_nand_build_pre_cmd(chip, nand_command1, address_cycles, nand_address, nand_command2))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    /* Send WAIT.  This waits for some time, then
    ** waits for busy to be de-asserted. */
    if (__wait_for_busy_done(chip))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    /* Wait for tRR after busy de-asserts.
    ** Use 2* tALS as proxy.  This is overkill in
    ** the slow modes, but not bad in the faster ones. */
    memset(&cmd,  0,  sizeof(cmd));
    cmd.wait.two = 2;
    cmd.wait.n=4;
    /* The same WAIT is submitted twice to get the 2 * tALS delay */
    if (cvmx_nand_submit(cmd))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
    if (cvmx_nand_submit(cmd))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    /* Send READ */
    memset(&cmd,  0,  sizeof(cmd));
    cmd.rd.data_bytes = buffer_length;
    if (cvmx_nand_state[chip].onfi_timing >= 4)
        cmd.rd.nine = 10;  /* READ_EDO command is required for ONFI timing modes 4 and 5 */
    else
        cmd.rd.nine = 9;
    cmd.rd.rdn1 = cvmx_nand_state[chip].rdn[0];
    cmd.rd.rdn2 = cvmx_nand_state[chip].rdn[1];
    cmd.rd.rdn3 = cvmx_nand_state[chip].rdn[2];
    cmd.rd.rdn4 = cvmx_nand_state[chip].rdn[3];
    if (cvmx_nand_submit(cmd))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    /* Program the DMA engine before the doorbell rings in post_cmd */
    __cvmx_nand_setup_dma(chip, 0, buffer_address, buffer_length);

    if (__cvmx_nand_build_post_cmd())
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    /* Wait for the DMA to complete */
    if (CVMX_WAIT_FOR_FIELD64(CVMX_MIO_NDF_DMA_CFG, cvmx_mio_ndf_dma_cfg_t, en, ==, 0, NAND_TIMEOUT_USECS))
        CVMX_NAND_RETURN(CVMX_NAND_TIMEOUT);

    /* Return the number of bytes transferred: the DMA address register
    ** advances as data moves, so the delta from the start is the count. */
    ndf_dma_cfg.u64 = cvmx_read_csr(CVMX_MIO_NDF_DMA_CFG);
    bytes = ndf_dma_cfg.s.adr - buffer_address;

    if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
        __cvmx_nand_hex_dump(buffer_address, bytes);

    CVMX_NAND_RETURN(bytes);
}
1278
1279
1280/**
1281 * Read a page from NAND. If the buffer has room, the out of band
1282 * data will be included.
1283 *
1284 * @param chip   Chip select for NAND flash
1285 * @param nand_address
1286 *               Location in NAND to read. See description in file comment
1287 * @param buffer_address
1288 *               Physical address to store the result at
1289 * @param buffer_length
1290 *               Number of bytes to read
1291 *
1292 * @return Bytes read on success, a negative cvmx_nand_status_t error code on failure
1293 */
1294int cvmx_nand_page_read(int chip, uint64_t nand_address, uint64_t buffer_address, int buffer_length)
1295{
1296    int bytes;
1297
1298    CVMX_NAND_LOG_CALLED();
1299    CVMX_NAND_LOG_PARAM("%d", chip);
1300    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)nand_address);
1301    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
1302    CVMX_NAND_LOG_PARAM("%d", buffer_length);
1303
1304    if ((chip < 0) || (chip > 7))
1305        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1306    if (!cvmx_nand_state[chip].page_size)
1307        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1308    if (!buffer_address)
1309        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1310    if (buffer_address & 7)
1311        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1312    if (buffer_length & 7)
1313        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1314    if (!buffer_length)
1315        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1316
1317    /* For 16 bit mode, addresses within a page are word address, rather than byte addresses */
1318    if (cvmx_nand_state[chip].flags & CVMX_NAND_STATE_16BIT)
1319            nand_address = (nand_address & ~(cvmx_nand_state[chip].page_size - 1)) |  ((nand_address & (cvmx_nand_state[chip].page_size - 1)) >> 1);
1320
1321    bytes = __cvmx_nand_low_level_read(chip, NAND_COMMAND_READ, __cvmx_nand_get_address_cycles(chip), nand_address, NAND_COMMAND_READ_FIN, buffer_address, buffer_length);
1322    CVMX_NAND_RETURN(bytes);
1323}
1324#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
1325EXPORT_SYMBOL(cvmx_nand_page_read);
1326#endif
1327
1328
1329/**
1330 * Write a page to NAND. The buffer must contain the entire page
1331 * including the out of band data.
1332 *
1333 * @param chip   Chip select for NAND flash
1334 * @param nand_address
1335 *               Location in NAND to write. See description in file comment
1336 * @param buffer_address
1337 *               Physical address to read the data from
1338 *
1339 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
1340 */
cvmx_nand_status_t cvmx_nand_page_write(int chip, uint64_t nand_address, uint64_t buffer_address)
{
    cvmx_nand_cmd_t cmd;
    int buffer_length;

    CVMX_NAND_LOG_CALLED();
    CVMX_NAND_LOG_PARAM("%d", chip);
    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)nand_address);
    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);

    /* Require a valid, detected chip select and an 8 byte aligned buffer */
    if ((chip < 0) || (chip > 7))
        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
    if (!cvmx_nand_state[chip].page_size)
        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
    if (!buffer_address)
        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
    if (buffer_address & 7)
        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);

    /* For 16 bit mode, addresses within a page are word address, rather than byte addresses */
    if (cvmx_nand_state[chip].flags & CVMX_NAND_STATE_16BIT)
            nand_address = (nand_address & ~(cvmx_nand_state[chip].page_size - 1)) |  ((nand_address & (cvmx_nand_state[chip].page_size - 1)) >> 1);

    /* The whole page plus its out of band area are written in one shot */
    buffer_length = cvmx_nand_state[chip].page_size + cvmx_nand_state[chip].oob_size;

    /* The NAND DMA engine always does transfers in 8 byte blocks, so round the buffer size down
    ** to a multiple of 8, otherwise we will transfer too much data to the NAND chip.
    ** Note this prevents the last few bytes of the OOB being written.  If these bytes
    ** need to be written, then this check needs to be removed, but this will result in
    ** extra write cycles beyond the end of the OOB. */
    buffer_length &= ~0x7;

    /* Build the command and address cycles */
    if (__cvmx_nand_build_pre_cmd(chip, NAND_COMMAND_PROGRAM, __cvmx_nand_get_address_cycles(chip), nand_address, 0))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    /* Send WRITE (queue the data transfer to the chip) */
    memset(&cmd,  0,  sizeof(cmd));
    cmd.wr.data_bytes = buffer_length;
    cmd.wr.eight = 8;
    cmd.wr.wrn1 = cvmx_nand_state[chip].wrn[0];
    cmd.wr.wrn2 = cvmx_nand_state[chip].wrn[1];
    if (cvmx_nand_submit(cmd))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    /* Send the program confirm command (second CLE cycle) */
    memset(&cmd,  0,  sizeof(cmd));
    cmd.cle.cmd_data = NAND_COMMAND_PROGRAM_FIN;
    cmd.cle.clen1 = cvmx_nand_state[chip].clen[0];
    cmd.cle.clen2 = cvmx_nand_state[chip].clen[1];
    cmd.cle.clen3 = cvmx_nand_state[chip].clen[2];
    cmd.cle.four = 4;
    if (cvmx_nand_submit(cmd))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    /* Program the DMA engine before the command queue starts executing */
    __cvmx_nand_setup_dma(chip, 1, buffer_address, buffer_length);

    /* WAIT for R_B to signal program is complete  */
    if (__wait_for_busy_done(chip))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    if (__cvmx_nand_build_post_cmd())
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    /* Wait for the DMA to complete */
    if (CVMX_WAIT_FOR_FIELD64(CVMX_MIO_NDF_DMA_CFG, cvmx_mio_ndf_dma_cfg_t, en, ==, 0, NAND_TIMEOUT_USECS))
        CVMX_NAND_RETURN(CVMX_NAND_TIMEOUT);

    CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
}
1411#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
1412EXPORT_SYMBOL(cvmx_nand_page_write);
1413#endif
1414
1415
1416/**
1417 * Erase a NAND block. A single block contains multiple pages.
1418 *
1419 * @param chip   Chip select for NAND flash
1420 * @param nand_address
1421 *               Location in NAND to erase. See description in file comment
1422 *
1423 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
1424 */
cvmx_nand_status_t cvmx_nand_block_erase(int chip, uint64_t nand_address)
{
    CVMX_NAND_LOG_CALLED();
    CVMX_NAND_LOG_PARAM("%d", chip);
    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)nand_address);

    /* Require a valid chip select with a detected chip on it */
    if ((chip < 0) || (chip > 7))
        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
    if (!cvmx_nand_state[chip].page_size)
        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);

    /* Build the command and address cycles. Erase addresses the row only,
    ** so the column bits are shifted out and only the row bytes are sent. */
    if (__cvmx_nand_build_pre_cmd(chip, NAND_COMMAND_ERASE,
                                  (__cvmx_nand_get_row_bits(chip)+7) >> 3,
                                  nand_address >> __cvmx_nand_get_column_bits(chip),
                                  NAND_COMMAND_ERASE_FIN))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    /* WAIT for R_B to signal erase is complete  */
    if (__wait_for_busy_done(chip))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    if (__cvmx_nand_build_post_cmd())
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    /* Wait for the command queue to be idle, which means the wait is done */
    if (CVMX_WAIT_FOR_FIELD64(CVMX_NDF_ST_REG, cvmx_ndf_st_reg_t, exe_idle, ==, 1, NAND_TIMEOUT_USECS))
        CVMX_NAND_RETURN(CVMX_NAND_TIMEOUT);

    CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
}
1456#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
1457EXPORT_SYMBOL(cvmx_nand_block_erase);
1458#endif
1459
1460
1461/* Some reads (read ID, read parameter page) only use the low 8 bits of the bus
1462** in 16 bit mode.  We remove the unused bytes so that the data we present to the
1463** caller is as expected (same as 8 bit mode.)
1464*/
static void __cvmx_nand_fixup_16bit_id_reads(uint8_t *buf, int buffer_length)
{
    /* Keep only every other byte (the odd offsets), compacting them to the
    ** front of the buffer so the data matches what 8 bit mode would return. */
    int src;
    int dst = 0;
    for (src = 1; src < buffer_length; src += 2)
        buf[dst++] = buf[src];
}
1472
1473/**
1474 * Read the NAND ID information
1475 *
1476 * @param chip   Chip select for NAND flash
1477 * @param nand_address
1478 *               NAND address to read ID from. Usually this is either 0x0 or 0x20.
1479 * @param buffer_address
1480 *               Physical address to store data in
1481 * @param buffer_length
1482 *               Length of the buffer. Usually this is 4-8 bytes.  For 16 bit mode, this must be twice
1483 *               as large as the actual expected data.
1484 *
1485 * @return Bytes read on success, a negative cvmx_nand_status_t error code on failure
1486 */
1487int cvmx_nand_read_id(int chip, uint64_t nand_address, uint64_t buffer_address, int buffer_length)
1488{
1489    int bytes;
1490
1491    CVMX_NAND_LOG_CALLED();
1492    CVMX_NAND_LOG_PARAM("%d", chip);
1493    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)nand_address);
1494    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
1495    CVMX_NAND_LOG_PARAM("%d", buffer_length);
1496
1497    if ((chip < 0) || (chip > 7))
1498        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1499    if (!buffer_address)
1500        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1501    if (buffer_address & 7)
1502        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1503    if (!buffer_length)
1504        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1505
1506    bytes = __cvmx_nand_low_level_read(chip, NAND_COMMAND_READ_ID, 1, nand_address, 0, buffer_address, buffer_length);
1507    if (cvmx_nand_state[chip].flags & CVMX_NAND_STATE_16BIT)
1508        __cvmx_nand_fixup_16bit_id_reads(cvmx_phys_to_ptr(buffer_address), buffer_length);
1509
1510    CVMX_NAND_RETURN(bytes);
1511}
1512#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
1513EXPORT_SYMBOL(cvmx_nand_read_id);
1514#endif
1515
1516
1517/**
1518 * Read the NAND parameter page
1519 *
1520 * @param chip   Chip select for NAND flash
1521 * @param buffer_address
1522 *               Physical address to store data in
1523 * @param buffer_length
1524 *               Length of the buffer.  Usually 1024 bytes for 8 bit, 2048 for 16 bit mode.
1525 *
1526 * @return Bytes read on success, a negative cvmx_nand_status_t error code on failure
1527 */
1528int cvmx_nand_read_param_page(int chip, uint64_t buffer_address, int buffer_length)
1529{
1530    int bytes;
1531
1532    CVMX_NAND_LOG_CALLED();
1533    CVMX_NAND_LOG_PARAM("%d", chip);
1534    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
1535    CVMX_NAND_LOG_PARAM("%d", buffer_length);
1536
1537    if ((chip < 0) || (chip > 7))
1538        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1539    if (!buffer_address)
1540        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1541    if (buffer_address & 7)
1542        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1543    if (buffer_length & 7)
1544        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1545    if (!buffer_length)
1546        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1547
1548    bytes = __cvmx_nand_low_level_read(chip, NAND_COMMAND_READ_PARAM_PAGE, 1, 0x0, 0, buffer_address, buffer_length);
1549    if (cvmx_nand_state[chip].flags & CVMX_NAND_STATE_16BIT)
1550        __cvmx_nand_fixup_16bit_id_reads(cvmx_phys_to_ptr(buffer_address), buffer_length);
1551    CVMX_NAND_RETURN(bytes);
1552}
1553
1554
1555/**
1556 * Get the status of the NAND flash
1557 *
1558 * @param chip   Chip select for NAND flash
1559 *
1560 * @return NAND status or a negative cvmx_nand_status_t error code on failure
1561 */
1562int cvmx_nand_get_status(int chip)
1563{
1564    int status;
1565    int offset = !!(cvmx_nand_state[chip].flags & CVMX_NAND_STATE_16BIT);  /* Normalize flag to 0/1 */
1566
1567    CVMX_NAND_LOG_CALLED();
1568    CVMX_NAND_LOG_PARAM("%d", chip);
1569
1570    if ((chip < 0) || (chip > 7))
1571        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1572
1573    *((uint8_t*)cvmx_nand_buffer + offset)  = 0xff;
1574    status = __cvmx_nand_low_level_read(chip, NAND_COMMAND_STATUS, 0, 0, 0, cvmx_ptr_to_phys(cvmx_nand_buffer), 8);
1575    if (status > 0)
1576        status = *((uint8_t*)cvmx_nand_buffer + offset);
1577
1578    CVMX_NAND_RETURN(status);
1579}
1580#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
1581EXPORT_SYMBOL(cvmx_nand_get_status);
1582#endif
1583
1584
1585/**
1586 * Get the page size, excluding out of band data. This  function
1587 * will return zero for chip selects not connected to NAND.
1588 *
1589 * @param chip   Chip select for NAND flash
1590 *
1591 * @return Page size in bytes or a negative cvmx_nand_status_t error code on failure
1592 */
1593int cvmx_nand_get_page_size(int chip)
1594{
1595    CVMX_NAND_LOG_CALLED();
1596    CVMX_NAND_LOG_PARAM("%d", chip);
1597
1598    if ((chip < 0) || (chip > 7))
1599        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1600
1601    CVMX_NAND_RETURN(cvmx_nand_state[chip].page_size);
1602}
1603
1604
1605/**
1606 * Get the OOB size.
1607 *
1608 * @param chip   Chip select for NAND flash
1609 *
1610 * @return OOB in bytes or a negative cvmx_nand_status_t error code on failure
1611 */
1612int cvmx_nand_get_oob_size(int chip)
1613{
1614    CVMX_NAND_LOG_CALLED();
1615    CVMX_NAND_LOG_PARAM("%d", chip);
1616
1617    if ((chip < 0) || (chip > 7))
1618        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1619
1620    CVMX_NAND_RETURN(cvmx_nand_state[chip].oob_size);
1621}
1622
1623
1624/**
1625 * Get the number of pages per NAND block
1626 *
1627 * @param chip   Chip select for NAND flash
1628 *
1629 * @return Number of pages in each block or a negative cvmx_nand_status_t error
1630 *         code on failure
1631 */
1632int cvmx_nand_get_pages_per_block(int chip)
1633{
1634    CVMX_NAND_LOG_CALLED();
1635    CVMX_NAND_LOG_PARAM("%d", chip);
1636
1637    if ((chip < 0) || (chip > 7))
1638        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1639
1640    CVMX_NAND_RETURN(cvmx_nand_state[chip].pages_per_block);
1641}
1642
1643
1644/**
1645 * Get the number of blocks in the NAND flash
1646 *
1647 * @param chip   Chip select for NAND flash
1648 *
1649 * @return Number of blocks or a negative cvmx_nand_status_t error code on failure
1650 */
1651int cvmx_nand_get_blocks(int chip)
1652{
1653    CVMX_NAND_LOG_CALLED();
1654    CVMX_NAND_LOG_PARAM("%d", chip);
1655
1656    if ((chip < 0) || (chip > 7))
1657        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1658
1659    CVMX_NAND_RETURN(cvmx_nand_state[chip].blocks);
1660}
1661
1662
1663/**
1664 * Reset the NAND flash
1665 *
1666 * @param chip   Chip select for NAND flash
1667 *
1668 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
1669 */
1670cvmx_nand_status_t cvmx_nand_reset(int chip)
1671{
1672    CVMX_NAND_LOG_CALLED();
1673    CVMX_NAND_LOG_PARAM("%d", chip);
1674
1675    if ((chip < 0) || (chip > 7))
1676        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1677    if (!cvmx_nand_state[chip].page_size)
1678        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1679
1680    if (__cvmx_nand_build_pre_cmd(chip, NAND_COMMAND_RESET, 0, 0, 0))
1681        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1682
1683    /* WAIT for R_B to signal reset is complete  */
1684    if (__wait_for_busy_done(chip))
1685        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1686
1687    if (__cvmx_nand_build_post_cmd())
1688        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1689
1690    CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
1691}
1692#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
1693EXPORT_SYMBOL(cvmx_nand_reset);
1694#endif
1695
1696
1697
1698
1699/**
1700 * This function computes the Octeon specific ECC data used by the NAND boot
1701 * feature.
1702 *
1703 * @param block  pointer to 256 bytes of data
1704 * @param eccp   pointer to where 8 bytes of ECC data will be stored
1705 */
1706void cvmx_nand_compute_boot_ecc(unsigned char *block, unsigned char *eccp)
1707{
1708	unsigned char pd0, pd1, pd2;
1709	int i, j;
1710
1711	pd0 = pd1 = pd2 = 0;
1712
1713	for (i = 0; i < 256; i++)	/* PD0<0> */
1714		pd0 ^= (block[i] ^ (block[i] >> 2) ^ (block[i] >> 4) ^ (block[i] >> 6)) & 1;
1715	for (i = 0; i < 256; i++)	/* PD0<1> */
1716		pd0 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 4) ^ (block[i] >> 5)) & 1) << 1;
1717	for (i = 0; i < 256; i++)	/* PD0<2> */
1718		pd0 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^ (block[i] >> 3)) & 1) << 2;
1719	for (i = 0; i < 128; i++)	/* PD0<3> */
1720		pd0 ^= ((block[2*i] ^ (block[2*i] >> 1) ^ (block[2*i] >> 2) ^
1721			(block[2*i] >> 3) ^ (block[2*i] >> 4) ^ (block[2*i] >> 5) ^
1722			(block[2*i] >> 6) ^ (block[2*i] >> 7)) & 1) << 3;
1723	for (i = 0; i < 64; i++)	/* PD0<4> */
1724		for (j = 0; j < 2; j++)
1725			pd0 ^= ((block[4*i+j] ^ (block[4*i+j] >> 1) ^ (block[4*i+j] >> 2) ^
1726				(block[4*i+j] >> 3) ^ (block[4*i+j] >> 4) ^ (block[4*i+j] >> 5) ^
1727				(block[4*i+j] >> 6) ^ (block[4*i+j] >> 7)) & 1) << 4;
1728	for (i = 0; i < 32; i++)	/* PD0<5> */
1729		for (j = 0; j < 4; j++)
1730			pd0 ^= ((block[8*i+j] ^ (block[8*i+j] >> 1) ^ (block[8*i+j] >> 2) ^
1731				(block[8*i+j] >> 3) ^ (block[8*i+j] >> 4) ^ (block[8*i+j] >> 5) ^
1732				(block[8*i+j] >> 6) ^ (block[8*i+j] >> 7)) & 1) << 5;
1733	for (i = 0; i < 16; i++)	/* PD0<6> */
1734		for (j = 0; j < 8; j++)
1735			pd0 ^= ((block[16*i+j] ^ (block[16*i+j] >> 1) ^ (block[16*i+j] >> 2) ^
1736				(block[16*i+j] >> 3) ^ (block[16*i+j] >> 4) ^ (block[16*i+j] >> 5) ^
1737				(block[16*i+j] >> 6) ^ (block[16*i+j] >> 7)) & 1) << 6;
1738	for (i = 0; i < 8; i++)		/* PD0<7> */
1739		for (j = 0; j < 16; j++)
1740			pd0 ^= ((block[32*i+j] ^ (block[32*i+j] >> 1) ^ (block[32*i+j] >> 2) ^
1741				(block[32*i+j] >> 3) ^ (block[32*i+j] >> 4) ^ (block[32*i+j] >> 5) ^
1742				(block[32*i+j] >> 6) ^ (block[32*i+j] >> 7)) & 1) << 7;
1743	for (i = 0; i < 4; i++)		/* PD1<0> */
1744		for (j = 0; j < 32; j++)
1745			pd1 ^= ((block[64*i+j] ^ (block[64*i+j] >> 1) ^ (block[64*i+j] >> 2) ^
1746				(block[64*i+j] >> 3) ^ (block[64*i+j] >> 4) ^ (block[64*i+j] >> 5) ^
1747				(block[64*i+j] >> 6) ^ (block[64*i+j] >> 7)) & 1) << 0;
1748	for (i = 0; i < 2; i++)		/* PD1<1> */
1749		for (j = 0; j < 64; j++)
1750			pd1 ^= ((block[128*i+j] ^ (block[128*i+j] >> 1) ^ (block[128*i+j] >> 2) ^
1751				(block[128*i+j] >> 3) ^ (block[128*i+j] >> 4) ^ (block[128*i+j] >> 5) ^
1752				(block[128*i+j] >> 6) ^ (block[128*i+j] >> 7)) & 1) << 1;
1753	for (i = 0; i < 128; i++)	/* PD1<2> */
1754		pd1 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^
1755			(block[i] >> 3) ^ (block[i] >> 4) ^ (block[i] >> 5) ^
1756			(block[i] >> 6) ^ (block[i] >> 7)) & 1) << 2;
1757	/* PD1<3> */
1758	/* PD1<4> */
1759	for (i = 0; i < 256; i++)	/* PD1<5> */
1760		pd1 ^= (((block[i] >> 1) ^ (block[i] >> 3) ^ (block[i] >> 5) ^ (block[i] >> 7)) & 1) << 5;
1761	for (i = 0; i < 256; i++)	/* PD1<6> */
1762		pd1 ^= (((block[i] >> 2) ^ (block[i] >> 3) ^ (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 6;
1763	for (i = 0; i < 256; i++)	/* PD1<7> */
1764		pd1 ^= (((block[i] >> 4) ^ (block[i] >> 5) ^ (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 7;
1765	for (i = 0; i < 128; i++)	/* PD2<0> */
1766		pd2 ^= ((block[2*i+1] ^ (block[2*i+1] >> 1) ^ (block[2*i+1] >> 2) ^
1767			(block[2*i+1] >> 3) ^ (block[2*i+1] >> 4) ^ (block[2*i+1] >> 5) ^
1768			(block[2*i+1] >> 6) ^ (block[2*i+1] >> 7)) & 1) << 0;
1769	for (i = 0; i < 64; i++)	/* PD2<1> */
1770		for (j = 2; j < 4; j++)
1771			pd2 ^= ((block[4*i+j] ^ (block[4*i+j] >> 1) ^ (block[4*i+j] >> 2) ^
1772				(block[4*i+j] >> 3) ^ (block[4*i+j] >> 4) ^ (block[4*i+j] >> 5) ^
1773				(block[4*i+j] >> 6) ^ (block[4*i+j] >> 7)) & 1) << 1;
1774	for (i = 0; i < 32; i++)	/* PD2<2> */
1775		for (j = 4; j < 8; j++)
1776			pd2 ^= ((block[8*i+j] ^ (block[8*i+j] >> 1) ^ (block[8*i+j] >> 2) ^
1777				(block[8*i+j] >> 3) ^ (block[8*i+j] >> 4) ^ (block[8*i+j] >> 5) ^
1778				(block[8*i+j] >> 6) ^ (block[8*i+j] >> 7)) & 1) << 2;
1779	for (i = 0; i < 16; i++)	/* PD2<3> */
1780		for (j = 8; j < 16; j++)
1781			pd2 ^= ((block[16*i+j] ^ (block[16*i+j] >> 1) ^ (block[16*i+j] >> 2) ^
1782				(block[16*i+j] >> 3) ^ (block[16*i+j] >> 4) ^ (block[16*i+j] >> 5) ^
1783				(block[16*i+j] >> 6) ^ (block[16*i+j] >> 7)) & 1) << 3;
1784	for (i = 0; i < 8; i++)		/* PD2<4> */
1785		for (j = 16; j < 32; j++)
1786			pd2 ^= ((block[32*i+j] ^ (block[32*i+j] >> 1) ^ (block[32*i+j] >> 2) ^
1787				(block[32*i+j] >> 3) ^ (block[32*i+j] >> 4) ^ (block[32*i+j] >> 5) ^
1788				(block[32*i+j] >> 6) ^ (block[32*i+j] >> 7)) & 1) << 4;
1789	for (i = 0; i < 4; i++)		/* PD2<5> */
1790		for (j = 32; j < 64; j++)
1791			pd2 ^= ((block[64*i+j] ^ (block[64*i+j] >> 1) ^ (block[64*i+j] >> 2) ^
1792				(block[64*i+j] >> 3) ^ (block[64*i+j] >> 4) ^ (block[64*i+j] >> 5) ^
1793				(block[64*i+j] >> 6) ^ (block[64*i+j] >> 7)) & 1) << 5;
1794	for (i = 0; i < 2; i++)		/* PD2<6> */
1795		for (j = 64; j < 128; j++)
1796			pd2 ^= ((block[128*i+j] ^ (block[128*i+j] >> 1) ^ (block[128*i+j] >> 2) ^
1797				(block[128*i+j] >> 3) ^ (block[128*i+j] >> 4) ^ (block[128*i+j] >> 5) ^
1798				(block[128*i+j] >> 6) ^ (block[128*i+j] >> 7)) & 1) << 6;
1799	for (i = 128; i < 256; i++)	/* PD2<7> */
1800		pd2 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^
1801			(block[i] >> 3) ^ (block[i] >> 4) ^ (block[i] >> 5) ^
1802			(block[i] >> 6) ^ (block[i] >> 7)) & 1) << 7;
1803
1804	eccp[0] = pd0;
1805	eccp[1] = pd1;
1806	eccp[2] = pd2;
1807}
1808
1809/**
1810 * Check an Octeon ECC block, fixing errors if possible
1811 *
1812 * @param block  Pointer to block to check
1813 *
1814 * @return Zero if block has no errors, one if errors were corrected, two
1815 *         if the errors could not be corrected.
1816 */
1817int cvmx_nand_correct_boot_ecc(uint8_t *block)
1818{
1819    unsigned char pd0, pd1, pd2;
1820    int i, j;
1821    unsigned char xorpd0, xorpd1, xorpd2;
1822    int xor_num;
1823    unsigned int check;
1824
1825    asm volatile ("pref 0,0(%0);pref 0,128(%0);pref 0,256(%0)\n" :: "r" (block));
1826
1827    pd0 = pd1 = pd2 = 0;
1828
1829    for (i = 0; i < 256; i++)   /* PD0<0> */
1830        pd0 ^= (block[i] ^ (block[i] >> 2) ^ (block[i] >> 4) ^ (block[i] >> 6)) & 1;
1831    for (i = 0; i < 256; i++)   /* PD0<1> */
1832        pd0 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 4) ^ (block[i] >> 5)) & 1) << 1;
1833    for (i = 0; i < 256; i++)   /* PD0<2> */
1834        pd0 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^ (block[i] >> 3)) & 1) << 2;
1835    for (i = 0; i < 128; i++)   /* PD0<3> */
1836        pd0 ^= ((block[2*i] ^ (block[2*i] >> 1) ^ (block[2*i] >> 2) ^
1837                 (block[2*i] >> 3) ^ (block[2*i] >> 4) ^ (block[2*i] >> 5) ^
1838                 (block[2*i] >> 6) ^ (block[2*i] >> 7)) & 1) << 3;
1839    for (i = 0; i < 64; i++)    /* PD0<4> */
1840        for (j = 0; j < 2; j++)
1841            pd0 ^= ((block[4*i+j] ^ (block[4*i+j] >> 1) ^ (block[4*i+j] >> 2) ^
1842                     (block[4*i+j] >> 3) ^ (block[4*i+j] >> 4) ^ (block[4*i+j] >> 5) ^
1843                     (block[4*i+j] >> 6) ^ (block[4*i+j] >> 7)) & 1) << 4;
1844    for (i = 0; i < 32; i++)    /* PD0<5> */
1845        for (j = 0; j < 4; j++)
1846            pd0 ^= ((block[8*i+j] ^ (block[8*i+j] >> 1) ^ (block[8*i+j] >> 2) ^
1847                     (block[8*i+j] >> 3) ^ (block[8*i+j] >> 4) ^ (block[8*i+j] >> 5) ^
1848                     (block[8*i+j] >> 6) ^ (block[8*i+j] >> 7)) & 1) << 5;
1849    for (i = 0; i < 16; i++)    /* PD0<6> */
1850        for (j = 0; j < 8; j++)
1851            pd0 ^= ((block[16*i+j] ^ (block[16*i+j] >> 1) ^ (block[16*i+j] >> 2) ^
1852                     (block[16*i+j] >> 3) ^ (block[16*i+j] >> 4) ^ (block[16*i+j] >> 5) ^
1853                     (block[16*i+j] >> 6) ^ (block[16*i+j] >> 7)) & 1) << 6;
1854    for (i = 0; i < 8; i++)     /* PD0<7> */
1855        for (j = 0; j < 16; j++)
1856            pd0 ^= ((block[32*i+j] ^ (block[32*i+j] >> 1) ^ (block[32*i+j] >> 2) ^
1857                     (block[32*i+j] >> 3) ^ (block[32*i+j] >> 4) ^ (block[32*i+j] >> 5) ^
1858                     (block[32*i+j] >> 6) ^ (block[32*i+j] >> 7)) & 1) << 7;
1859    for (i = 0; i < 4; i++)     /* PD1<0> */
1860        for (j = 0; j < 32; j++)
1861            pd1 ^= ((block[64*i+j] ^ (block[64*i+j] >> 1) ^ (block[64*i+j] >> 2) ^
1862                     (block[64*i+j] >> 3) ^ (block[64*i+j] >> 4) ^ (block[64*i+j] >> 5) ^
1863                     (block[64*i+j] >> 6) ^ (block[64*i+j] >> 7)) & 1) << 0;
1864    for (i = 0; i < 2; i++)     /* PD1<1> */
1865        for (j = 0; j < 64; j++)
1866            pd1 ^= ((block[128*i+j] ^ (block[128*i+j] >> 1) ^ (block[128*i+j] >> 2) ^
1867                     (block[128*i+j] >> 3) ^ (block[128*i+j] >> 4) ^ (block[128*i+j] >> 5) ^
1868                     (block[128*i+j] >> 6) ^ (block[128*i+j] >> 7)) & 1) << 1;
1869    for (i = 0; i < 128; i++)   /* PD1<2> */
1870        pd1 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^
1871                 (block[i] >> 3) ^ (block[i] >> 4) ^ (block[i] >> 5) ^
1872                 (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 2;
1873    /* PD1<3> */
1874    /* PD1<4> */
1875    for (i = 0; i < 256; i++)   /* PD1<5> */
1876        pd1 ^= (((block[i] >> 1) ^ (block[i] >> 3) ^ (block[i] >> 5) ^ (block[i] >> 7)) & 1) << 5;
1877    for (i = 0; i < 256; i++)   /* PD1<6> */
1878        pd1 ^= (((block[i] >> 2) ^ (block[i] >> 3) ^ (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 6;
1879    for (i = 0; i < 256; i++)   /* PD1<7> */
1880        pd1 ^= (((block[i] >> 4) ^ (block[i] >> 5) ^ (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 7;
1881    for (i = 0; i < 128; i++)   /* PD2<0> */
1882        pd2 ^= ((block[2*i+1] ^ (block[2*i+1] >> 1) ^ (block[2*i+1] >> 2) ^
1883                 (block[2*i+1] >> 3) ^ (block[2*i+1] >> 4) ^ (block[2*i+1] >> 5) ^
1884                 (block[2*i+1] >> 6) ^ (block[2*i+1] >> 7)) & 1) << 0;
1885    for (i = 0; i < 64; i++)    /* PD2<1> */
1886        for (j = 2; j < 4; j++)
1887            pd2 ^= ((block[4*i+j] ^ (block[4*i+j] >> 1) ^ (block[4*i+j] >> 2) ^
1888                     (block[4*i+j] >> 3) ^ (block[4*i+j] >> 4) ^ (block[4*i+j] >> 5) ^
1889                     (block[4*i+j] >> 6) ^ (block[4*i+j] >> 7)) & 1) << 1;
1890    for (i = 0; i < 32; i++)    /* PD2<2> */
1891        for (j = 4; j < 8; j++)
1892            pd2 ^= ((block[8*i+j] ^ (block[8*i+j] >> 1) ^ (block[8*i+j] >> 2) ^
1893                     (block[8*i+j] >> 3) ^ (block[8*i+j] >> 4) ^ (block[8*i+j] >> 5) ^
1894                     (block[8*i+j] >> 6) ^ (block[8*i+j] >> 7)) & 1) << 2;
1895    for (i = 0; i < 16; i++)    /* PD2<3> */
1896        for (j = 8; j < 16; j++)
1897            pd2 ^= ((block[16*i+j] ^ (block[16*i+j] >> 1) ^ (block[16*i+j] >> 2) ^
1898                     (block[16*i+j] >> 3) ^ (block[16*i+j] >> 4) ^ (block[16*i+j] >> 5) ^
1899                     (block[16*i+j] >> 6) ^ (block[16*i+j] >> 7)) & 1) << 3;
1900    for (i = 0; i < 8; i++)     /* PD2<4> */
1901        for (j = 16; j < 32; j++)
1902            pd2 ^= ((block[32*i+j] ^ (block[32*i+j] >> 1) ^ (block[32*i+j] >> 2) ^
1903                     (block[32*i+j] >> 3) ^ (block[32*i+j] >> 4) ^ (block[32*i+j] >> 5) ^
1904                     (block[32*i+j] >> 6) ^ (block[32*i+j] >> 7)) & 1) << 4;
1905    for (i = 0; i < 4; i++)     /* PD2<5> */
1906        for (j = 32; j < 64; j++)
1907            pd2 ^= ((block[64*i+j] ^ (block[64*i+j] >> 1) ^ (block[64*i+j] >> 2) ^
1908                     (block[64*i+j] >> 3) ^ (block[64*i+j] >> 4) ^ (block[64*i+j] >> 5) ^
1909                     (block[64*i+j] >> 6) ^ (block[64*i+j] >> 7)) & 1) << 5;
1910    for (i = 0; i < 2; i++)     /* PD2<6> */
1911        for (j = 64; j < 128; j++)
1912            pd2 ^= ((block[128*i+j] ^ (block[128*i+j] >> 1) ^ (block[128*i+j] >> 2) ^
1913                     (block[128*i+j] >> 3) ^ (block[128*i+j] >> 4) ^ (block[128*i+j] >> 5) ^
1914                     (block[128*i+j] >> 6) ^ (block[128*i+j] >> 7)) & 1) << 6;
1915    for (i = 128; i < 256; i++) /* PD2<7> */
1916        pd2 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^
1917                 (block[i] >> 3) ^ (block[i] >> 4) ^ (block[i] >> 5) ^
1918                 (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 7;
1919
1920    xorpd0 = pd0 ^ block[256];
1921    xorpd1 = pd1 ^ block[257];
1922    xorpd2 = pd2 ^ block[258];
1923
1924    xor_num = __builtin_popcount((xorpd0 << 16) | (xorpd1 << 8) | xorpd2);
1925    check = (((xorpd1 & 7) << 8) | xorpd0) ^ ((xorpd2 << 3) | (xorpd1 >> 5));
1926
1927    if (xor_num == 0)
1928        return 0;
1929    else if ((xor_num > 1) && (check != 0x7FF))
1930        return 2;
1931
1932    if (check == 0x7FF)
1933    {
1934        /* Correct the error */
1935        block[xorpd2] ^= 1 << (xorpd1 >> 5);
1936    }
1937
1938    return 1;
1939}
1940
1941cvmx_nand_status_t cvmx_nand_set_defaults(int page_size, int oob_size, int pages_per_block, int blocks, int onfi_timing_mode)
1942{
1943    if (!page_size || !oob_size || !pages_per_block || !blocks || onfi_timing_mode > 5)
1944        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1945
1946    cvmx_nand_default.page_size = page_size;
1947    cvmx_nand_default.oob_size = oob_size;
1948    cvmx_nand_default.pages_per_block = pages_per_block;
1949    cvmx_nand_default.blocks = blocks;
1950    cvmx_nand_default.onfi_timing = onfi_timing_mode;
1951
1952    CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
1953}
1954