1/***********************license start***************
2 * Copyright (c) 2003-2010  Cavium Inc. (support@cavium.com). All rights
3 * reserved.
4 *
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 *   * Redistributions of source code must retain the above copyright
11 *     notice, this list of conditions and the following disclaimer.
12 *
13 *   * Redistributions in binary form must reproduce the above
14 *     copyright notice, this list of conditions and the following
15 *     disclaimer in the documentation and/or other materials provided
16 *     with the distribution.
17
18 *   * Neither the name of Cavium Inc. nor the names of
19 *     its contributors may be used to endorse or promote products
20 *     derived from this software without specific prior written
21 *     permission.
22
23 * This Software, including technical data, may be subject to U.S. export  control
24 * laws, including the U.S. Export Administration Act and its  associated
25 * regulations, and may be subject to export or import  regulations in other
26 * countries.
27
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
39
40
41
42/**
43 * @file
44 *
45 * Interface to the NAND flash controller.
46 * See cvmx-nand.h for usage documentation and notes.
47 *
48 * <hr>$Revision: 35726 $<hr>
49 */
50
51#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
52#include <linux/module.h>
53
54#include <asm/octeon/cvmx.h>
55#include <asm/octeon/cvmx-clock.h>
56#include <asm/octeon/cvmx-nand.h>
57#include <asm/octeon/cvmx-ndf-defs.h>
58#include <asm/octeon/cvmx-swap.h>
59#include <asm/octeon/cvmx-bootmem.h>
60#else
61#include "cvmx.h"
62#include "cvmx-nand.h"
63#include "cvmx-swap.h"
64#include "cvmx-bootmem.h"
65#endif
66#if defined(__U_BOOT__) && defined(CONFIG_HW_WATCHDOG)
67# include <watchdog.h>
68#else
69# define WATCHDOG_RESET()
70#endif
71
/* NAND flash command opcodes */
#define NAND_COMMAND_READ_ID            0x90
#define NAND_COMMAND_READ_PARAM_PAGE    0xec
#define NAND_COMMAND_RESET              0xff
#define NAND_COMMAND_STATUS             0x70
#define NAND_COMMAND_READ               0x00
#define NAND_COMMAND_READ_FIN           0x30
#define NAND_COMMAND_ERASE              0x60
#define NAND_COMMAND_ERASE_FIN          0xd0
#define NAND_COMMAND_PROGRAM            0x80
#define NAND_COMMAND_PROGRAM_FIN        0x10
/* Operation timeouts, in microseconds */
#define NAND_TIMEOUT_USECS_READ         100000
#define NAND_TIMEOUT_USECS_WRITE        1000000
#define NAND_TIMEOUT_USECS_BLOCK_ERASE  1000000

/* Integer division that rounds the quotient up */
#define CVMX_NAND_ROUNDUP(_Dividend, _Divisor) (((_Dividend)+((_Divisor)-1))/(_Divisor))
/* min/max as GCC statement expressions: each argument is evaluated exactly
** once, so arguments with side effects are safe. */
#undef min
#define min(X, Y)                               \
        ({ typeof (X) __x = (X);                \
           typeof (Y) __y = (Y);                \
                (__x < __y) ? __x : __y; })

#undef max
#define max(X, Y)                               \
        ({ typeof (X) __x = (X);                \
           typeof (Y) __y = (Y);                \
                (__x > __y) ? __x : __y; })
98
99
/* Structure to store the parameters that we care about that
** describe the ONFI speed modes.  This is used to configure
** the flash timing to match what is reported in the
** parameter page of the ONFI flash chip.
** All values are in nanoseconds (scaled to clock counts by
** __set_onfi_timing_mode()). */
typedef struct
{
    int twp;    /* Twp: WE# pulse width */
    int twh;    /* Twh: WE# pulse width high */
    int twc;    /* Twc: WE# cycle time */
    int tclh;   /* Tclh: CLE hold time */
    int tals;   /* Tals: ALE setup time */
} onfi_speed_mode_desc_t;
/* Timing requirements (in ns) for each ONFI asynchronous timing mode,
** indexed by mode number. */
static const onfi_speed_mode_desc_t onfi_speed_modes[] =
{

    {50,30,100,20,50},  /* Mode 0 */
    {25,15, 45,10,25},  /* Mode 1 */
    {17,15, 35,10,15},  /* Mode 2 */
    {15,10, 30, 5,10},  /* Mode 3 */
    {12,10, 25, 5,10},  /* Mode 4, requires EDO timings */
    {10, 7, 20, 5,10},  /* Mode 5, requires EDO timings */
    {10,10, 25, 5,12},  /* Mode 6, requires EDO timings */
};
123
124
125
/* Bit flags describing per-chip state */
typedef enum
{
    CVMX_NAND_STATE_16BIT = 1<<0,   /* Chip was probed in 16 bit wide bus mode */
} cvmx_nand_state_flags_t;
130
131/**
132 * Structure used to store data about the NAND devices hooked
133 * to the bootbus.
134 */
135typedef struct
136{
137    int page_size;
138    int oob_size;
139    int pages_per_block;
140    int blocks;
141    int tim_mult;
142    int tim_par[8];
143    int clen[4];
144    int alen[4];
145    int rdn[4];
146    int wrn[2];
147    int onfi_timing;
148    cvmx_nand_state_flags_t flags;
149} cvmx_nand_state_t;
150
151/**
152 * Array indexed by bootbus chip select with information
153 * about NAND devices.
154 */
155#if defined(__U_BOOT__)
156/* For u-boot nand boot we need to play some tricks to be able
157** to use this early in boot.  We put them in a special section that is merged
158** with the text segment.  (Using the text segment directly results in an assembler warning.)
159*/
160/*#define USE_DATA_IN_TEXT*/
161#endif
162
163#ifdef USE_DATA_IN_TEXT
164static uint8_t cvmx_nand_buffer[CVMX_NAND_MAX_PAGE_AND_OOB_SIZE] __attribute__((aligned(8)))  __attribute__ ((section (".data_in_text")));
165static cvmx_nand_state_t cvmx_nand_state[8] __attribute__ ((section (".data_in_text")));
166static cvmx_nand_state_t cvmx_nand_default __attribute__ ((section (".data_in_text")));
167static cvmx_nand_initialize_flags_t cvmx_nand_flags __attribute__ ((section (".data_in_text")));
168static int debug_indent __attribute__ ((section (".data_in_text")));
169#else
170static CVMX_SHARED cvmx_nand_state_t cvmx_nand_state[8];
171static CVMX_SHARED cvmx_nand_state_t cvmx_nand_default;
172static CVMX_SHARED cvmx_nand_initialize_flags_t cvmx_nand_flags;
173static CVMX_SHARED uint8_t *cvmx_nand_buffer = NULL;
174static int debug_indent = 0;
175#endif
176
/* Human readable names for the NAND controller opcodes, indexed by
** opcode value. */
static CVMX_SHARED const char *cvmx_nand_opcode_labels[] =
{
    "NOP",                      /* 0 */
    "Timing",                   /* 1 */
    "Wait",                     /* 2 */
    "Chip Enable / Disable",    /* 3 */
    "CLE",                      /* 4 */
    "ALE",                      /* 5 */
    "6 - Unknown",              /* 6 */
    "7 - Unknown",              /* 7 */
    "Write",                    /* 8 */
    "Read",                     /* 9 */
    "Read EDO",                 /* 10 */
    "Wait Status",              /* 11 */
    "12 - Unknown",             /* 12 */
    "13 - Unknown",             /* 13 */
    "14 - Unknown",             /* 14 */
    "Bus Aquire / Release"      /* 15 */
};
196
#define ULL unsigned long long

/* Debug tracing helpers.  They only produce output when the
** CVMX_NAND_INITIALIZE_FLAGS_DEBUG flag was passed to cvmx_nand_initialize().
** Every macro is wrapped in do { } while (0) so it expands to exactly one
** statement: the originals were either a bare "if" (a dangling-else hazard)
** or ended in "while (0);", whose trailing semicolon made each call site a
** double statement and would break un-braced if/else bodies. */

/* This macro logs out whenever a function is called if debugging is on */
#define CVMX_NAND_LOG_CALLED()                                          \
    do {                                                                \
        if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG)) \
            cvmx_dprintf("%*s%s: called\n", 2*debug_indent++, "", __FUNCTION__); \
    } while (0)

/* This macro logs out each function parameter if debugging is on */
#define CVMX_NAND_LOG_PARAM(format, param)                              \
    do {                                                                \
        if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG)) \
            cvmx_dprintf("%*s%s: param %s = " format "\n", 2*debug_indent, "", __FUNCTION__, #param, param); \
    } while (0)

/* This macro logs out when a function returns a value.
** NOTE(review): the trace prints the value with %d, so it assumes the
** returned type is convertible to int (true for cvmx_nand_status_t). */
#define CVMX_NAND_RETURN(v)                                             \
    do {                                                                \
        typeof(v) r = v;                                                \
        if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))         \
            cvmx_dprintf("%*s%s: returned %s(%d)\n", 2*--debug_indent, "", __FUNCTION__, #v, r); \
        return r;                                                       \
    } while (0)

/* This macro logs out when a function doesn't return a value */
#define CVMX_NAND_RETURN_NOTHING()                                      \
    do {                                                                \
        if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))         \
            cvmx_dprintf("%*s%s: returned\n", 2*--debug_indent, "", __FUNCTION__); \
        return;                                                         \
    } while (0)
225
226
227
228
229
/* Compute the CRC over an ONFI parameter page copy.
**
** This is a CRC-16 with polynomial 0x8005, processed MSB-first, with the
** shift register seeded to 0x4F4E as required by the ONFI specification.
** Only the first 254 bytes are covered; the stored CRC occupies bytes
** 254-255 and is excluded from the calculation.
*/
static uint16_t __onfi_parameter_crc_compute(uint8_t *data)
{
    uint32_t reg = 0x4F4E;      /* ONFI-mandated initial value */
    unsigned byte_idx;
    int bit_idx;

    for (byte_idx = 0; byte_idx < 254; byte_idx++)
    {
        unsigned c = data[byte_idx];
        for (bit_idx = 7; bit_idx >= 0; bit_idx--)
        {
            /* Feed the next data bit: if it differs from the register's
            ** top bit, shift and fold in the polynomial. */
            if (((reg >> 15) ^ (c >> bit_idx)) & 1)
                reg = (reg << 1) ^ 0x8005;
            else
                reg <<= 1;
        }
        reg &= 0xFFFF;          /* keep the shift register at 16 bits */
    }
    return (uint16_t)reg;
}
256
257
258/**
259 * Validate the ONFI parameter page and return a pointer to
260 * the config values.
261 *
262 * @param param_page Pointer to the raw NAND data returned after a parameter page read. It will
263 *                   contain at least 4 copies of the parameter structure.
264 *
265 * @return Pointer to a validated paramter page, or NULL if one couldn't be found.
266 */
267static cvmx_nand_onfi_param_page_t *__cvmx_nand_onfi_process(cvmx_nand_onfi_param_page_t param_page[4])
268{
269    int index;
270
271    for (index=0; index<4; index++)
272    {
273        uint16_t crc = __onfi_parameter_crc_compute((void *)&param_page[index]);
274        if (crc == cvmx_le16_to_cpu(param_page[index].crc))
275            break;
276        if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
277            cvmx_dprintf("%s: Paramter page %d is corrupt. (Expected CRC: 0x%04x, computed: 0x%04x)\n",
278                          __FUNCTION__, index, cvmx_le16_to_cpu(param_page[index].crc), crc);
279    }
280
281    if (index == 4)
282    {
283        if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
284            cvmx_dprintf("%s: All parameter pages fail CRC check.  Checking to see if any look sane.\n", __FUNCTION__);
285
286        if (!memcmp(param_page, param_page + 1, 256))
287        {
288            /* First and second copies match, now check some values */
289            if (param_page[0].pages_per_block != 0 && param_page[0].pages_per_block != 0xFFFFFFFF
290                && param_page[0].page_data_bytes != 0 && param_page[0].page_data_bytes != 0xFFFFFFFF
291                && param_page[0].page_spare_bytes != 0 && param_page[0].page_spare_bytes != 0xFFFF
292                && param_page[0].blocks_per_lun != 0 && param_page[0].blocks_per_lun != 0xFFFFFFFF
293                && param_page[0].timing_mode != 0 && param_page[0].timing_mode != 0xFFFF)
294            {
295                /* Looks like we have enough values to use */
296                if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
297                    cvmx_dprintf("%s: Page 0 looks sane, using even though CRC fails.\n", __FUNCTION__);
298                index = 0;
299            }
300        }
301    }
302
303    if (index == 4)
304    {
305        cvmx_dprintf("%s: WARNING: ONFI part but no valid ONFI parameter pages found.\n", __FUNCTION__);
306        return NULL;
307    }
308
309    if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
310    {
311        cvmx_dprintf("%*sONFI Information (from copy %d in param page)\n", 2*debug_indent, "", index);
312        debug_indent++;
313        cvmx_dprintf("%*sonfi = %c%c%c%c\n", 2*debug_indent, "", param_page[index].onfi[0], param_page[index].onfi[1],
314            param_page[index].onfi[2], param_page[index].onfi[3]);
315        cvmx_dprintf("%*srevision_number = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].revision_number));
316        cvmx_dprintf("%*sfeatures = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].features));
317        cvmx_dprintf("%*soptional_commands = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].optional_commands));
318
319        cvmx_dprintf("%*smanufacturer = %12.12s\n", 2*debug_indent, "", param_page[index].manufacturer);
320        cvmx_dprintf("%*smodel = %20.20s\n", 2*debug_indent, "", param_page[index].model);
321        cvmx_dprintf("%*sjedec_id = 0x%x\n", 2*debug_indent, "", param_page[index].jedec_id);
322        cvmx_dprintf("%*sdate_code = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].date_code));
323
324        cvmx_dprintf("%*spage_data_bytes = %u\n", 2*debug_indent, "", (int)cvmx_le32_to_cpu(param_page[index].page_data_bytes));
325        cvmx_dprintf("%*spage_spare_bytes = %u\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].page_spare_bytes));
326        cvmx_dprintf("%*spartial_page_data_bytes = %u\n", 2*debug_indent, "", (int)cvmx_le32_to_cpu(param_page[index].partial_page_data_bytes));
327        cvmx_dprintf("%*spartial_page_spare_bytes = %u\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].partial_page_spare_bytes));
328        cvmx_dprintf("%*spages_per_block = %u\n", 2*debug_indent, "", (int)cvmx_le32_to_cpu(param_page[index].pages_per_block));
329        cvmx_dprintf("%*sblocks_per_lun = %u\n", 2*debug_indent, "", (int)cvmx_le32_to_cpu(param_page[index].blocks_per_lun));
330        cvmx_dprintf("%*snumber_lun = %u\n", 2*debug_indent, "", param_page[index].number_lun);
331        cvmx_dprintf("%*saddress_cycles = 0x%x\n", 2*debug_indent, "", param_page[index].address_cycles);
332        cvmx_dprintf("%*sbits_per_cell = %u\n", 2*debug_indent, "", param_page[index].bits_per_cell);
333        cvmx_dprintf("%*sbad_block_per_lun = %u\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].bad_block_per_lun));
334        cvmx_dprintf("%*sblock_endurance = %u\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].block_endurance));
335        cvmx_dprintf("%*sgood_blocks = %u\n", 2*debug_indent, "", param_page[index].good_blocks);
336        cvmx_dprintf("%*sgood_block_endurance = %u\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].good_block_endurance));
337        cvmx_dprintf("%*sprograms_per_page = %u\n", 2*debug_indent, "", param_page[index].programs_per_page);
338        cvmx_dprintf("%*spartial_program_attrib = 0x%x\n", 2*debug_indent, "", param_page[index].partial_program_attrib);
339        cvmx_dprintf("%*sbits_ecc = %u\n", 2*debug_indent, "", param_page[index].bits_ecc);
340        cvmx_dprintf("%*sinterleaved_address_bits = 0x%x\n", 2*debug_indent, "", param_page[index].interleaved_address_bits);
341        cvmx_dprintf("%*sinterleaved_attrib = 0x%x\n", 2*debug_indent, "", param_page[index].interleaved_attrib);
342
343        cvmx_dprintf("%*spin_capacitance = %u\n", 2*debug_indent, "", param_page[index].pin_capacitance);
344        cvmx_dprintf("%*stiming_mode = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].timing_mode));
345        cvmx_dprintf("%*scache_timing_mode = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].cache_timing_mode));
346        cvmx_dprintf("%*st_prog = %d us\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].t_prog));
347        cvmx_dprintf("%*st_bers = %u us\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].t_bers));
348        cvmx_dprintf("%*st_r = %u us\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].t_r));
349        cvmx_dprintf("%*st_ccs = %u ns\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].t_ccs));
350        cvmx_dprintf("%*svendor_revision = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].vendor_revision));
351        //uint8_t vendor_specific[88];    /**< Byte 166-253: Vendor specific */
352        cvmx_dprintf("%*scrc = 0x%x\n", 2*debug_indent, "", param_page[index].crc);
353        debug_indent--;
354    }
355    return param_page + index;
356}
357
358void __set_onfi_timing_mode(int *tim_par, int clocks_us, int mode)
359{
360    const onfi_speed_mode_desc_t *mp = &onfi_speed_modes[mode];  /* use shorter name to fill in timing array */
361    int margin;
362    int pulse_adjust;
363
364    if (mode > 6)
365    {
366        cvmx_dprintf("%s: invalid ONFI timing mode: %d\n", __FUNCTION__, mode);
367        return;
368    }
369
370    /* Adjust the read/write pulse duty cycle to make it more even.  The cycle time
371    ** requirement is longer than the sum of the high low times, so we exend both the high
372    ** and low times to meet the cycle time requirement.
373    */
374    pulse_adjust = ((mp->twc - mp->twh - mp->twp)/2 + 1) * clocks_us;
375
376    /* Add a small margin to all timings. */
377    margin = 2 * clocks_us;
378    /* Update timing parameters based on supported mode */
379    tim_par[1] = CVMX_NAND_ROUNDUP(mp->twp * clocks_us + margin + pulse_adjust, 1000); /* Twp, WE# pulse width */
380    tim_par[2] = CVMX_NAND_ROUNDUP(max(mp->twh, mp->twc - mp->twp) * clocks_us + margin + pulse_adjust, 1000); /* Tw, WE# pulse width high */
381    tim_par[3] = CVMX_NAND_ROUNDUP(mp->tclh * clocks_us + margin, 1000); /* Tclh, CLE hold time */
382    tim_par[4] = CVMX_NAND_ROUNDUP(mp->tals * clocks_us + margin, 1000); /* Tals, ALE setup time */
383    tim_par[5] = tim_par[3]; /* Talh, ALE hold time */
384    tim_par[6] = tim_par[1]; /* Trp, RE# pulse width*/
385    tim_par[7] = tim_par[2]; /* Treh, RE# high hold time */
386
387}
388
389
390/* Internal helper function to set chip configuration to use default values */
391static void __set_chip_defaults(int chip, int clocks_us)
392{
393    if (!cvmx_nand_default.page_size)
394        return;
395    cvmx_nand_state[chip].page_size = cvmx_nand_default.page_size;  /* NAND page size in bytes */
396    cvmx_nand_state[chip].oob_size = cvmx_nand_default.oob_size;     /* NAND OOB (spare) size in bytes (per page) */
397    cvmx_nand_state[chip].pages_per_block = cvmx_nand_default.pages_per_block;
398    cvmx_nand_state[chip].blocks = cvmx_nand_default.blocks;
399    cvmx_nand_state[chip].onfi_timing = cvmx_nand_default.onfi_timing;
400    __set_onfi_timing_mode(cvmx_nand_state[chip].tim_par, clocks_us, cvmx_nand_state[chip].onfi_timing);
401    if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
402    {
403
404        cvmx_dprintf("%s: Using default NAND parameters.\n", __FUNCTION__);
405        cvmx_dprintf("%s: Defaults: page size: %d, OOB size: %d, pages per block %d, blocks: %d, timing mode: %d\n",
406                     __FUNCTION__, cvmx_nand_state[chip].page_size, cvmx_nand_state[chip].oob_size, cvmx_nand_state[chip].pages_per_block,
407                     cvmx_nand_state[chip].blocks, cvmx_nand_state[chip].onfi_timing);
408    }
409}
410/* Do the proper wait for the ready/busy signal.  First wait
411** for busy to be valid, then wait for busy to de-assert.
412*/
413static int __wait_for_busy_done(int chip)
414{
415    cvmx_nand_cmd_t cmd;
416
417    CVMX_NAND_LOG_CALLED();
418    CVMX_NAND_LOG_PARAM("%d", chip);
419
420    memset(&cmd,  0,  sizeof(cmd));
421    cmd.wait.two = 2;
422    cmd.wait.r_b=0;
423    cmd.wait.n = 2;
424
425    /* Wait for RB to be valied (tWB).
426    ** Use 5 * tWC as proxy.  In some modes this is
427    ** much longer than required, but does not affect performance
428    ** since we will wait much longer for busy to de-assert.
429    */
430    if (cvmx_nand_submit(cmd))
431        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
432    if (cvmx_nand_submit(cmd))
433        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
434    if (cvmx_nand_submit(cmd))
435        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
436    if (cvmx_nand_submit(cmd))
437        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
438    cmd.wait.r_b=1; /* Now wait for busy to be de-asserted */
439    if (cvmx_nand_submit(cmd))
440        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
441
442    CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
443}
444/**
445 * Called to initialize the NAND controller for use. Note that
446 * you must be running out of L2 or memory and not NAND before
447 * calling this function.
448 * When probing for NAND chips, this function attempts to autoconfigure based on the NAND parts detected.
449 * It currently supports autodetection for ONFI parts (with valid parameter pages), and some Samsung NAND
450 * parts (decoding ID bits.)  If autoconfiguration fails, the defaults set with __set_chip_defaults()
451 * prior to calling cvmx_nand_initialize() are used.
452 * If defaults are set and the CVMX_NAND_INITIALIZE_FLAGS_DONT_PROBE flag is provided, the defaults are used
453 * for all chips in the active_chips mask.
454 *
455 * @param flags  Optional initialization flags
456 *               If the CVMX_NAND_INITIALIZE_FLAGS_DONT_PROBE flag is passed, chips are not probed,
457 *               and the default parameters (if set with cvmx_nand_set_defaults) are used for all chips
458 *               in the active_chips mask.
459 * @param active_chips
460 *               Each bit in this parameter represents a chip select that might
461 *               contain NAND flash. Any chip select present in this bitmask may
462 *               be connected to NAND. It is normally safe to pass 0xff here and
463 *               let the API probe all 8 chip selects.
464 *
465 * @return Zero on success, a negative cvmx_nand_status error code on failure
466 */
467cvmx_nand_status_t cvmx_nand_initialize(cvmx_nand_initialize_flags_t flags, int active_chips)
468{
469    int chip;
470    int start_chip;
471    int stop_chip;
472    uint64_t clocks_us;
473    union cvmx_ndf_misc ndf_misc;
474    uint8_t nand_id_buffer[16];
475
476    if (!octeon_has_feature(OCTEON_FEATURE_NAND))
477        CVMX_NAND_RETURN(CVMX_NAND_NO_DEVICE);
478
479    cvmx_nand_flags = flags;
480    CVMX_NAND_LOG_CALLED();
481    CVMX_NAND_LOG_PARAM("0x%x", flags);
482
483    memset(&cvmx_nand_state,  0,  sizeof(cvmx_nand_state));
484
485#ifndef USE_DATA_IN_TEXT
486    /* cvmx_nand_buffer is statically allocated in the TEXT_IN_DATA case */
487    if (!cvmx_nand_buffer)
488    {
489        cvmx_nand_buffer = cvmx_bootmem_alloc_named_flags(CVMX_NAND_MAX_PAGE_AND_OOB_SIZE, 128, "__nand_buffer", CVMX_BOOTMEM_FLAG_END_ALLOC);
490    }
491    if (!cvmx_nand_buffer) {
492        const cvmx_bootmem_named_block_desc_t *block_desc = cvmx_bootmem_find_named_block("__nand_buffer");
493        if (block_desc)
494            cvmx_nand_buffer = cvmx_phys_to_ptr(block_desc->base_addr);
495    }
496
497    if (!cvmx_nand_buffer)
498        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
499#endif
500
501    /* Disable boot mode and reset the fifo */
502    ndf_misc.u64 = cvmx_read_csr(CVMX_NDF_MISC);
503    ndf_misc.s.rd_cmd = 0;
504    ndf_misc.s.bt_dma = 0;
505    ndf_misc.s.bt_dis = 1;
506    ndf_misc.s.ex_dis = 0;
507    ndf_misc.s.rst_ff = 1;
508    cvmx_write_csr(CVMX_NDF_MISC, ndf_misc.u64);
509    cvmx_read_csr(CVMX_NDF_MISC);
510
511    /* Bring the fifo out of reset */
512    cvmx_wait_usec(1);
513    ndf_misc.s.rst_ff = 0;
514    cvmx_write_csr(CVMX_NDF_MISC, ndf_misc.u64);
515    cvmx_read_csr(CVMX_NDF_MISC);
516    cvmx_wait_usec(1);
517
518    /* Clear the ECC counter */
519    //cvmx_write_csr(CVMX_NDF_ECC_CNT, cvmx_read_csr(CVMX_NDF_ECC_CNT));
520
521    /* Clear the interrupt state */
522    cvmx_write_csr(CVMX_NDF_INT, cvmx_read_csr(CVMX_NDF_INT));
523    cvmx_write_csr(CVMX_NDF_INT_EN, 0);
524    cvmx_write_csr(CVMX_MIO_NDF_DMA_INT, cvmx_read_csr(CVMX_MIO_NDF_DMA_INT));
525    cvmx_write_csr(CVMX_MIO_NDF_DMA_INT_EN, 0);
526
527
528    /* The simulator crashes if you access non existant devices. Assume
529        only chip select 1 is connected to NAND */
530    if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
531    {
532        start_chip = 1;
533        stop_chip = 2;
534    }
535    else
536    {
537        start_chip = 0;
538        stop_chip = 8;
539    }
540
541    /* Figure out how many clocks are in one microsecond, rounding up */
542    clocks_us = CVMX_NAND_ROUNDUP(cvmx_clock_get_rate(CVMX_CLOCK_SCLK), 1000000);
543
544    /* If the CVMX_NAND_INITIALIZE_FLAGS_DONT_PROBE flag is set, then
545    ** use the supplied default values to configured the chips in the
546    ** active_chips mask */
547    if (cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DONT_PROBE)
548    {
549        if (cvmx_nand_default.page_size)
550        {
551            for (chip=start_chip; chip<stop_chip; chip++)
552            {
553                /* Skip chip selects that the caller didn't supply in the active chip bits */
554                if (((1<<chip) & active_chips) == 0)
555                    continue;
556                __set_chip_defaults(chip, clocks_us);
557            }
558        }
559        CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
560    }
561
562    /* Probe and see what NAND flash we can find */
563    for (chip=start_chip; chip<stop_chip; chip++)
564    {
565        union cvmx_mio_boot_reg_cfgx mio_boot_reg_cfg;
566        cvmx_nand_onfi_param_page_t *onfi_param_page;
567        int probe_failed;
568        int width_16;
569
570        /* Skip chip selects that the caller didn't supply in the active chip bits */
571        if (((1<<chip) & active_chips) == 0)
572            continue;
573
574        mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(chip));
575        /* Enabled regions can't be connected to NAND flash */
576        if (mio_boot_reg_cfg.s.en)
577            continue;
578
579        /* Start out with some sane, but slow, defaults */
580        cvmx_nand_state[chip].page_size = 0;
581        cvmx_nand_state[chip].oob_size = 64;
582        cvmx_nand_state[chip].pages_per_block = 64;
583        cvmx_nand_state[chip].blocks = 100;
584
585
586        /* Set timing mode to ONFI mode 0 for initial accesses */
587        __set_onfi_timing_mode(cvmx_nand_state[chip].tim_par, clocks_us, 0);
588
589        /* Put the index of which timing parameter to use.  The indexes are into the tim_par
590        ** which match the indexes of the 8 timing parameters that the hardware supports.
591        ** Index 0 is not software controlled, and is fixed by hardware. */
592        cvmx_nand_state[chip].clen[0] = 0; /* Command doesn't need to be held before WE */
593        cvmx_nand_state[chip].clen[1] = 1; /* Twp, WE# pulse width */
594        cvmx_nand_state[chip].clen[2] = 3; /* Tclh, CLE hold time */
595        cvmx_nand_state[chip].clen[3] = 1;
596
597        cvmx_nand_state[chip].alen[0] = 4; /* Tals, ALE setup time */
598        cvmx_nand_state[chip].alen[1] = 1; /* Twp, WE# pulse width */
599        cvmx_nand_state[chip].alen[2] = 2; /* Twh, WE# pulse width high */
600        cvmx_nand_state[chip].alen[3] = 5; /* Talh, ALE hold time */
601
602        cvmx_nand_state[chip].rdn[0] = 0;
603        cvmx_nand_state[chip].rdn[1] = 6; /* Trp, RE# pulse width*/
604        cvmx_nand_state[chip].rdn[2] = 7; /* Treh, RE# high hold time */
605        cvmx_nand_state[chip].rdn[3] = 0;
606
607        cvmx_nand_state[chip].wrn[0] = 1; /* Twp, WE# pulse width */
608        cvmx_nand_state[chip].wrn[1] = 2; /* Twh, WE# pulse width high */
609
610        /* Probe and see if we get an answer.  Read more than required, as in
611        ** 16 bit mode only every other byte is valid.
612        ** Here we probe twice, once in 8 bit mode, and once in 16 bit mode to autodetect
613        ** the width.
614        */
615        probe_failed = 1;
616        for (width_16 = 0; width_16 <= 1 && probe_failed; width_16++)
617        {
618            probe_failed = 0;
619
620            if (width_16)
621                cvmx_nand_state[chip].flags |= CVMX_NAND_STATE_16BIT;
622            memset(cvmx_nand_buffer, 0xff, 16);
623            if (cvmx_nand_read_id(chip, 0x0, cvmx_ptr_to_phys(cvmx_nand_buffer), 16) < 16)
624            {
625                if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
626                    cvmx_dprintf("%s: Failed to probe chip %d\n", __FUNCTION__, chip);
627                probe_failed = 1;
628
629            }
630            if (*(uint32_t*)cvmx_nand_buffer == 0xffffffff || *(uint32_t*)cvmx_nand_buffer == 0x0)
631            {
632                if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
633                    cvmx_dprintf("%s: Probe returned nothing for chip %d\n", __FUNCTION__, chip);
634                probe_failed = 1;
635            }
636        }
637        /* Neither 8 or 16 bit mode worked, so go on to next chip select */
638        if (probe_failed)
639            continue;
640
641        /* Save copy of ID for later use */
642        memcpy(nand_id_buffer, cvmx_nand_buffer, sizeof(nand_id_buffer));
643
644        if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
645            cvmx_dprintf("%s: NAND chip %d has ID 0x%08llx\n", __FUNCTION__, chip, (unsigned long long int)*(uint64_t*)cvmx_nand_buffer);
646        /* Read more than required, as in 16 bit mode only every other byte is valid. */
647        if (cvmx_nand_read_id(chip, 0x20, cvmx_ptr_to_phys(cvmx_nand_buffer), 8) < 8)
648        {
649            if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
650                cvmx_dprintf("%s: Failed to probe chip %d\n", __FUNCTION__, chip);
651            continue;
652        }
653
654        if (((cvmx_nand_buffer[0] == 'O') && (cvmx_nand_buffer[1] == 'N') &&
655            (cvmx_nand_buffer[2] == 'F') && (cvmx_nand_buffer[3] == 'I')))
656        {
657            /* We have an ONFI part, so read the parameter page */
658
659            cvmx_nand_read_param_page(chip, cvmx_ptr_to_phys(cvmx_nand_buffer), 2048);
660            onfi_param_page = __cvmx_nand_onfi_process((cvmx_nand_onfi_param_page_t *)cvmx_nand_buffer);
661            if (onfi_param_page)
662            {
663                /* ONFI NAND parts are described by a parameter page.  Here we extract the configuration values
664                ** from the parameter page that we need to access the chip. */
665                cvmx_nand_state[chip].page_size = cvmx_le32_to_cpu(onfi_param_page->page_data_bytes);
666                cvmx_nand_state[chip].oob_size = cvmx_le16_to_cpu(onfi_param_page->page_spare_bytes);
667                cvmx_nand_state[chip].pages_per_block = cvmx_le32_to_cpu(onfi_param_page->pages_per_block);
668                cvmx_nand_state[chip].blocks = cvmx_le32_to_cpu(onfi_param_page->blocks_per_lun) * onfi_param_page->number_lun;
669
670                if (cvmx_le16_to_cpu(onfi_param_page->timing_mode) <= 0x3f)
671                {
672                    int mode_mask = cvmx_le16_to_cpu(onfi_param_page->timing_mode);
673                    int mode = 0;
674                    int i;
675                    for (i = 0; i < 6;i++)
676                    {
677                        if (mode_mask & (1 << i))
678                            mode = i;
679                    }
680                    cvmx_nand_state[chip].onfi_timing = mode;
681                }
682                else
683                {
684                    cvmx_dprintf("%s: Invalid timing mode (%d) in ONFI parameter page, ignoring\n", __FUNCTION__, cvmx_nand_state[chip].onfi_timing);
685                    cvmx_nand_state[chip].onfi_timing = 0;
686
687                }
688                if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
689                    cvmx_dprintf("%s: Using ONFI timing mode: %d\n", __FUNCTION__, cvmx_nand_state[chip].onfi_timing);
690                __set_onfi_timing_mode(cvmx_nand_state[chip].tim_par, clocks_us, cvmx_nand_state[chip].onfi_timing);
691                if (cvmx_nand_state[chip].page_size + cvmx_nand_state[chip].oob_size > CVMX_NAND_MAX_PAGE_AND_OOB_SIZE)
692                {
693                    cvmx_dprintf("%s: ERROR: Page size (%d) + OOB size (%d) is greater than max size (%d)\n",
694                                 __FUNCTION__, cvmx_nand_state[chip].page_size, cvmx_nand_state[chip].oob_size, CVMX_NAND_MAX_PAGE_AND_OOB_SIZE);
695                    return(CVMX_NAND_ERROR);
696                }
697                /* We have completed setup for this ONFI chip, so go on to next chip. */
698                continue;
699            }
700            else
701            {
702                /* Parameter page is not valid */
703                if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
704                    cvmx_dprintf("%s: ONFI paramater page missing or invalid.\n", __FUNCTION__);
705
706            }
707
708
709        }
710        else
711        {
712            /* We have a non-ONFI part. */
713            if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
714                cvmx_dprintf("%s: Chip %d doesn't support ONFI.\n", __FUNCTION__, chip);
715
716
717            if (nand_id_buffer[0] == 0xEC)
718            {
719                /* We have a Samsung part, so decode part info from ID bytes */
720                uint64_t nand_size_bits = (64*1024*1024ULL) << ((nand_id_buffer[4] & 0x70) >> 4); /* Plane size */
721                cvmx_nand_state[chip].page_size = 1024 << (nand_id_buffer[3] & 0x3);  /* NAND page size in bytes */
722		/* NAND OOB (spare) size in bytes (per page) */
723		cvmx_nand_state[chip].oob_size = (cvmx_nand_state[chip].page_size / 512) * ((nand_id_buffer[3] & 4) ? 16 : 8);
724                cvmx_nand_state[chip].pages_per_block = (0x10000 << ((nand_id_buffer[3] & 0x30) >> 4))/cvmx_nand_state[chip].page_size;
725
726                nand_size_bits *= 1 << ((nand_id_buffer[4] & 0xc) >> 2);
727
728                cvmx_nand_state[chip].oob_size = cvmx_nand_state[chip].page_size/64;
729                if (nand_id_buffer[3] & 0x4)
730                    cvmx_nand_state[chip].oob_size *= 2;
731
732                cvmx_nand_state[chip].blocks = nand_size_bits/(8ULL*cvmx_nand_state[chip].page_size*cvmx_nand_state[chip].pages_per_block);
733                switch (nand_id_buffer[1]) {
734                case 0xD3:      /* K9F8G08U0M */
735                case 0xDC:      /* K9F4G08U0B */
736                    cvmx_nand_state[chip].onfi_timing = 6;
737                    break;
738                default:
739                    cvmx_nand_state[chip].onfi_timing = 2;
740                    break;
741                }
742
743                if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
744                {
745                    cvmx_dprintf("%s: Samsung NAND chip detected, using parameters decoded from ID bytes.\n", __FUNCTION__);
746                    cvmx_dprintf("%s: Defaults: page size: %d, OOB size: %d, pages per block %d, part size: %d MBytes, timing mode: %d\n",
747                                 __FUNCTION__, cvmx_nand_state[chip].page_size, cvmx_nand_state[chip].oob_size, cvmx_nand_state[chip].pages_per_block,
748                                 (int)(nand_size_bits/(8*1024*1024)), cvmx_nand_state[chip].onfi_timing);
749                }
750
751                __set_onfi_timing_mode(cvmx_nand_state[chip].tim_par, clocks_us, cvmx_nand_state[chip].onfi_timing);
752                if (cvmx_nand_state[chip].page_size + cvmx_nand_state[chip].oob_size > CVMX_NAND_MAX_PAGE_AND_OOB_SIZE)
753                {
754                    cvmx_dprintf("%s: ERROR: Page size (%d) + OOB size (%d) is greater than max size (%d)\n",
755                                 __FUNCTION__, cvmx_nand_state[chip].page_size, cvmx_nand_state[chip].oob_size, CVMX_NAND_MAX_PAGE_AND_OOB_SIZE);
756                    return(CVMX_NAND_ERROR);
757                }
758
759                /* We have completed setup for this Samsung chip, so go on to next chip. */
760                continue;
761
762
763            }
764
765        }
766
767
768
769        /*  We were not able to automatically identify the NAND chip parameters.  If default values were configured,
770        ** use them. */
771        if (cvmx_nand_default.page_size)
772        {
773            __set_chip_defaults(chip, clocks_us);
774        }
775        else
776        {
777
778            if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
779                cvmx_dprintf("%s: Unable to determine NAND parameters, and no defaults supplied.\n", __FUNCTION__);
780        }
781    }
782    CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
783}
784#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
785EXPORT_SYMBOL(cvmx_nand_initialize);
786#endif
787
788
789/**
790 * Call to shutdown the NAND controller after all transactions
791 * are done. In most setups this will never be called.
792 *
793 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
794 */
795cvmx_nand_status_t cvmx_nand_shutdown(void)
796{
797    CVMX_NAND_LOG_CALLED();
798    memset(&cvmx_nand_state,  0,  sizeof(cvmx_nand_state));
799    CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
800}
801
802
803/**
804 * Returns a bitmask representing the chip selects that are
805 * connected to NAND chips. This can be called after the
806 * initialize to determine the actual number of NAND chips
 * found. Each bit in the response corresponds to a chip select.
808 *
809 * @return Zero if no NAND chips were found. Otherwise a bit is set for
810 *         each chip select (1<<chip).
811 */
812int cvmx_nand_get_active_chips(void)
813{
814    int chip;
815    int result = 0;
816    for (chip=0; chip<8; chip++)
817    {
818        if (cvmx_nand_state[chip].page_size)
819            result |= 1<<chip;
820    }
821    return result;
822}
823#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
824EXPORT_SYMBOL(cvmx_nand_get_active_chips);
825#endif
826
827
828/**
829 * Override the timing parameters for a NAND chip
830 *
831 * @param chip     Chip select to override
832 * @param tim_mult
833 * @param tim_par
834 * @param clen
835 * @param alen
836 * @param rdn
837 * @param wrn
838 *
839 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
840 */
841cvmx_nand_status_t cvmx_nand_set_timing(int chip, int tim_mult, int tim_par[8], int clen[4], int alen[4], int rdn[4], int wrn[2])
842{
843    int i;
844    CVMX_NAND_LOG_CALLED();
845
846    if ((chip < 0) || (chip > 7))
847        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
848    if (!cvmx_nand_state[chip].page_size)
849        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
850
851    cvmx_nand_state[chip].tim_mult = tim_mult;
852    for (i=0;i<8;i++)
853        cvmx_nand_state[chip].tim_par[i] = tim_par[i];
854    for (i=0;i<4;i++)
855        cvmx_nand_state[chip].clen[i] = clen[i];
856    for (i=0;i<4;i++)
857        cvmx_nand_state[chip].alen[i] = alen[i];
858    for (i=0;i<4;i++)
859        cvmx_nand_state[chip].rdn[i] = rdn[i];
860    for (i=0;i<2;i++)
861        cvmx_nand_state[chip].wrn[i] = wrn[i];
862
863    CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
864}
865
866
867/**
868 * @INTERNAL
869 * Get the number of free bytes in the NAND command queue
870 *
871 * @return Number of bytes in queue
872 */
873static inline int __cvmx_nand_get_free_cmd_bytes(void)
874{
875    union cvmx_ndf_misc ndf_misc;
876    CVMX_NAND_LOG_CALLED();
877    ndf_misc.u64 = cvmx_read_csr(CVMX_NDF_MISC);
878    CVMX_NAND_RETURN((int)ndf_misc.s.fr_byt);
879}
880
881
882/**
883 * Submit a command to the NAND command queue. Generally this
884 * will not be used directly. Instead most programs will use the other
885 * higher level NAND functions.
886 *
887 * @param cmd    Command to submit
888 *
889 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
890 */
891cvmx_nand_status_t cvmx_nand_submit(cvmx_nand_cmd_t cmd)
892{
893    CVMX_NAND_LOG_CALLED();
894    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)cmd.u64[0]);
895    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)cmd.u64[1]);
896    CVMX_NAND_LOG_PARAM("%s", cvmx_nand_opcode_labels[cmd.s.op_code]);
897    switch (cmd.s.op_code)
898    {
899        /* All these commands fit in one 64bit word */
900        case 0: /* NOP */
901        case 1: /* Timing */
902        case 2: /* WAIT */
903        case 3: /* Chip Enable/Disable */
904        case 4: /* CLE */
905        case 8: /* Write */
906        case 9: /* Read */
907        case 10: /* Read EDO */
908        case 15: /* Bus Aquire/Release */
909            if (__cvmx_nand_get_free_cmd_bytes() < 8)
910                CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
911            cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[1]);
912            CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
913
914        case 5: /* ALE commands take either one or two 64bit words */
915            if (cmd.ale.adr_byte_num < 5)
916            {
917                if (__cvmx_nand_get_free_cmd_bytes() < 8)
918                    CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
919                cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[1]);
920                CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
921            }
922            else
923            {
924                if (__cvmx_nand_get_free_cmd_bytes() < 16)
925                    CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
926                cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[1]);
927                cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[0]);
928                CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
929            }
930
931        case 11: /* Wait status commands take two 64bit words */
932            if (__cvmx_nand_get_free_cmd_bytes() < 16)
933                CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
934            cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[1]);
935            cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[0]);
936            CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
937
938        default:
939            CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
940    }
941}
942
943
944/**
945 * @INTERNAL
946 * Get the number of bits required to encode the column bits. This
947 * does not include padding to align on a byte boundary.
948 *
949 * @param chip   NAND chip to get data for
950 *
951 * @return Number of column bits
952 */
953static inline int __cvmx_nand_get_column_bits(int chip)
954{
955    return cvmx_pop(cvmx_nand_state[chip].page_size - 1);
956}
957
958
959/**
960 * @INTERNAL
961 * Get the number of bits required to encode the row bits. This
962 * does not include padding to align on a byte boundary.
963 *
964 * @param chip   NAND chip to get data for
965 *
966 * @return Number of row bits
967 */
968static inline int __cvmx_nand_get_row_bits(int chip)
969{
970    return cvmx_pop(cvmx_nand_state[chip].blocks-1) + cvmx_pop(cvmx_nand_state[chip].pages_per_block-1);
971}
972
973
974/**
975 * @INTERNAL
976 * Get the number of address cycles required for this NAND part.
977 * This include column bits, padding, page bits, and block bits.
978 *
979 * @param chip   NAND chip to get data for
980 *
981 * @return Number of address cycles on the bus
982 */
static inline int __cvmx_nand_get_address_cycles(int chip)
{
    /* The column and row addresses are each padded out to a whole number
       of bytes on the bus, and each address cycle carries one byte, so
       the cycle count is simply the sum of the two byte-rounded sizes. */
    int column_cycles = (__cvmx_nand_get_column_bits(chip) + 7) >> 3;
    int row_cycles = (__cvmx_nand_get_row_bits(chip) + 7) >> 3;
    return column_cycles + row_cycles;
}
989
990
991/**
992 * @INTERNAL
993 * Build the set of command common to most transactions
994 * @param chip      NAND chip to program
995 * @param cmd_data  NAND command for CLE cycle 1
996 * @param num_address_cycles
997 *                  Number of address cycles to put on the bus
998 * @param nand_address
999 *                  Data to be put on the bus. It is translated according to
1000 *                  the rules in the file information section.
1001 *
1002 * @param cmd_data2 If non zero, adds a second CLE cycle used by a number of NAND
1003 *                  transactions.
1004 *
1005 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
1006 */
static inline cvmx_nand_status_t __cvmx_nand_build_pre_cmd(int chip, int cmd_data, int num_address_cycles, uint64_t nand_address, int cmd_data2)
{
    cvmx_nand_status_t result;
    cvmx_nand_cmd_t cmd;

    CVMX_NAND_LOG_CALLED();

    /* Queue order matters: timing, bus acquire, chip enable, wait, CLE,
    ** optional ALE, optional second CLE. Each step is one controller
    ** command submitted to the queue; any failure aborts the sequence. */

    /* Send timing parameters (captured per chip at probe time) */
    memset(&cmd,  0,  sizeof(cmd));
    cmd.set_tm_par.one = 1;
    cmd.set_tm_par.tim_mult = cvmx_nand_state[chip].tim_mult;
    /* tim_par[0] unused */
    cmd.set_tm_par.tim_par1 = cvmx_nand_state[chip].tim_par[1];
    cmd.set_tm_par.tim_par2 = cvmx_nand_state[chip].tim_par[2];
    cmd.set_tm_par.tim_par3 = cvmx_nand_state[chip].tim_par[3];
    cmd.set_tm_par.tim_par4 = cvmx_nand_state[chip].tim_par[4];
    cmd.set_tm_par.tim_par5 = cvmx_nand_state[chip].tim_par[5];
    cmd.set_tm_par.tim_par6 = cvmx_nand_state[chip].tim_par[6];
    cmd.set_tm_par.tim_par7 = cvmx_nand_state[chip].tim_par[7];
    result = cvmx_nand_submit(cmd);
    if (result)
        CVMX_NAND_RETURN(result);

    /* Send bus select (acquire the boot bus for this transaction) */
    memset(&cmd,  0,  sizeof(cmd));
    cmd.bus_acq.fifteen = 15;
    cmd.bus_acq.one = 1;
    result = cvmx_nand_submit(cmd);
    if (result)
        CVMX_NAND_RETURN(result);

    /* Send chip select; width selects 8 or 16 bit bus transfers */
    memset(&cmd,  0,  sizeof(cmd));
    cmd.chip_en.chip = chip;
    cmd.chip_en.one = 1;
    cmd.chip_en.three = 3;
    cmd.chip_en.width = (cvmx_nand_state[chip].flags & CVMX_NAND_STATE_16BIT) ? 2 : 1;
    result = cvmx_nand_submit(cmd);
    if (result)
        CVMX_NAND_RETURN(result);

    /* Send wait, fixed time
    ** This meets chip enable to command latch enable timing.
    ** This is tCS - tCLS from the ONFI spec.
    ** Use tWP as a proxy, as this is adequate for
    ** all ONFI 1.0 timing modes. */
    memset(&cmd,  0,  sizeof(cmd));
    cmd.wait.two = 2;
    cmd.wait.n = 1;
    if (cvmx_nand_submit(cmd))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    /* Send CLE (the first NAND command byte of the transaction) */
    memset(&cmd,  0,  sizeof(cmd));
    cmd.cle.cmd_data = cmd_data;
    cmd.cle.clen1 = cvmx_nand_state[chip].clen[0];
    cmd.cle.clen2 = cvmx_nand_state[chip].clen[1];
    cmd.cle.clen3 = cvmx_nand_state[chip].clen[2];
    cmd.cle.four = 4;
    result = cvmx_nand_submit(cmd);
    if (result)
        CVMX_NAND_RETURN(result);

    /* Send ALE (address cycles), only if the caller requested any */
    if (num_address_cycles)
    {
        memset(&cmd,  0,  sizeof(cmd));
        cmd.ale.adr_byte_num = num_address_cycles;
        if (num_address_cycles < __cvmx_nand_get_address_cycles(chip))
        {
            /* Fewer cycles than a full address (e.g. read ID, erase):
            ** the caller's value goes on the bus untranslated. */
            cmd.ale.adr_bytes_l = nand_address;
            cmd.ale.adr_bytes_h = nand_address >> 32;
        }
        else
        {
            /* Full address: split into column (byte within page) and row
            ** (page/block index), each padded to a byte boundary, per the
            ** address layout described in the file information section. */
            int column_bits = __cvmx_nand_get_column_bits(chip);
            int column_shift = ((column_bits + 7) >> 3) << 3;
            int column = nand_address & (cvmx_nand_state[chip].page_size-1);
            int row = nand_address >> column_bits;
            cmd.ale.adr_bytes_l = column + (row << column_shift);
            cmd.ale.adr_bytes_h = row >> (32 - column_shift);
        }
        cmd.ale.alen1 = cvmx_nand_state[chip].alen[0];
        cmd.ale.alen2 = cvmx_nand_state[chip].alen[1];
        cmd.ale.alen3 = cvmx_nand_state[chip].alen[2];
        cmd.ale.alen4 = cvmx_nand_state[chip].alen[3];
        cmd.ale.five = 5;
        result = cvmx_nand_submit(cmd);
        if (result)
            CVMX_NAND_RETURN(result);
    }

    /* Send CLE 2 (second command byte, used by read/erase sequences) */
    if (cmd_data2)
    {
        memset(&cmd,  0,  sizeof(cmd));
        cmd.cle.cmd_data = cmd_data2;
        cmd.cle.clen1 = cvmx_nand_state[chip].clen[0];
        cmd.cle.clen2 = cvmx_nand_state[chip].clen[1];
        cmd.cle.clen3 = cvmx_nand_state[chip].clen[2];
        cmd.cle.four = 4;
        result = cvmx_nand_submit(cmd);
        if (result)
            CVMX_NAND_RETURN(result);
    }

    CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
}
1115
1116
1117/**
1118 * @INTERNAL
1119 * Build the set of command common to most transactions
1120 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
1121 */
1122static inline cvmx_nand_status_t __cvmx_nand_build_post_cmd(void)
1123{
1124    cvmx_nand_status_t result;
1125    cvmx_nand_cmd_t cmd;
1126
1127    CVMX_NAND_LOG_CALLED();
1128
1129    /* Send chip deselect */
1130    memset(&cmd,  0,  sizeof(cmd));
1131    cmd.chip_dis.three = 3;
1132    result = cvmx_nand_submit(cmd);
1133    if (result)
1134        CVMX_NAND_RETURN(result);
1135
1136    /* Send bus release */
1137    memset(&cmd,  0,  sizeof(cmd));
1138    cmd.bus_rel.fifteen = 15;
1139    result = cvmx_nand_submit(cmd);
1140    if (result)
1141        CVMX_NAND_RETURN(result);
1142
1143    /* Ring the doorbell */
1144    cvmx_write_csr(CVMX_NDF_DRBELL, 1);
1145    CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
1146}
1147
1148
1149/**
1150 * @INTERNAL
1151 * Setup the NAND DMA engine for a transfer
1152 *
1153 * @param chip     Chip select for NAND flash
1154 * @param is_write Non zero if this is a write
1155 * @param buffer_address
1156 *                 Physical memory address to DMA to/from
1157 * @param buffer_length
1158 *                 Length of the DMA in bytes
1159 */
1160static inline void __cvmx_nand_setup_dma(int chip, int is_write, uint64_t buffer_address, int buffer_length)
1161{
1162    union cvmx_mio_ndf_dma_cfg ndf_dma_cfg;
1163    CVMX_NAND_LOG_CALLED();
1164    CVMX_NAND_LOG_PARAM("%d", chip);
1165    CVMX_NAND_LOG_PARAM("%d", is_write);
1166    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
1167    CVMX_NAND_LOG_PARAM("%d", buffer_length);
1168    ndf_dma_cfg.u64 = 0;
1169    ndf_dma_cfg.s.en = 1;
1170    ndf_dma_cfg.s.rw = is_write; /* One means DMA reads from memory and writes to flash */
1171    ndf_dma_cfg.s.clr = 0;
1172    ndf_dma_cfg.s.size = ((buffer_length + 7) >> 3) - 1;
1173    ndf_dma_cfg.s.adr = buffer_address;
1174    CVMX_SYNCWS;
1175    cvmx_write_csr(CVMX_MIO_NDF_DMA_CFG, ndf_dma_cfg.u64);
1176    CVMX_NAND_RETURN_NOTHING();
1177}
1178
1179
1180/**
1181 * Dump a buffer out in hex for debug
1182 *
1183 * @param buffer_address
1184 *               Starting physical address
1185 * @param buffer_length
1186 *               Number of bytes to display
1187 */
1188static void __cvmx_nand_hex_dump(uint64_t buffer_address, int buffer_length)
1189{
1190    uint8_t *buffer = cvmx_phys_to_ptr(buffer_address);
1191    int offset = 0;
1192    while (offset < buffer_length)
1193    {
1194        int i;
1195        cvmx_dprintf("%*s%04x:",  2*debug_indent, "", offset);
1196        for (i=0; i<32; i++)
1197        {
1198            if ((i&3) == 0)
1199                cvmx_dprintf(" ");
1200            if (offset+i < buffer_length)
1201                cvmx_dprintf("%02x", 0xff & buffer[offset+i]);
1202            else
1203                cvmx_dprintf("  ");
1204        }
1205        cvmx_dprintf("\n");
1206        offset += 32;
1207    }
1208}
1209
1210/**
1211 * @INTERNAL
1212 * Perform a low level NAND read command
1213 *
1214 * @param chip   Chip to read from
1215 * @param nand_command1
1216 *               First command cycle value
1217 * @param address_cycles
 *               Number of address cycles after command 1
1219 * @param nand_address
1220 *               NAND address to use for address cycles
1221 * @param nand_command2
1222 *               NAND command cycle 2 if not zero
1223 * @param buffer_address
1224 *               Physical address to DMA into
1225 * @param buffer_length
1226 *               Length of the transfer in bytes
1227 *
 * @return Number of bytes transferred or a negative error code
1229 */
static inline int __cvmx_nand_low_level_read(int chip, int nand_command1, int address_cycles, uint64_t nand_address, int nand_command2, uint64_t buffer_address, int buffer_length)
{
    cvmx_nand_cmd_t cmd;
    union cvmx_mio_ndf_dma_cfg ndf_dma_cfg;
    int bytes;

    CVMX_NAND_LOG_CALLED();
    CVMX_NAND_LOG_PARAM("%d", chip);
    CVMX_NAND_LOG_PARAM("0x%x", nand_command1);
    CVMX_NAND_LOG_PARAM("%d", address_cycles);
    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)nand_address);
    CVMX_NAND_LOG_PARAM("0x%x", nand_command2);
    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
    CVMX_NAND_LOG_PARAM("%d", buffer_length);

    /* The DMA engine requires an 8 byte aligned buffer and a non-zero
    ** length that is a multiple of 8 bytes. */
    if ((chip < 0) || (chip > 7))
        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
    if (!buffer_address)
        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
    if (buffer_address & 7)
        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
    if (buffer_length & 7)
        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
    if (!buffer_length)
        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);

    /* Build the command and address cycles */
    if (__cvmx_nand_build_pre_cmd(chip, nand_command1, address_cycles, nand_address, nand_command2))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    /* Send WAIT.  This waits for some time, then
    ** waits for busy to be de-asserted. */
    if (__wait_for_busy_done(chip))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    /* Wait for tRR after busy de-asserts.
    ** Use 2* tALS as proxy.  This is overkill in
    ** the slow modes, but not bad in the faster ones. */
    /* NOTE: the same wait command is deliberately submitted twice below to
    ** get the 2x multiple described above. */
    memset(&cmd,  0,  sizeof(cmd));
    cmd.wait.two = 2;
    cmd.wait.n=4;
    if (cvmx_nand_submit(cmd))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
    if (cvmx_nand_submit(cmd))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    /* Send READ (the command that actually clocks data off the bus) */
    memset(&cmd,  0,  sizeof(cmd));
    cmd.rd.data_bytes = buffer_length;
    if (cvmx_nand_state[chip].onfi_timing >= 4)
        cmd.rd.nine = 10;  /* READ_EDO command is required for ONFI timing modes 4 and 5 */
    else
        cmd.rd.nine = 9;
    cmd.rd.rdn1 = cvmx_nand_state[chip].rdn[0];
    cmd.rd.rdn2 = cvmx_nand_state[chip].rdn[1];
    cmd.rd.rdn3 = cvmx_nand_state[chip].rdn[2];
    cmd.rd.rdn4 = cvmx_nand_state[chip].rdn[3];
    if (cvmx_nand_submit(cmd))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    /* Point the DMA engine at the caller's buffer (0 = flash-to-memory) */
    __cvmx_nand_setup_dma(chip, 0, buffer_address, buffer_length);

    /* Deselect the chip, release the bus, and ring the doorbell */
    if (__cvmx_nand_build_post_cmd())
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
    WATCHDOG_RESET();
    /* Wait for the DMA to complete */
    if (CVMX_WAIT_FOR_FIELD64(CVMX_MIO_NDF_DMA_CFG, cvmx_mio_ndf_dma_cfg_t, en, ==, 0, NAND_TIMEOUT_USECS_READ))
    {
        WATCHDOG_RESET();
        CVMX_NAND_RETURN(CVMX_NAND_TIMEOUT);
    }
    /* Return the number of bytes transferred: the DMA address register
    ** advances as data is moved, so the delta is the byte count. */
    ndf_dma_cfg.u64 = cvmx_read_csr(CVMX_MIO_NDF_DMA_CFG);
    bytes = ndf_dma_cfg.s.adr - buffer_address;

    if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
        __cvmx_nand_hex_dump(buffer_address, bytes);

    CVMX_NAND_RETURN(bytes);
}
1310
1311
1312/**
1313 * Read a page from NAND. If the buffer has room, the out of band
1314 * data will be included.
1315 *
1316 * @param chip   Chip select for NAND flash
1317 * @param nand_address
1318 *               Location in NAND to read. See description in file comment
1319 * @param buffer_address
1320 *               Physical address to store the result at
1321 * @param buffer_length
1322 *               Number of bytes to read
1323 *
1324 * @return Bytes read on success, a negative cvmx_nand_status_t error code on failure
1325 */
1326int cvmx_nand_page_read(int chip, uint64_t nand_address, uint64_t buffer_address, int buffer_length)
1327{
1328    int bytes;
1329
1330    CVMX_NAND_LOG_CALLED();
1331    CVMX_NAND_LOG_PARAM("%d", chip);
1332    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)nand_address);
1333    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
1334    CVMX_NAND_LOG_PARAM("%d", buffer_length);
1335
1336    if ((chip < 0) || (chip > 7))
1337        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1338    if (!cvmx_nand_state[chip].page_size)
1339        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1340    if (!buffer_address)
1341        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1342    if (buffer_address & 7)
1343        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1344    if (buffer_length & 7)
1345        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1346    if (!buffer_length)
1347        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1348
1349    /* For 16 bit mode, addresses within a page are word address, rather than byte addresses */
1350    if (cvmx_nand_state[chip].flags & CVMX_NAND_STATE_16BIT)
1351            nand_address = (nand_address & ~(cvmx_nand_state[chip].page_size - 1)) |  ((nand_address & (cvmx_nand_state[chip].page_size - 1)) >> 1);
1352
1353    bytes = __cvmx_nand_low_level_read(chip, NAND_COMMAND_READ, __cvmx_nand_get_address_cycles(chip), nand_address, NAND_COMMAND_READ_FIN, buffer_address, buffer_length);
1354    CVMX_NAND_RETURN(bytes);
1355}
1356#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
1357EXPORT_SYMBOL(cvmx_nand_page_read);
1358#endif
1359
1360
1361/**
1362 * Write a page to NAND. The buffer must contain the entire page
1363 * including the out of band data.
1364 *
1365 * @param chip   Chip select for NAND flash
1366 * @param nand_address
1367 *               Location in NAND to write. See description in file comment
1368 * @param buffer_address
1369 *               Physical address to read the data from
1370 *
1371 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
1372 */
cvmx_nand_status_t cvmx_nand_page_write(int chip, uint64_t nand_address, uint64_t buffer_address)
{
    cvmx_nand_cmd_t cmd;
    int buffer_length;

    CVMX_NAND_LOG_CALLED();
    CVMX_NAND_LOG_PARAM("%d", chip);
    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)nand_address);
    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);

    /* Validate chip select, that the chip was probed, and that the source
    ** buffer exists and is 8 byte aligned for the DMA engine. */
    if ((chip < 0) || (chip > 7))
        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
    if (!cvmx_nand_state[chip].page_size)
        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
    if (!buffer_address)
        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
    if (buffer_address & 7)
        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);

    /* For 16 bit mode, addresses within a page are word address, rather than byte addresses */
    if (cvmx_nand_state[chip].flags & CVMX_NAND_STATE_16BIT)
            nand_address = (nand_address & ~(cvmx_nand_state[chip].page_size - 1)) |  ((nand_address & (cvmx_nand_state[chip].page_size - 1)) >> 1);

    /* A full page write covers the data area plus the out of band area */
    buffer_length = cvmx_nand_state[chip].page_size + cvmx_nand_state[chip].oob_size;

    /* The NAND DMA engine always does transfers in 8 byte blocks, so round the buffer size down
    ** to a multiple of 8, otherwise we will transfer too much data to the NAND chip.
    ** Note this prevents the last few bytes of the OOB being written.  If these bytes
    ** need to be written, then this check needs to be removed, but this will result in
    ** extra write cycles beyond the end of the OOB. */
    buffer_length &= ~0x7;

    /* Build the command and address cycles (PROGRAM, no second command yet) */
    if (__cvmx_nand_build_pre_cmd(chip, NAND_COMMAND_PROGRAM, __cvmx_nand_get_address_cycles(chip), nand_address, 0))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    /* Send WRITE (the controller command that clocks data onto the bus) */
    memset(&cmd,  0,  sizeof(cmd));
    cmd.wr.data_bytes = buffer_length;
    cmd.wr.eight = 8;
    cmd.wr.wrn1 = cvmx_nand_state[chip].wrn[0];
    cmd.wr.wrn2 = cvmx_nand_state[chip].wrn[1];
    if (cvmx_nand_submit(cmd))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    /* Send WRITE command (PROGRAM_FIN commits the page inside the chip) */
    memset(&cmd,  0,  sizeof(cmd));
    cmd.cle.cmd_data = NAND_COMMAND_PROGRAM_FIN;
    cmd.cle.clen1 = cvmx_nand_state[chip].clen[0];
    cmd.cle.clen2 = cvmx_nand_state[chip].clen[1];
    cmd.cle.clen3 = cvmx_nand_state[chip].clen[2];
    cmd.cle.four = 4;
    if (cvmx_nand_submit(cmd))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    /* Point the DMA engine at the caller's buffer (1 = memory-to-flash) */
    __cvmx_nand_setup_dma(chip, 1, buffer_address, buffer_length);

    /* WAIT for R_B to signal program is complete  */
    if (__wait_for_busy_done(chip))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    /* Deselect the chip, release the bus, and ring the doorbell */
    if (__cvmx_nand_build_post_cmd())
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    /* Wait for the DMA to complete */
    WATCHDOG_RESET();
    if (CVMX_WAIT_FOR_FIELD64(CVMX_MIO_NDF_DMA_CFG, cvmx_mio_ndf_dma_cfg_t, en, ==, 0, NAND_TIMEOUT_USECS_WRITE))
    {
        WATCHDOG_RESET();
        CVMX_NAND_RETURN(CVMX_NAND_TIMEOUT);
    }
    CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
}
1446#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
1447EXPORT_SYMBOL(cvmx_nand_page_write);
1448#endif
1449
1450
1451/**
1452 * Erase a NAND block. A single block contains multiple pages.
1453 *
1454 * @param chip   Chip select for NAND flash
1455 * @param nand_address
1456 *               Location in NAND to erase. See description in file comment
1457 *
1458 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
1459 */
cvmx_nand_status_t cvmx_nand_block_erase(int chip, uint64_t nand_address)
{
    CVMX_NAND_LOG_CALLED();
    CVMX_NAND_LOG_PARAM("%d", chip);
    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)nand_address);

    /* Validate chip select and that the chip was successfully probed */
    if ((chip < 0) || (chip > 7))
        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
    if (!cvmx_nand_state[chip].page_size)
        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);

    /* Build the command and address cycles. ERASE takes only the row
    ** address (the column bits are shifted out), so the cycle count is
    ** the byte-rounded row bit count rather than the full address. */
    if (__cvmx_nand_build_pre_cmd(chip, NAND_COMMAND_ERASE,
                                  (__cvmx_nand_get_row_bits(chip)+7) >> 3,
                                  nand_address >> __cvmx_nand_get_column_bits(chip),
                                  NAND_COMMAND_ERASE_FIN))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    /* WAIT for R_B to signal erase is complete  */
    if (__wait_for_busy_done(chip))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    /* Deselect the chip, release the bus, and ring the doorbell */
    if (__cvmx_nand_build_post_cmd())
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    /* Wait for the command queue to be idle, which means the wait is done */
    WATCHDOG_RESET();
    if (CVMX_WAIT_FOR_FIELD64(CVMX_NDF_ST_REG, cvmx_ndf_st_reg_t, exe_idle, ==, 1, NAND_TIMEOUT_USECS_BLOCK_ERASE))
    {
        WATCHDOG_RESET();
        CVMX_NAND_RETURN(CVMX_NAND_TIMEOUT);
    }

    CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
}
1495#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
1496EXPORT_SYMBOL(cvmx_nand_block_erase);
1497#endif
1498
1499
1500/* Some reads (read ID, read parameter page) only use the low 8 bits of the bus
1501** in 16 bit mode.  We remove the unused bytes so that the data we present to the
1502** caller is as expected (same as 8 bit mode.)
1503*/
static void __cvmx_nand_fixup_16bit_id_reads(uint8_t *buf, int buffer_length)
{
    /* Keep only the odd-indexed bytes (the meaningful byte of each 16 bit
       bus cycle), compacting them to the front of the buffer. */
    int dst = 0;
    int src = 1;
    while (src < buffer_length)
    {
        buf[dst++] = buf[src];
        src += 2;
    }
}
1511
1512/**
1513 * Read the NAND ID information
1514 *
1515 * @param chip   Chip select for NAND flash
1516 * @param nand_address
1517 *               NAND address to read ID from. Usually this is either 0x0 or 0x20.
1518 * @param buffer_address
1519 *               Physical address to store data in
1520 * @param buffer_length
1521 *               Length of the buffer. Usually this is 4-8 bytes.  For 16 bit mode, this must be twice
1522 *               as large as the actual expected data.
1523 *
1524 * @return Bytes read on success, a negative cvmx_nand_status_t error code on failure
1525 */
1526int cvmx_nand_read_id(int chip, uint64_t nand_address, uint64_t buffer_address, int buffer_length)
1527{
1528    int bytes;
1529
1530    CVMX_NAND_LOG_CALLED();
1531    CVMX_NAND_LOG_PARAM("%d", chip);
1532    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)nand_address);
1533    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
1534    CVMX_NAND_LOG_PARAM("%d", buffer_length);
1535
1536    if ((chip < 0) || (chip > 7))
1537        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1538    if (!buffer_address)
1539        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1540    if (buffer_address & 7)
1541        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1542    if (!buffer_length)
1543        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1544
1545    bytes = __cvmx_nand_low_level_read(chip, NAND_COMMAND_READ_ID, 1, nand_address, 0, buffer_address, buffer_length);
1546    if (cvmx_nand_state[chip].flags & CVMX_NAND_STATE_16BIT)
1547        __cvmx_nand_fixup_16bit_id_reads(cvmx_phys_to_ptr(buffer_address), buffer_length);
1548
1549    CVMX_NAND_RETURN(bytes);
1550}
1551#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
1552EXPORT_SYMBOL(cvmx_nand_read_id);
1553#endif
1554
1555
1556/**
1557 * Read the NAND parameter page
1558 *
1559 * @param chip   Chip select for NAND flash
1560 * @param buffer_address
1561 *               Physical address to store data in
1562 * @param buffer_length
1563 *               Length of the buffer.  Usually 1024 bytes for 8 bit, 2048 for 16 bit mode.
1564 *
1565 * @return Bytes read on success, a negative cvmx_nand_status_t error code on failure
1566 */
1567int cvmx_nand_read_param_page(int chip, uint64_t buffer_address, int buffer_length)
1568{
1569    int bytes;
1570
1571    CVMX_NAND_LOG_CALLED();
1572    CVMX_NAND_LOG_PARAM("%d", chip);
1573    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
1574    CVMX_NAND_LOG_PARAM("%d", buffer_length);
1575
1576    if ((chip < 0) || (chip > 7))
1577        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1578    if (!buffer_address)
1579        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1580    if (buffer_address & 7)
1581        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1582    if (buffer_length & 7)
1583        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1584    if (!buffer_length)
1585        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1586
1587    bytes = __cvmx_nand_low_level_read(chip, NAND_COMMAND_READ_PARAM_PAGE, 1, 0x0, 0, buffer_address, buffer_length);
1588    if (cvmx_nand_state[chip].flags & CVMX_NAND_STATE_16BIT)
1589        __cvmx_nand_fixup_16bit_id_reads(cvmx_phys_to_ptr(buffer_address), buffer_length);
1590    CVMX_NAND_RETURN(bytes);
1591}
1592
1593
1594/**
1595 * Get the status of the NAND flash
1596 *
1597 * @param chip   Chip select for NAND flash
1598 *
1599 * @return NAND status or a negative cvmx_nand_status_t error code on failure
1600 */
1601int cvmx_nand_get_status(int chip)
1602{
1603    int status;
1604    int offset = !!(cvmx_nand_state[chip].flags & CVMX_NAND_STATE_16BIT);  /* Normalize flag to 0/1 */
1605
1606    CVMX_NAND_LOG_CALLED();
1607    CVMX_NAND_LOG_PARAM("%d", chip);
1608
1609    if ((chip < 0) || (chip > 7))
1610        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1611
1612    *((uint8_t*)cvmx_nand_buffer + offset)  = 0xff;
1613    status = __cvmx_nand_low_level_read(chip, NAND_COMMAND_STATUS, 0, 0, 0, cvmx_ptr_to_phys(cvmx_nand_buffer), 8);
1614    if (status > 0)
1615        status = *((uint8_t*)cvmx_nand_buffer + offset);
1616
1617    CVMX_NAND_RETURN(status);
1618}
1619#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
1620EXPORT_SYMBOL(cvmx_nand_get_status);
1621#endif
1622
1623
1624/**
1625 * Get the page size, excluding out of band data. This  function
1626 * will return zero for chip selects not connected to NAND.
1627 *
1628 * @param chip   Chip select for NAND flash
1629 *
1630 * @return Page size in bytes or a negative cvmx_nand_status_t error code on failure
1631 */
1632int cvmx_nand_get_page_size(int chip)
1633{
1634    CVMX_NAND_LOG_CALLED();
1635    CVMX_NAND_LOG_PARAM("%d", chip);
1636
1637    if ((chip < 0) || (chip > 7))
1638        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1639
1640    CVMX_NAND_RETURN(cvmx_nand_state[chip].page_size);
1641}
1642
1643
1644/**
1645 * Get the OOB size.
1646 *
1647 * @param chip   Chip select for NAND flash
1648 *
1649 * @return OOB in bytes or a negative cvmx_nand_status_t error code on failure
1650 */
1651int cvmx_nand_get_oob_size(int chip)
1652{
1653    CVMX_NAND_LOG_CALLED();
1654    CVMX_NAND_LOG_PARAM("%d", chip);
1655
1656    if ((chip < 0) || (chip > 7))
1657        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1658
1659    CVMX_NAND_RETURN(cvmx_nand_state[chip].oob_size);
1660}
1661
1662
1663/**
1664 * Get the number of pages per NAND block
1665 *
1666 * @param chip   Chip select for NAND flash
1667 *
1668 * @return Number of pages in each block or a negative cvmx_nand_status_t error
1669 *         code on failure
1670 */
1671int cvmx_nand_get_pages_per_block(int chip)
1672{
1673    CVMX_NAND_LOG_CALLED();
1674    CVMX_NAND_LOG_PARAM("%d", chip);
1675
1676    if ((chip < 0) || (chip > 7))
1677        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1678
1679    CVMX_NAND_RETURN(cvmx_nand_state[chip].pages_per_block);
1680}
1681
1682
1683/**
1684 * Get the number of blocks in the NAND flash
1685 *
1686 * @param chip   Chip select for NAND flash
1687 *
1688 * @return Number of blocks or a negative cvmx_nand_status_t error code on failure
1689 */
1690int cvmx_nand_get_blocks(int chip)
1691{
1692    CVMX_NAND_LOG_CALLED();
1693    CVMX_NAND_LOG_PARAM("%d", chip);
1694
1695    if ((chip < 0) || (chip > 7))
1696        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1697
1698    CVMX_NAND_RETURN(cvmx_nand_state[chip].blocks);
1699}
1700
1701
1702/**
1703 * Reset the NAND flash
1704 *
1705 * @param chip   Chip select for NAND flash
1706 *
1707 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
1708 */
1709cvmx_nand_status_t cvmx_nand_reset(int chip)
1710{
1711    CVMX_NAND_LOG_CALLED();
1712    CVMX_NAND_LOG_PARAM("%d", chip);
1713
1714    if ((chip < 0) || (chip > 7))
1715        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1716    if (!cvmx_nand_state[chip].page_size)
1717        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1718
1719    if (__cvmx_nand_build_pre_cmd(chip, NAND_COMMAND_RESET, 0, 0, 0))
1720        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1721
1722    /* WAIT for R_B to signal reset is complete  */
1723    if (__wait_for_busy_done(chip))
1724        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1725
1726    if (__cvmx_nand_build_post_cmd())
1727        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1728
1729    CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
1730}
1731#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
1732EXPORT_SYMBOL(cvmx_nand_reset);
1733#endif
1734
1735
1736
1737
1738/**
1739 * This function computes the Octeon specific ECC data used by the NAND boot
1740 * feature.
1741 *
1742 * @param block  pointer to 256 bytes of data
1743 * @param eccp   pointer to where 8 bytes of ECC data will be stored
1744 */
1745void cvmx_nand_compute_boot_ecc(unsigned char *block, unsigned char *eccp)
1746{
1747	unsigned char pd0, pd1, pd2;
1748	int i, j;
1749
1750	pd0 = pd1 = pd2 = 0;
1751
1752	for (i = 0; i < 256; i++)	/* PD0<0> */
1753		pd0 ^= (block[i] ^ (block[i] >> 2) ^ (block[i] >> 4) ^ (block[i] >> 6)) & 1;
1754	for (i = 0; i < 256; i++)	/* PD0<1> */
1755		pd0 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 4) ^ (block[i] >> 5)) & 1) << 1;
1756	for (i = 0; i < 256; i++)	/* PD0<2> */
1757		pd0 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^ (block[i] >> 3)) & 1) << 2;
1758	for (i = 0; i < 128; i++)	/* PD0<3> */
1759		pd0 ^= ((block[2*i] ^ (block[2*i] >> 1) ^ (block[2*i] >> 2) ^
1760			(block[2*i] >> 3) ^ (block[2*i] >> 4) ^ (block[2*i] >> 5) ^
1761			(block[2*i] >> 6) ^ (block[2*i] >> 7)) & 1) << 3;
1762	for (i = 0; i < 64; i++)	/* PD0<4> */
1763		for (j = 0; j < 2; j++)
1764			pd0 ^= ((block[4*i+j] ^ (block[4*i+j] >> 1) ^ (block[4*i+j] >> 2) ^
1765				(block[4*i+j] >> 3) ^ (block[4*i+j] >> 4) ^ (block[4*i+j] >> 5) ^
1766				(block[4*i+j] >> 6) ^ (block[4*i+j] >> 7)) & 1) << 4;
1767	for (i = 0; i < 32; i++)	/* PD0<5> */
1768		for (j = 0; j < 4; j++)
1769			pd0 ^= ((block[8*i+j] ^ (block[8*i+j] >> 1) ^ (block[8*i+j] >> 2) ^
1770				(block[8*i+j] >> 3) ^ (block[8*i+j] >> 4) ^ (block[8*i+j] >> 5) ^
1771				(block[8*i+j] >> 6) ^ (block[8*i+j] >> 7)) & 1) << 5;
1772	for (i = 0; i < 16; i++)	/* PD0<6> */
1773		for (j = 0; j < 8; j++)
1774			pd0 ^= ((block[16*i+j] ^ (block[16*i+j] >> 1) ^ (block[16*i+j] >> 2) ^
1775				(block[16*i+j] >> 3) ^ (block[16*i+j] >> 4) ^ (block[16*i+j] >> 5) ^
1776				(block[16*i+j] >> 6) ^ (block[16*i+j] >> 7)) & 1) << 6;
1777	for (i = 0; i < 8; i++)		/* PD0<7> */
1778		for (j = 0; j < 16; j++)
1779			pd0 ^= ((block[32*i+j] ^ (block[32*i+j] >> 1) ^ (block[32*i+j] >> 2) ^
1780				(block[32*i+j] >> 3) ^ (block[32*i+j] >> 4) ^ (block[32*i+j] >> 5) ^
1781				(block[32*i+j] >> 6) ^ (block[32*i+j] >> 7)) & 1) << 7;
1782	for (i = 0; i < 4; i++)		/* PD1<0> */
1783		for (j = 0; j < 32; j++)
1784			pd1 ^= ((block[64*i+j] ^ (block[64*i+j] >> 1) ^ (block[64*i+j] >> 2) ^
1785				(block[64*i+j] >> 3) ^ (block[64*i+j] >> 4) ^ (block[64*i+j] >> 5) ^
1786				(block[64*i+j] >> 6) ^ (block[64*i+j] >> 7)) & 1) << 0;
1787	for (i = 0; i < 2; i++)		/* PD1<1> */
1788		for (j = 0; j < 64; j++)
1789			pd1 ^= ((block[128*i+j] ^ (block[128*i+j] >> 1) ^ (block[128*i+j] >> 2) ^
1790				(block[128*i+j] >> 3) ^ (block[128*i+j] >> 4) ^ (block[128*i+j] >> 5) ^
1791				(block[128*i+j] >> 6) ^ (block[128*i+j] >> 7)) & 1) << 1;
1792	for (i = 0; i < 128; i++)	/* PD1<2> */
1793		pd1 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^
1794			(block[i] >> 3) ^ (block[i] >> 4) ^ (block[i] >> 5) ^
1795			(block[i] >> 6) ^ (block[i] >> 7)) & 1) << 2;
1796	/* PD1<3> */
1797	/* PD1<4> */
1798	for (i = 0; i < 256; i++)	/* PD1<5> */
1799		pd1 ^= (((block[i] >> 1) ^ (block[i] >> 3) ^ (block[i] >> 5) ^ (block[i] >> 7)) & 1) << 5;
1800	for (i = 0; i < 256; i++)	/* PD1<6> */
1801		pd1 ^= (((block[i] >> 2) ^ (block[i] >> 3) ^ (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 6;
1802	for (i = 0; i < 256; i++)	/* PD1<7> */
1803		pd1 ^= (((block[i] >> 4) ^ (block[i] >> 5) ^ (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 7;
1804	for (i = 0; i < 128; i++)	/* PD2<0> */
1805		pd2 ^= ((block[2*i+1] ^ (block[2*i+1] >> 1) ^ (block[2*i+1] >> 2) ^
1806			(block[2*i+1] >> 3) ^ (block[2*i+1] >> 4) ^ (block[2*i+1] >> 5) ^
1807			(block[2*i+1] >> 6) ^ (block[2*i+1] >> 7)) & 1) << 0;
1808	for (i = 0; i < 64; i++)	/* PD2<1> */
1809		for (j = 2; j < 4; j++)
1810			pd2 ^= ((block[4*i+j] ^ (block[4*i+j] >> 1) ^ (block[4*i+j] >> 2) ^
1811				(block[4*i+j] >> 3) ^ (block[4*i+j] >> 4) ^ (block[4*i+j] >> 5) ^
1812				(block[4*i+j] >> 6) ^ (block[4*i+j] >> 7)) & 1) << 1;
1813	for (i = 0; i < 32; i++)	/* PD2<2> */
1814		for (j = 4; j < 8; j++)
1815			pd2 ^= ((block[8*i+j] ^ (block[8*i+j] >> 1) ^ (block[8*i+j] >> 2) ^
1816				(block[8*i+j] >> 3) ^ (block[8*i+j] >> 4) ^ (block[8*i+j] >> 5) ^
1817				(block[8*i+j] >> 6) ^ (block[8*i+j] >> 7)) & 1) << 2;
1818	for (i = 0; i < 16; i++)	/* PD2<3> */
1819		for (j = 8; j < 16; j++)
1820			pd2 ^= ((block[16*i+j] ^ (block[16*i+j] >> 1) ^ (block[16*i+j] >> 2) ^
1821				(block[16*i+j] >> 3) ^ (block[16*i+j] >> 4) ^ (block[16*i+j] >> 5) ^
1822				(block[16*i+j] >> 6) ^ (block[16*i+j] >> 7)) & 1) << 3;
1823	for (i = 0; i < 8; i++)		/* PD2<4> */
1824		for (j = 16; j < 32; j++)
1825			pd2 ^= ((block[32*i+j] ^ (block[32*i+j] >> 1) ^ (block[32*i+j] >> 2) ^
1826				(block[32*i+j] >> 3) ^ (block[32*i+j] >> 4) ^ (block[32*i+j] >> 5) ^
1827				(block[32*i+j] >> 6) ^ (block[32*i+j] >> 7)) & 1) << 4;
1828	for (i = 0; i < 4; i++)		/* PD2<5> */
1829		for (j = 32; j < 64; j++)
1830			pd2 ^= ((block[64*i+j] ^ (block[64*i+j] >> 1) ^ (block[64*i+j] >> 2) ^
1831				(block[64*i+j] >> 3) ^ (block[64*i+j] >> 4) ^ (block[64*i+j] >> 5) ^
1832				(block[64*i+j] >> 6) ^ (block[64*i+j] >> 7)) & 1) << 5;
1833	for (i = 0; i < 2; i++)		/* PD2<6> */
1834		for (j = 64; j < 128; j++)
1835			pd2 ^= ((block[128*i+j] ^ (block[128*i+j] >> 1) ^ (block[128*i+j] >> 2) ^
1836				(block[128*i+j] >> 3) ^ (block[128*i+j] >> 4) ^ (block[128*i+j] >> 5) ^
1837				(block[128*i+j] >> 6) ^ (block[128*i+j] >> 7)) & 1) << 6;
1838	for (i = 128; i < 256; i++)	/* PD2<7> */
1839		pd2 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^
1840			(block[i] >> 3) ^ (block[i] >> 4) ^ (block[i] >> 5) ^
1841			(block[i] >> 6) ^ (block[i] >> 7)) & 1) << 7;
1842
1843	eccp[0] = pd0;
1844	eccp[1] = pd1;
1845	eccp[2] = pd2;
1846}
1847
1848/**
1849 * Check an Octeon ECC block, fixing errors if possible
1850 *
1851 * @param block  Pointer to block to check
1852 *
1853 * @return Zero if block has no errors, one if errors were corrected, two
1854 *         if the errors could not be corrected.
1855 */
1856int cvmx_nand_correct_boot_ecc(uint8_t *block)
1857{
1858    unsigned char pd0, pd1, pd2;
1859    int i, j;
1860    unsigned char xorpd0, xorpd1, xorpd2;
1861    int xor_num;
1862    unsigned int check;
1863
1864    asm volatile ("pref 0,0(%0);pref 0,128(%0);pref 0,256(%0)\n" :: "r" (block));
1865
1866    pd0 = pd1 = pd2 = 0;
1867
1868    for (i = 0; i < 256; i++)   /* PD0<0> */
1869        pd0 ^= (block[i] ^ (block[i] >> 2) ^ (block[i] >> 4) ^ (block[i] >> 6)) & 1;
1870    for (i = 0; i < 256; i++)   /* PD0<1> */
1871        pd0 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 4) ^ (block[i] >> 5)) & 1) << 1;
1872    for (i = 0; i < 256; i++)   /* PD0<2> */
1873        pd0 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^ (block[i] >> 3)) & 1) << 2;
1874    for (i = 0; i < 128; i++)   /* PD0<3> */
1875        pd0 ^= ((block[2*i] ^ (block[2*i] >> 1) ^ (block[2*i] >> 2) ^
1876                 (block[2*i] >> 3) ^ (block[2*i] >> 4) ^ (block[2*i] >> 5) ^
1877                 (block[2*i] >> 6) ^ (block[2*i] >> 7)) & 1) << 3;
1878    for (i = 0; i < 64; i++)    /* PD0<4> */
1879        for (j = 0; j < 2; j++)
1880            pd0 ^= ((block[4*i+j] ^ (block[4*i+j] >> 1) ^ (block[4*i+j] >> 2) ^
1881                     (block[4*i+j] >> 3) ^ (block[4*i+j] >> 4) ^ (block[4*i+j] >> 5) ^
1882                     (block[4*i+j] >> 6) ^ (block[4*i+j] >> 7)) & 1) << 4;
1883    for (i = 0; i < 32; i++)    /* PD0<5> */
1884        for (j = 0; j < 4; j++)
1885            pd0 ^= ((block[8*i+j] ^ (block[8*i+j] >> 1) ^ (block[8*i+j] >> 2) ^
1886                     (block[8*i+j] >> 3) ^ (block[8*i+j] >> 4) ^ (block[8*i+j] >> 5) ^
1887                     (block[8*i+j] >> 6) ^ (block[8*i+j] >> 7)) & 1) << 5;
1888    for (i = 0; i < 16; i++)    /* PD0<6> */
1889        for (j = 0; j < 8; j++)
1890            pd0 ^= ((block[16*i+j] ^ (block[16*i+j] >> 1) ^ (block[16*i+j] >> 2) ^
1891                     (block[16*i+j] >> 3) ^ (block[16*i+j] >> 4) ^ (block[16*i+j] >> 5) ^
1892                     (block[16*i+j] >> 6) ^ (block[16*i+j] >> 7)) & 1) << 6;
1893    for (i = 0; i < 8; i++)     /* PD0<7> */
1894        for (j = 0; j < 16; j++)
1895            pd0 ^= ((block[32*i+j] ^ (block[32*i+j] >> 1) ^ (block[32*i+j] >> 2) ^
1896                     (block[32*i+j] >> 3) ^ (block[32*i+j] >> 4) ^ (block[32*i+j] >> 5) ^
1897                     (block[32*i+j] >> 6) ^ (block[32*i+j] >> 7)) & 1) << 7;
1898    for (i = 0; i < 4; i++)     /* PD1<0> */
1899        for (j = 0; j < 32; j++)
1900            pd1 ^= ((block[64*i+j] ^ (block[64*i+j] >> 1) ^ (block[64*i+j] >> 2) ^
1901                     (block[64*i+j] >> 3) ^ (block[64*i+j] >> 4) ^ (block[64*i+j] >> 5) ^
1902                     (block[64*i+j] >> 6) ^ (block[64*i+j] >> 7)) & 1) << 0;
1903    for (i = 0; i < 2; i++)     /* PD1<1> */
1904        for (j = 0; j < 64; j++)
1905            pd1 ^= ((block[128*i+j] ^ (block[128*i+j] >> 1) ^ (block[128*i+j] >> 2) ^
1906                     (block[128*i+j] >> 3) ^ (block[128*i+j] >> 4) ^ (block[128*i+j] >> 5) ^
1907                     (block[128*i+j] >> 6) ^ (block[128*i+j] >> 7)) & 1) << 1;
1908    for (i = 0; i < 128; i++)   /* PD1<2> */
1909        pd1 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^
1910                 (block[i] >> 3) ^ (block[i] >> 4) ^ (block[i] >> 5) ^
1911                 (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 2;
1912    /* PD1<3> */
1913    /* PD1<4> */
1914    for (i = 0; i < 256; i++)   /* PD1<5> */
1915        pd1 ^= (((block[i] >> 1) ^ (block[i] >> 3) ^ (block[i] >> 5) ^ (block[i] >> 7)) & 1) << 5;
1916    for (i = 0; i < 256; i++)   /* PD1<6> */
1917        pd1 ^= (((block[i] >> 2) ^ (block[i] >> 3) ^ (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 6;
1918    for (i = 0; i < 256; i++)   /* PD1<7> */
1919        pd1 ^= (((block[i] >> 4) ^ (block[i] >> 5) ^ (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 7;
1920    for (i = 0; i < 128; i++)   /* PD2<0> */
1921        pd2 ^= ((block[2*i+1] ^ (block[2*i+1] >> 1) ^ (block[2*i+1] >> 2) ^
1922                 (block[2*i+1] >> 3) ^ (block[2*i+1] >> 4) ^ (block[2*i+1] >> 5) ^
1923                 (block[2*i+1] >> 6) ^ (block[2*i+1] >> 7)) & 1) << 0;
1924    for (i = 0; i < 64; i++)    /* PD2<1> */
1925        for (j = 2; j < 4; j++)
1926            pd2 ^= ((block[4*i+j] ^ (block[4*i+j] >> 1) ^ (block[4*i+j] >> 2) ^
1927                     (block[4*i+j] >> 3) ^ (block[4*i+j] >> 4) ^ (block[4*i+j] >> 5) ^
1928                     (block[4*i+j] >> 6) ^ (block[4*i+j] >> 7)) & 1) << 1;
1929    for (i = 0; i < 32; i++)    /* PD2<2> */
1930        for (j = 4; j < 8; j++)
1931            pd2 ^= ((block[8*i+j] ^ (block[8*i+j] >> 1) ^ (block[8*i+j] >> 2) ^
1932                     (block[8*i+j] >> 3) ^ (block[8*i+j] >> 4) ^ (block[8*i+j] >> 5) ^
1933                     (block[8*i+j] >> 6) ^ (block[8*i+j] >> 7)) & 1) << 2;
1934    for (i = 0; i < 16; i++)    /* PD2<3> */
1935        for (j = 8; j < 16; j++)
1936            pd2 ^= ((block[16*i+j] ^ (block[16*i+j] >> 1) ^ (block[16*i+j] >> 2) ^
1937                     (block[16*i+j] >> 3) ^ (block[16*i+j] >> 4) ^ (block[16*i+j] >> 5) ^
1938                     (block[16*i+j] >> 6) ^ (block[16*i+j] >> 7)) & 1) << 3;
1939    for (i = 0; i < 8; i++)     /* PD2<4> */
1940        for (j = 16; j < 32; j++)
1941            pd2 ^= ((block[32*i+j] ^ (block[32*i+j] >> 1) ^ (block[32*i+j] >> 2) ^
1942                     (block[32*i+j] >> 3) ^ (block[32*i+j] >> 4) ^ (block[32*i+j] >> 5) ^
1943                     (block[32*i+j] >> 6) ^ (block[32*i+j] >> 7)) & 1) << 4;
1944    for (i = 0; i < 4; i++)     /* PD2<5> */
1945        for (j = 32; j < 64; j++)
1946            pd2 ^= ((block[64*i+j] ^ (block[64*i+j] >> 1) ^ (block[64*i+j] >> 2) ^
1947                     (block[64*i+j] >> 3) ^ (block[64*i+j] >> 4) ^ (block[64*i+j] >> 5) ^
1948                     (block[64*i+j] >> 6) ^ (block[64*i+j] >> 7)) & 1) << 5;
1949    for (i = 0; i < 2; i++)     /* PD2<6> */
1950        for (j = 64; j < 128; j++)
1951            pd2 ^= ((block[128*i+j] ^ (block[128*i+j] >> 1) ^ (block[128*i+j] >> 2) ^
1952                     (block[128*i+j] >> 3) ^ (block[128*i+j] >> 4) ^ (block[128*i+j] >> 5) ^
1953                     (block[128*i+j] >> 6) ^ (block[128*i+j] >> 7)) & 1) << 6;
1954    for (i = 128; i < 256; i++) /* PD2<7> */
1955        pd2 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^
1956                 (block[i] >> 3) ^ (block[i] >> 4) ^ (block[i] >> 5) ^
1957                 (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 7;
1958
1959    xorpd0 = pd0 ^ block[256];
1960    xorpd1 = pd1 ^ block[257];
1961    xorpd2 = pd2 ^ block[258];
1962
1963    xor_num = __builtin_popcount((xorpd0 << 16) | (xorpd1 << 8) | xorpd2);
1964    check = (((xorpd1 & 7) << 8) | xorpd0) ^ ((xorpd2 << 3) | (xorpd1 >> 5));
1965
1966    if (xor_num == 0)
1967        return 0;
1968    else if ((xor_num > 1) && (check != 0x7FF))
1969        return 2;
1970
1971    if (check == 0x7FF)
1972    {
1973        /* Correct the error */
1974        block[xorpd2] ^= 1 << (xorpd1 >> 5);
1975    }
1976
1977    return 1;
1978}
1979
1980cvmx_nand_status_t cvmx_nand_set_defaults(int page_size, int oob_size, int pages_per_block, int blocks, int onfi_timing_mode)
1981{
1982    if (!page_size || !oob_size || !pages_per_block || !blocks || onfi_timing_mode > 5)
1983        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1984
1985    cvmx_nand_default.page_size = page_size;
1986    cvmx_nand_default.oob_size = oob_size;
1987    cvmx_nand_default.pages_per_block = pages_per_block;
1988    cvmx_nand_default.blocks = blocks;
1989    cvmx_nand_default.onfi_timing = onfi_timing_mode;
1990
1991    CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
1992}
1993