1/*
2 * Broadcom NAND core interface
3 *
4 * Copyright (C) 2015, Broadcom Corporation. All Rights Reserved.
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
13 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
15 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
16 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 *
18 * $Id: $
19 */
20
21#include <typedefs.h>
22#include <osl.h>
23#include <bcmutils.h>
24#include <siutils.h>
25#include <hndsoc.h>
26#include <sbhndcpu.h>
27#include <sbchipc.h>
28#include <bcmdevs.h>
29#include <nand_core.h>
30#include <hndnand.h>
31#include <hndpmu.h>
32
33#ifdef BCMDBG
34#define	NANDFL_MSG(args)	printf args
35#else
36#define	NANDFL_MSG(args)
37#endif	/* BCMDBG */
38
39#define NANDF_RETRIES	1000000
40
41#define NANDF_SMALL_BADBLOCK_POS	5
42#define NANDF_LARGE_BADBLOCK_POS	0
43
/* Per-part NAND timing description used to program the controller's
 * timing_1/timing_2 registers (see nandcore_optimize_timing()).
 * Parts are matched by the first 5 bytes of their READ ID response.
 */
struct nandpart_timing_info {
	const char	*name;	/* Human-readable part name for log output */
	uint8	id[8];		/* READ ID bytes; only the first 5 are compared */
	/* Timing unit is ns for the following parameters */
	uint8	tWP;		/* WE# pulse width */
	uint8	tWH;		/* WE# high hold time */
	uint8	tRP;		/* RE# pulse width */
	uint8	tREH;		/* RE# high hold time */
	uint8	tCS;		/* CE# setup time */
	uint8	tCLH;		/* CLE hold time */
	uint8	tALH;		/* ALE hold time */
	uint16	tADL;		/* Address-to-data loading time */
	uint8	tWB;		/* WE# high to busy */
	uint8	tWHR;		/* WE# high to RE# low */
	uint8	tREAD;		/* Read cycle budget (clamped to tRP + tREH) */
};
60
61static struct nandpart_timing_info nandpart_timing_list[] = {
62	{"Samsung K9LCG08U0B",
63	{0xec, 0xde, 0xd5, 0x7e, 0x68, 0x44},
64	11, 11, 11, 11, 25, 5, 5, 300, 100, 176, 37},
65
66	{"Micron MT29F4G08ABADA",
67	{0x2c, 0xdc, 0x90, 0x95, 0x56},
68	10, 7, 10, 7, 15, 5, 5, 70, 100, 60, 33},
69
70	{"Micron MT29F64G08CBABA",
71	{0x2c, 0x64, 0x44, 0x4b, 0xa9},
72	50, 30, 50, 30, 70, 20, 20, 200, 200, 120, 57},
73
74	{NULL, }
75};
76
/* Private global state */
static hndnand_t nandcore;	/* Singleton device handle returned by nandcore_init() */

/* Number of NANDCACHE_SIZE chunks the controller needs per flash page */
static uint32 num_cache_per_page;
/* Spare (OOB) bytes associated with each controller cache chunk */
static uint32 spare_per_cache;

/* Prototype */
static int nandcore_poll(si_t *sih, nandregs_t *nc);

hndnand_t *nandcore_init(si_t *sih);
static int nandcore_read(hndnand_t *nfl, uint64 offset, uint len, uchar *buf);
static int nandcore_write(hndnand_t *nfl, uint64 offset, uint len, const uchar *buf);
static int nandcore_erase(hndnand_t *nfl, uint64 offset);
static int nandcore_checkbadb(hndnand_t *nfl, uint64 offset);
static int nandcore_mark_badb(hndnand_t *nfl, uint64 offset);

/* Extended hooks used by the brcmnand driver; not needed in CFE builds */
#ifndef _CFE_
static int nandcore_dev_ready(hndnand_t *nfl);
static int nandcore_select_chip(hndnand_t *nfl, int chip);
static int nandcore_cmdfunc(hndnand_t *nfl, uint64 addr, int cmd);
static int nandcore_waitfunc(hndnand_t *nfl, int *status);
static int nandcore_read_oob(hndnand_t *nfl, uint64 addr, uint8 *oob);
static int nandcore_write_oob(hndnand_t *nfl, uint64 addr, uint8 *oob);
static int nandcore_read_page(hndnand_t *nfl, uint64 addr, uint8 *buf, uint8 *oob, bool ecc,
	uint32 *herr, uint32 *serr);
static int nandcore_write_page(hndnand_t *nfl, uint64 addr, const uint8 *buf, uint8 *oob, bool ecc);
static int nandcore_cmd_read_byte(hndnand_t *nfl, int cmd, int arg);
#endif /* !_CFE_ */
105
/* Issue a nand flash command.
 * Writing the opcode to cmd_start kicks off the operation; completion is
 * observed separately via nandcore_poll().
 */
static INLINE void
nandcore_cmd(osl_t *osh, nandregs_t *nc, uint opcode)
{
	W_REG(osh, &nc->cmd_start, opcode);
}
112
113static bool
114_nandcore_buf_erased(const void *buf, unsigned len)
115{
116	unsigned i;
117	const uint32 *p = buf;
118
119	for (i = 0; i < (len >> 2); i++) {
120		if (p[i] != 0xffffffff)
121			return FALSE;
122	}
123
124	return TRUE;
125}
126
127static INLINE int
128_nandcore_oobbyte_per_cache(hndnand_t *nfl, uint cache, uint32 spare)
129{
130	uint32 oob_byte;
131
132	if (nfl->sectorsize == 512)
133		oob_byte = spare;
134	else {
135		if ((spare * 2) < NANDSPARECACHE_SIZE)
136			oob_byte = spare * 2;
137		else
138			oob_byte = (cache % 2) ?
139			((spare * 2) - NANDSPARECACHE_SIZE) :
140			 NANDSPARECACHE_SIZE;
141	}
142
143	return oob_byte;
144}
145
/* Read one full page at 'offset' into 'buf', optionally collecting the OOB
 * area into 'oob' and ECC statistics into 'herr'/'serr'.
 *
 * The controller transfers a page as num_cache_per_page chunks of
 * NANDCACHE_SIZE bytes; each chunk is copied out of the flash_cache
 * registers with the APB bridge switched to little-endian mode.
 *
 * Returns 0 on success (and also when offset/length are rejected), or a
 * negative value if the controller never reports ready.
 */
static int
_nandcore_read_page(hndnand_t *nfl, uint64 offset, uint8 *buf, uint8 *oob, bool ecc,
	uint32 *herr, uint32 *serr)
{
	osl_t *osh;
	nandregs_t *nc = (nandregs_t *)nfl->core;
	aidmp_t *ai = (aidmp_t *)nfl->wrap;
	unsigned cache, col = 0;
	unsigned hard_err_count = 0;
	uint32 mask, reg, *to;
	uint32 err_soft_reg, err_hard_reg;
	int i, ret;
	uint8 *oob_to = oob;
	uint32 rd_oob_byte, left_oob_byte;

	ASSERT(nfl->sih);

	mask = nfl->pagesize - 1;
	/* Check offset and length */
	if ((offset & mask) != 0)
		return 0;

	/* nfl->size is in MB; reject a read that would run past device end */
	if ((((offset + nfl->pagesize) >> 20) > nfl->size) ||
	    ((((offset + nfl->pagesize) >> 20) == nfl->size) &&
	     (((offset + nfl->pagesize) & ((1 << 20) - 1)) != 0)))
		return 0;

	osh = si_osh(nfl->sih);

	/* Reset  ECC error stats */
	err_hard_reg = R_REG(osh, &nc->uncorr_error_count);
	err_soft_reg = R_REG(osh, &nc->read_error_count);

	/* Enable ECC validation for ecc page reads */
	if (ecc)
		OR_REG(osh, nfl->chipidx ? &nc->acc_control_cs1 : &nc->acc_control_cs0,
			NANDAC_CS0_RD_ECC_EN);
	else
		AND_REG(osh, nfl->chipidx ? &nc->acc_control_cs1 : &nc->acc_control_cs0,
			~NANDAC_CS0_RD_ECC_EN);

	/* Loop all caches in page */
	for (cache = 0; cache < num_cache_per_page; cache++, col += NANDCACHE_SIZE) {
		uint32 ext_addr;

		/* Set the page address for the following commands; the upper
		 * address bits live in cmd_ext_address (other fields preserved)
		 */
		reg = (R_REG(osh, &nc->cmd_ext_address) & ~NANDCMD_EXT_ADDR_MASK);
		ext_addr = ((offset + col) >> 32) & NANDCMD_EXT_ADDR_MASK;
		W_REG(osh, &nc->cmd_ext_address, (reg | ext_addr));
		W_REG(osh, &nc->cmd_address, (uint32)offset + col);

		/* Issue command to read partial page */
		nandcore_cmd(osh, nc, NANDCMD_PAGE_RD);

		/* Wait for the command to complete */
		if ((ret = nandcore_poll(nfl->sih, nc)) < 0)
			return ret;

		/* Set controller to Little Endian mode for copying */
		OR_REG(osh, &ai->ioctrl, NAND_APB_LITTLE_ENDIAN);

		/* Read page data per cache */
		to = (uint32 *)(buf + col);
		for (i = 0; i < (NANDCACHE_SIZE / 4); i++, to++)
			*to = R_REG(osh, &nc->flash_cache[i]);

		/* Read oob data per cache */
		if (oob_to) {
			rd_oob_byte = _nandcore_oobbyte_per_cache(nfl, cache, spare_per_cache);

			left_oob_byte = rd_oob_byte % 4;

			/* Pay attention to natural address alignment access;
			 * oob_to may be unaligned, so stage each word in 'reg'
			 */
			for (i = 0; i < (rd_oob_byte / 4); i++) {
				reg = R_REG(osh, &nc->spare_area_read_ofs[i]);
				memcpy((void *)oob_to, (void *)&reg, 4);
				oob_to += 4;
			}

			if (left_oob_byte != 0) {
				reg = R_REG(osh, &nc->spare_area_read_ofs[i]);
				memcpy((void *)oob_to, (void *)&reg, left_oob_byte);
				oob_to += left_oob_byte;
			}
		}

		/* Return to Big Endian mode for commands etc */
		AND_REG(osh, &ai->ioctrl, ~NAND_APB_LITTLE_ENDIAN);

		/* capture hard errors for each partial; an erased chunk that
		 * trips the uncorrectable counter is not counted as an error
		 */
		if (err_hard_reg != R_REG(osh, &nc->uncorr_error_count)) {
			int era = (R_REG(osh, &nc->intfc_status) & NANDIST_ERASED);
			if ((!era) && (!_nandcore_buf_erased(buf+col, NANDCACHE_SIZE)))
				hard_err_count ++;

			err_hard_reg = R_REG(osh, &nc->uncorr_error_count);
		}
	} /* for cache */

	if (!ecc)
		return 0;

	/* Report hard ECC errors */
	if (herr)
		*herr = hard_err_count;

	/* Get ECC soft error stats */
	if (serr)
		*serr = R_REG(osh, &nc->read_error_count) - err_soft_reg;

	return 0;
}
258
/* Program one full page at 'offset' from 'buf', optionally writing the OOB
 * area from 'oob'. Write-protect is dropped for the duration of the
 * operation and re-asserted before returning.
 *
 * Returns 0 on success (and also when offset/length are rejected), or a
 * negative value if the controller never reports ready.
 */
static int
_nandcore_write_page(hndnand_t *nfl, uint64 offset, const uint8 *buf, uint8 *oob, bool ecc)
{
	osl_t *osh;
	nandregs_t *nc = (nandregs_t *)nfl->core;
	aidmp_t *ai = (aidmp_t *)nfl->wrap;
	unsigned cache, col = 0;
	uint32 mask, reg, *from;
	int i, ret = 0;
	uint8 *oob_from = oob;
	uint32 wr_oob_byte, left_oob_byte;

	ASSERT(nfl->sih);

	mask = nfl->pagesize - 1;
	/* Check offset and length */
	if ((offset & mask) != 0)
		return 0;

	/* nfl->size is in MB; reject a write that would run past device end */
	if ((((offset + nfl->pagesize) >> 20) > nfl->size) ||
	    ((((offset + nfl->pagesize) >> 20) == nfl->size) &&
	     (((offset + nfl->pagesize) & ((1 << 20) - 1)) != 0)))
		return 0;

	osh = si_osh(nfl->sih);

	/* Disable WP */
	AND_REG(osh, &nc->cs_nand_select, ~NANDCSEL_NAND_WP);

	/* Enable ECC generation for ecc page write, if requested */
	if (ecc)
		OR_REG(osh, nfl->chipidx ? &nc->acc_control_cs1 : &nc->acc_control_cs0,
			NANDAC_CS0_WR_ECC_EN);
	else
		AND_REG(osh, nfl->chipidx ? &nc->acc_control_cs1 : &nc->acc_control_cs0,
			~NANDAC_CS0_WR_ECC_EN);

	/* Loop all caches in page */
	for (cache = 0; cache < num_cache_per_page; cache++, col += NANDCACHE_SIZE) {
		uint32 ext_addr;

		/* Set the page address for the following commands */
		reg = (R_REG(osh, &nc->cmd_ext_address) & ~NANDCMD_EXT_ADDR_MASK);
		ext_addr = ((offset + col) >> 32) & NANDCMD_EXT_ADDR_MASK;
		W_REG(osh, &nc->cmd_ext_address, (reg | ext_addr));
		W_REG(osh, &nc->cmd_address, (uint32)offset + col);

		/* Set controller to Little Endian mode for copying */
		OR_REG(osh, &ai->ioctrl, NAND_APB_LITTLE_ENDIAN);

		/* Copy sub-page data */
		from = (uint32 *)(buf + col);
		for (i = 0; i < (NANDCACHE_SIZE / 4); i++, from++)
			W_REG(osh, &nc->flash_cache[i], *from);

		/* Set spare area is written at each cache start */
		if (oob_from) {
			/* Fill spare area write cache */
			wr_oob_byte = _nandcore_oobbyte_per_cache(nfl, cache, spare_per_cache);

			left_oob_byte = wr_oob_byte % 4;

			/* Pay attention to natural address alignment access;
			 * oob_from may be unaligned, so stage each word in 'reg'
			 */
			for (i = 0; i < (wr_oob_byte / 4); i++) {
				memcpy((void *)&reg, (void *)oob_from, 4);
				W_REG(osh, &nc->spare_area_write_ofs[i], reg);
				oob_from += 4;
			}

			if (left_oob_byte != 0) {
				/* Pad the trailing partial word with 0xff */
				reg = 0xffffffff;
				memcpy((void *)&reg, (void *)oob_from,
					left_oob_byte);
				W_REG(osh, &nc->spare_area_write_ofs[i], reg);
				oob_from += left_oob_byte;
				i++;
			}

			/* Fill the remainder of the spare cache with 0xff */
			for (; i < (NANDSPARECACHE_SIZE / 4); i ++)
				W_REG(osh, &nc->spare_area_write_ofs[i],
					0xffffffff);
		}
		else {
			/* Write 0xffffffff to spare_area_write_ofs register
			 * to prevent old spare_area_write_ofs value write
			 * when we issue NANDCMD_PAGE_PROG.
			 */
			for (i = 0; i < (NANDSPARECACHE_SIZE / 4); i++)
				W_REG(osh, &nc->spare_area_write_ofs[i],
					0xffffffff);
		}

		/* Return to Big Endian mode for commands etc */
		AND_REG(osh, &ai->ioctrl, ~NAND_APB_LITTLE_ENDIAN);

		/* Push data into internal cache */
		nandcore_cmd(osh, nc, NANDCMD_PAGE_PROG);

		ret = nandcore_poll(nfl->sih, nc);
		if (ret < 0)
			goto err;
	}

err:
	/* Enable WP */
	OR_REG(osh, &nc->cs_nand_select, NANDCSEL_NAND_WP);

	return ret;
}
368
/* TRUE until nandcore_init() completes once; gates the one-time banner print */
static bool firsttime = TRUE;
370
371static char *
372nandcore_check_id(uint8 *id)
373{
374	char *name = NULL;
375
376	switch (id[0]) {
377	case NFL_VENDOR_AMD:
378		name = "AMD";
379		break;
380	case NFL_VENDOR_NUMONYX:
381		name = "Numonyx";
382		break;
383	case NFL_VENDOR_MICRON:
384		name = "Micron";
385		break;
386	case NFL_VENDOR_TOSHIBA:
387		name = "Toshiba";
388		break;
389	case NFL_VENDOR_HYNIX:
390		name = "Hynix";
391		break;
392	case NFL_VENDOR_SAMSUNG:
393		name = "Samsung";
394		break;
395	case NFL_VENDOR_ESMT:
396		name = "Esmt";
397		break;
398	case NFL_VENDOR_MXIC:
399		name = "Mxic";
400		break;
401	case NFL_VENDOR_ZENTEL:
402		name = "Zentel";
403		break;
404	case NFL_VENDOR_WINBOND:
405		name = "Winbond";
406		break;
407	default:
408		printf("No NAND flash type found\n");
409		break;
410	}
411
412	return name;
413}
414
415static void
416nandcore_override_config(hndnand_t *nfl)
417{
418	nandregs_t *nc = nfl->core;
419	osl_t *osh;
420	uint32 reg;
421
422	ASSERT(nfl->sih);
423	osh = si_osh(nfl->sih);
424
425	/* Samsung K9LCG08U0B */
426	if ((nfl->id[0] == 0xec) && (nfl->id[1] == 0xde) &&
427	    (nfl->id[2] == 0xd5) && (nfl->id[3] == 0x7e) &&
428	    (nfl->id[4] == 0x68) && (nfl->id[5] == 0x44)) {
429		/* Block size, total size */
430		reg = R_REG(osh, &nc->config_cs0);
431		reg &= ~NANDCF_CS0_BLOCK_SIZE_MASK;
432		reg |= (NANDCF_CS0_BLOCK_SIZE_1MB << NANDCF_CS0_BLOCK_SIZE_SHIFT);
433		reg &= ~NANDCF_CS0_DEVICE_SIZE_MASK;
434		reg |= (NANDCF_CS0_DEVICE_SIZE_8GB << NANDCF_CS0_DEVICE_SIZE_SHIFT);
435		W_REG(osh, &nc->config_cs0, reg);
436
437		/* Spare size, sector size and ECC level */
438		reg = R_REG(osh, &nc->acc_control_cs0);
439		reg &= ~NANDAC_CS0_SPARE_AREA_SIZE;
440		reg |= NANDAC_CS0_SPARE_AREA_45B;
441		reg |= NANDAC_CS0_SECTOR_SIZE_1K;
442		reg &= ~NANDAC_CS0_ECC_LEVEL_MASK;
443		reg |= NANDAC_CS0_ECC_LEVEL_20;
444		W_REG(osh, &nc->acc_control_cs0, reg);
445	}
446
447	/* Micron MT29F64G08CBABA */
448	if ((nfl->id[0] == 0x2c) && (nfl->id[1] == 0x64) &&
449	    (nfl->id[2] == 0x44) && (nfl->id[3] == 0x4b) &&
450	    (nfl->id[4] == 0xa9)) {
451		/* Spare size, sector size and ECC level */
452		reg = R_REG(osh, &nc->acc_control_cs0);
453		reg &= ~NANDAC_CS0_SPARE_AREA_SIZE;
454		reg |= NANDAC_CS0_SPARE_AREA_45B;
455		reg |= NANDAC_CS0_SECTOR_SIZE_1K;
456		reg &= ~NANDAC_CS0_ECC_LEVEL_MASK;
457		reg |= NANDAC_CS0_ECC_LEVEL_20;
458		W_REG(osh, &nc->acc_control_cs0, reg);
459	}
460}
461
/* Tighten the controller timing registers for parts found in
 * nandpart_timing_list (matched on the first 5 ID bytes). Values are
 * converted from nanoseconds to controller clocks, rounding up.
 * Does nothing for unknown parts.
 */
static void
nandcore_optimize_timing(hndnand_t *nfl)
{
	nandregs_t *nc = nfl->core;
	osl_t *osh;
	struct nandpart_timing_info *info = nandpart_timing_list;
	uint32 reg, tmp_val;
	uint32 clk_select, ns, divisor;

	ASSERT(nfl->sih);
	osh = si_osh(nfl->sih);

	for (; info->name != NULL; info++) {
		if (memcmp(nfl->id, info->id, 5) == 0)
			break;
	}

	if (!info->name)
		return;

	/* clk_select 0: 8ns tick, fields in units of 2 ticks;
	 * otherwise: 4ns tick, fields in units of 4 ticks
	 */
	reg = R_REG(osh, nfl->chipidx ? &nc->timing_2_cs1 : &nc->timing_2_cs0);
	clk_select = (reg & NANDTIMING2_CLK_SEL_MASK) >> NANDTIMING2_CLK_SEL_SHIFT;
	ns = (clk_select == 0) ? 8 : 4;
	divisor = (clk_select == 0) ? 2 : 4;

	/* Optimize nand_timing_1: each field is ceil(t / ns), with tCS and
	 * tADL further divided (rounding up) by the field granularity
	 */
	reg = ((info->tWP + (ns - 1)) / ns) << NANDTIMING1_TWP_SHIFT;
	reg |= ((info->tWH + (ns - 1)) / ns) << NANDTIMING1_TWH_SHIFT;
	reg |= ((info->tRP + (ns - 1)) / ns) << NANDTIMING1_TRP_SHIFT;
	reg |= ((info->tREH + (ns - 1)) / ns) << NANDTIMING1_TREH_SHIFT;
	tmp_val = (((info->tCS + (ns - 1)) / ns) + (divisor - 1)) / divisor;
	reg |= tmp_val << NANDTIMING1_TCS_SHIFT;
	reg |= ((info->tCLH + (ns - 1)) / ns) << NANDTIMING1_TCLH_SHIFT;
	/* tALH must also satisfy tWH */
	tmp_val = (info->tALH > info->tWH) ? info->tALH : info->tWH;
	reg |= ((tmp_val + (ns - 1)) / ns) << NANDTIMING1_TALH_SHIFT;
	tmp_val = (((info->tADL + (ns - 1)) / ns) + (divisor - 1)) / divisor;
	/* tADL field is 4 bits wide; clamp */
	tmp_val = (tmp_val > 0xf) ? 0xf : tmp_val;
	reg |= tmp_val << NANDTIMING1_TADL_SHIFT;
	W_REG(osh, nfl->chipidx ? &nc->timing_1_cs1 : &nc->timing_1_cs0, reg);

	/* Optimize nand_timing_2 */
	reg = clk_select << NANDTIMING2_CLK_SEL_SHIFT;
	/* NOTE(review): tWB uses '- (ns - 1)' (round down) while every other
	 * conversion rounds up with '+ (ns - 1)'; confirm against the
	 * controller datasheet whether this asymmetry is intentional.
	 */
	tmp_val = (((info->tWB - (ns - 1)) / ns) + (divisor - 1)) / divisor;
	reg |= tmp_val << NANDTIMING2_TWB_SHIFT;
	tmp_val = (((info->tWHR + (ns - 1)) / ns) + (divisor - 1)) / divisor;
	reg |= tmp_val << NANDTIMING2_TWHR_SHIFT;
	/* tREAD is capped by tRP + tREH */
	tmp_val = info->tRP + info->tREH;
	tmp_val = (info->tREAD > tmp_val) ? tmp_val : info->tREAD;
	reg |= ((tmp_val + (ns - 1)) / ns) << NANDTIMING2_TREAD_SHIFT;
	W_REG(osh, nfl->chipidx ? &nc->timing_2_cs1 : &nc->timing_2_cs0, reg);

	printf("Optimize %s timing.\n", info->name);

	return;
}
517
/* Initialize nand flash access.
 *
 * Discovers the NAND core, resets the part, reads its ID, derives the
 * geometry (page/block/device size, spare, sector size, ECC level) from
 * the strapped controller configuration, and fills in the function table
 * of the private singleton 'nandcore'.
 *
 * Returns a pointer to the singleton on success, NULL on any failure.
 * Subsequent calls return the already-initialized singleton.
 */
hndnand_t *
nandcore_init(si_t *sih)
{
	nandregs_t *nc;
	aidmp_t *ai;
	uint32 id, id2;
	char *name = "";
	osl_t *osh;
	int i;
	uint32 ncf, val;
	uint32 acc_control;

	ASSERT(sih);

	/* Only support chipcommon revision == 42 for now */
	if (sih->ccrev != 42)
		return NULL;

	if ((nc = (nandregs_t *)si_setcore(sih, NS_NAND_CORE_ID, 0)) == NULL)
		return NULL;

	/* No device ID latched at all means no flash attached */
	if (R_REG(NULL, &nc->flash_device_id) == 0)
		return NULL;

	/* Already initialized once: return the cached singleton */
	if (!firsttime && nandcore.size)
		return &nandcore;

	osh = si_osh(sih);
	bzero(&nandcore, sizeof(nandcore));

	nandcore.sih = sih;
	nandcore.core = (void *)nc;
	nandcore.wrap = si_wrapperregs(sih);
	nandcore.read = nandcore_read;
	nandcore.write = nandcore_write;
	nandcore.erase = nandcore_erase;
	nandcore.checkbadb = nandcore_checkbadb;
	nandcore.markbadb = nandcore_mark_badb;

#ifndef _CFE_
	nandcore.dev_ready = nandcore_dev_ready;
	nandcore.select_chip = nandcore_select_chip;
	nandcore.cmdfunc = nandcore_cmdfunc;
	nandcore.waitfunc = nandcore_waitfunc;
	nandcore.read_oob = nandcore_read_oob;
	nandcore.write_oob = nandcore_write_oob;
	nandcore.read_page = nandcore_read_page;
	nandcore.write_page = nandcore_write_page;
	nandcore.cmd_read_byte = nandcore_cmd_read_byte;
#endif

	/* For some nand part, requires to do reset before the other command */
	nandcore_cmd(osh, nc, NANDCMD_FLASH_RESET);
	if (nandcore_poll(sih, nc) < 0) {
		return NULL;
	}

	nandcore_cmd(osh, nc, NANDCMD_ID_RD);
	if (nandcore_poll(sih, nc) < 0) {
		return NULL;
	}

	ai = (aidmp_t *)nandcore.wrap;

	/* Toggle as little endian so the ID registers unpack byte-wise */
	OR_REG(osh, &ai->ioctrl, NAND_APB_LITTLE_ENDIAN);

	id = R_REG(osh, &nc->flash_device_id);
	id2 = R_REG(osh, &nc->flash_device_id_ext);

	/* Toggle as big endian */
	AND_REG(osh, &ai->ioctrl, ~NAND_APB_LITTLE_ENDIAN);

	/* Unpack the 8 ID bytes from the two 32-bit registers */
	for (i = 0; i < 4; i++) {
		nandcore.id[i] = (id >> (8*i)) & 0xff;
		nandcore.id[i + 4] = (id2 >> (8*i)) & 0xff;
	}

	name = nandcore_check_id(nandcore.id);
	if (name == NULL)
		return NULL;
	nandcore.type = nandcore.id[0];

	/* Override configuration for specific nand flash */
	nandcore_override_config(&nandcore);

	ncf = R_REG(osh, &nc->config_cs0);
	/*  Page size (# of bytes) */
	val = (ncf & NANDCF_CS0_PAGE_SIZE_MASK) >> NANDCF_CS0_PAGE_SIZE_SHIFT;
	switch (val) {
	case 0:
		nandcore.pagesize = 512;
		break;
	case 1:
		nandcore.pagesize = (1 << 10) * 2;
		break;
	case 2:
		nandcore.pagesize = (1 << 10) * 4;
		break;
	case 3:
		nandcore.pagesize = (1 << 10) * 8;
		break;
	}
	/* Block size (# of bytes) */
	val = (ncf & NANDCF_CS0_BLOCK_SIZE_MASK) >> NANDCF_CS0_BLOCK_SIZE_SHIFT;
	switch (val) {
	case 0:
		nandcore.blocksize = (1 << 10) * 8;
		break;
	case 1:
		nandcore.blocksize = (1 << 10) * 16;
		break;
	case 2:
		nandcore.blocksize = (1 << 10) * 128;
		break;
	case 3:
		nandcore.blocksize = (1 << 10) * 256;
		break;
	case 4:
		nandcore.blocksize = (1 << 10) * 512;
		break;
	case 5:
		nandcore.blocksize = (1 << 10) * 1024;
		break;
	case 6:
		nandcore.blocksize = (1 << 10) * 2048;
		break;
	default:
		printf("Unknown block size\n");
		return NULL;
	}
	/* NAND flash size in MBytes */
	val = (ncf & NANDCF_CS0_DEVICE_SIZE_MASK) >> NANDCF_CS0_DEVICE_SIZE_SHIFT;
	nandcore.size = (1 << val) * 4;

	/* Get Device I/O data bus width */
	if (ncf & NANDCF_CS0_DEVICE_WIDTH)
		nandcore.width = 1;

	/* Spare size and Spare per cache (# of bytes) */
	acc_control = R_REG(osh, &nc->acc_control_cs0);

	/* Check conflict between 1K sector and page size */
	if (acc_control & NANDAC_CS0_SECTOR_SIZE_1K) {
		nandcore.sectorsize = 1024;
	}
	else
		nandcore.sectorsize = 512;

	if (nandcore.sectorsize == 1024 && nandcore.pagesize == 512) {
		printf("Pin strapping error. Page size is 512, but sector size is 1024\n");
		return NULL;
	}

	/* Get Spare size */
	nandcore.sparesize = acc_control & NANDAC_CS0_SPARE_AREA_SIZE;

	/* Get oob size,  */
	nandcore.oobsize = nandcore.sparesize * (nandcore.pagesize / NANDCACHE_SIZE);

	/* Get ECC level */
	nandcore.ecclevel = (acc_control & NANDAC_CS0_ECC_LEVEL_MASK) >> NANDAC_CS0_ECC_LEVEL_SHIFT;

	/* Adjusted sparesize and eccbytes if sectorsize is 1K */
	if (nandcore.sectorsize == 1024) {
		nandcore.sparesize *= 2;
		/* ECC bytes per sector: ceil(level * 14 bits / 8), doubled sector */
		nandcore.eccbytes = ((nandcore.ecclevel * 14 + 3) >> 2);
	}
	else
		nandcore.eccbytes = ((nandcore.ecclevel * 14 + 7) >> 3);

	/* size is MB, blocksize>>10 is KB: MB*1024 / KB = block count */
	nandcore.numblocks = (nandcore.size * (1 << 10)) / (nandcore.blocksize >> 10);

	/* Get the number of cache per page */
	num_cache_per_page  = nandcore.pagesize / NANDCACHE_SIZE;

	/* Get the spare size per cache */
	spare_per_cache = nandcore.oobsize / num_cache_per_page;

	if (firsttime) {
		printf("Found a %s NAND flash:\n", name);
		printf("Total size:  %uMB\n", nandcore.size);
		printf("Block size:  %uKB\n", (nandcore.blocksize >> 10));
		printf("Page Size:   %uB\n", nandcore.pagesize);
		printf("OOB Size:    %uB\n", nandcore.oobsize);
		printf("Sector size: %uB\n", nandcore.sectorsize);
		printf("Spare size:  %uB\n", nandcore.sparesize);
		printf("ECC level:   %u (%u-bit)\n", nandcore.ecclevel,
			(nandcore.sectorsize == 1024)? nandcore.ecclevel*2 : nandcore.ecclevel);
		printf("Device ID: 0x%2x 0x%2x 0x%2x 0x%2x 0x%2x 0x%02x\n",
			nandcore.id[0], nandcore.id[1], nandcore.id[2],
			nandcore.id[3], nandcore.id[4], nandcore.id[5]);
	}
	firsttime = FALSE;

	/* Memory mapping */
	nandcore.phybase = SI_NS_NANDFLASH;
	nandcore.base = (uint32)REG_MAP(SI_NS_NANDFLASH, SI_FLASH_WINDOW);

/* NOTE(review): debug/test override below forces the spare-area size field
 * to 16, clobbering the strapped/overridden value read above — confirm
 * whether this is intended to ship or is leftover bring-up code.
 */
#if 1 /*Ares Test*/
/* try override spare area size to 16*/
    acc_control = acc_control & ~(NANDAC_CS0_SPARE_AREA_SIZE);
    acc_control = acc_control | (16<<0);
    W_REG(osh, &nc->acc_control_cs0, acc_control);
#endif
	/* For 1KB sector size setting */
	if (R_REG(osh, &nc->acc_control_cs0) & NANDAC_CS0_SECTOR_SIZE_1K) {
		AND_REG(osh, &nc->acc_control_cs0, ~NANDAC_CS0_PARTIAL_PAGE_EN);
		printf("Disable PARTIAL_PAGE_EN\n");
		AND_REG(osh, &nc->acc_control_cs0, ~NANDAC_CS0_FAST_PGM_RDIN);
		printf("Disable FAST_PGM_RDIN\n");
	}

	/* Optimize timing */
	nandcore_optimize_timing(&nandcore);

	return nandcore.size ? &nandcore : NULL;
}
737
738/* Read len bytes starting at offset into buf. Returns number of bytes read. */
739static int
740nandcore_read(hndnand_t *nfl, uint64 offset, uint len, uchar *buf)
741{
742	osl_t *osh;
743	uint8 *to;
744	uint res;
745	uint32 herr = 0, serr = 0;
746
747	ASSERT(nfl->sih);
748	osh = si_osh(nfl->sih);
749
750	to = buf;
751	res = len;
752
753	while (res > 0) {
754		_nandcore_read_page(nfl, offset, to, NULL, TRUE, &herr, &serr);
755
756		res -= nfl->pagesize;
757		offset += nfl->pagesize;
758		to += nfl->pagesize;
759	}
760
761	return (len - res);
762}
763
764/* Poll for command completion. Returns zero when complete. */
765static int
766nandcore_poll(si_t *sih, nandregs_t *nc)
767{
768	osl_t *osh;
769	int i;
770	uint32 pollmask;
771
772	ASSERT(sih);
773	osh = si_osh(sih);
774
775	pollmask = NANDIST_CTRL_READY | NANDIST_FLASH_READY;
776	for (i = 0; i < NANDF_RETRIES; i++) {
777		if ((R_REG(osh, &nc->intfc_status) & pollmask) == pollmask) {
778			return 0;
779		}
780	}
781
782	printf("%s: not ready\n", __FUNCTION__);
783	return -1;
784}
785
786/* Write len bytes starting at offset into buf. Returns number of bytes
787 * written.
788 */
789static int
790nandcore_write(hndnand_t *nfl, uint64 offset, uint len, const uchar *buf)
791{
792	int ret = 0;
793	osl_t *osh;
794	uint res;
795	uint8 *from;
796
797	ASSERT(nfl->sih);
798	osh = si_osh(nfl->sih);
799
800	from = (uint8 *)buf;
801	res = len;
802
803	while (res > 0) {
804		ret = _nandcore_write_page(nfl, offset, from, NULL, TRUE);
805		if (ret < 0)
806			return ret;
807
808		res -= nfl->pagesize;
809		offset += nfl->pagesize;
810		from += nfl->pagesize;
811	}
812
813	if (ret)
814		return ret;
815
816	return (len - res);
817}
818
/* Erase a region. Returns number of bytes scheduled for erasure.
 * Caller should poll for completion.
 *
 * 'offset' must be block-aligned and inside the device. Write-protect is
 * dropped for the operation and re-asserted on every exit path.
 * Returns 0 on success, -1 on bad arguments, timeout, or a failed erase
 * reported by the flash status register.
 */
static int
nandcore_erase(hndnand_t *nfl, uint64 offset)
{
	si_t *sih = nfl->sih;
	nandregs_t *nc = (nandregs_t *)nfl->core;
	osl_t *osh;
	int ret = -1;
	uint8 status = 0;
	uint32 reg;

	ASSERT(sih);

	osh = si_osh(sih);
	/* nfl->size is in MB */
	if ((offset >> 20) >= nfl->size)
		return -1;
	if ((offset & (nfl->blocksize - 1)) != 0) {
		return -1;
	}

	/* Disable WP */
	AND_REG(osh, &nc->cs_nand_select, ~NANDCSEL_NAND_WP);

	/* Set the block address for the following commands */
	reg = (R_REG(osh, &nc->cmd_ext_address) & ~NANDCMD_EXT_ADDR_MASK);
	W_REG(osh, &nc->cmd_ext_address, (reg | ((offset >> 32) & NANDCMD_EXT_ADDR_MASK)));

	W_REG(osh, &nc->cmd_address, (uint32)offset);
	nandcore_cmd(osh, nc, NANDCMD_BLOCK_ERASE);
	if (nandcore_poll(sih, nc) < 0)
		goto exit;

	/* Check status */
	W_REG(osh, &nc->cmd_start, NANDCMD_STATUS_RD);
	if (nandcore_poll(sih, nc) < 0)
		goto exit;

	/* Status bit 0 set means the erase operation failed */
	status = R_REG(osh, &nc->intfc_status) & NANDIST_STATUS;
	if (status & 1)
		goto exit;

	ret = 0;
exit:
	/* Enable WP */
	OR_REG(osh, &nc->cs_nand_select, NANDCSEL_NAND_WP);

	return ret;
}
869
870static int
871nandcore_checkbadb(hndnand_t *nfl, uint64 offset)
872{
873	si_t *sih = nfl->sih;
874	nandregs_t *nc = (nandregs_t *)nfl->core;
875	aidmp_t *ai = (aidmp_t *)nfl->wrap;
876	osl_t *osh;
877	int i, j;
878	uint64 addr;
879	int ret = 0;
880	uint32 reg, oob_bi;
881	unsigned cache, col = 0;
882	uint32 rd_oob_byte, left_oob_byte;
883
884	ASSERT(sih);
885
886	osh = si_osh(sih);
887	if ((offset >> 20) >= nfl->size)
888		return -1;
889	if ((offset & (nfl->blocksize - 1)) != 0) {
890		return -1;
891	}
892
893	/* Enable ECC validation for spare area reads */
894	OR_REG(osh, nfl->chipidx ? &nc->acc_control_cs1 : &nc->acc_control_cs0,
895		NANDAC_CS0_RD_ECC_EN);
896
897	/* Check the first two pages for this block */
898	for (i = 0; i < 2; i++) {
899		addr = offset + (nfl->pagesize * i);
900		col = 0;
901		/* Loop all caches in page */
902		for (cache = 0; cache < num_cache_per_page; cache++, col += NANDCACHE_SIZE) {
903			uint32 ext_addr;
904
905			/* Set the page address for the following commands */
906			reg = (R_REG(osh, &nc->cmd_ext_address) & ~NANDCMD_EXT_ADDR_MASK);
907			ext_addr = ((addr + col) >> 32) & NANDCMD_EXT_ADDR_MASK;
908			W_REG(osh, &nc->cmd_ext_address, (reg | ext_addr));
909			W_REG(osh, &nc->cmd_address, (uint32)addr + col);
910
911			/* Issue page-read command */
912			nandcore_cmd(osh, nc, NANDCMD_PAGE_RD);
913
914			/* Wait for the command to complete */
915			if (nandcore_poll(sih, nc) < 0) {
916				ret = -1;
917				goto exit;
918			}
919
920			/* Set controller to Little Endian mode for copying */
921			OR_REG(osh, &ai->ioctrl, NAND_APB_LITTLE_ENDIAN);
922
923			rd_oob_byte = _nandcore_oobbyte_per_cache(nfl, cache, spare_per_cache);
924
925			left_oob_byte = rd_oob_byte % 4;
926
927			for (j = 0; j < (rd_oob_byte / 4); j++) {
928				if (cache == 0 && j == 0)
929					/* Save bad block indicator */
930					oob_bi = R_REG(osh, &nc->spare_area_read_ofs[0]);
931				else
932					reg = R_REG(osh, &nc->spare_area_read_ofs[j]);
933			}
934
935			if (left_oob_byte != 0) {
936				reg = R_REG(osh, &nc->spare_area_read_ofs[j]);
937			}
938
939			/* Return to Big Endian mode for commands etc */
940			AND_REG(osh, &ai->ioctrl, ~NAND_APB_LITTLE_ENDIAN);
941		}
942
943		/* Check bad block indicator */
944		if ((oob_bi & 0xFF) != 0xFF) {
945			ret = -1;
946			break;
947		}
948	}
949
950exit:
951	return ret;
952}
953
954static int
955nandcore_mark_badb(hndnand_t *nfl, uint64 offset)
956{
957	si_t *sih = nfl->sih;
958	nandregs_t *nc = (nandregs_t *)nfl->core;
959	aidmp_t *ai = (aidmp_t *)nfl->wrap;
960	osl_t *osh;
961	uint64 off;
962	int i, ret = 0;
963	uint32 reg;
964
965	ASSERT(sih);
966
967	osh = si_osh(sih);
968	if ((offset >> 20) >= nfl->size)
969		return -1;
970	if ((offset & (nfl->blocksize - 1)) != 0) {
971		return -1;
972	}
973
974	/* Disable WP */
975	AND_REG(osh, &nc->cs_nand_select, ~NANDCSEL_NAND_WP);
976
977	/* Erase block */
978	W_REG(osh, &nc->cmd_address, offset);
979	nandcore_cmd(osh, nc, NANDCMD_BLOCK_ERASE);
980	if (nandcore_poll(sih, nc) < 0) {
981		ret = -1;
982		/* Still go through the spare area write */
983		/* goto err; */
984	}
985
986	/*
987	 * Enable partial page programming and disable ECC checkbit generation
988	 * for PROGRAM_SPARE_AREA
989	 */
990	reg = R_REG(osh, &nc->acc_control_cs0);
991	reg |= NANDAC_CS0_PARTIAL_PAGE_EN;
992	reg |= NANDAC_CS0_FAST_PGM_RDIN;
993	reg &= ~NANDAC_CS0_WR_ECC_EN;
994	W_REG(osh, &nc->acc_control_cs0, reg);
995
996	for (i = 0; i < 2; i++) {
997		uint32 ext_addr;
998
999		off = offset + (nfl->pagesize * i);
1000
1001		/* Set the block address for the following commands */
1002		reg = (R_REG(osh, &nc->cmd_ext_address) & ~NANDCMD_EXT_ADDR_MASK);
1003		ext_addr = (off >> 32) & NANDCMD_EXT_ADDR_MASK;
1004		W_REG(osh, &nc->cmd_ext_address, (reg | ext_addr));
1005
1006		W_REG(osh, &nc->cmd_address, (uint32)off);
1007
1008		/* Toggle as little endian */
1009		OR_REG(osh, &ai->ioctrl, NAND_APB_LITTLE_ENDIAN);
1010
1011		W_REG(osh, &nc->spare_area_write_ofs[0], 0);
1012		W_REG(osh, &nc->spare_area_write_ofs[1], 0);
1013		W_REG(osh, &nc->spare_area_write_ofs[2], 0);
1014		W_REG(osh, &nc->spare_area_write_ofs[3], 0);
1015
1016		/* Toggle as big endian */
1017		AND_REG(osh, &ai->ioctrl, ~NAND_APB_LITTLE_ENDIAN);
1018
1019		nandcore_cmd(osh, nc, NANDCMD_SPARE_PROG);
1020		if (nandcore_poll(sih, nc) < 0) {
1021			ret = -1;
1022#if BCMDBG
1023			printf("%s: Spare program is not ready\n", __FUNCTION__);
1024#endif
1025			goto err;
1026		}
1027	}
1028
1029err:
1030	/* Restore the default value for spare area write registers */
1031	W_REG(osh, &nc->spare_area_write_ofs[0], 0xffffffff);
1032	W_REG(osh, &nc->spare_area_write_ofs[1], 0xffffffff);
1033	W_REG(osh, &nc->spare_area_write_ofs[2], 0xffffffff);
1034	W_REG(osh, &nc->spare_area_write_ofs[3], 0xffffffff);
1035
1036	/*
1037	 * Disable partial page programming and enable ECC checkbit generation
1038	 * for PROGRAM_SPARE_AREA
1039	 */
1040	reg = R_REG(osh, &nc->acc_control_cs0);
1041	reg &= ~NANDAC_CS0_PARTIAL_PAGE_EN;
1042	reg &= ~NANDAC_CS0_FAST_PGM_RDIN;
1043	reg |= NANDAC_CS0_WR_ECC_EN;
1044	W_REG(osh, &nc->acc_control_cs0, reg);
1045
1046	/* Enable WP */
1047	OR_REG(osh, &nc->cs_nand_select, NANDCSEL_NAND_WP);
1048
1049	return ret;
1050}
1051
1052
1053#ifndef _CFE_
1054/* Functions support brcmnand driver */
1055static void
1056_nandcore_set_cmd_address(hndnand_t *nfl, uint64 addr)
1057{
1058	uint32 reg;
1059	osl_t *osh;
1060	si_t *sih = nfl->sih;
1061	nandregs_t *nc = (nandregs_t *)nfl->core;
1062
1063	ASSERT(sih);
1064	osh = si_osh(sih);
1065
1066	reg = (R_REG(osh, &nc->cmd_ext_address) & ~NANDCMD_EXT_ADDR_MASK);
1067	W_REG(osh, &nc->cmd_ext_address, (reg | ((addr >> 32) & NANDCMD_EXT_ADDR_MASK)));
1068	W_REG(osh, &nc->cmd_address, (uint32)addr);
1069}
1070
1071static int
1072nandcore_dev_ready(hndnand_t *nfl)
1073{
1074	aidmp_t *ai = (aidmp_t *)nfl->wrap;
1075
1076	ASSERT(nfl->sih);
1077
1078	return (R_REG(si_osh(nfl->sih), &ai->iostatus) & NAND_RO_CTRL_READY);
1079}
1080
1081static int
1082nandcore_select_chip(hndnand_t *nfl, int chip)
1083{
1084	uint32 reg;
1085	osl_t *osh;
1086	si_t *sih = nfl->sih;
1087	nandregs_t *nc = (nandregs_t *)nfl->core;
1088
1089	ASSERT(sih);
1090	osh = si_osh(sih);
1091
1092	reg = R_REG(osh, &nc->cmd_ext_address);
1093	reg &= ~NANDCMD_CS_SEL_MASK;
1094	reg |= (chip << NANDCMD_CS_SEL_SHIFT);
1095	W_REG(osh, &nc->cmd_ext_address, reg);
1096
1097	/* Set active chip index */
1098	nfl->chipidx = chip;
1099
1100	return 0;
1101}
1102
1103static int
1104nandcore_cmdfunc(hndnand_t *nfl, uint64 addr, int cmd)
1105{
1106	int ret = 0;
1107	osl_t *osh;
1108	nandregs_t *nc = (nandregs_t *)nfl->core;
1109
1110	ASSERT(nfl->sih);
1111	osh = si_osh(nfl->sih);
1112
1113	switch (cmd) {
1114	case CMDFUNC_ERASE1:
1115		_nandcore_set_cmd_address(nfl, addr);
1116		break;
1117	case CMDFUNC_ERASE2:
1118		/* Disable WP */
1119		AND_REG(osh, &nc->cs_nand_select, ~NANDCSEL_NAND_WP);
1120		nandcore_cmd(osh, nc, NANDCMD_BLOCK_ERASE);
1121		ret = nandcore_waitfunc(nfl, NULL);
1122		/* Enable WP */
1123		OR_REG(osh, &nc->cs_nand_select, NANDCSEL_NAND_WP);
1124		break;
1125	case CMDFUNC_SEQIN:
1126		_nandcore_set_cmd_address(nfl, addr);
1127		break;
1128	case CMDFUNC_READ:
1129		_nandcore_set_cmd_address(nfl, addr);
1130		nandcore_cmd(osh, nc, NANDCMD_PAGE_RD);
1131		ret = nandcore_waitfunc(nfl, NULL);
1132		break;
1133	case CMDFUNC_RESET:
1134		nandcore_cmd(osh, nc, NANDCMD_FLASH_RESET);
1135		ret = nandcore_waitfunc(nfl, NULL);
1136		break;
1137	case CMDFUNC_READID:
1138		nandcore_cmd(osh, nc, NANDCMD_ID_RD);
1139		ret = nandcore_waitfunc(nfl, NULL);
1140		break;
1141	case CMDFUNC_STATUS:
1142		/* Disable WP */
1143		AND_REG(osh, &nc->cs_nand_select, ~NANDCSEL_NAND_WP);
1144		nandcore_cmd(osh, nc, NANDCMD_STATUS_RD);
1145		ret = nandcore_waitfunc(nfl, NULL);
1146		/* Enable WP */
1147		OR_REG(osh, &nc->cs_nand_select, NANDCSEL_NAND_WP);
1148		break;
1149	case CMDFUNC_READOOB:
1150		break;
1151	default:
1152#ifdef BCMDBG
1153		printf("%s: Unknow command 0x%x\n", __FUNCTION__, cmd);
1154#endif
1155		ret = -1;
1156		break;
1157	}
1158
1159	return ret;
1160}
1161
1162/* Return intfc_status FLASH_STATUS if CTRL/FLASH is ready otherwise -1 */
1163static int
1164nandcore_waitfunc(hndnand_t *nfl, int *status)
1165{
1166	int ret;
1167	osl_t *osh;
1168	nandregs_t *nc = (nandregs_t *)nfl->core;
1169
1170	ASSERT(nfl->sih);
1171	osh = si_osh(nfl->sih);
1172
1173	ret = nandcore_poll(nfl->sih, nc);
1174	if (ret == 0 && status)
1175		*status = R_REG(osh, &nc->intfc_status) & NANDIST_STATUS;
1176
1177	return ret;
1178}
1179
/*
 * Read the spare (OOB) area of one flash page into 'oob'.
 *
 * The page is read one controller cache (NANDCACHE_SIZE bytes of data
 * plus its share of spare bytes) at a time; after each PAGE_RD the
 * spare bytes are copied out of the spare_area_read_ofs registers.
 *
 * Returns 0 on success, -1 if the controller does not become ready.
 */
static int
nandcore_read_oob(hndnand_t *nfl, uint64 addr, uint8 *oob)
{
	osl_t *osh;
	si_t *sih = nfl->sih;
	nandregs_t *nc = (nandregs_t *)nfl->core;
	aidmp_t *ai = (aidmp_t *)nfl->wrap;
	uint32 reg;
	unsigned cache, col = 0;
	int i;
	uint8 *to = oob;		/* running output cursor into 'oob' */
	uint32 rd_oob_byte, left_oob_byte;

	ASSERT(sih);
	osh = si_osh(sih);

	/* Enable ECC validation for spare area reads */
	/* NOTE(review): uses the per-chip acc_control register; the bit is
	 * left enabled on exit (and on the early-return error path) —
	 * presumably RD_ECC_EN is the desired steady state; confirm. */
	OR_REG(osh, nfl->chipidx ? &nc->acc_control_cs1 : &nc->acc_control_cs0,
		NANDAC_CS0_RD_ECC_EN);

	/* Loop all caches in page */
	for (cache = 0; cache < num_cache_per_page; cache++, col += NANDCACHE_SIZE) {
		uint32 ext_addr;

		/* Set the page address for the following commands */
		reg = (R_REG(osh, &nc->cmd_ext_address) & ~NANDCMD_EXT_ADDR_MASK);
		ext_addr = ((addr + col) >> 32) & NANDCMD_EXT_ADDR_MASK;
		W_REG(osh, &nc->cmd_ext_address, (reg | ext_addr));
		W_REG(osh, &nc->cmd_address, (uint32)(addr + col));

		/* Issue page-read command */
		nandcore_cmd(osh, nc, NANDCMD_PAGE_RD);

		/* Wait for the command to complete */
		if (nandcore_poll(sih, nc))
			return -1;

		/* Set controller to Little Endian mode for copying */
		OR_REG(osh, &ai->ioctrl, NAND_APB_LITTLE_ENDIAN);

		/* Number of spare bytes this cache contributes */
		rd_oob_byte = _nandcore_oobbyte_per_cache(nfl, cache, spare_per_cache);

		left_oob_byte = rd_oob_byte % 4;

		/* Pay attention to natural address alignment access */
		/* Copy whole 32-bit words first... */
		for (i = 0; i < (rd_oob_byte / 4); i++) {
			reg = R_REG(osh, &nc->spare_area_read_ofs[i]);
			memcpy((void *)to, (void *)&reg, 4);
			to += 4;
		}

		/* ...then the 1-3 trailing bytes from the next register */
		if (left_oob_byte != 0) {
			reg = R_REG(osh, &nc->spare_area_read_ofs[i]);
			memcpy((void *)to, (void *)&reg, left_oob_byte);
			to += left_oob_byte;
		}

		/* Return to Big Endian mode for commands etc */
		AND_REG(osh, &ai->ioctrl, ~NAND_APB_LITTLE_ENDIAN);
	}

	return 0;
}
1243
/*
 * Program the spare (OOB) area of one flash page from 'oob'.
 *
 * For each controller cache in the page: the data cache is filled with
 * 0xff (so only spare bytes are effectively programmed), the spare
 * write registers are loaded from 'oob', and a PAGE_PROG is issued.
 * Write protect is dropped for the duration and restored on exit, as
 * are the partial-page/ECC acc_control settings.
 *
 * Returns 0 on success, -1 if the controller does not become ready.
 */
static int
nandcore_write_oob(hndnand_t *nfl, uint64 addr, uint8 *oob)
{
	osl_t *osh;
	si_t *sih = nfl->sih;
	nandregs_t *nc = (nandregs_t *)nfl->core;
	aidmp_t *ai = (aidmp_t *)nfl->wrap;
	uint32 reg;
	unsigned cache, col = 0;
	int i;
	int ret = 0;
	uint8 *from = oob;		/* running input cursor over 'oob' */
	uint32 wr_oob_byte, left_oob_byte;

	ASSERT(sih);
	osh = si_osh(sih);

	/* Disable WP */
	AND_REG(osh, &nc->cs_nand_select, ~NANDCSEL_NAND_WP);

	/*
	 * Enable partial page programming and disable ECC checkbit generation
	 * for PROGRAM_SPARE_AREA
	 */
	reg = R_REG(osh, nfl->chipidx ? &nc->acc_control_cs1 : &nc->acc_control_cs0);
	/* Partial-page/fast-rdin bits only apply to 512B-sector devices */
	if (nfl->sectorsize == 512) {
		reg |= NANDAC_CS0_PARTIAL_PAGE_EN;
		reg |= NANDAC_CS0_FAST_PGM_RDIN;
	}
	reg &= ~NANDAC_CS0_WR_ECC_EN;
	W_REG(osh, nfl->chipidx ? &nc->acc_control_cs1 : &nc->acc_control_cs0, reg);

	/* Loop all caches in page */
	for (cache = 0; cache < num_cache_per_page; cache++, col += NANDCACHE_SIZE) {
		uint32 ext_addr;

		/* Set the page address for the following commands */
		reg = (R_REG(osh, &nc->cmd_ext_address) & ~NANDCMD_EXT_ADDR_MASK);
		ext_addr = ((addr + col) >> 32) & NANDCMD_EXT_ADDR_MASK;
		W_REG(osh, &nc->cmd_ext_address, (reg | ext_addr));
		W_REG(osh, &nc->cmd_address, (uint32)(addr + col));

		/* Set controller to Little Endian mode for copying */
		OR_REG(osh, &ai->ioctrl, NAND_APB_LITTLE_ENDIAN);

		/* Must fill flash cache with all 0xff in each round */
		for (i = 0; i < (NANDCACHE_SIZE / 4); i++)
			W_REG(osh, &nc->flash_cache[i], 0xffffffff);

		/* Fill spare area write cache */
		wr_oob_byte = _nandcore_oobbyte_per_cache(nfl, cache, spare_per_cache);

		left_oob_byte = wr_oob_byte % 4;

		/* Pay attention to natural address alignment access */
		/* Whole 32-bit words first... */
		for (i = 0; i < (wr_oob_byte / 4); i++) {
			memcpy((void *)&reg, (void *)from, 4);
			W_REG(osh, &nc->spare_area_write_ofs[i], reg);
			from += 4;
		}

		/* ...then the 1-3 trailing bytes, padded with 0xff */
		if (left_oob_byte != 0) {
			reg = 0xffffffff;
			memcpy((void *)&reg, (void *)from, left_oob_byte);
			W_REG(osh, &nc->spare_area_write_ofs[i], reg);
			from += left_oob_byte;
			i++;
		}

		/* Pad the remaining spare write registers with 0xff */
		for (; i < (NANDSPARECACHE_SIZE / 4); i++)
			W_REG(osh, &nc->spare_area_write_ofs[i], 0xffffffff);

		/* Return to Big Endian mode for commands etc */
		AND_REG(osh, &ai->ioctrl, ~NAND_APB_LITTLE_ENDIAN);

		/* Push spare bytes into internal buffer, last goes to flash */
		nandcore_cmd(osh, nc, NANDCMD_PAGE_PROG);

		if (nandcore_poll(sih, nc)) {
			ret = -1;
			goto err;
		}
	}

err:
	/*
	 * Disable partial page programming and enable ECC checkbit generation
	 * for PROGRAM_SPARE_AREA
	 */
	reg = R_REG(osh, nfl->chipidx ? &nc->acc_control_cs1 : &nc->acc_control_cs0);
	if (nfl->sectorsize == 512) {
		reg &= ~NANDAC_CS0_PARTIAL_PAGE_EN;
		reg &= ~NANDAC_CS0_FAST_PGM_RDIN;
	}
	reg |= NANDAC_CS0_WR_ECC_EN;
	W_REG(osh, nfl->chipidx ? &nc->acc_control_cs1 : &nc->acc_control_cs0, reg);

	/* Enable WP */
	OR_REG(osh, &nc->cs_nand_select, NANDCSEL_NAND_WP);

	return ret;
}
1346
1347static int
1348nandcore_read_page(hndnand_t *nfl, uint64 addr, uint8 *buf, uint8 *oob, bool ecc,
1349	uint32 *herr, uint32 *serr)
1350{
1351	return _nandcore_read_page(nfl, addr, buf, oob, ecc, herr, serr);
1352}
1353
1354static int
1355nandcore_write_page(hndnand_t *nfl, uint64 addr, const uint8 *buf, uint8 *oob, bool ecc)
1356{
1357	return _nandcore_write_page(nfl, addr, buf, oob, ecc);
1358}
1359
1360static int
1361nandcore_cmd_read_byte(hndnand_t *nfl, int cmd, int arg)
1362{
1363	int id_ext = arg;
1364	osl_t *osh;
1365	nandregs_t *nc = (nandregs_t *)nfl->core;
1366
1367	ASSERT(nfl->sih);
1368	osh = si_osh(nfl->sih);
1369
1370	switch (cmd) {
1371	case CMDFUNC_READID:
1372		return R_REG(osh, id_ext ? &nc->flash_device_id_ext : &nc->flash_device_id);
1373	case CMDFUNC_STATUS:
1374		return (R_REG(osh, &nc->intfc_status) & NANDIST_STATUS);
1375	default:
1376#ifdef BCMDBG
1377		printf("%s: Unknow command 0x%x\n", __FUNCTION__, cmd);
1378#endif
1379		break;
1380	}
1381
1382	return 0;
1383}
1384#endif /* !_CFE_ */
1385