1/*
2 * Broadcom NAND core interface
3 *
4 * Copyright (C) 2015, Broadcom Corporation. All Rights Reserved.
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
13 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
15 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
16 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 *
18 * $Id: $
19 */
20
21#include <typedefs.h>
22#include <osl.h>
23#include <bcmutils.h>
24#include <siutils.h>
25#include <hndsoc.h>
26#include <sbhndcpu.h>
27#include <sbchipc.h>
28#include <bcmdevs.h>
29#include <nand_core.h>
30#include <hndnand.h>
31#include <hndpmu.h>
32
33#ifdef BCMDBG
34#define	NANDFL_MSG(args)	printf args
35#else
36#define	NANDFL_MSG(args)
37#endif	/* BCMDBG */
38
39#define NANDF_RETRIES	1000000
40
41#define NANDF_SMALL_BADBLOCK_POS	5
42#define NANDF_LARGE_BADBLOCK_POS	0
43
44extern int nospare;
45
/* Per-part NAND timing parameters used by nandcore_optimize_timing() to
 * program the controller's timing_1/timing_2 registers. Field names follow
 * the standard NAND interface timing symbols (see the ONFI specification).
 */
struct nandpart_timing_info {
	const char	*name;	/* human-readable part name (NULL terminates the list) */
	uint8	id[8];		/* device ID bytes; first 5 are matched against the flash ID */
	/* Timing unit is ns for the following parameters */
	uint8	tWP;		/* programmed into NANDTIMING1 tWP field */
	uint8	tWH;		/* programmed into NANDTIMING1 tWH field */
	uint8	tRP;		/* programmed into NANDTIMING1 tRP field */
	uint8	tREH;		/* programmed into NANDTIMING1 tREH field */
	uint8	tCS;		/* programmed into NANDTIMING1 tCS field (divided down) */
	uint8	tCLH;		/* programmed into NANDTIMING1 tCLH field */
	uint8	tALH;		/* programmed into NANDTIMING1 tALH field (max'd with tWH) */
	uint16	tADL;		/* programmed into NANDTIMING1 tADL field (capped at 0xf) */
	uint8	tWB;		/* programmed into NANDTIMING2 tWB field */
	uint8	tWHR;		/* programmed into NANDTIMING2 tWHR field */
	uint8	tREAD;		/* programmed into NANDTIMING2 tREAD field (capped at tRP+tREH) */
};
62
/* Parts with known-good optimized timings. Entries are matched by the first
 * 5 ID bytes in nandcore_optimize_timing(); a NULL name terminates the list.
 */
static struct nandpart_timing_info nandpart_timing_list[] = {
	{"Samsung K9LCG08U0B",
	{0xec, 0xde, 0xd5, 0x7e, 0x68, 0x44},
	11, 11, 11, 11, 25, 5, 5, 300, 100, 176, 37},

	{"Micron MT29F4G08ABADA",
	{0x2c, 0xdc, 0x90, 0x95, 0x56},
	10, 7, 10, 7, 15, 5, 5, 70, 100, 60, 33},

	{"Micron MT29F64G08CBABA",
	{0x2c, 0x64, 0x44, 0x4b, 0xa9},
	50, 30, 50, 30, 70, 20, 20, 200, 200, 120, 57},

	{NULL, }	/* sentinel */
};
78
79/* Private global state */
80static hndnand_t nandcore;
81
82static uint32 num_cache_per_page;
83static uint32 spare_per_cache;
84
85/* Prototype */
86static int nandcore_poll(si_t *sih, nandregs_t *nc);
87
88hndnand_t *nandcore_init(si_t *sih);
89static int nandcore_read(hndnand_t *nfl, uint64 offset, uint len, uchar *buf);
90static int nandcore_write(hndnand_t *nfl, uint64 offset, uint len, const uchar *buf);
91static int nandcore_erase(hndnand_t *nfl, uint64 offset);
92static int nandcore_checkbadb(hndnand_t *nfl, uint64 offset);
93static int nandcore_checkbadb_nospare(hndnand_t *nfl, uint64 offset);
94static int nandcore_mark_badb(hndnand_t *nfl, uint64 offset);
95static int nandcore_read_oob(hndnand_t *nfl, uint64 addr, uint8 *oob);
96
97#ifndef _CFE_
98static int nandcore_dev_ready(hndnand_t *nfl);
99static int nandcore_select_chip(hndnand_t *nfl, int chip);
100static int nandcore_cmdfunc(hndnand_t *nfl, uint64 addr, int cmd);
101static int nandcore_waitfunc(hndnand_t *nfl, int *status);
102static int nandcore_write_oob(hndnand_t *nfl, uint64 addr, uint8 *oob);
103static int nandcore_read_page(hndnand_t *nfl, uint64 addr, uint8 *buf, uint8 *oob, bool ecc,
104	uint32 *herr, uint32 *serr);
105static int nandcore_write_page(hndnand_t *nfl, uint64 addr, const uint8 *buf, uint8 *oob, bool ecc);
106static int nandcore_cmd_read_byte(hndnand_t *nfl, int cmd, int arg);
107#endif /* !_CFE_ */
108
109
/* Issue a nand flash command by writing the opcode to the controller's
 * cmd_start register. Completion must be awaited separately via
 * nandcore_poll().
 */
static INLINE void
nandcore_cmd(osl_t *osh, nandregs_t *nc, uint opcode)
{
	W_REG(osh, &nc->cmd_start, opcode);
}
116
117static bool
118_nandcore_buf_erased(const void *buf, unsigned len)
119{
120	unsigned i;
121	const uint32 *p = buf;
122
123	for (i = 0; i < (len >> 2); i++) {
124		if (p[i] != 0xffffffff)
125			return FALSE;
126	}
127
128	return TRUE;
129}
130
131static INLINE int
132_nandcore_oobbyte_per_cache(hndnand_t *nfl, uint cache, uint32 spare)
133{
134	uint32 oob_byte;
135
136	if (nfl->sectorsize == 512)
137		oob_byte = spare;
138	else {
139		if ((spare * 2) < NANDSPARECACHE_SIZE)
140			oob_byte = spare * 2;
141		else
142			oob_byte = (cache % 2) ?
143			((spare * 2) - NANDSPARECACHE_SIZE) :
144			 NANDSPARECACHE_SIZE;
145	}
146
147	return oob_byte;
148}
149
/* Read a single NAND page at 'offset' into 'buf'.
 *
 * 'offset' must be page-aligned and the whole page must fit within the
 * device, otherwise the call is a silent no-op returning 0. The page is
 * transferred one controller cache (NANDCACHE_SIZE bytes) at a time. If
 * 'oob' is non-NULL the spare-area bytes of each cache are copied out too.
 * When 'ecc' is TRUE the controller's read-ECC is enabled and, on return,
 * *herr (if non-NULL) holds the count of caches with uncorrectable errors
 * and *serr (if non-NULL) the delta of the correctable-error counter.
 *
 * Returns 0 on success (including rejected-argument no-ops), or the
 * negative result of nandcore_poll() on a controller timeout.
 */
static int
_nandcore_read_page(hndnand_t *nfl, uint64 offset, uint8 *buf, uint8 *oob, bool ecc,
	uint32 *herr, uint32 *serr)
{
	osl_t *osh;
	nandregs_t *nc = (nandregs_t *)nfl->core;
	aidmp_t *ai = (aidmp_t *)nfl->wrap;
	unsigned cache, col = 0;
	unsigned hard_err_count = 0;
	uint32 mask, reg, *to;
	uint32 err_soft_reg, err_hard_reg;
	int i, ret;
	uint8 *oob_to = oob;
	uint32 rd_oob_byte, left_oob_byte;

	ASSERT(nfl->sih);

	mask = nfl->pagesize - 1;
	/* Check offset and length */
	if ((offset & mask) != 0)
		return 0;

	/* Reject a page extending past the end of the device (nfl->size is MB) */
	if ((((offset + nfl->pagesize) >> 20) > nfl->size) ||
	    ((((offset + nfl->pagesize) >> 20) == nfl->size) &&
	     (((offset + nfl->pagesize) & ((1 << 20) - 1)) != 0)))
		return 0;

	osh = si_osh(nfl->sih);

	/* Snapshot ECC error counters so per-page deltas can be computed */
	err_hard_reg = R_REG(osh, &nc->uncorr_error_count);
	err_soft_reg = R_REG(osh, &nc->read_error_count);

	/* Enable ECC validation for ecc page reads */
	if (ecc)
		OR_REG(osh, nfl->chipidx ? &nc->acc_control_cs1 : &nc->acc_control_cs0,
			NANDAC_CS0_RD_ECC_EN);
	else
		AND_REG(osh, nfl->chipidx ? &nc->acc_control_cs1 : &nc->acc_control_cs0,
			~NANDAC_CS0_RD_ECC_EN);

	/* Loop all caches in page */
	for (cache = 0; cache < num_cache_per_page; cache++, col += NANDCACHE_SIZE) {
		uint32 ext_addr;

		/* Set the page address for the following commands */
		reg = (R_REG(osh, &nc->cmd_ext_address) & ~NANDCMD_EXT_ADDR_MASK);
		ext_addr = ((offset + col) >> 32) & NANDCMD_EXT_ADDR_MASK;
		W_REG(osh, &nc->cmd_ext_address, (reg | ext_addr));
		W_REG(osh, &nc->cmd_address, (uint32)offset + col);

		/* Issue command to read partial page */
		nandcore_cmd(osh, nc, NANDCMD_PAGE_RD);

		/* Wait for the command to complete */
		if ((ret = nandcore_poll(nfl->sih, nc)) < 0)
			return ret;

		/* Set controller to Little Endian mode for copying */
		OR_REG(osh, &ai->ioctrl, NAND_APB_LITTLE_ENDIAN);

		/* Read page data per cache */
		to = (uint32 *)(buf + col);
		for (i = 0; i < (NANDCACHE_SIZE / 4); i++, to++)
			*to = R_REG(osh, &nc->flash_cache[i]);

		/* Read oob data per cache */
		if (oob_to) {
			rd_oob_byte = _nandcore_oobbyte_per_cache(nfl, cache, spare_per_cache);

			left_oob_byte = rd_oob_byte % 4;

			/* Pay attention to natural address alignment access */
			for (i = 0; i < (rd_oob_byte / 4); i++) {
				reg = R_REG(osh, &nc->spare_area_read_ofs[i]);
				memcpy((void *)oob_to, (void *)&reg, 4);
				oob_to += 4;
			}

			/* Partial trailing word: copy only the valid bytes */
			if (left_oob_byte != 0) {
				reg = R_REG(osh, &nc->spare_area_read_ofs[i]);
				memcpy((void *)oob_to, (void *)&reg, left_oob_byte);
				oob_to += left_oob_byte;
			}
		}

		/* Return to Big Endian mode for commands etc */
		AND_REG(osh, &ai->ioctrl, ~NAND_APB_LITTLE_ENDIAN);

		/* capture hard errors for each partial; an erased (all-0xff)
		 * cache is not counted since ECC is undefined for erased pages
		 */
		if (err_hard_reg != R_REG(osh, &nc->uncorr_error_count)) {
			int era = (R_REG(osh, &nc->intfc_status) & NANDIST_ERASED);
			if ((!era) && (!_nandcore_buf_erased(buf+col, NANDCACHE_SIZE)))
				hard_err_count ++;

			err_hard_reg = R_REG(osh, &nc->uncorr_error_count);
		}
	} /* for cache */

	if (!ecc)
		return 0;

	/* Report hard ECC errors */
	if (herr)
		*herr = hard_err_count;

	/* Get ECC soft error stats */
	if (serr)
		*serr = R_REG(osh, &nc->read_error_count) - err_soft_reg;

	return 0;
}
262
/* Program a single NAND page at 'offset' from 'buf'.
 *
 * 'offset' must be page-aligned and the whole page must fit within the
 * device, otherwise the call is a silent no-op returning 0. Data is pushed
 * one controller cache (NANDCACHE_SIZE bytes) at a time. If 'oob' is
 * non-NULL its bytes fill the spare-area write cache (padded with 0xff);
 * otherwise the spare cache is filled with 0xff so stale contents are not
 * programmed. When 'ecc' is TRUE controller write-ECC generation is on.
 *
 * Write protect is disabled for the duration and re-enabled on all exit
 * paths (the err: label is shared by success and failure).
 *
 * Returns 0 on success or the negative result of nandcore_poll().
 */
static int
_nandcore_write_page(hndnand_t *nfl, uint64 offset, const uint8 *buf, uint8 *oob, bool ecc)
{
	osl_t *osh;
	nandregs_t *nc = (nandregs_t *)nfl->core;
	aidmp_t *ai = (aidmp_t *)nfl->wrap;
	unsigned cache, col = 0;
	uint32 mask, reg, *from;
	int i, ret = 0;
	uint8 *oob_from = oob;
	uint32 wr_oob_byte, left_oob_byte;

	ASSERT(nfl->sih);

	mask = nfl->pagesize - 1;
	/* Check offset and length */
	if ((offset & mask) != 0)
		return 0;

	/* Reject a page extending past the end of the device (nfl->size is MB) */
	if ((((offset + nfl->pagesize) >> 20) > nfl->size) ||
	    ((((offset + nfl->pagesize) >> 20) == nfl->size) &&
	     (((offset + nfl->pagesize) & ((1 << 20) - 1)) != 0)))
		return 0;

	osh = si_osh(nfl->sih);

	/* Disable WP */
	AND_REG(osh, &nc->cs_nand_select, ~NANDCSEL_NAND_WP);

	/* Enable ECC generation for ecc page write, if requested */
	if (ecc)
		OR_REG(osh, nfl->chipidx ? &nc->acc_control_cs1 : &nc->acc_control_cs0,
			NANDAC_CS0_WR_ECC_EN);
	else
		AND_REG(osh, nfl->chipidx ? &nc->acc_control_cs1 : &nc->acc_control_cs0,
			~NANDAC_CS0_WR_ECC_EN);

	/* Loop all caches in page */
	for (cache = 0; cache < num_cache_per_page; cache++, col += NANDCACHE_SIZE) {
		uint32 ext_addr;

		/* Set the page address for the following commands */
		reg = (R_REG(osh, &nc->cmd_ext_address) & ~NANDCMD_EXT_ADDR_MASK);
		ext_addr = ((offset + col) >> 32) & NANDCMD_EXT_ADDR_MASK;
		W_REG(osh, &nc->cmd_ext_address, (reg | ext_addr));
		W_REG(osh, &nc->cmd_address, (uint32)offset + col);

		/* Set controller to Little Endian mode for copying */
		OR_REG(osh, &ai->ioctrl, NAND_APB_LITTLE_ENDIAN);

		/* Copy sub-page data */
		from = (uint32 *)(buf + col);
		for (i = 0; i < (NANDCACHE_SIZE / 4); i++, from++)
			W_REG(osh, &nc->flash_cache[i], *from);

		/* Set spare area is written at each cache start */
		if (oob_from) {
			/* Fill spare area write cache */
			wr_oob_byte = _nandcore_oobbyte_per_cache(nfl, cache, spare_per_cache);

			left_oob_byte = wr_oob_byte % 4;

			/* Pay attention to natural address alignment access */
			for (i = 0; i < (wr_oob_byte / 4); i++) {
				memcpy((void *)&reg, (void *)oob_from, 4);
				W_REG(osh, &nc->spare_area_write_ofs[i], reg);
				oob_from += 4;
			}

			/* Partial trailing word: pad the unused bytes with 0xff */
			if (left_oob_byte != 0) {
				reg = 0xffffffff;
				memcpy((void *)&reg, (void *)oob_from,
					left_oob_byte);
				W_REG(osh, &nc->spare_area_write_ofs[i], reg);
				oob_from += left_oob_byte;
				i++;
			}

			/* Fill the remainder of the spare cache with 0xff */
			for (; i < (NANDSPARECACHE_SIZE / 4); i ++)
				W_REG(osh, &nc->spare_area_write_ofs[i],
					0xffffffff);
		}
		else {
			/* Write 0xffffffff to spare_area_write_ofs register
			 * to prevent old spare_area_write_ofs vale write
			 * when we issue NANDCMD_PAGE_PROG.
			 */
			for (i = 0; i < (NANDSPARECACHE_SIZE / 4); i++)
				W_REG(osh, &nc->spare_area_write_ofs[i],
					0xffffffff);
		}

		/* Return to Big Endian mode for commands etc */
		AND_REG(osh, &ai->ioctrl, ~NAND_APB_LITTLE_ENDIAN);

		/* Push data into internal cache */
		nandcore_cmd(osh, nc, NANDCMD_PAGE_PROG);

		ret = nandcore_poll(nfl->sih, nc);
		if (ret < 0)
			goto err;
	}

err:
	/* Enable WP */
	OR_REG(osh, &nc->cs_nand_select, NANDCSEL_NAND_WP);

	return ret;
}
372
373static bool firsttime = TRUE;
374
375static char *
376nandcore_check_id(uint8 *id)
377{
378	char *name = NULL;
379
380	switch (id[0]) {
381	case NFL_VENDOR_AMD:
382		name = "AMD";
383		break;
384	case NFL_VENDOR_NUMONYX:
385		name = "Numonyx";
386		break;
387	case NFL_VENDOR_MICRON:
388		name = "Micron";
389		break;
390	case NFL_VENDOR_TOSHIBA:
391		name = "Toshiba";
392		break;
393	case NFL_VENDOR_HYNIX:
394		name = "Hynix";
395		break;
396	case NFL_VENDOR_SAMSUNG:
397		name = "Samsung";
398		break;
399	case NFL_VENDOR_ESMT:
400		name = "Esmt";
401		break;
402	case NFL_VENDOR_MXIC:
403		name = "Mxic";
404		break;
405	case NFL_VENDOR_ZENTEL:
406		name = "Zentel";
407		break;
408	case NFL_VENDOR_WINBOND:
409		name = "Winbond";
410		break;
411	default:
412//		printf("No NAND flash type found\n");
413		name = "Unknown";
414		break;
415	}
416
417	return name;
418}
419
/* Apply per-part config/acc_control overrides for specific NAND chips
 * whose controller auto-configuration is known to be wrong, keyed on the
 * device ID bytes read at init. Currently fixes up block/device size,
 * spare size, sector size, and ECC level for the parts listed below.
 */
static void
nandcore_override_config(hndnand_t *nfl)
{
	nandregs_t *nc = nfl->core;
	osl_t *osh;
	uint32 reg;

	ASSERT(nfl->sih);
	osh = si_osh(nfl->sih);

	/* Samsung K9LCG08U0B */
	if ((nfl->id[0] == 0xec) && (nfl->id[1] == 0xde) &&
	    (nfl->id[2] == 0xd5) && (nfl->id[3] == 0x7e) &&
	    (nfl->id[4] == 0x68) && (nfl->id[5] == 0x44)) {
		/* Block size, total size */
		reg = R_REG(osh, &nc->config_cs0);
		reg &= ~NANDCF_CS0_BLOCK_SIZE_MASK;
		reg |= (NANDCF_CS0_BLOCK_SIZE_1MB << NANDCF_CS0_BLOCK_SIZE_SHIFT);
		reg &= ~NANDCF_CS0_DEVICE_SIZE_MASK;
		reg |= (NANDCF_CS0_DEVICE_SIZE_8GB << NANDCF_CS0_DEVICE_SIZE_SHIFT);
		W_REG(osh, &nc->config_cs0, reg);

		/* Spare size, sector size and ECC level */
		reg = R_REG(osh, &nc->acc_control_cs0);
		reg &= ~NANDAC_CS0_SPARE_AREA_SIZE;
		reg |= NANDAC_CS0_SPARE_AREA_45B;
		reg |= NANDAC_CS0_SECTOR_SIZE_1K;
		reg &= ~NANDAC_CS0_ECC_LEVEL_MASK;
		reg |= NANDAC_CS0_ECC_LEVEL_20;
		W_REG(osh, &nc->acc_control_cs0, reg);
	}

	/* Micron MT29F64G08CBABA */
	if ((nfl->id[0] == 0x2c) && (nfl->id[1] == 0x64) &&
	    (nfl->id[2] == 0x44) && (nfl->id[3] == 0x4b) &&
	    (nfl->id[4] == 0xa9)) {
		/* Spare size, sector size and ECC level */
		reg = R_REG(osh, &nc->acc_control_cs0);
		reg &= ~NANDAC_CS0_SPARE_AREA_SIZE;
		reg |= NANDAC_CS0_SPARE_AREA_45B;
		reg |= NANDAC_CS0_SECTOR_SIZE_1K;
		reg &= ~NANDAC_CS0_ECC_LEVEL_MASK;
		reg |= NANDAC_CS0_ECC_LEVEL_20;
		W_REG(osh, &nc->acc_control_cs0, reg);
	}
}
466
/* Program optimized timing_1/timing_2 register values for NAND parts in
 * nandpart_timing_list, matched by the first 5 device ID bytes. Timings
 * are converted from ns to controller clock ticks (ceiling division by
 * 'ns' per tick, with a further 'divisor' for wide fields), both derived
 * from the current clock-select bits. No-op for unlisted parts.
 */
static void
nandcore_optimize_timing(hndnand_t *nfl)
{
	nandregs_t *nc = nfl->core;
	osl_t *osh;
	struct nandpart_timing_info *info = nandpart_timing_list;
	uint32 reg, tmp_val;
	uint32 clk_select, ns, divisor;

	ASSERT(nfl->sih);
	osh = si_osh(nfl->sih);

	/* Find a timing entry matching the first 5 ID bytes */
	for (; info->name != NULL; info++) {
		if (memcmp(nfl->id, info->id, 5) == 0)
			break;
	}

	if (!info->name)
		return;

	/* Derive ns-per-tick and field divisor from the clock-select bits */
	reg = R_REG(osh, nfl->chipidx ? &nc->timing_2_cs1 : &nc->timing_2_cs0);
	clk_select = (reg & NANDTIMING2_CLK_SEL_MASK) >> NANDTIMING2_CLK_SEL_SHIFT;
	ns = (clk_select == 0) ? 8 : 4;
	divisor = (clk_select == 0) ? 2 : 4;

	/* Optimize nand_timing_1 */
	reg = ((info->tWP + (ns - 1)) / ns) << NANDTIMING1_TWP_SHIFT;
	reg |= ((info->tWH + (ns - 1)) / ns) << NANDTIMING1_TWH_SHIFT;
	reg |= ((info->tRP + (ns - 1)) / ns) << NANDTIMING1_TRP_SHIFT;
	reg |= ((info->tREH + (ns - 1)) / ns) << NANDTIMING1_TREH_SHIFT;
	tmp_val = (((info->tCS + (ns - 1)) / ns) + (divisor - 1)) / divisor;
	reg |= tmp_val << NANDTIMING1_TCS_SHIFT;
	reg |= ((info->tCLH + (ns - 1)) / ns) << NANDTIMING1_TCLH_SHIFT;
	/* tALH must also satisfy tWH */
	tmp_val = (info->tALH > info->tWH) ? info->tALH : info->tWH;
	reg |= ((tmp_val + (ns - 1)) / ns) << NANDTIMING1_TALH_SHIFT;
	tmp_val = (((info->tADL + (ns - 1)) / ns) + (divisor - 1)) / divisor;
	/* tADL field is 4 bits wide; clamp */
	tmp_val = (tmp_val > 0xf) ? 0xf : tmp_val;
	reg |= tmp_val << NANDTIMING1_TADL_SHIFT;
	W_REG(osh, nfl->chipidx ? &nc->timing_1_cs1 : &nc->timing_1_cs0, reg);

	/* Optimize nand_timing_2 */
	reg = clk_select << NANDTIMING2_CLK_SEL_SHIFT;
	/* NOTE(review): tWB uses "- (ns - 1)" while every other field uses
	 * "+ (ns - 1)" (ceiling division) — looks like a possible typo;
	 * confirm against the controller programming guide before changing.
	 */
	tmp_val = (((info->tWB - (ns - 1)) / ns) + (divisor - 1)) / divisor;
	reg |= tmp_val << NANDTIMING2_TWB_SHIFT;
	tmp_val = (((info->tWHR + (ns - 1)) / ns) + (divisor - 1)) / divisor;
	reg |= tmp_val << NANDTIMING2_TWHR_SHIFT;
	/* tREAD is capped at tRP + tREH */
	tmp_val = info->tRP + info->tREH;
	tmp_val = (info->tREAD > tmp_val) ? tmp_val : info->tREAD;
	reg |= ((tmp_val + (ns - 1)) / ns) << NANDTIMING2_TREAD_SHIFT;
	W_REG(osh, nfl->chipidx ? &nc->timing_2_cs1 : &nc->timing_2_cs0, reg);

	printf("Optimize %s timing.\n", info->name);
#ifdef BCMDBG
	printf("R_REG(timing_1_cs%d)	= 0x%08x\n",
		nfl->chipidx, R_REG(osh, nfl->chipidx ? &nc->timing_1_cs1 : &nc->timing_1_cs0));
	printf("R_REG(timing_2_cs%d)	= 0x%08x\n",
		nfl->chipidx, R_REG(osh, nfl->chipidx ? &nc->timing_2_cs1 : &nc->timing_2_cs0));
#endif /* BCMDBG */

	return;
}
528
/* Initialize nand flash access.
 *
 * Probes the NAND core (chipcommon rev 42 only), resets the flash, reads
 * the device ID, applies per-part config overrides, decodes the geometry
 * (page/block/device size, spare size, ECC level) from the controller
 * configuration registers into the private 'nandcore' singleton, and
 * programs optimized timings. Results are cached: subsequent calls return
 * the already-populated singleton.
 *
 * Returns a pointer to the populated hndnand_t, or NULL if no supported
 * controller/flash is found or the configuration is invalid.
 */
hndnand_t *
nandcore_init(si_t *sih)
{
	nandregs_t *nc;
	aidmp_t *ai;
	uint32 id, id2;
	char *name = "";
	osl_t *osh;
	int i;
	uint32 ncf, val;
	uint32 acc_control;

	ASSERT(sih);

	/* Only support chipcommon revision == 42 for now */
	if (sih->ccrev != 42)
		return NULL;

	if ((nc = (nandregs_t *)si_setcore(sih, NS_NAND_CORE_ID, 0)) == NULL)
		return NULL;

	/* A zero device ID means no flash is attached */
	if (R_REG(NULL, &nc->flash_device_id) == 0)
		return NULL;

	/* Return the cached state on repeat calls */
	if (!firsttime && nandcore.size)
		return &nandcore;

	osh = si_osh(sih);
	bzero(&nandcore, sizeof(nandcore));

	nandcore.sih = sih;
	nandcore.core = (void *)nc;
	nandcore.wrap = si_wrapperregs(sih);
	nandcore.read = nandcore_read;
	nandcore.write = nandcore_write;
	nandcore.erase = nandcore_erase;
	nandcore.checkbadb = nandcore_checkbadb;
	nandcore.markbadb = nandcore_mark_badb;
	nandcore.read_oob = nandcore_read_oob;

#ifndef _CFE_
	nandcore.dev_ready = nandcore_dev_ready;
	nandcore.select_chip = nandcore_select_chip;
	nandcore.cmdfunc = nandcore_cmdfunc;
	nandcore.waitfunc = nandcore_waitfunc;
	nandcore.write_oob = nandcore_write_oob;
	nandcore.read_page = nandcore_read_page;
	nandcore.write_page = nandcore_write_page;
	nandcore.cmd_read_byte = nandcore_cmd_read_byte;
#endif

	/* For some nand part, requires to do reset before the other command */
	nandcore_cmd(osh, nc, NANDCMD_FLASH_RESET);
	if (nandcore_poll(sih, nc) < 0) {
		return NULL;
	}

	nandcore_cmd(osh, nc, NANDCMD_ID_RD);
	if (nandcore_poll(sih, nc) < 0) {
		return NULL;
	}

	ai = (aidmp_t *)nandcore.wrap;

	/* Toggle as little endian */
	OR_REG(osh, &ai->ioctrl, NAND_APB_LITTLE_ENDIAN);

	id = R_REG(osh, &nc->flash_device_id);
	id2 = R_REG(osh, &nc->flash_device_id_ext);

	/* Toggle as big endian */
	AND_REG(osh, &ai->ioctrl, ~NAND_APB_LITTLE_ENDIAN);

	/* Unpack the two 32-bit ID registers into 8 ID bytes */
	for (i = 0; i < 4; i++) {
		nandcore.id[i] = (id >> (8*i)) & 0xff;
		nandcore.id[i + 4] = (id2 >> (8*i)) & 0xff;
	}

	name = nandcore_check_id(nandcore.id);
	if (name == NULL)
		return NULL;
	nandcore.type = nandcore.id[0];

	/* Override configuration for specific nand flash */
	nandcore_override_config(&nandcore);

	ncf = R_REG(osh, &nc->config_cs0);
	/*  Page size (# of bytes) */
	val = (ncf & NANDCF_CS0_PAGE_SIZE_MASK) >> NANDCF_CS0_PAGE_SIZE_SHIFT;
	switch (val) {
	case 0:
		nandcore.pagesize = 512;
		break;
	case 1:
		nandcore.pagesize = (1 << 10) * 2;
		break;
	case 2:
		nandcore.pagesize = (1 << 10) * 4;
		break;
	case 3:
		nandcore.pagesize = (1 << 10) * 8;
		break;
	}
	/* Block size (# of bytes) */
	val = (ncf & NANDCF_CS0_BLOCK_SIZE_MASK) >> NANDCF_CS0_BLOCK_SIZE_SHIFT;
	switch (val) {
	case 0:
		nandcore.blocksize = (1 << 10) * 8;
		break;
	case 1:
		nandcore.blocksize = (1 << 10) * 16;
		break;
	case 2:
		nandcore.blocksize = (1 << 10) * 128;
		break;
	case 3:
		nandcore.blocksize = (1 << 10) * 256;
		break;
	case 4:
		nandcore.blocksize = (1 << 10) * 512;
		break;
	case 5:
		nandcore.blocksize = (1 << 10) * 1024;
		break;
	case 6:
		nandcore.blocksize = (1 << 10) * 2048;
		break;
	default:
		printf("Unknown block size\n");
		return NULL;
	}
	/* NAND flash size in MBytes */
	val = (ncf & NANDCF_CS0_DEVICE_SIZE_MASK) >> NANDCF_CS0_DEVICE_SIZE_SHIFT;
	nandcore.size = (1 << val) * 4;

	/* Get Device I/O data bus width */
	if (ncf & NANDCF_CS0_DEVICE_WIDTH)
		nandcore.width = 1;

	/* Spare size and Spare per cache (# of bytes) */
	acc_control = R_REG(osh, &nc->acc_control_cs0);

	/* Check conflict between 1K sector and page size */
	if (acc_control & NANDAC_CS0_SECTOR_SIZE_1K) {
		nandcore.sectorsize = 1024;
	}
	else
		nandcore.sectorsize = 512;

	if (nandcore.sectorsize == 1024 && nandcore.pagesize == 512) {
		printf("Pin strapping error. Page size is 512, but sector size is 1024\n");
		return NULL;
	}

	/* Get Spare size */
	nandcore.sparesize = acc_control & NANDAC_CS0_SPARE_AREA_SIZE;

	/* Get oob size,  */
	nandcore.oobsize = nandcore.sparesize * (nandcore.pagesize / NANDCACHE_SIZE);

	/* Get ECC level */
	nandcore.ecclevel = (acc_control & NANDAC_CS0_ECC_LEVEL_MASK) >> NANDAC_CS0_ECC_LEVEL_SHIFT;

	/* Adjusted sparesize and eccbytes if sectorsize is 1K */
	if (nandcore.sectorsize == 1024) {
		nandcore.sparesize *= 2;
		nandcore.eccbytes = ((nandcore.ecclevel * 14 + 3) >> 2);
	}
	else
		nandcore.eccbytes = ((nandcore.ecclevel * 14 + 7) >> 3);

	/* size is MB, blocksize>>10 is KB: MB*1024 / KB = block count */
	nandcore.numblocks = (nandcore.size * (1 << 10)) / (nandcore.blocksize >> 10);

	/* Get the number of cache per page */
	num_cache_per_page  = nandcore.pagesize / NANDCACHE_SIZE;

	/* Get the spare size per cache */
	spare_per_cache = nandcore.oobsize / num_cache_per_page;

	if (firsttime) {
		printf("Found a %s NAND flash:\n", name);
		printf("Total size:  %uMB\n", nandcore.size);
		printf("Block size:  %uKB\n", (nandcore.blocksize >> 10));
		printf("Page Size:   %uB\n", nandcore.pagesize);
		printf("OOB Size:    %uB\n", nandcore.oobsize);
		printf("Sector size: %uB\n", nandcore.sectorsize);
		printf("Spare size:  %uB\n", nandcore.sparesize);
		printf("ECC level:   %u (%u-bit)\n", nandcore.ecclevel,
			(nandcore.sectorsize == 1024)? nandcore.ecclevel*2 : nandcore.ecclevel);
		printf("Device ID: 0x%2x 0x%2x 0x%2x 0x%2x 0x%2x 0x%02x\n",
			nandcore.id[0], nandcore.id[1], nandcore.id[2],
			nandcore.id[3], nandcore.id[4], nandcore.id[5]);
	}
	firsttime = FALSE;

	/* Memory mapping */
	nandcore.phybase = SI_NS_NANDFLASH;
	nandcore.base = (uint32)REG_MAP(SI_NS_NANDFLASH, SI_FLASH_WINDOW);

	/* For 1KB sector size setting */
	if (R_REG(osh, &nc->acc_control_cs0) & NANDAC_CS0_SECTOR_SIZE_1K) {
		AND_REG(osh, &nc->acc_control_cs0, ~NANDAC_CS0_PARTIAL_PAGE_EN);
		printf("Disable PARTIAL_PAGE_EN\n");
		AND_REG(osh, &nc->acc_control_cs0, ~NANDAC_CS0_FAST_PGM_RDIN);
		printf("Disable FAST_PGM_RDIN\n");
	}

	/* Optimize timing */
	nandcore_optimize_timing(&nandcore);

#ifdef BCMDBG
	/* Configuration readback */
	printf("R_REG(nand_revision)	= 0x%08x\n", R_REG(osh, &nc->revision));
	printf("R_REG(cs_nand_select)	= 0x%08x\n", R_REG(osh, &nc->cs_nand_select));
	printf("R_REG(config_cs0)	= 0x%08x\n", R_REG(osh, &nc->config_cs0));
	printf("R_REG(acc_control_cs0)	= 0x%08x\n", R_REG(osh, &nc->acc_control_cs0));
#endif /* BCMDBG */

	return nandcore.size ? &nandcore : NULL;
}
750
751/* Read len bytes starting at offset into buf. Returns number of bytes read. */
752static int
753nandcore_read(hndnand_t *nfl, uint64 offset, uint len, uchar *buf)
754{
755	osl_t *osh;
756	uint8 *to;
757	uint res;
758	uint32 herr = 0, serr = 0;
759
760	ASSERT(nfl->sih);
761	osh = si_osh(nfl->sih);
762
763	to = buf;
764	res = len;
765
766	while (res > 0) {
767		_nandcore_read_page(nfl, offset, to, NULL, TRUE, &herr, &serr);
768
769		res -= nfl->pagesize;
770		offset += nfl->pagesize;
771		to += nfl->pagesize;
772	}
773
774	return (len - res);
775}
776
777/* Poll for command completion. Returns zero when complete. */
778static int
779nandcore_poll(si_t *sih, nandregs_t *nc)
780{
781	osl_t *osh;
782	int i;
783	uint32 pollmask;
784
785	ASSERT(sih);
786	osh = si_osh(sih);
787
788	pollmask = NANDIST_CTRL_READY | NANDIST_FLASH_READY;
789	for (i = 0; i < NANDF_RETRIES; i++) {
790		if ((R_REG(osh, &nc->intfc_status) & pollmask) == pollmask) {
791			return 0;
792		}
793	}
794
795	printf("%s: not ready\n", __FUNCTION__);
796	return -1;
797}
798
799/* Write len bytes starting at offset into buf. Returns number of bytes
800 * written.
801 */
802static int
803nandcore_write(hndnand_t *nfl, uint64 offset, uint len, const uchar *buf)
804{
805	int ret = 0;
806	osl_t *osh;
807	uint res;
808	uint8 *from;
809
810	ASSERT(nfl->sih);
811	osh = si_osh(nfl->sih);
812
813	from = (uint8 *)buf;
814	res = len;
815
816	while (res > 0) {
817		ret = _nandcore_write_page(nfl, offset, from, NULL, TRUE);
818		if (ret < 0)
819			return ret;
820
821		res -= nfl->pagesize;
822		offset += nfl->pagesize;
823		from += nfl->pagesize;
824	}
825
826	if (ret)
827		return ret;
828
829	return (len - res);
830}
831
/* Erase the block containing offset. Returns 0 on success, -1 on error
 * (invalid/unaligned offset, controller timeout, or a device-reported
 * erase failure in the status register).
 */
static int
nandcore_erase(hndnand_t *nfl, uint64 offset)
{
	si_t *sih = nfl->sih;
	nandregs_t *nc = (nandregs_t *)nfl->core;
	osl_t *osh;
	int ret = -1;
	uint8 status = 0;
	uint32 reg;

	ASSERT(sih);

	osh = si_osh(sih);
	/* Reject offsets beyond the device (nfl->size is MB) or not
	 * block-aligned
	 */
	if ((offset >> 20) >= nfl->size)
		return -1;
	if ((offset & (nfl->blocksize - 1)) != 0) {
		return -1;
	}

	/* Disable WP */
	AND_REG(osh, &nc->cs_nand_select, ~NANDCSEL_NAND_WP);

	/* Set the block address for the following commands */
	reg = (R_REG(osh, &nc->cmd_ext_address) & ~NANDCMD_EXT_ADDR_MASK);
	W_REG(osh, &nc->cmd_ext_address, (reg | ((offset >> 32) & NANDCMD_EXT_ADDR_MASK)));

	W_REG(osh, &nc->cmd_address, (uint32)offset);
	nandcore_cmd(osh, nc, NANDCMD_BLOCK_ERASE);
	if (nandcore_poll(sih, nc) < 0)
		goto exit;

	/* Check status */
	W_REG(osh, &nc->cmd_start, NANDCMD_STATUS_RD);
	if (nandcore_poll(sih, nc) < 0)
		goto exit;

	/* Device status bit 0 set means the erase failed */
	status = R_REG(osh, &nc->intfc_status) & NANDIST_STATUS;
	if (status & 1)
		goto exit;

	ret = 0;
exit:
	/* Enable WP */
	OR_REG(osh, &nc->cs_nand_select, NANDCSEL_NAND_WP);

	return ret;
}
882
883static int
884nandcore_checkbadb_nospare(hndnand_t *nfl, uint64 offset)
885{
886        si_t *sih = nfl->sih;
887        nandregs_t *nc = (nandregs_t *)nfl->core;
888        aidmp_t *ai = (aidmp_t *)nfl->wrap;
889        osl_t *osh;
890        int i;
891        uint off;
892        uint32 nand_intfc_status;
893        int ret = 0;
894        uint32 reg;
895
896        ASSERT(sih);
897
898        osh = si_osh(sih);
899        if ((offset >> 20) >= nfl->size)
900                return -1;
901        if ((offset & (nfl->blocksize - 1)) != 0) {
902                return -1;
903        }
904
905        /* Set the block address for the following commands */
906        reg = (R_REG(osh, &nc->cmd_ext_address) & ~NANDCMD_EXT_ADDR_MASK);
907        W_REG(osh, &nc->cmd_ext_address, (reg | (offset >> 32)));
908
909        for (i = 0; i < 2; i++) {
910                off = offset + (nfl->pagesize * i);
911                W_REG(osh, &nc->cmd_address, off);
912                nandcore_cmd(osh, nc, NANDCMD_SPARE_RD);
913                if (nandcore_poll(sih, nc) < 0) {
914                        ret = -1;
915                        goto exit;
916                }
917                nand_intfc_status = R_REG(osh, &nc->intfc_status) & NANDIST_SPARE_VALID;
918                if (nand_intfc_status != NANDIST_SPARE_VALID) {
919                        ret = -1;
920#ifdef BCMDBG
921                        printf("%s: Spare is not valid\n", __FUNCTION__);
922#endif
923                        goto exit;
924                }
925
926                /* Toggle as little endian */
927                OR_REG(osh, &ai->ioctrl, NAND_APB_LITTLE_ENDIAN);
928
929                if ((R_REG(osh, &nc->spare_area_read_ofs[0]) & 0xff) != 0xff) {
930                        ret = -1;
931#ifdef BCMDBG
932                        printf("%s: Bad Block (0x%llx)\n", __FUNCTION__, offset);
933#endif
934                }
935
936                /* Toggle as big endian */
937                AND_REG(osh, &ai->ioctrl, ~NAND_APB_LITTLE_ENDIAN);
938
939                if (ret == -1)
940                        break;
941        }
942
943exit:
944        return ret;
945}
946
947static int
948nandcore_checkbadb(hndnand_t *nfl, uint64 offset)
949{
950	si_t *sih = nfl->sih;
951	nandregs_t *nc = (nandregs_t *)nfl->core;
952	aidmp_t *ai = (aidmp_t *)nfl->wrap;
953	osl_t *osh;
954	int i, j;
955	uint64 addr;
956	int ret = 0;
957	uint32 reg, oob_bi;
958	unsigned cache, col = 0;
959	uint32 rd_oob_byte, left_oob_byte;
960
961	if(nospare)
962		return nandcore_checkbadb_nospare(nfl, offset);
963
964	ASSERT(sih);
965
966	osh = si_osh(sih);
967	if ((offset >> 20) >= nfl->size)
968		return -1;
969	if ((offset & (nfl->blocksize - 1)) != 0) {
970		return -1;
971	}
972
973	/* Enable ECC validation for spare area reads */
974	OR_REG(osh, nfl->chipidx ? &nc->acc_control_cs1 : &nc->acc_control_cs0,
975		NANDAC_CS0_RD_ECC_EN);
976
977	/* Check the first two pages for this block */
978	for (i = 0; i < 2; i++) {
979		addr = offset + (nfl->pagesize * i);
980		col = 0;
981		/* Loop all caches in page */
982		for (cache = 0; cache < num_cache_per_page; cache++, col += NANDCACHE_SIZE) {
983			uint32 ext_addr;
984
985			/* Set the page address for the following commands */
986			reg = (R_REG(osh, &nc->cmd_ext_address) & ~NANDCMD_EXT_ADDR_MASK);
987			ext_addr = ((addr + col) >> 32) & NANDCMD_EXT_ADDR_MASK;
988			W_REG(osh, &nc->cmd_ext_address, (reg | ext_addr));
989			W_REG(osh, &nc->cmd_address, (uint32)addr + col);
990
991			/* Issue page-read command */
992			nandcore_cmd(osh, nc, NANDCMD_PAGE_RD);
993
994			/* Wait for the command to complete */
995			if (nandcore_poll(sih, nc) < 0) {
996				ret = -1;
997				goto exit;
998			}
999
1000			/* Set controller to Little Endian mode for copying */
1001			OR_REG(osh, &ai->ioctrl, NAND_APB_LITTLE_ENDIAN);
1002
1003			rd_oob_byte = _nandcore_oobbyte_per_cache(nfl, cache, spare_per_cache);
1004
1005			left_oob_byte = rd_oob_byte % 4;
1006
1007			for (j = 0; j < (rd_oob_byte / 4); j++) {
1008				if (cache == 0 && j == 0)
1009					/* Save bad block indicator */
1010					oob_bi = R_REG(osh, &nc->spare_area_read_ofs[0]);
1011				else
1012					reg = R_REG(osh, &nc->spare_area_read_ofs[j]);
1013			}
1014
1015			if (left_oob_byte != 0) {
1016				reg = R_REG(osh, &nc->spare_area_read_ofs[j]);
1017			}
1018
1019			/* Return to Big Endian mode for commands etc */
1020			AND_REG(osh, &ai->ioctrl, ~NAND_APB_LITTLE_ENDIAN);
1021		}
1022
1023		/* Check bad block indicator */
1024		if ((oob_bi & 0xFF) != 0xFF) {
1025			ret = -1;
1026#ifdef BCMDBG
1027			printf("%s: Bad Block (0x%llx)\n", __FUNCTION__, offset);
1028#endif
1029			break;
1030		}
1031	}
1032
1033exit:
1034	return ret;
1035}
1036
1037static int
1038nandcore_mark_badb(hndnand_t *nfl, uint64 offset)
1039{
1040	si_t *sih = nfl->sih;
1041	nandregs_t *nc = (nandregs_t *)nfl->core;
1042	aidmp_t *ai = (aidmp_t *)nfl->wrap;
1043	osl_t *osh;
1044	uint64 off;
1045	int i, ret = 0;
1046	uint32 reg;
1047
1048	ASSERT(sih);
1049
1050	osh = si_osh(sih);
1051	if ((offset >> 20) >= nfl->size)
1052		return -1;
1053	if ((offset & (nfl->blocksize - 1)) != 0) {
1054		return -1;
1055	}
1056
1057	/* Disable WP */
1058	AND_REG(osh, &nc->cs_nand_select, ~NANDCSEL_NAND_WP);
1059
1060	/* Erase block */
1061	W_REG(osh, &nc->cmd_address, offset);
1062	nandcore_cmd(osh, nc, NANDCMD_BLOCK_ERASE);
1063	if (nandcore_poll(sih, nc) < 0) {
1064		ret = -1;
1065		/* Still go through the spare area write */
1066		/* goto err; */
1067	}
1068
1069	/*
1070	 * Enable partial page programming and disable ECC checkbit generation
1071	 * for PROGRAM_SPARE_AREA
1072	 */
1073	reg = R_REG(osh, &nc->acc_control_cs0);
1074	reg |= NANDAC_CS0_PARTIAL_PAGE_EN;
1075	reg |= NANDAC_CS0_FAST_PGM_RDIN;
1076	reg &= ~NANDAC_CS0_WR_ECC_EN;
1077	W_REG(osh, &nc->acc_control_cs0, reg);
1078
1079	for (i = 0; i < 2; i++) {
1080		uint32 ext_addr;
1081
1082		off = offset + (nfl->pagesize * i);
1083
1084		/* Set the block address for the following commands */
1085		reg = (R_REG(osh, &nc->cmd_ext_address) & ~NANDCMD_EXT_ADDR_MASK);
1086		ext_addr = (off >> 32) & NANDCMD_EXT_ADDR_MASK;
1087		W_REG(osh, &nc->cmd_ext_address, (reg | ext_addr));
1088
1089		W_REG(osh, &nc->cmd_address, (uint32)off);
1090
1091		/* Toggle as little endian */
1092		OR_REG(osh, &ai->ioctrl, NAND_APB_LITTLE_ENDIAN);
1093
1094		W_REG(osh, &nc->spare_area_write_ofs[0], 0);
1095		W_REG(osh, &nc->spare_area_write_ofs[1], 0);
1096		W_REG(osh, &nc->spare_area_write_ofs[2], 0);
1097		W_REG(osh, &nc->spare_area_write_ofs[3], 0);
1098
1099		/* Toggle as big endian */
1100		AND_REG(osh, &ai->ioctrl, ~NAND_APB_LITTLE_ENDIAN);
1101
1102		nandcore_cmd(osh, nc, NANDCMD_SPARE_PROG);
1103		if (nandcore_poll(sih, nc) < 0) {
1104			ret = -1;
1105#if BCMDBG
1106			printf("%s: Spare program is not ready\n", __FUNCTION__);
1107#endif
1108			goto err;
1109		}
1110	}
1111
1112err:
1113	/* Restore the default value for spare area write registers */
1114	W_REG(osh, &nc->spare_area_write_ofs[0], 0xffffffff);
1115	W_REG(osh, &nc->spare_area_write_ofs[1], 0xffffffff);
1116	W_REG(osh, &nc->spare_area_write_ofs[2], 0xffffffff);
1117	W_REG(osh, &nc->spare_area_write_ofs[3], 0xffffffff);
1118
1119	/*
1120	 * Disable partial page programming and enable ECC checkbit generation
1121	 * for PROGRAM_SPARE_AREA
1122	 */
1123	reg = R_REG(osh, &nc->acc_control_cs0);
1124	reg &= ~NANDAC_CS0_PARTIAL_PAGE_EN;
1125	reg &= ~NANDAC_CS0_FAST_PGM_RDIN;
1126	reg |= NANDAC_CS0_WR_ECC_EN;
1127	W_REG(osh, &nc->acc_control_cs0, reg);
1128
1129	/* Enable WP */
1130	OR_REG(osh, &nc->cs_nand_select, NANDCSEL_NAND_WP);
1131
1132	return ret;
1133}
1134
1135
1136#ifndef _CFE_
1137/* Functions support brcmnand driver */
1138static void
1139_nandcore_set_cmd_address(hndnand_t *nfl, uint64 addr)
1140{
1141	uint32 reg;
1142	osl_t *osh;
1143	si_t *sih = nfl->sih;
1144	nandregs_t *nc = (nandregs_t *)nfl->core;
1145
1146	ASSERT(sih);
1147	osh = si_osh(sih);
1148
1149	reg = (R_REG(osh, &nc->cmd_ext_address) & ~NANDCMD_EXT_ADDR_MASK);
1150	W_REG(osh, &nc->cmd_ext_address, (reg | ((addr >> 32) & NANDCMD_EXT_ADDR_MASK)));
1151	W_REG(osh, &nc->cmd_address, (uint32)addr);
1152}
1153
1154static int
1155nandcore_dev_ready(hndnand_t *nfl)
1156{
1157	aidmp_t *ai = (aidmp_t *)nfl->wrap;
1158
1159	ASSERT(nfl->sih);
1160
1161	return (R_REG(si_osh(nfl->sih), &ai->iostatus) & NAND_RO_CTRL_READY);
1162}
1163
1164static int
1165nandcore_select_chip(hndnand_t *nfl, int chip)
1166{
1167	uint32 reg;
1168	osl_t *osh;
1169	si_t *sih = nfl->sih;
1170	nandregs_t *nc = (nandregs_t *)nfl->core;
1171
1172	ASSERT(sih);
1173	osh = si_osh(sih);
1174
1175	reg = R_REG(osh, &nc->cmd_ext_address);
1176	reg &= ~NANDCMD_CS_SEL_MASK;
1177	reg |= (chip << NANDCMD_CS_SEL_SHIFT);
1178	W_REG(osh, &nc->cmd_ext_address, reg);
1179
1180	/* Set active chip index */
1181	nfl->chipidx = chip;
1182
1183	return 0;
1184}
1185
1186static int
1187nandcore_cmdfunc(hndnand_t *nfl, uint64 addr, int cmd)
1188{
1189	int ret = 0;
1190	osl_t *osh;
1191	nandregs_t *nc = (nandregs_t *)nfl->core;
1192
1193	ASSERT(nfl->sih);
1194	osh = si_osh(nfl->sih);
1195
1196	switch (cmd) {
1197	case CMDFUNC_ERASE1:
1198		_nandcore_set_cmd_address(nfl, addr);
1199		break;
1200	case CMDFUNC_ERASE2:
1201		/* Disable WP */
1202		AND_REG(osh, &nc->cs_nand_select, ~NANDCSEL_NAND_WP);
1203		nandcore_cmd(osh, nc, NANDCMD_BLOCK_ERASE);
1204		ret = nandcore_waitfunc(nfl, NULL);
1205		/* Enable WP */
1206		OR_REG(osh, &nc->cs_nand_select, NANDCSEL_NAND_WP);
1207		break;
1208	case CMDFUNC_SEQIN:
1209		_nandcore_set_cmd_address(nfl, addr);
1210		break;
1211	case CMDFUNC_READ:
1212		_nandcore_set_cmd_address(nfl, addr);
1213		nandcore_cmd(osh, nc, NANDCMD_PAGE_RD);
1214		ret = nandcore_waitfunc(nfl, NULL);
1215		break;
1216	case CMDFUNC_RESET:
1217		nandcore_cmd(osh, nc, NANDCMD_FLASH_RESET);
1218		ret = nandcore_waitfunc(nfl, NULL);
1219		break;
1220	case CMDFUNC_READID:
1221		nandcore_cmd(osh, nc, NANDCMD_ID_RD);
1222		ret = nandcore_waitfunc(nfl, NULL);
1223		break;
1224	case CMDFUNC_STATUS:
1225		/* Disable WP */
1226		AND_REG(osh, &nc->cs_nand_select, ~NANDCSEL_NAND_WP);
1227		nandcore_cmd(osh, nc, NANDCMD_STATUS_RD);
1228		ret = nandcore_waitfunc(nfl, NULL);
1229		/* Enable WP */
1230		OR_REG(osh, &nc->cs_nand_select, NANDCSEL_NAND_WP);
1231		break;
1232	case CMDFUNC_READOOB:
1233		break;
1234	default:
1235#ifdef BCMDBG
1236		printf("%s: Unknow command 0x%x\n", __FUNCTION__, cmd);
1237#endif
1238		ret = -1;
1239		break;
1240	}
1241
1242	return ret;
1243}
1244
1245/* Return intfc_status FLASH_STATUS if CTRL/FLASH is ready otherwise -1 */
1246static int
1247nandcore_waitfunc(hndnand_t *nfl, int *status)
1248{
1249	int ret;
1250	osl_t *osh;
1251	nandregs_t *nc = (nandregs_t *)nfl->core;
1252
1253	ASSERT(nfl->sih);
1254	osh = si_osh(nfl->sih);
1255
1256	ret = nandcore_poll(nfl->sih, nc);
1257	if (ret == 0 && status)
1258		*status = R_REG(osh, &nc->intfc_status) & NANDIST_STATUS;
1259
1260	return ret;
1261}
1262#endif
1263
/*
 * Read the full spare (OOB) area of the page at 'addr' into 'oob'.
 *
 * The page is processed one controller cache slice (NANDCACHE_SIZE bytes
 * of data) at a time: each slice gets its own PAGE_RD command, after
 * which that slice's spare bytes are copied out of the controller's
 * spare_area_read registers. Returns 0 on success, -1 on poll timeout.
 */
static int
nandcore_read_oob(hndnand_t *nfl, uint64 addr, uint8 *oob)
{
	osl_t *osh;
	si_t *sih = nfl->sih;
	nandregs_t *nc = (nandregs_t *)nfl->core;
	aidmp_t *ai = (aidmp_t *)nfl->wrap;
	uint32 reg;
	unsigned cache, col = 0;	/* col: byte column of the current slice within the page */
	int i;
	uint8 *to = oob;		/* running output cursor into caller's buffer */
	uint32 rd_oob_byte, left_oob_byte;

	ASSERT(sih);
	osh = si_osh(sih);

	/* Enable ECC validation for spare area reads */
	/* NOTE(review): the NANDAC_CS0_* bit is also applied to
	 * acc_control_cs1 — presumably both registers share the same bit
	 * layout; confirm against the register definitions.
	 */
	OR_REG(osh, nfl->chipidx ? &nc->acc_control_cs1 : &nc->acc_control_cs0,
		NANDAC_CS0_RD_ECC_EN);

	/* Loop all caches in page */
	for (cache = 0; cache < num_cache_per_page; cache++, col += NANDCACHE_SIZE) {
		uint32 ext_addr;

		/* Set the page address for the following commands */
		reg = (R_REG(osh, &nc->cmd_ext_address) & ~NANDCMD_EXT_ADDR_MASK);
		ext_addr = ((addr + col) >> 32) & NANDCMD_EXT_ADDR_MASK;
		W_REG(osh, &nc->cmd_ext_address, (reg | ext_addr));
		W_REG(osh, &nc->cmd_address, (uint32)(addr + col));

		/* Issue page-read command */
		nandcore_cmd(osh, nc, NANDCMD_PAGE_RD);

		/* Wait for the command to complete */
		if (nandcore_poll(sih, nc))
			return -1;

		/* Set controller to Little Endian mode for copying */
		OR_REG(osh, &ai->ioctrl, NAND_APB_LITTLE_ENDIAN);

		/* Number of spare bytes this cache slice contributes */
		rd_oob_byte = _nandcore_oobbyte_per_cache(nfl, cache, spare_per_cache);

		left_oob_byte = rd_oob_byte % 4;

		/* Pay attention to natural address alignment access */
		for (i = 0; i < (rd_oob_byte / 4); i++) {
			reg = R_REG(osh, &nc->spare_area_read_ofs[i]);
			memcpy((void *)to, (void *)&reg, 4);
			to += 4;
		}

		/* Copy the trailing (sub-word) remainder, if any */
		if (left_oob_byte != 0) {
			reg = R_REG(osh, &nc->spare_area_read_ofs[i]);
			memcpy((void *)to, (void *)&reg, left_oob_byte);
			to += left_oob_byte;
		}

		/* Return to Big Endian mode for commands etc */
		AND_REG(osh, &ai->ioctrl, ~NAND_APB_LITTLE_ENDIAN);
	}

	return 0;
}
1327
1328#ifndef _CFE_
1329
/*
 * Program the spare (OOB) area of the page at 'addr' from 'oob'.
 *
 * One cache slice at a time: the data cache is filled with 0xff (so the
 * data area is left unprogrammed — per the original "must fill" note),
 * the slice's spare bytes are loaded into the spare_area_write registers
 * (0xff-padded), and a PAGE_PROG is issued. ECC checkbit generation is
 * turned off for the duration so the raw OOB bytes land as given.
 * Returns 0 on success, -1 on poll timeout.
 */
static int
nandcore_write_oob(hndnand_t *nfl, uint64 addr, uint8 *oob)
{
	osl_t *osh;
	si_t *sih = nfl->sih;
	nandregs_t *nc = (nandregs_t *)nfl->core;
	aidmp_t *ai = (aidmp_t *)nfl->wrap;
	uint32 reg;
	unsigned cache, col = 0;	/* col: byte column of the current slice within the page */
	int i;
	int ret = 0;
	uint8 *from = oob;		/* running input cursor over caller's buffer */
	uint32 wr_oob_byte, left_oob_byte;

	ASSERT(sih);

	osh = si_osh(sih);

	/* Disable WP */
	AND_REG(osh, &nc->cs_nand_select, ~NANDCSEL_NAND_WP);

	/*
	 * Enable partial page programming and disable ECC checkbit generation
	 * for PROGRAM_SPARE_AREA
	 */
	/* NOTE(review): NANDAC_CS0_* bits are applied to acc_control_cs1 as
	 * well — presumably both registers share the same layout; confirm.
	 */
	reg = R_REG(osh, nfl->chipidx ? &nc->acc_control_cs1 : &nc->acc_control_cs0);
	if (nfl->sectorsize == 512) {
		reg |= NANDAC_CS0_PARTIAL_PAGE_EN;
		reg |= NANDAC_CS0_FAST_PGM_RDIN;
	}
	reg &= ~NANDAC_CS0_WR_ECC_EN;
	W_REG(osh, nfl->chipidx ? &nc->acc_control_cs1 : &nc->acc_control_cs0, reg);

	/* Loop all caches in page */
	for (cache = 0; cache < num_cache_per_page; cache++, col += NANDCACHE_SIZE) {
		uint32 ext_addr;

		/* Set the page address for the following commands */
		reg = (R_REG(osh, &nc->cmd_ext_address) & ~NANDCMD_EXT_ADDR_MASK);
		ext_addr = ((addr + col) >> 32) & NANDCMD_EXT_ADDR_MASK;
		W_REG(osh, &nc->cmd_ext_address, (reg | ext_addr));
		W_REG(osh, &nc->cmd_address, (uint32)(addr + col));

		/* Set controller to Little Endian mode for copying */
		OR_REG(osh, &ai->ioctrl, NAND_APB_LITTLE_ENDIAN);

		/* Must fill flash cache with all 0xff in each round */
		for (i = 0; i < (NANDCACHE_SIZE / 4); i++)
			W_REG(osh, &nc->flash_cache[i], 0xffffffff);

		/* Fill spare area write cache */
		wr_oob_byte = _nandcore_oobbyte_per_cache(nfl, cache, spare_per_cache);

		left_oob_byte = wr_oob_byte % 4;

		/* Pay attention to natural address alignment access */
		for (i = 0; i < (wr_oob_byte / 4); i++) {
			memcpy((void *)&reg, (void *)from, 4);
			W_REG(osh, &nc->spare_area_write_ofs[i], reg);
			from += 4;
		}

		/* Trailing sub-word remainder: pad the word with 0xff */
		if (left_oob_byte != 0) {
			reg = 0xffffffff;
			memcpy((void *)&reg, (void *)from, left_oob_byte);
			W_REG(osh, &nc->spare_area_write_ofs[i], reg);
			from += left_oob_byte;
			i++;
		}

		/* Pad the rest of the spare write cache with 0xff */
		for (; i < (NANDSPARECACHE_SIZE / 4); i++)
			W_REG(osh, &nc->spare_area_write_ofs[i], 0xffffffff);

		/* Return to Big Endian mode for commands etc */
		AND_REG(osh, &ai->ioctrl, ~NAND_APB_LITTLE_ENDIAN);

		/* Push spare bytes into internal buffer, last goes to flash */
		nandcore_cmd(osh, nc, NANDCMD_PAGE_PROG);

		if (nandcore_poll(sih, nc)) {
			ret = -1;
			goto err;
		}
	}

	/* Fall-through into err on success is intentional: both paths must
	 * restore the access-control register and re-enable WP.
	 */
err:
	/*
	 * Disable partial page programming and enable ECC checkbit generation
	 * for PROGRAM_SPARE_AREA
	 */
	reg = R_REG(osh, nfl->chipidx ? &nc->acc_control_cs1 : &nc->acc_control_cs0);
	if (nfl->sectorsize == 512) {
		reg &= ~NANDAC_CS0_PARTIAL_PAGE_EN;
		reg &= ~NANDAC_CS0_FAST_PGM_RDIN;
	}
	reg |= NANDAC_CS0_WR_ECC_EN;
	W_REG(osh, nfl->chipidx ? &nc->acc_control_cs1 : &nc->acc_control_cs0, reg);

	/* Enable WP */
	OR_REG(osh, &nc->cs_nand_select, NANDCSEL_NAND_WP);

	return ret;
}
1433
1434static int
1435nandcore_read_page(hndnand_t *nfl, uint64 addr, uint8 *buf, uint8 *oob, bool ecc,
1436	uint32 *herr, uint32 *serr)
1437{
1438	return _nandcore_read_page(nfl, addr, buf, oob, ecc, herr, serr);
1439}
1440
1441static int
1442nandcore_write_page(hndnand_t *nfl, uint64 addr, const uint8 *buf, uint8 *oob, bool ecc)
1443{
1444	return _nandcore_write_page(nfl, addr, buf, oob, ecc);
1445}
1446
1447static int
1448nandcore_cmd_read_byte(hndnand_t *nfl, int cmd, int arg)
1449{
1450	int id_ext = arg;
1451	osl_t *osh;
1452	nandregs_t *nc = (nandregs_t *)nfl->core;
1453
1454	ASSERT(nfl->sih);
1455	osh = si_osh(nfl->sih);
1456
1457	switch (cmd) {
1458	case CMDFUNC_READID:
1459		return R_REG(osh, id_ext ? &nc->flash_device_id_ext : &nc->flash_device_id);
1460	case CMDFUNC_STATUS:
1461		return (R_REG(osh, &nc->intfc_status) & NANDIST_STATUS);
1462	default:
1463#ifdef BCMDBG
1464		printf("%s: Unknow command 0x%x\n", __FUNCTION__, cmd);
1465#endif
1466		break;
1467	}
1468
1469	return 0;
1470}
1471#endif /* !_CFE_ */
1472