1/*
2 * Broadcom NAND flash controller interface
3 *
4 * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved.
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
13 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
15 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
16 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 *
18 * $Id $
19 */
20
21#include <linux/config.h>
22#include <linux/module.h>
23#include <linux/slab.h>
24#include <linux/ioport.h>
25#include <linux/mtd/compatmac.h>
26#include <linux/mtd/mtd.h>
27#include <linux/mtd/nand.h>
28#include <linux/mtd/nand_ecc.h>
29#include <linux/errno.h>
30#include <linux/pci.h>
31#include <linux/delay.h>
32#include <asm/io.h>
33
34#include <typedefs.h>
35#include <osl.h>
36#include <bcmutils.h>
37#include <bcmdevs.h>
38#include <bcmnvram.h>
39#include <siutils.h>
40#include <hndpci.h>
41#include <pcicfg.h>
42#include <hndsoc.h>
43#include <sbchipc.h>
44#include <nflash.h>
45
46#include "brcmnand_priv.h"
47
48struct mutex *partitions_mutex_init(void);
49#ifdef CONFIG_MTD_PARTITIONS
50#include <linux/mtd/partitions.h>
51
52extern int boot_flags(void);
53extern struct mtd_partition * init_brcmnand_mtd_partitions(struct mtd_info *mtd, size_t size);
54#endif
55
56#define PLATFORM_IOFLUSH_WAR()  __sync()
57
58#define BRCMNAND_POLL_TIMEOUT	3000
59
60#define BRCMNAND_CORRECTABLE_ECC_ERROR      (1)
61#define BRCMNAND_SUCCESS                    (0)
62#define BRCMNAND_UNCORRECTABLE_ECC_ERROR    (-1)
63#define BRCMNAND_FLASH_STATUS_ERROR         (-2)
64#define BRCMNAND_TIMED_OUT                  (-3)
65
66#define BRCMNAND_OOBBUF(pbuf) (&((pbuf)->databuf[NAND_MAX_PAGESIZE]))
67
68/*
69 * Number of required ECC bytes per 512B slice
70 */
71static const unsigned int brcmnand_eccbytes[16] = {
72	[BRCMNAND_ECC_DISABLE]  = 0,
73	[BRCMNAND_ECC_BCH_1]    = 2,
74	[BRCMNAND_ECC_BCH_2]    = 4,
75	[BRCMNAND_ECC_BCH_3]    = 5,
76	[BRCMNAND_ECC_BCH_4]    = 7,
77	[BRCMNAND_ECC_BCH_5]    = 9,
78	[BRCMNAND_ECC_BCH_6]    = 10,
79	[BRCMNAND_ECC_BCH_7]    = 12,
80	[BRCMNAND_ECC_BCH_8]    = 13,
81	[BRCMNAND_ECC_BCH_9]    = 15,
82	[BRCMNAND_ECC_BCH_10]   = 17,
83	[BRCMNAND_ECC_BCH_11]   = 18,
84	[BRCMNAND_ECC_BCH_12]   = 20,
85	[BRCMNAND_ECC_RESVD_1]  = 0,
86	[BRCMNAND_ECC_RESVD_2]  = 0,
87	[BRCMNAND_ECC_HAMMING]  = 3,
88};
89
90static const unsigned char ffchars[] = {
91	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
92	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 16 */
93	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
94	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 32 */
95	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
96	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 48 */
97	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
98	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 64 */
99	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
100	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 80 */
101	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
102	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 96 */
103	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
104	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 112 */
105	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
106	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 128 */
107};
108
109static struct nand_ecclayout brcmnand_oob_128 = {
110	.eccbytes = 24,
111	.eccpos =
112	{
113		6, 7, 8,
114		22, 23, 24,
115		38, 39, 40,
116		54, 55, 56,
117		70, 71, 72,
118		86, 87, 88,
119		102, 103, 104,
120		118, 119, 120
121	},
122	.oobfree =
123	{
124		/* 0-1 used for BBT and/or manufacturer bad block marker,
125		 * first slice loses 2 bytes for BBT
126		 */
127		{.offset = 2, .length = 4},
128		{.offset = 9, .length = 13},
129		/* First slice {9,7} 2nd slice {16,6}are combined */
130		/* ST uses 6th byte (offset=5) as Bad Block Indicator,
131		 * in addition to the 1st byte, and will be adjusted at run time
132		 */
133		{.offset = 25, .length = 13}, /* 2nd slice */
134		{.offset = 41, .length = 13}, /* 4th slice */
135		{.offset = 57, .length = 13}, /* 5th slice */
136		{.offset = 73, .length = 13}, /* 6th slice */
137		{.offset = 89, .length = 13}, /* 7th slice */
138		{.offset = 105, .length = 13}, /* 8th slice */
139		{.offset = 121, .length = 7}, /* 9th slice */
140		{.offset = 0, .length = 0} /* End marker */
141	}
142};
143
144static struct nand_ecclayout brcmnand_oob_64 = {
145	.eccbytes = 12,
146	.eccpos =
147	{
148		6, 7, 8,
149		22, 23, 24,
150		38, 39, 40,
151		54, 55, 56
152	},
153	.oobfree =
154	{
155		/* 0-1 used for BBT and/or manufacturer bad block marker,
156		 * first slice loses 2 bytes for BBT
157		 */
158		{.offset = 2, .length = 4},
159		{.offset = 9, .length = 13},
160		/* First slice {9,7} 2nd slice {16,6}are combined */
161		/* ST uses 6th byte (offset=5) as Bad Block Indicator,
162		 * in addition to the 1st byte, and will be adjusted at run time
163		 */
164		{.offset = 25, .length = 13}, /* 2nd slice */
165		{.offset = 41, .length = 13}, /* 3rd slice */
166		{.offset = 57, .length = 7}, /* 4th slice */
167		{.offset = 0, .length = 0} /* End marker */
168	}
169};
170
171/**
172 * brcmnand_oob oob info for 512 page
173 */
174static struct nand_ecclayout brcmnand_oob_16 = {
175	.eccbytes = 3,
176	.eccpos = {6, 7, 8},
177	.oobfree = {
178		{.offset = 2, .length = 3},
179		{.offset = 9, .length = 7}, /* Byte 5 (6th byte) used for BI */
180		{.offset = 0, .length = 0}} /* End marker */
181		/* Bytes offset 4&5 are used by BBT.  Actually only byte 5 is used,
182		 * but in order to accomodate for 16 bit bus width, byte 4 is also not used.
183		 * If we only use byte-width chip, (We did)
184		 * then we can also use byte 4 as free bytes.
185		 */
186};
187
188/* Small page with BCH-4 */
189static struct nand_ecclayout brcmnand_oob_bch4_512 = {
190	.eccbytes = 7,
191	.eccpos = {9, 10, 11, 12, 13, 14, 15},
192	.oobfree = {
193		{.offset = 0, .length = 5},
194		{.offset = 7, .length = 2}, /* Byte 5 (6th byte) used for BI */
195		{.offset = 0, .length = 0}} /* End marker */
196};
197
198/*
199 * 2K page SLC/MLC with BCH-4 ECC, uses 7 ECC bytes per 512B ECC step
200 */
201static struct nand_ecclayout brcmnand_oob_bch4_2k = {
202	.eccbytes = 7 * 4, /* 7 * 4 = 28 bytes */
203	.eccpos =
204	{
205		9, 10, 11, 12, 13, 14, 15,
206		25, 26, 27, 28, 29, 30, 31,
207		41, 42, 43, 44, 45, 46, 47,
208		57, 58, 59, 60, 61, 62, 63
209	},
210	.oobfree =
211	{
212		/* 0 used for BBT and/or manufacturer bad block marker,
213		 * first slice loses 1 byte for BBT
214		 */
215		{.offset = 1, .length = 8}, /* 1st slice loses byte 0 */
216		{.offset = 16, .length = 9}, /* 2nd slice */
217		{.offset = 32, .length = 9}, /* 3rd slice  */
218		{.offset = 48, .length = 9}, /* 4th slice */
219		{.offset = 0, .length = 0} /* End marker */
220	}
221};
222
223
224static void *page_buffer = NULL;
225
226/* Private global state */
227struct brcmnand_mtd brcmnand_info;
228
229static INLINE void
230brcmnand_cmd(osl_t *osh, chipcregs_t *cc, uint opcode)
231{
232	W_REG(osh, &cc->nand_cmd_start, opcode);
233	/* read after write to flush the command */
234	R_REG(osh, &cc->nand_cmd_start);
235}
236
237int brcmnand_ctrl_verify_ecc(struct nand_chip *chip, int state)
238{
239	si_t *sih = brcmnand_info.sih;
240	chipcregs_t *cc = brcmnand_info.cc;
241	osl_t *osh;
242	uint32_t addr, ext_addr;
243	int err = 0;
244
245	if (state != FL_READING)
246		return BRCMNAND_SUCCESS;
247	osh = si_osh(sih);
248	addr = R_REG(osh, &cc->nand_ecc_corr_addr);
249	if (addr) {
250		ext_addr = R_REG(osh, &cc->nand_ecc_corr_addr_x);
251		/* clear */
252		W_REG(osh, &cc->nand_ecc_corr_addr, 0);
253		W_REG(osh, &cc->nand_ecc_corr_addr_x, 0);
254		err = BRCMNAND_CORRECTABLE_ECC_ERROR;
255	}
256	/* In BCH4 case, the controller will report BRCMNAND_UNCORRECTABLE_ECC_ERROR
257	 * but we cannot resolve this issue in this version. In this case, if we don't
258	 * check nand_ecc_unc_addr the process also work smoothly.
259	 */
260	if (sih->ccrev != 38) {
261		addr = R_REG(osh, &cc->nand_ecc_unc_addr);
262		if (addr) {
263			ext_addr = R_REG(osh, &cc->nand_ecc_unc_addr_x);
264			/* clear */
265			W_REG(osh, &cc->nand_ecc_unc_addr, 0);
266			W_REG(osh, &cc->nand_ecc_unc_addr_x, 0);
267			/* If the block was just erased, and have not yet been written to,
268			 * this will be flagged, so this could be a false alarm
269			 */
270			err = BRCMNAND_UNCORRECTABLE_ECC_ERROR;
271		}
272	}
273	return (err);
274}
275
276uint32 brcmnand_poll(uint32 pollmask)
277{
278	si_t *sih = brcmnand_info.sih;
279	chipcregs_t *cc = brcmnand_info.cc;
280	osl_t *osh;
281	uint32 status;
282
283	osh = si_osh(sih);
284	status = R_REG(osh, &cc->nand_intfc_status);
285	status &= pollmask;
286
287	return status;
288}
289
290int brcmnand_cache_is_valid(struct mtd_info *mtd, struct nand_chip *chip, int state)
291{
292	uint32 pollmask = NIST_CTRL_READY | 0x1;
293	unsigned long timeout = msecs_to_jiffies(BRCMNAND_POLL_TIMEOUT);
294	unsigned long now = jiffies;
295	uint32 status = 0;
296	int ret;
297
298	for (;;) {
299		if ((status = brcmnand_poll(pollmask)) != 0) {
300			break;
301		}
302		if (time_after(jiffies, now + timeout)) {
303			status = brcmnand_poll(pollmask);
304			break;
305		}
306		udelay(1);
307	}
308
309	if (status == 0)
310		ret = BRCMNAND_TIMED_OUT;
311	else if (status & 0x1)
312		ret = BRCMNAND_FLASH_STATUS_ERROR;
313	else
314		ret = brcmnand_ctrl_verify_ecc(chip, state);
315
316	return ret;
317}
318
319int brcmnand_spare_is_valid(struct mtd_info *mtd, struct nand_chip *chip, int state)
320{
321	uint32 pollmask = NIST_CTRL_READY;
322	unsigned long timeout = msecs_to_jiffies(BRCMNAND_POLL_TIMEOUT);
323	unsigned long now = jiffies;
324	uint32 status = 0;
325	int ret;
326
327	for (;;) {
328		if ((status = brcmnand_poll(pollmask)) != 0) {
329			break;
330		}
331		if (time_after(jiffies, now + timeout)) {
332			status = brcmnand_poll(pollmask);
333			break;
334		}
335		udelay(1);
336	}
337
338	if (status == 0)
339		ret = 0 /* timed out */;
340	else
341		ret = 1;
342
343	return ret;
344}
345
346/**
347 * nand_release_device - [GENERIC] release chip
348 * @mtd:	MTD device structure
349 *
350 * Deselect, release chip lock and wake up anyone waiting on the device
351 */
352static void brcmnand_release_device(struct mtd_info *mtd)
353{
354	hndnand_enable(brcmnand_info.nfl, 0);
355	mutex_unlock(mtd->mutex);
356}
357
358/**
359 * brcmnand_get_device - [GENERIC] Get chip for selected access
360 * @param chip      the nand chip descriptor
361 * @param mtd       MTD device structure
362 * @param new_state the state which is requested
363 *
364 * Get the device and lock it for exclusive access
365 */
366static int brcmnand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
367{
368	mutex_lock(mtd->mutex);
369	hndnand_enable(brcmnand_info.nfl, 1);
370	return 0;
371}
372
373/**
374 * brcmnand_release_device_bcm4706 - [GENERIC] release chip
375 * @mtd:	MTD device structure
376 *
377 * Deselect, release chip lock and wake up anyone waiting on the device
378 */
379static void
380brcmnand_release_device_bcm4706(struct mtd_info *mtd)
381{
382	mutex_unlock(mtd->mutex);
383}
384
385/**
386 * brcmnand_get_device_bcm4706 - [GENERIC] Get chip for selected access
387 * @param chip      the nand chip descriptor
388 * @param mtd       MTD device structure
389 * @param new_state the state which is requested
390 *
391 * Get the device and lock it for exclusive access
392 */
393static int
394brcmnand_get_device_bcm4706(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
395{
396	mutex_lock(mtd->mutex);
397	return 0;
398}
399
400/**
401 * brcmnand_block_checkbad - [GENERIC] Check if a block is marked bad
402 * @mtd:	MTD device structure
403 * @ofs:	offset from device start
404 * @getchip:	0, if the chip is already selected
405 * @allowbbt:	1, if its allowed to access the bbt area
406 *
407 * Check, if the block is bad. Either by reading the bad block table or
408 * calling of the scan function.
409 */
410static int brcmnand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
411	int allowbbt)
412{
413	struct nand_chip *chip = mtd->priv;
414	int ret;
415
416	if (!chip->bbt)
417		ret = chip->block_bad(mtd, ofs, getchip);
418	else
419		ret = brcmnand_isbad_bbt(mtd, ofs, allowbbt);
420
421	return (ret);
422}
423
424/*
425 * Returns 0 on success
426 */
427static int brcmnand_handle_false_read_ecc_unc_errors(struct mtd_info *mtd,
428	struct nand_chip *chip, uint8_t *buf, uint8_t *oob, uint32_t offset)
429{
430	static uint32_t oobbuf[4];
431	uint32_t *p32 = (oob ?  (uint32_t *)oob :  (uint32_t *)&oobbuf[0]);
432	int ret = 0;
433	uint8_t *oobarea;
434	int erased = 0, allFF = 0;
435	int i;
436	si_t *sih = brcmnand_info.sih;
437	chipcregs_t *cc = brcmnand_info.cc;
438	osl_t *osh;
439
440	osh = si_osh(sih);
441	oobarea = (uint8_t *)p32;
442	for (i = 0; i < 4; i++) {
443		p32[i] = R_REG(osh, (uint32_t *)((uint32_t)&cc->nand_spare_rd0 + (i * 4)));
444	}
445	if (chip->ecc.level == BRCMNAND_ECC_HAMMING) {
446		erased =
447			(oobarea[6] == 0xff && oobarea[7] == 0xff && oobarea[8] == 0xff);
448		allFF =
449			(oobarea[6] == 0x00 && oobarea[7] == 0x00 && oobarea[8] == 0x00);
450	} else if (chip->ecc.level >= BRCMNAND_ECC_BCH_1 &&
451	           chip->ecc.level <= BRCMNAND_ECC_BCH_12) {
452		erased = allFF = 1;
453		/* For BCH-n, the ECC bytes are at the end of the OOB area */
454		for (i = chip->ecc.oobsize - chip->ecc.bytes; i < chip->ecc.oobsize; i++) {
455			erased = erased && (oobarea[i] == 0xff);
456			allFF = allFF && (oobarea[i] == 0x00);
457		}
458	} else {
459		printk("BUG: Unsupported ECC level %d\n", chip->ecc.level);
460		BUG();
461	}
462
463	if (erased || allFF) {
464		/*
465		 * For the first case, the slice is an erased block, and the ECC bytes
466		 * are all 0xFF, for the 2nd, all bytes are 0xFF, so the Hamming Codes
467		 * for it are all zeroes.  The current version of the BrcmNAND
468		 * controller treats these as un-correctable errors.  For either case,
469		 * fill data buffer with 0xff and return success.  The error has
470		 * already been cleared inside brcmnand_verify_ecc.  Both case will be
471		 * handled correctly by the BrcmNand controller in later releases.
472		 */
473		p32 = (uint32_t *)buf;
474		for (i = 0; i < chip->ecc.size/4; i++) {
475			p32[i] = 0xFFFFFFFF;
476		}
477		ret = 0; /* Success */
478	} else {
479		/* Real error: Disturb read returns uncorrectable errors */
480		ret = -EBADMSG;
481		printk("<-- %s: ret -EBADMSG\n", __FUNCTION__);
482	}
483	return ret;
484}
485
486static int brcmnand_posted_read_cache(struct mtd_info *mtd, struct nand_chip *chip,
487	uint8_t *buf, uint8_t *oob, uint32_t offset)
488{
489	uint32_t mask = chip->ecc.size - 1;
490	si_t *sih = brcmnand_info.sih;
491	chipcregs_t *cc = brcmnand_info.cc;
492	osl_t *osh;
493	int valid;
494	uint32_t *to;
495	int ret = 0, i;
496
497	if (offset & mask)
498		return -EINVAL;
499
500	osh = si_osh(sih);
501	W_REG(osh, &cc->nand_cmd_addr, offset);
502	PLATFORM_IOFLUSH_WAR();
503	brcmnand_cmd(osh, cc, NCMD_PAGE_RD);
504	valid = brcmnand_cache_is_valid(mtd, chip, FL_READING);
505
506	switch (valid) {
507	case BRCMNAND_CORRECTABLE_ECC_ERROR:
508	case BRCMNAND_SUCCESS:
509		if (buf) {
510			to = (uint32_t *)buf;
511			PLATFORM_IOFLUSH_WAR();
512			for (i = 0; i < chip->ecc.size; i += 4, to++) {
513				*to = R_REG(osh, &cc->nand_cache_data);
514			}
515		}
516		if (oob) {
517			to = (uint32_t *)oob;
518			PLATFORM_IOFLUSH_WAR();
519			for (i = 0; i < chip->ecc.oobsize; i += 4, to++) {
520				*to = R_REG(osh, (uint32_t *)((uint32_t)&cc->nand_spare_rd0 + i));
521			}
522		}
523		break;
524	case BRCMNAND_UNCORRECTABLE_ECC_ERROR:
525		ret = brcmnand_handle_false_read_ecc_unc_errors(mtd, chip, buf, oob, offset);
526		break;
527	case BRCMNAND_FLASH_STATUS_ERROR:
528		ret = -EBADMSG;
529		break;
530	case BRCMNAND_TIMED_OUT:
531		ret = -ETIMEDOUT;
532		break;
533	default:
534		ret = -EFAULT;
535		break;
536	}
537
538	return (ret);
539}
540
541/**
542 * nand_read_page_hwecc - [REPLACABLE] hardware ecc based page read function
543 * @mtd:	mtd info structure
544 * @chip:	nand chip info structure
545 * @buf:	buffer to store read data
546 *
547 * Not for syndrome calculating ecc controllers which need a special oob layout
548 */
549static int brcmnand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
550	uint8_t *buf)
551{
552	int eccsteps;
553	int data_read = 0;
554	int oob_read = 0;
555	int corrected = 0;
556	int ret = 0;
557	uint32_t offset = chip->pageidx << chip->page_shift;
558	uint8_t *oob = chip->oob_poi;
559
560	for (eccsteps = 0; eccsteps < chip->ecc.steps; eccsteps++) {
561		ret = brcmnand_posted_read_cache(mtd, chip, &buf[data_read],
562			oob ? &oob[oob_read]: NULL, offset + data_read);
563		if (ret == BRCMNAND_CORRECTABLE_ECC_ERROR && !corrected) {
564			mtd->ecc_stats.corrected++;
565			corrected = 1;
566			ret = 0;
567		} else {
568			if (ret < 0)
569				break;
570		}
571		data_read += chip->ecc.size;
572		oob_read += chip->ecc.oobsize;
573	}
574	return (ret);
575}
576
577/**
578 * brcmnand_transfer_oob - [Internal] Transfer oob to client buffer
579 * @chip:	nand chip structure
580 * @oob:	oob destination address
581 * @ops:	oob ops structure
582 * @len:	size of oob to transfer
583 */
584static uint8_t *brcmnand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
585	struct mtd_oob_ops *ops, size_t len)
586{
587	switch (ops->mode) {
588
589	case MTD_OOB_PLACE:
590	case MTD_OOB_RAW:
591		memcpy(oob, chip->oob_poi + ops->ooboffs, len);
592		return oob + len;
593
594	case MTD_OOB_AUTO: {
595		struct nand_oobfree *free = chip->ecc.layout->oobfree;
596		uint32_t boffs = 0, roffs = ops->ooboffs;
597		size_t bytes = 0;
598
599		for (; free->length && len; free++, len -= bytes) {
600			/* Read request not from offset 0 ? */
601			if (unlikely(roffs)) {
602				if (roffs >= free->length) {
603					roffs -= free->length;
604					continue;
605				}
606				boffs = free->offset + roffs;
607				bytes = min_t(size_t, len,
608				              (free->length - roffs));
609				roffs = 0;
610			} else {
611				bytes = min_t(size_t, len, free->length);
612				boffs = free->offset;
613			}
614			memcpy(oob, chip->oob_poi + boffs, bytes);
615			oob += bytes;
616		}
617		return oob;
618	}
619	default:
620		BUG();
621	}
622	return NULL;
623}
624
625/**
626 * brcmnand_do_read_ops - [Internal] Read data with ECC
627 *
628 * @mtd:	MTD device structure
629 * @from:	offset to read from
630 * @ops:	oob ops structure
631 *
632 * Internal function. Called with chip held.
633 */
634static int brcmnand_do_read_ops(struct mtd_info *mtd, loff_t from,
635	struct mtd_oob_ops *ops)
636{
637	int page, realpage, col, bytes, aligned;
638	struct nand_chip *chip = mtd->priv;
639	struct mtd_ecc_stats stats;
640	int ret = 0;
641	uint32_t readlen = ops->len;
642	uint32_t oobreadlen = ops->ooblen;
643	uint8_t *bufpoi, *oob, *buf;
644
645	stats = mtd->ecc_stats;
646
647	realpage = (int)(from >> chip->page_shift);
648	page = realpage & chip->pagemask;
649
650	col = (int)(from & (mtd->writesize - 1));
651
652	buf = ops->datbuf;
653	oob = ops->oobbuf;
654
655	while (1) {
656		bytes = min(mtd->writesize - col, readlen);
657		aligned = (bytes == mtd->writesize);
658
659		/* Is the current page in the buffer ? */
660		if (realpage != chip->pagebuf || oob) {
661			bufpoi = aligned ? buf : chip->buffers->databuf;
662			chip->pageidx = page;
663			/* Now read the page into the buffer */
664			ret = chip->ecc.read_page(mtd, chip, bufpoi);
665			if (ret < 0)
666				break;
667
668			/* Transfer not aligned data */
669			if (!aligned) {
670				chip->pagebuf = realpage;
671				memcpy(buf, chip->buffers->databuf + col, bytes);
672			}
673
674			buf += bytes;
675
676			if (unlikely(oob)) {
677				if (ops->mode != MTD_OOB_RAW) {
678					int toread = min(oobreadlen,
679						chip->ecc.layout->oobavail);
680					if (toread) {
681						oob = brcmnand_transfer_oob(chip,
682							oob, ops, toread);
683						oobreadlen -= toread;
684					}
685				} else
686					buf = brcmnand_transfer_oob(chip,
687						buf, ops, mtd->oobsize);
688			}
689		} else {
690			memcpy(buf, chip->buffers->databuf + col, bytes);
691			buf += bytes;
692		}
693
694		readlen -= bytes;
695
696		if (!readlen)
697			break;
698
699		/* For subsequent reads align to page boundary. */
700		col = 0;
701		/* Increment page address */
702		realpage++;
703
704		page = realpage & chip->pagemask;
705	}
706
707	ops->retlen = ops->len - (size_t) readlen;
708	if (oob)
709		ops->oobretlen = ops->ooblen - oobreadlen;
710
711	if (ret)
712		return ret;
713
714	if (mtd->ecc_stats.failed - stats.failed)
715		return -EBADMSG;
716
717	return  mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
718}
719
720/**
721 * __brcmnand_read - [MTD Interface] MTD compability function for nand_do_read_ecc
722 * @mtd:    MTD device structure
723 * @from:   offset to read from
724 * @len:    number of bytes to read
725 * @retlen: pointer to variable to store the number of read bytes
726 * @buf:    the databuffer to put data
727 *
728 * Get hold of the chip and call nand_do_read
729 */
730int
731__brcmnand_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
732{
733	struct nand_chip *chip = mtd->priv;
734	int ret;
735
736	if ((from + len) > mtd->size)
737		return -EINVAL;
738	if (!len)
739		return 0;
740
741	chip->ops.len = len;
742	chip->ops.datbuf = buf;
743	chip->ops.oobbuf = NULL;
744
745	ret = brcmnand_do_read_ops(mtd, from, &chip->ops);
746
747	*retlen = chip->ops.retlen;
748
749	return ret;
750}
751
752/**
753 * brcmnand_read - [MTD Interface] MTD compability function for nand_do_read_ecc
754 * @mtd:    MTD device structure
755 * @from:   offset to read from
756 * @len:    number of bytes to read
757 * @retlen: pointer to variable to store the number of read bytes
758 * @buf:    the databuffer to put data
759 *
760 * Get hold of the chip and call nand_do_read
761 */
762static int
763brcmnand_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
764{
765	struct nand_chip *chip = mtd->priv;
766	int ret;
767
768	brcmnand_get_device(chip, mtd, FL_READING);
769	ret = __brcmnand_read(mtd, from, len, retlen, buf);
770	brcmnand_release_device(mtd);
771
772	return ret;
773}
774
775static int brcmnand_posted_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
776	uint8_t *oob, uint32_t offset)
777{
778	uint32_t mask = chip->ecc.size - 1;
779	si_t *sih = brcmnand_info.sih;
780	chipcregs_t *cc = brcmnand_info.cc;
781	osl_t *osh;
782	int valid;
783	uint32 *to;
784	int ret = 0, i;
785
786	if (offset & mask)
787		return -EINVAL;
788
789	osh = si_osh(sih);
790	W_REG(osh, &cc->nand_cmd_addr, offset);
791	PLATFORM_IOFLUSH_WAR();
792	brcmnand_cmd(osh, cc, NCMD_SPARE_RD);
793	valid = brcmnand_spare_is_valid(mtd, chip, FL_READING);
794
795	switch (valid) {
796	case 1:
797		if (oob) {
798			to = (uint32 *)oob;
799			for (i = 0; i < chip->ecc.oobsize; i += 4, to++) {
800				*to = R_REG(osh, (uint32_t *)((uint32_t)&cc->nand_spare_rd0 + i));
801			}
802		}
803		break;
804	case 0:
805		ret = -ETIMEDOUT;
806		break;
807	default:
808		ret = -EFAULT;
809		break;
810	}
811	return (ret);
812}
813
814/**
815 * brcmnand_read_oob_hwecc - [REPLACABLE] the most common OOB data read function
816 * @mtd:	mtd info structure
817 * @chip:	nand chip info structure
818 * @page:	page number to read
819 * @sndcmd:	flag whether to issue read command or not
820 */
821static int brcmnand_read_oob_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
822	int page, int sndcmd)
823{
824	int eccsteps;
825	int data_read = 0;
826	int oob_read = 0;
827	int corrected = 0;
828	int ret = 0;
829	uint32_t offset = page << chip->page_shift;
830	uint8_t *oob = chip->oob_poi;
831
832	for (eccsteps = 0; eccsteps < chip->ecc.steps; eccsteps++) {
833		ret = brcmnand_posted_read_oob(mtd, chip, &oob[oob_read], offset + data_read);
834		if (ret == BRCMNAND_CORRECTABLE_ECC_ERROR && !corrected) {
835			mtd->ecc_stats.corrected++;
836			/* Only update stats once per page */
837			corrected = 1;
838			ret = 0;
839		} else {
840			if (ret < 0)
841				break;
842		}
843		data_read += chip->ecc.size;
844		oob_read += chip->ecc.oobsize;
845	}
846
847	return (ret);
848}
849
850/**
851 * brcmnand_do_read_oob - [Intern] NAND read out-of-band
852 * @mtd:	MTD device structure
853 * @from:	offset to read from
854 * @ops:	oob operations description structure
855 *
856 * NAND read out-of-band data from the spare area
857 */
858static int brcmnand_do_read_oob(struct mtd_info *mtd, loff_t from,
859	struct mtd_oob_ops *ops)
860{
861	int page, realpage;
862	struct nand_chip *chip = mtd->priv;
863	int readlen = ops->ooblen;
864	int len;
865	uint8_t *buf = ops->oobbuf;
866	int ret;
867
868	DEBUG(MTD_DEBUG_LEVEL3, "nand_read_oob: from = 0x%08Lx, len = %i\n",
869	      (unsigned long long)from, readlen);
870
871	if (ops->mode == MTD_OOB_AUTO)
872		len = chip->ecc.layout->oobavail;
873	else
874		len = mtd->oobsize;
875
876	if (unlikely(ops->ooboffs >= len)) {
877		DEBUG(MTD_DEBUG_LEVEL0, "nand_read_oob: "
878			"Attempt to start read outside oob\n");
879		return -EINVAL;
880	}
881
882	/* Do not allow reads past end of device */
883	if (unlikely(from >= mtd->size ||
884		ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
885		(from >> chip->page_shift)) * len)) {
886		DEBUG(MTD_DEBUG_LEVEL0, "nand_read_oob: "
887			"Attempt read beyond end of device\n");
888		return -EINVAL;
889	}
890	/* Shift to get page */
891	realpage = (int)(from >> chip->page_shift);
892	page = realpage & chip->pagemask;
893
894	while (1) {
895		ret = chip->ecc.read_oob(mtd, chip, page, 0);
896		if (ret)
897			break;
898		len = min(len, readlen);
899		buf = brcmnand_transfer_oob(chip, buf, ops, len);
900
901		readlen -= len;
902		if (!readlen)
903			break;
904
905		/* Increment page address */
906		realpage++;
907
908		page = realpage & chip->pagemask;
909	}
910
911	ops->oobretlen = ops->ooblen;
912	return (ret);
913}
914
915/**
916 * _brcmnand_read_oob - [MTD Interface] NAND read data and/or out-of-band
917 * @mtd:	MTD device structure
918 * @from:	offset to read from
919 * @ops:	oob operation description structure
920 *
921 * NAND read data and/or out-of-band data
922 */
923
924int __brcmnand_read_oob(struct mtd_info *mtd, loff_t from,
925	struct mtd_oob_ops *ops)
926{
927	int ret = -ENOTSUPP;
928
929	ops->retlen = 0;
930
931	/* Do not allow reads past end of device */
932	if (ops->datbuf && (from + ops->len) > mtd->size) {
933		DEBUG(MTD_DEBUG_LEVEL0, "__brcmnand_read_oob: "
934		      "Attempt read beyond end of device\n");
935		return -EINVAL;
936	}
937
938	switch (ops->mode) {
939	case MTD_OOB_PLACE:
940	case MTD_OOB_AUTO:
941	case MTD_OOB_RAW:
942		if (!ops->datbuf)
943			ret = brcmnand_do_read_oob(mtd, from, ops);
944		else
945			ret = brcmnand_do_read_ops(mtd, from, ops);
946
947		break;
948
949	default:
950		DEBUG(MTD_DEBUG_LEVEL0, "__brcmnand_read_oob: "
951		      "ops->mode unsupport; %x\n", ops->mode);
952		break;
953	}
954
955	return ret;
956}
957
958/**
959 * brcmnand_read_oob - [MTD Interface] NAND read data and/or out-of-band
960 * @mtd:	MTD device structure
961 * @from:	offset to read from
962 * @ops:	oob operation description structure
963 *
964 * NAND read data and/or out-of-band data
965 */
966static int brcmnand_read_oob(struct mtd_info *mtd, loff_t from,
967	struct mtd_oob_ops *ops)
968{
969	struct nand_chip *chip = mtd->priv;
970	int ret;
971
972	brcmnand_get_device(chip, mtd, FL_READING);
973	ret = __brcmnand_read_oob(mtd, from, ops);
974	brcmnand_release_device(mtd);
975
976	return ret;
977}
978
979static int brcmnand_ctrl_write_is_complete(struct mtd_info *mtd, struct nand_chip *chip,
980	int *need_bbt)
981{
982	uint32 pollmask = NIST_CTRL_READY | 0x1;
983	unsigned long timeout = msecs_to_jiffies(BRCMNAND_POLL_TIMEOUT);
984	unsigned long now = jiffies;
985	uint32 status = 0;
986	int ret;
987
988	for (;;) {
989		if ((status = brcmnand_poll(pollmask)) != 0) {
990			break;
991		}
992		if (time_after(jiffies, now + timeout)) {
993			status = brcmnand_poll(pollmask);
994			break;
995		}
996		udelay(1);
997	}
998
999	*need_bbt = 0;
1000	if (status == 0)
1001		ret = 0; /* timed out */
1002	else {
1003		ret = 1;
1004		if (status & 0x1)
1005			*need_bbt = 1;
1006	}
1007
1008	return ret;
1009}
1010
1011/**
1012 * brcmnand_posted_write - [BrcmNAND Interface] Write a buffer to the flash
1013 *  cache
1014 * Assuming brcmnand_get_device() has been called to obtain exclusive lock
1015 *
1016 * @param mtd       MTD data structure
1017 * @param chip	    nand chip info structure
1018 * @param buf       the databuffer to put/get data
1019 * @param oob	    Spare area, pass NULL if not interested
1020 * @param offset    offset to write to, and must be 512B aligned
1021 *
1022 */
1023static int brcmnand_posted_write_cache(struct mtd_info *mtd, struct nand_chip *chip,
1024	const uint8_t *buf, uint8_t *oob, uint32_t offset)
1025{
1026	uint32_t mask = chip->ecc.size - 1;
1027	si_t *sih = brcmnand_info.sih;
1028	chipcregs_t *cc = brcmnand_info.cc;
1029	osl_t *osh;
1030	int i, ret = 0;
1031	uint32_t *from;
1032
1033	if (offset & mask) {
1034		ret = -EINVAL;
1035		goto out;
1036	}
1037
1038	osh = si_osh(sih);
1039	from = (uint32_t *)buf;
1040	for (i = 0; i < chip->ecc.size; i += 4, from++) {
1041		W_REG(osh, &cc->nand_cache_data, *from);
1042	}
1043out:
1044	return (ret);
1045}
1046
1047/**
1048 * brcmnand_write_page_hwecc - [REPLACABLE] hardware ecc based page write function
1049 * @mtd:	mtd info structure
1050 * @chip:	nand chip info structure
1051 * @buf:	data buffer
1052 */
1053static void brcmnand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1054	const uint8_t *buf)
1055{
1056	int eccsize = chip->ecc.size;
1057	int eccsteps;
1058	int data_written = 0;
1059	int oob_written = 0;
1060	si_t *sih = brcmnand_info.sih;
1061	chipcregs_t *cc = brcmnand_info.cc;
1062	osl_t *osh;
1063	uint32_t reg;
1064	int ret = 0, need_bbt = 0;
1065	uint32_t offset = chip->pageidx << chip->page_shift;
1066
1067	uint8_t oob_buf[NAND_MAX_OOBSIZE];
1068	int *eccpos = chip->ecc.layout->eccpos;
1069	int i;
1070	uint8_t *oob = chip->oob_poi;
1071
1072	osh = si_osh(sih);
1073	/* full page write */
1074	/* disable partial page enable */
1075	reg = R_REG(osh, &cc->nand_acc_control);
1076	reg &= ~NAC_PARTIAL_PAGE_EN;
1077	W_REG(osh, &cc->nand_acc_control, reg);
1078
1079	for (eccsteps = 0; eccsteps < chip->ecc.steps; eccsteps++) {
1080		W_REG(osh, &cc->nand_cache_addr, 0);
1081		W_REG(osh, &cc->nand_cmd_addr, data_written);
1082		ret = brcmnand_posted_write_cache(mtd, chip, &buf[data_written],
1083			oob ? &oob[oob_written]: NULL, offset + data_written);
1084		if (ret < 0) {
1085			goto out;
1086		}
1087		data_written += eccsize;
1088		oob_written += chip->ecc.oobsize;
1089	}
1090
1091	W_REG(osh, &cc->nand_cmd_addr, offset + mtd->writesize - NFL_SECTOR_SIZE);
1092	brcmnand_cmd(osh, cc, NCMD_PAGE_PROG);
1093	if (brcmnand_ctrl_write_is_complete(mtd, chip, &need_bbt)) {
1094		if (!need_bbt) {
1095			/* write the oob */
1096			if (oob) {
1097				/* Enable partial page program so that we can
1098				 * overwrite the spare area
1099				 */
1100				reg = R_REG(osh, &cc->nand_acc_control);
1101				reg |= NAC_PARTIAL_PAGE_EN;
1102				W_REG(osh, &cc->nand_acc_control, reg);
1103
1104				memcpy(oob_buf, oob, NAND_MAX_OOBSIZE);
1105				/* read from the spare area first */
1106				ret = chip->ecc.read_oob(mtd, chip, chip->pageidx, 0);
1107				if (ret != 0)
1108					goto out;
1109				/* merge the oob */
1110				for (i = 0; i < chip->ecc.total; i++)
1111					oob_buf[eccpos[i]] = chip->oob_poi[eccpos[i]];
1112				memcpy(chip->oob_poi, oob_buf, NAND_MAX_OOBSIZE);
1113				/* write back to the spare area */
1114				ret = chip->ecc.write_oob(mtd, chip, chip->pageidx);
1115			}
1116			goto out;
1117		} else {
1118			ret = chip->block_markbad(mtd, offset);
1119			goto out;
1120		}
1121	}
1122	/* timed out */
1123	ret = -ETIMEDOUT;
1124
1125out:
1126	if (ret != 0)
1127		printk(KERN_ERR "brcmnand_write_page_hwecc failed\n");
1128	return;
1129}
1130
1131/*
1132 * brcmnand_posted_write_oob - [BrcmNAND Interface] Write the spare area
1133 * @mtd:	    MTD data structure
1134 * @chip:	    nand chip info structure
1135 * @oob:	    Spare area, pass NULL if not interested.  Must be able to
1136 *                  hold mtd->oobsize (16) bytes.
1137 * @offset:	    offset to write to, and must be 512B aligned
1138 *
1139 */
1140static int brcmnand_posted_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
1141	uint8_t *oob, uint32_t offset)
1142{
1143	uint32_t mask = chip->ecc.size - 1;
1144	si_t *sih = brcmnand_info.sih;
1145	chipcregs_t *cc = brcmnand_info.cc;
1146	osl_t *osh;
1147	int i, ret = 0, need_bbt = 0;
1148	uint32_t *from;
1149	uint32_t reg;
1150	uint8_t oob_buf0[16];
1151
1152	if (offset & mask) {
1153		ret = -EINVAL;
1154		goto out;
1155	}
1156
1157	osh = si_osh(sih);
1158	/* Make sure we are in partial page program mode */
1159	reg = R_REG(osh, &cc->nand_acc_control);
1160	reg |= NAC_PARTIAL_PAGE_EN;
1161	W_REG(osh, &cc->nand_acc_control, reg);
1162
1163	W_REG(osh, &cc->nand_cmd_addr, offset);
1164	if (!oob) {
1165		ret = -EINVAL;
1166		goto out;
1167	}
1168	memcpy(oob_buf0, oob, chip->ecc.oobsize);
1169	from = (uint32_t *)oob_buf0;
1170	for (i = 0; i < chip->ecc.oobsize; i += 4, from++) {
1171		W_REG(osh, (uint32_t *)((uint32_t)&cc->nand_spare_wr0 + i), *from);
1172	}
1173	PLATFORM_IOFLUSH_WAR();
1174	brcmnand_cmd(osh, cc, NCMD_SPARE_PROG);
1175	if (brcmnand_ctrl_write_is_complete(mtd, chip, &need_bbt)) {
1176		if (!need_bbt) {
1177			ret = 0;
1178			goto out;
1179		} else {
1180			ret = chip->block_markbad(mtd, offset);
1181			goto out;
1182		}
1183	}
1184	/* timed out */
1185	ret = -ETIMEDOUT;
1186out:
1187	return ret;
1188}
1189
1190
1191/**
1192 * brcmnand_write_page - [REPLACEABLE] write one page
1193 * @mtd:	MTD device structure
1194 * @chip:	NAND chip descriptor
1195 * @buf:	the data to write
1196 * @page:	page number to write
1197 * @cached:	cached programming
1198 * @raw:	use _raw version of write_page
1199 */
1200static int brcmnand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1201	const uint8_t *buf, int page, int cached, int raw)
1202{
1203	chip->pageidx = page;
1204	chip->ecc.write_page(mtd, chip, buf);
1205
1206	return 0;
1207}
1208
1209/**
1210 * brcmnand_fill_oob - [Internal] Transfer client buffer to oob
1211 * @chip:	nand chip structure
1212 * @oob:	oob data buffer
1213 * @ops:	oob ops structure
1214 */
1215static uint8_t *brcmnand_fill_oob(struct nand_chip *chip, uint8_t *oob,
1216	struct mtd_oob_ops *ops)
1217{
1218	size_t len = ops->ooblen;
1219
1220	switch (ops->mode) {
1221
1222	case MTD_OOB_PLACE:
1223	case MTD_OOB_RAW:
1224		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
1225		return oob + len;
1226
1227	case MTD_OOB_AUTO: {
1228		struct nand_oobfree *free = chip->ecc.layout->oobfree;
1229		uint32_t boffs = 0, woffs = ops->ooboffs;
1230		size_t bytes = 0;
1231
1232		for (; free->length && len; free++, len -= bytes) {
1233			/* Write request not from offset 0 ? */
1234			if (unlikely(woffs)) {
1235				if (woffs >= free->length) {
1236					woffs -= free->length;
1237					continue;
1238				}
1239				boffs = free->offset + woffs;
1240				bytes = min_t(size_t, len,
1241				              (free->length - woffs));
1242				woffs = 0;
1243			} else {
1244				bytes = min_t(size_t, len, free->length);
1245				boffs = free->offset;
1246			}
1247			memcpy(chip->oob_poi + boffs, oob, bytes);
1248			oob += bytes;
1249		}
1250		return oob;
1251	}
1252	default:
1253		BUG();
1254	}
1255	return NULL;
1256}
1257
1258#define NOTALIGNED(x)	(x & (chip->subpagesize - 1)) != 0
1259
1260/**
1261 * brcmnand_do_write_ops - [Internal] NAND write with ECC
1262 * @mtd:	MTD device structure
1263 * @to:		offset to write to
1264 * @ops:	oob operations description structure
1265 *
1266 * NAND write with ECC
1267 */
1268static int brcmnand_do_write_ops(struct mtd_info *mtd, loff_t to,
1269	struct mtd_oob_ops *ops)
1270{
1271	int realpage, page, blockmask;
1272	struct nand_chip *chip = mtd->priv;
1273	uint32_t writelen = ops->len;
1274	uint8_t *oob = ops->oobbuf;
1275	uint8_t *buf = ops->datbuf;
1276	int ret;
1277
1278	ops->retlen = 0;
1279	if (!writelen)
1280		return 0;
1281
1282	/* reject writes, which are not page aligned */
1283	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
1284		printk(KERN_NOTICE "nand_write: "
1285		       "Attempt to write not page aligned data\n");
1286		return -EINVAL;
1287	}
1288
1289
1290	realpage = (int)(to >> chip->page_shift);
1291	page = realpage & chip->pagemask;
1292	blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
1293
1294	/* Invalidate the page cache, when we write to the cached page */
1295	if (to <= (chip->pagebuf << chip->page_shift) &&
1296	    (chip->pagebuf << chip->page_shift) < (to + ops->len))
1297		chip->pagebuf = -1;
1298
1299	/* If we're not given explicit OOB data, let it be 0xFF */
1300	if (likely(!oob))
1301		memset(chip->oob_poi, 0xff, mtd->oobsize);
1302
1303	while (1) {
1304		int bytes = mtd->writesize;
1305		int cached = writelen > bytes && page != blockmask;
1306		uint8_t *wbuf = buf;
1307
1308		if (unlikely(oob))
1309			oob = brcmnand_fill_oob(chip, oob, ops);
1310
1311		ret = chip->write_page(mtd, chip, wbuf, page, cached,
1312		                       (ops->mode == MTD_OOB_RAW));
1313		if (ret)
1314			break;
1315
1316		writelen -= bytes;
1317		if (!writelen)
1318			break;
1319
1320		buf += bytes;
1321		realpage++;
1322
1323		page = realpage & chip->pagemask;
1324	}
1325
1326	ops->retlen = ops->len - writelen;
1327	if (unlikely(oob))
1328		ops->oobretlen = ops->ooblen;
1329	return ret;
1330}
1331
1332/**
1333 * brcmnand_write - [MTD Interface] NAND write with ECC
1334 * @mtd:	MTD device structure
1335 * @to:		offset to write to
1336 * @len:	number of bytes to write
1337 * @retlen:	pointer to variable to store the number of written bytes
1338 * @buf:	the data to write
1339 *
1340 * NAND write with ECC
1341 */
1342static int brcmnand_write(struct mtd_info *mtd, loff_t to, size_t len,
1343	size_t *retlen, const uint8_t *buf)
1344{
1345	struct nand_chip *chip = mtd->priv;
1346	int ret;
1347
1348	/* Do not allow reads past end of device */
1349	if ((to + len) > mtd->size)
1350		return -EINVAL;
1351	if (!len)
1352		return 0;
1353
1354	brcmnand_get_device(chip, mtd, FL_WRITING);
1355
1356	chip->ops.len = len;
1357	chip->ops.datbuf = (uint8_t *)buf;
1358	chip->ops.oobbuf = NULL;
1359
1360	ret = brcmnand_do_write_ops(mtd, to, &chip->ops);
1361
1362	*retlen = chip->ops.retlen;
1363
1364	brcmnand_release_device(mtd);
1365
1366	return ret;
1367}
1368
1369/**
1370 * brcmnand_write_oob_hwecc - [INTERNAL] write one page
1371 * @mtd:    MTD device structure
1372 * @chip:   NAND chip descriptor. The oob_poi ptr points to the OOB buffer.
1373 * @page:   page number to write
1374 */
1375static int brcmnand_write_oob_hwecc(struct mtd_info *mtd, struct nand_chip *chip, int page)
1376{
1377	int eccsteps;
1378	int oob_written = 0, data_written = 0;
1379	uint32_t offset = page << chip->page_shift;
1380	uint8_t *oob = chip->oob_poi;
1381	int ret = 0;
1382
1383	for (eccsteps = 0; eccsteps < chip->ecc.steps; eccsteps++) {
1384		ret = brcmnand_posted_write_oob(mtd, chip, oob + oob_written,
1385			offset + data_written);
1386		if (ret < 0)
1387			break;
1388		data_written += chip->ecc.size;
1389		oob_written += chip->ecc.oobsize;
1390	}
1391	return (ret);
1392}
1393
1394/**
1395 * brcmnand_do_write_oob - [MTD Interface] NAND write out-of-band
1396 * @mtd:	MTD device structure
1397 * @to:		offset to write to
1398 * @ops:	oob operation description structure
1399 *
1400 * NAND write out-of-band
1401 */
1402static int brcmnand_do_write_oob(struct mtd_info *mtd, loff_t to,
1403	struct mtd_oob_ops *ops)
1404{
1405	int page, status, len;
1406	struct nand_chip *chip = mtd->priv;
1407
1408	DEBUG(MTD_DEBUG_LEVEL3, "nand_write_oob: to = 0x%08x, len = %i\n",
1409	      (unsigned int)to, (int)ops->ooblen);
1410
1411	if (ops->mode == MTD_OOB_AUTO)
1412		len = chip->ecc.layout->oobavail;
1413	else
1414		len = mtd->oobsize;
1415
1416	/* Do not allow write past end of page */
1417	if ((ops->ooboffs + ops->ooblen) > len) {
1418		DEBUG(MTD_DEBUG_LEVEL0, "nand_write_oob: "
1419		      "Attempt to write past end of page\n");
1420		return -EINVAL;
1421	}
1422
1423	if (unlikely(ops->ooboffs >= len)) {
1424		DEBUG(MTD_DEBUG_LEVEL0, "nand_write_oob: "
1425			"Attempt to start write outside oob\n");
1426		return -EINVAL;
1427	}
1428
1429	/* Do not allow reads past end of device */
1430	if (unlikely(to >= mtd->size ||
1431	             ops->ooboffs + ops->ooblen >
1432			((mtd->size >> chip->page_shift) -
1433			 (to >> chip->page_shift)) * len)) {
1434		DEBUG(MTD_DEBUG_LEVEL0, "nand_read_oob: "
1435			"Attempt write beyond end of device\n");
1436		return -EINVAL;
1437	}
1438
1439
1440	/* Shift to get page */
1441	page = (int)(to >> chip->page_shift);
1442
1443	/* Invalidate the page cache, if we write to the cached page */
1444	if (page == chip->pagebuf)
1445		chip->pagebuf = -1;
1446
1447	memset(chip->oob_poi, 0xff, mtd->oobsize);
1448	brcmnand_fill_oob(chip, ops->oobbuf, ops);
1449	status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
1450	memset(chip->oob_poi, 0xff, mtd->oobsize);
1451
1452	if (status)
1453		return status;
1454
1455	ops->oobretlen = ops->ooblen;
1456
1457	return 0;
1458}
1459
1460/**
1461 * __brcmnand_write_oob - [MTD Interface] NAND write data and/or out-of-band
1462 * @mtd:	MTD device structure
1463 * @to:		offset to write to
1464 * @ops:	oob operation description structure
1465 */
1466int __brcmnand_write_oob(struct mtd_info *mtd, loff_t to,
1467	struct mtd_oob_ops *ops)
1468{
1469	int ret = -ENOTSUPP;
1470
1471	ops->retlen = 0;
1472
1473	/* Do not allow writes past end of device */
1474	if (ops->datbuf && (to + ops->len) > mtd->size) {
1475		DEBUG(MTD_DEBUG_LEVEL0, "brcmnand_write_oob: "
1476		      "Attempt write beyond end of device\n");
1477		return -EINVAL;
1478	}
1479
1480	switch (ops->mode) {
1481	case MTD_OOB_PLACE:
1482	case MTD_OOB_AUTO:
1483	case MTD_OOB_RAW:
1484		if (!ops->datbuf)
1485			ret = brcmnand_do_write_oob(mtd, to, ops);
1486		else
1487			ret = brcmnand_do_write_ops(mtd, to, ops);
1488
1489		break;
1490	default:
1491		DEBUG(MTD_DEBUG_LEVEL0, "__brcmnand_write_oob: "
1492		      "ops->mode unsupport; %x\n", ops->mode);
1493		break;
1494	}
1495
1496	return ret;
1497}
1498
1499/**
1500 * brcmnand_write_oob - [MTD Interface] NAND write data and/or out-of-band
1501 * @mtd:	MTD device structure
1502 * @to:		offset to write to
1503 * @ops:	oob operation description structure
1504 */
1505static int brcmnand_write_oob(struct mtd_info *mtd, loff_t to,
1506	struct mtd_oob_ops *ops)
1507{
1508	struct nand_chip *chip = mtd->priv;
1509	int ret;
1510
1511	brcmnand_get_device(chip, mtd, FL_WRITING);
1512	ret = __brcmnand_write_oob(mtd, to, ops);
1513	brcmnand_release_device(mtd);
1514
1515	return ret;
1516}
1517
1518static int brcmnand_erase_bbt(struct mtd_info *mtd, struct erase_info *instr, int allowbbt)
1519{
1520	struct nand_chip * chip = mtd->priv;
1521	int page, len, pages_per_block, block_size;
1522	loff_t addr;
1523	int ret = 0;
1524	int need_bbt = 0;
1525	si_t *sih = brcmnand_info.sih;
1526	chipcregs_t *cc = brcmnand_info.cc;
1527	osl_t *osh;
1528
1529	DEBUG(MTD_DEBUG_LEVEL3, "nand_erase: start = 0x%08x, len = %i\n",
1530	      (unsigned int)instr->addr, (unsigned int)instr->len);
1531
1532	block_size = 1 << chip->phys_erase_shift;
1533
1534	/* Start address must align on block boundary */
1535	if (instr->addr & (block_size - 1)) {
1536		DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: Unaligned address\n");
1537		return -EINVAL;
1538	}
1539
1540	/* Length must align on block boundary */
1541	if (instr->len & (block_size - 1)) {
1542		DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: "
1543		      "Length not block aligned\n");
1544		return -EINVAL;
1545	}
1546
1547	/* Do not allow erase past end of device */
1548	if ((instr->len + instr->addr) > mtd->size) {
1549		DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: "
1550		      "Erase past end of device\n");
1551		return -EINVAL;
1552	}
1553
1554	instr->fail_addr = 0xffffffff;
1555
1556	/* Grab the lock and see if the device is available */
1557
1558	/* Shift to get first page */
1559	page = (int)(instr->addr >> chip->page_shift);
1560
1561	/* Calculate pages in each block */
1562	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
1563
1564	osh = si_osh(sih);
1565	/* Clear ECC registers */
1566	W_REG(osh, &cc->nand_ecc_corr_addr, 0);
1567	W_REG(osh, &cc->nand_ecc_corr_addr_x, 0);
1568	W_REG(osh, &cc->nand_ecc_unc_addr, 0);
1569	W_REG(osh, &cc->nand_ecc_unc_addr_x, 0);
1570
1571	/* Loop throught the pages */
1572	len = instr->len;
1573	addr = instr->addr;
1574	instr->state = MTD_ERASING;
1575
1576	while (len) {
1577		/*
1578		 * heck if we have a bad block, we do not erase bad blocks !
1579		 */
1580		if (brcmnand_block_checkbad(mtd, ((loff_t) page) <<
1581					chip->page_shift, 0, allowbbt)) {
1582			printk(KERN_WARNING "nand_erase: attempt to erase a "
1583			       "bad block at page 0x%08x\n", page);
1584			instr->state = MTD_ERASE_FAILED;
1585			goto erase_exit;
1586		}
1587
1588		/*
1589		 * Invalidate the page cache, if we erase the block which
1590		 * contains the current cached page
1591		 */
1592		if (page <= chip->pagebuf && chip->pagebuf <
1593		    (page + pages_per_block))
1594			chip->pagebuf = -1;
1595
1596		W_REG(osh, &cc->nand_cmd_addr, (page << chip->page_shift));
1597		brcmnand_cmd(osh, cc, NCMD_BLOCK_ERASE);
1598
1599		/* Wait until flash is ready */
1600		ret = brcmnand_ctrl_write_is_complete(mtd, chip, &need_bbt);
1601
1602		if (need_bbt) {
1603			if (!allowbbt) {
1604				DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: "
1605				      "Failed erase, page 0x%08x\n", page);
1606				instr->state = MTD_ERASE_FAILED;
1607				instr->fail_addr = (page << chip->page_shift);
1608				chip->block_markbad(mtd, addr);
1609				goto erase_exit;
1610			}
1611		}
1612
1613		/* Increment page address and decrement length */
1614		len -= (1 << chip->phys_erase_shift);
1615		page += pages_per_block;
1616	}
1617	instr->state = MTD_ERASE_DONE;
1618
1619erase_exit:
1620
1621	ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
1622	/* Do call back function */
1623	if (!ret)
1624		mtd_erase_callback(instr);
1625
1626	return ret;
1627}
1628
1629static int
1630brcmnand_erase(struct mtd_info *mtd, struct erase_info *instr)
1631{
1632	struct nand_chip *chip = mtd->priv;
1633	int allowbbt = 0;
1634	int ret = 0;
1635
1636	brcmnand_get_device(chip, mtd, FL_ERASING);
1637	/* do not allow erase of bbt */
1638	ret = brcmnand_erase_bbt(mtd, instr, allowbbt);
1639	brcmnand_release_device(mtd);
1640
1641	return ret;
1642}
1643
1644/**
1645 * brcmnand_sync - [MTD Interface] sync
1646 * @mtd:	MTD device structure
1647 *
1648 * Sync is actually a wait for chip ready function
1649 */
1650static void brcmnand_sync(struct mtd_info *mtd)
1651{
1652	struct nand_chip *chip = mtd->priv;
1653
1654	DEBUG(MTD_DEBUG_LEVEL3, "nand_sync: called\n");
1655
1656	/* Grab the lock and see if the device is available */
1657	brcmnand_get_device(chip, mtd, FL_SYNCING);
1658	PLATFORM_IOFLUSH_WAR();
1659
1660	/* Release it and go back */
1661	brcmnand_release_device(mtd);
1662}
1663
1664/**
1665 * brcmnand_block_isbad - [MTD Interface] Check if block at offset is bad
1666 * @mtd:	MTD device structure
1667 * @offs:	offset relative to mtd start
1668 */
1669static int brcmnand_block_isbad(struct mtd_info *mtd, loff_t offs)
1670{
1671	struct nand_chip *chip = mtd->priv;
1672	int ret;
1673	/* Check for invalid offset */
1674	if (offs > mtd->size)
1675		return -EINVAL;
1676
1677	brcmnand_get_device(chip, mtd, FL_READING);
1678	ret = brcmnand_block_checkbad(mtd, offs, 1, 0);
1679	brcmnand_release_device(mtd);
1680
1681	return ret;
1682}
1683
1684/**
1685 * brcmnand_default_block_markbad - [DEFAULT] mark a block bad
1686 * @mtd:	MTD device structure
1687 * @ofs:	offset from device start
1688 *
1689 * This is the default implementation, which can be overridden by
1690 * a hardware specific driver.
1691*/
1692static int brcmnand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
1693{
1694	struct nand_chip *chip = mtd->priv;
1695	uint8_t bbmarker[1] = {0};
1696	uint8_t *buf = chip->oob_poi;
1697	int block, ret;
1698	int page, dir;
1699
1700	/* Get block number */
1701	block = (int)(ofs >> chip->bbt_erase_shift);
1702	/* Get page number */
1703		page = block << (chip->bbt_erase_shift - chip->page_shift);
1704		dir = 1;
1705
1706	if (chip->bbt)
1707		chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
1708	memcpy(buf, ffchars, NAND_MAX_OOBSIZE);
1709	memcpy(buf + chip->badblockpos, bbmarker, sizeof(bbmarker));
1710	ret = chip->ecc.write_oob(mtd, chip, page);
1711	page += dir;
1712	ret = chip->ecc.write_oob(mtd, chip, page);
1713
1714	/* According to the HW guy, even if the write fails, the controller have
1715	 * written a 0 pattern that certainly would have written a non 0xFF value
1716	 * into the BI marker.
1717	 *
1718	 * Ignoring ret.  Even if we fail to write the BI bytes, just ignore it,
1719	 * and mark the block as bad in the BBT
1720	 */
1721	ret = brcmnand_update_bbt(mtd, ofs);
1722	mtd->ecc_stats.badblocks++;
1723	return ret;
1724}
1725
1726/**
1727 * brcmnand_block_markbad - [MTD Interface] Mark block at the given offset as bad
1728 * @mtd:	MTD device structure
1729 * @ofs:	offset relative to mtd start
1730 */
1731static int brcmnand_block_markbad(struct mtd_info *mtd, loff_t ofs)
1732{
1733	struct nand_chip *chip = mtd->priv;
1734	int ret;
1735
1736	brcmnand_get_device(chip, mtd, FL_READING);
1737	ret = brcmnand_block_checkbad(mtd, ofs, 1, 0);
1738	brcmnand_release_device(mtd);
1739
1740	/* If it was bad already, return success and do nothing. */
1741	if (ret > 0) {
1742		ret = 0;
1743	}
1744	else {
1745		/* Mark the block as bad */
1746		brcmnand_get_device(chip, mtd, FL_WRITING);
1747		ret = chip->block_markbad(mtd, ofs);
1748		brcmnand_release_device(mtd);
1749	}
1750
1751	return  ret;
1752}
1753
1754/**
1755 * brcmnand_suspend - [MTD Interface] Suspend the NAND flash
1756 * @mtd:	MTD device structure
1757 */
1758static int brcmnand_suspend(struct mtd_info *mtd)
1759{
1760	struct nand_chip *chip = mtd->priv;
1761
1762	return brcmnand_get_device(chip, mtd, FL_PM_SUSPENDED);
1763}
1764
1765/**
1766 * brcmnand_resume - [MTD Interface] Resume the NAND flash
1767 * @mtd:	MTD device structure
1768 */
1769static void brcmnand_resume(struct mtd_info *mtd)
1770{
1771	struct nand_chip *chip = mtd->priv;
1772
1773	if (chip->state == FL_PM_SUSPENDED)
1774		brcmnand_release_device(mtd);
1775	else
1776		printk(KERN_ERR "brcmnand_resume() called for a chip which is not "
1777		       "in suspended state\n");
1778}
1779
1780struct mtd_partition brcmnand_parts[4] = {{0}};
1781
1782struct mtd_partition *init_brcmnand_mtd_partitions(struct mtd_info *mtd, size_t size)
1783{
1784	int bootflags = boot_flags();
1785	int j = 0;
1786	int offset = 0;
1787	unsigned int image_first_offset = 0;
1788#ifdef CONFIG_FAILSAFE_UPGRADE
1789	char *img_boot = nvram_get(BOOTPARTITION);
1790	char *imag_1st_offset = nvram_get(IMAGE_FIRST_OFFSET);
1791	char *imag_2nd_offset = nvram_get(IMAGE_SECOND_OFFSET);
1792	unsigned int image_second_offset = 0;
1793	char dual_image_on = 0;
1794
1795	/* The image_1st_size and image_2nd_size are necessary if the Flash does not have any
1796	 * image
1797	 */
1798	dual_image_on = (img_boot != NULL && imag_1st_offset != NULL && imag_2nd_offset != NULL);
1799
1800	if (dual_image_on) {
1801		image_first_offset = simple_strtol(imag_1st_offset, NULL, 10);
1802		image_second_offset = simple_strtol(imag_2nd_offset, NULL, 10);
1803		printk("The first offset=%x, 2nd offset=%x\n", image_first_offset,
1804			image_second_offset);
1805
1806	}
1807#endif	/* CONFIG_FAILSAFE_UPGRADE */
1808
1809	offset = image_first_offset;
1810	if ((bootflags & FLASH_KERNEL_NFLASH) == FLASH_KERNEL_NFLASH) {
1811		brcmnand_parts[j].name = "trx";
1812		brcmnand_parts[j].offset = offset;
1813#ifdef CONFIG_FAILSAFE_UPGRADE
1814		brcmnand_parts[j].size = image_second_offset-offset;
1815#else
1816		brcmnand_parts[j].size = NFL_BOOT_OS_SIZE;
1817#endif
1818		offset += brcmnand_parts[j].size;
1819		size   -= brcmnand_parts[j].size;
1820		j++;
1821#ifdef CONFIG_FAILSAFE_UPGRADE
1822		brcmnand_parts[j].name = "trx2";
1823		brcmnand_parts[j].offset = offset;
1824		brcmnand_parts[j].size = NFL_BOOT_OS_SIZE-image_second_offset;
1825		offset += brcmnand_parts[j].size;
1826		size   -= brcmnand_parts[j].size;
1827		j++;
1828#endif
1829	}
1830
1831	size -= NFL_BBT_SIZE;
1832	if (size <= 0) {
1833		printk(KERN_ERR "%s: nand flash size is too small\n", __func__);
1834		return NULL;
1835	}
1836
1837	brcmnand_parts[j].name = "brcmnand";
1838	brcmnand_parts[j].offset = offset;
1839	brcmnand_parts[j++].size = size;
1840
1841	return brcmnand_parts;
1842}
1843
1844/**
1845 * brcmnand_check_command_done - [DEFAULT] check if command is done
1846 * @mtd:	MTD device structure
1847 *
1848 * Return 0 to process next command
1849 */
1850static int
1851brcmnand_check_command_done(void)
1852{
1853	si_t *sih = brcmnand_info.sih;
1854	chipcregs_t *cc = brcmnand_info.cc;
1855	osl_t *osh;
1856	int count = 0;
1857
1858	osh = si_osh(sih);
1859
1860	while (R_REG(osh, &cc->nflashctrl) & NFC_START) {
1861		if (++count > BRCMNAND_POLL_TIMEOUT) {
1862			printk("brcmnand_check_command_done: command timeout\n");
1863			return -1;
1864		}
1865	}
1866
1867	return 0;
1868}
1869
1870/**
1871 * brcmnand_hwcontrol - [DEFAULT] Issue command and address cycles to the chip
1872 * @mtd:	MTD device structure
1873 * @cmd:	the command to be sent
1874 * @ctrl:	the control code to be sent
1875 *
1876 * Issue command and address cycles to the chip
1877 */
1878static void
1879brcmnand_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
1880{
1881	si_t *sih = brcmnand_info.sih;
1882	chipcregs_t *cc = brcmnand_info.cc;
1883	osl_t *osh;
1884	unsigned int val = 0;
1885
1886	osh = si_osh(sih);
1887
1888	if (cmd == NAND_CMD_NONE)
1889		return;
1890
1891	if (ctrl & NAND_CLE) {
1892		val = cmd | NFC_CMD0;
1893	}
1894	else {
1895		switch (ctrl & (NAND_ALE_COL | NAND_ALE_ROW)) {
1896		case NAND_ALE_COL:
1897			W_REG(osh, &cc->nflashcoladdr, cmd);
1898			val = NFC_COL;
1899			break;
1900		case NAND_ALE_ROW:
1901			W_REG(osh, &cc->nflashrowaddr, cmd);
1902			val = NFC_ROW;
1903			break;
1904		default:
1905			BUG();
1906		}
1907	}
1908
1909	/* nCS is not needed for reset command */
1910	if (cmd != NAND_CMD_RESET)
1911		val |= NFC_CSA;
1912
1913	val |= NFC_START;
1914	W_REG(osh, &cc->nflashctrl, val);
1915
1916	brcmnand_check_command_done();
1917}
1918
1919/**
1920 * brcmnand_command_lp - [DEFAULT] Send command to NAND large page device
1921 * @mtd:	MTD device structure
1922 * @command:	the command to be sent
1923 * @column:	the column address for this command, -1 if none
1924 * @page_addr:	the page address for this command, -1 if none
1925 *
1926 * Send command to NAND device. This is the version for the new large page
1927 * devices We dont have the separate regions as we have in the small page
1928 * devices.  We must emulate NAND_CMD_READOOB to keep the code compatible.
1929 */
1930static void
1931brcmnand_command_lp(struct mtd_info *mtd, unsigned int command, int column, int page_addr)
1932{
1933	register struct nand_chip *chip = mtd->priv;
1934
1935	/* Emulate NAND_CMD_READOOB */
1936	if (command == NAND_CMD_READOOB) {
1937		column += mtd->writesize;
1938		command = NAND_CMD_READ0;
1939	}
1940
1941	/* Command latch cycle */
1942	chip->cmd_ctrl(mtd, command & 0xff, NAND_NCE | NAND_CLE);
1943
1944	if (column != -1 || page_addr != -1) {
1945		int ctrl = NAND_NCE | NAND_ALE;
1946
1947		/* Serially input address */
1948		if (column != -1) {
1949			ctrl |= NAND_ALE_COL;
1950
1951			/* Adjust columns for 16 bit buswidth */
1952			if (chip->options & NAND_BUSWIDTH_16)
1953				column >>= 1;
1954
1955			chip->cmd_ctrl(mtd, column, ctrl);
1956		}
1957
1958		if (page_addr != -1) {
1959			ctrl &= ~NAND_ALE_COL;
1960			ctrl |= NAND_ALE_ROW;
1961
1962			chip->cmd_ctrl(mtd, page_addr, ctrl);
1963		}
1964	}
1965
1966	chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE);
1967
1968	/*
1969	 * program and erase have their own busy handlers
1970	 * status, sequential in, and deplete1 need no delay
1971	 */
1972	switch (command) {
1973
1974	case NAND_CMD_CACHEDPROG:
1975	case NAND_CMD_PAGEPROG:
1976	case NAND_CMD_ERASE1:
1977	case NAND_CMD_ERASE2:
1978	case NAND_CMD_SEQIN:
1979	case NAND_CMD_RNDIN:
1980	case NAND_CMD_STATUS:
1981	case NAND_CMD_DEPLETE1:
1982		return;
1983
1984	/*
1985	 * read error status commands require only a short delay
1986	 */
1987	case NAND_CMD_STATUS_ERROR:
1988	case NAND_CMD_STATUS_ERROR0:
1989	case NAND_CMD_STATUS_ERROR1:
1990	case NAND_CMD_STATUS_ERROR2:
1991	case NAND_CMD_STATUS_ERROR3:
1992		udelay(chip->chip_delay);
1993		return;
1994
1995	case NAND_CMD_RESET:
1996		if (chip->dev_ready)
1997			break;
1998
1999		udelay(chip->chip_delay);
2000
2001		chip->cmd_ctrl(mtd, NAND_CMD_STATUS, NAND_NCE | NAND_CLE);
2002		chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE);
2003
2004		while (!(chip->read_byte(mtd) & NAND_STATUS_READY));
2005		return;
2006
2007	case NAND_CMD_RNDOUT:
2008		/* No ready / busy check necessary */
2009		chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART, NAND_NCE | NAND_CLE);
2010		chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE);
2011		return;
2012
2013	case NAND_CMD_READ0:
2014		chip->cmd_ctrl(mtd, NAND_CMD_READSTART, NAND_NCE | NAND_CLE);
2015		chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE);
2016
2017	/* This applies to read commands */
2018	default:
2019		/*
2020		 * If we don't have access to the busy pin, we apply the given
2021		 * command delay
2022		 */
2023		if (!chip->dev_ready) {
2024			udelay(chip->chip_delay);
2025			return;
2026		}
2027	}
2028
2029	/* Apply this short delay always to ensure that we do wait tWB in
2030	 * any case on any machine.
2031	 */
2032	ndelay(100);
2033
2034	nand_wait_ready(mtd);
2035}
2036
2037/**
2038 * brcmnand_command - [DEFAULT] Send command to NAND device
2039 * @mtd:	MTD device structure
2040 * @command:	the command to be sent
2041 * @column:	the column address for this command, -1 if none
2042 * @page_addr:	the page address for this command, -1 if none
2043 *
2044 * Send command to NAND device. This function is used for small page
2045 * devices (256/512 Bytes per page)
2046 */
2047static void
2048brcmnand_command(struct mtd_info *mtd, unsigned int command, int column, int page_addr)
2049{
2050	register struct nand_chip *chip = mtd->priv;
2051	int ctrl = NAND_CTRL_CLE;
2052
2053	/* Invoke large page command function */
2054	if (mtd->writesize > 512) {
2055		brcmnand_command_lp(mtd, command, column, page_addr);
2056		return;
2057	}
2058
2059	/*
2060	 * Write out the command to the device.
2061	 */
2062	if (command == NAND_CMD_SEQIN) {
2063		int readcmd;
2064
2065		if (column >= mtd->writesize) {
2066			/* OOB area */
2067			column -= mtd->writesize;
2068			readcmd = NAND_CMD_READOOB;
2069		} else if (column < 256) {
2070			/* First 256 bytes --> READ0 */
2071			readcmd = NAND_CMD_READ0;
2072		} else {
2073			column -= 256;
2074			readcmd = NAND_CMD_READ1;
2075		}
2076
2077		chip->cmd_ctrl(mtd, readcmd, ctrl);
2078	}
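	/* Example of the rebasing above: SEQIN at column 300 of a 512B-page
	 * device first issues READ1 and programs column 300 - 256 = 44.
	 */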
2079
2080	chip->cmd_ctrl(mtd, command, ctrl);
2081
2082	/*
2083	 * Address cycle, when necessary
2084	 */
2085	ctrl = NAND_CTRL_ALE;
2086
2087	/* Serially input address */
2088	if (column != -1) {
2089		ctrl |= NAND_ALE_COL;
2090
2091		/* Adjust columns for 16 bit buswidth */
2092		if (chip->options & NAND_BUSWIDTH_16)
2093			column >>= 1;
2094
2095		chip->cmd_ctrl(mtd, column, ctrl);
2096	}
2097
2098	if (page_addr != -1) {
2099		ctrl &= ~NAND_ALE_COL;
2100		ctrl |= NAND_ALE_ROW;
2101
2102		chip->cmd_ctrl(mtd, page_addr, ctrl);
2103	}
2104
2105	chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE);
2106
	/*
	 * program and erase have their own busy handlers
	 * status and sequential in need no delay
	 */
2111	switch (command) {
2112
2113	case NAND_CMD_PAGEPROG:
2114	case NAND_CMD_ERASE1:
2115	case NAND_CMD_ERASE2:
2116	case NAND_CMD_SEQIN:
2117	case NAND_CMD_STATUS:
2118		return;
2119
2120	case NAND_CMD_RESET:
2121		if (chip->dev_ready)
2122			break;
2123
2124		udelay(chip->chip_delay);
2125
2126		chip->cmd_ctrl(mtd, NAND_CMD_STATUS, NAND_CTRL_CLE);
2127
2128		chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE);
2129
2130		while (!(chip->read_byte(mtd) & NAND_STATUS_READY));
2131
2132		return;
2133
2134		/* This applies to read commands */
2135	default:
2136		/*
2137		 * If we don't have access to the busy pin, we apply the given
2138		 * command delay
2139		 */
2140		if (!chip->dev_ready) {
2141			udelay(chip->chip_delay);
2142			return;
2143		}
2144	}
2145
2146	/* Apply this short delay always to ensure that we do wait tWB in
2147	 * any case on any machine.
2148	 */
2149	ndelay(100);
2150
2151	nand_wait_ready(mtd);
2152}
2153
2154/**
2155 * brcmnand_read_byte - [DEFAULT] read one byte from the chip
2156 * @mtd:	MTD device structure
2157 *
2158 * Default read function for 8bit bus width
2159 */
2160static uint8_t
2161brcmnand_read_byte(struct mtd_info *mtd)
2162{
2163	si_t *sih = brcmnand_info.sih;
2164	chipcregs_t *cc = brcmnand_info.cc;
2165	osl_t *osh;
2166	register struct nand_chip *chip = mtd->priv;
2167	unsigned int val;
2168
2169	osh = si_osh(sih);
2170
2171	val = NFC_DREAD | NFC_CSA | NFC_START;
2172	W_REG(osh, &cc->nflashctrl, val);
2173
2174	brcmnand_check_command_done();
2175
2176	return readb(chip->IO_ADDR_R);
2177}
2178
/**
 * brcmnand_write_byte - [DEFAULT] write one byte to the chip
 * @mtd:	MTD device structure
 * @ch:	byte to write
 *
 * Default write function for 8bit bus width
 */
2185static int
2186brcmnand_write_byte(struct mtd_info *mtd, u_char ch)
2187{
2188	si_t *sih = brcmnand_info.sih;
2189	chipcregs_t *cc = brcmnand_info.cc;
2190	osl_t *osh;
2191	unsigned int val;
2192
2193	osh = si_osh(sih);
2194
2195	W_REG(osh, &cc->nflashdata, (unsigned int)ch);
2196
2197	val = NFC_DWRITE | NFC_CSA | NFC_START;
2198	W_REG(osh, &cc->nflashctrl, val);
2199
2200	brcmnand_check_command_done();
2201
2202	return 0;
2203}
2204
2205/**
2206 * brcmnand_read_buf - [DEFAULT] read data from chip into buf
2207 * @mtd:	MTD device structure
2208 * @buf:	data buffer
2209 * @len:	number of bytes to read
2210 *
2211 * Default read function for 8bit bus width
2212 */
2213static void
2214brcmnand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
2215{
2216	int count = 0;
2217
2218	while (len > 0) {
2219		buf[count++] = brcmnand_read_byte(mtd);
2220		len--;
2221	}
2222}
2223
2224/**
2225 * brcmnand_write_buf - [DEFAULT] write buffer to chip
2226 * @mtd:	MTD device structure
2227 * @buf:	data buffer
2228 * @len:	number of bytes to write
2229 *
2230 * Default write function for 8bit bus width
2231 */
2232static void
2233brcmnand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
2234{
2235	int count = 0;
2236
2237	while (len > 0) {
2238		brcmnand_write_byte(mtd, buf[count++]);
2239		len--;
2240	}
2241}
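
/*
 * Both buffer helpers above move a single byte per controller kick: every
 * byte costs one write of nflashctrl with NFC_START plus a completion poll
 * in brcmnand_check_command_done(); no burst or word access is used.
 */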
2242
/**
 * brcmnand_verify_buf - [DEFAULT] Verify chip data against buffer
 * @mtd:	MTD device structure
 * @buf:	buffer containing the data to compare
 * @len:	number of bytes to compare
 *
 * Default verify function for 8bit bus width
 */
2251static int
2252brcmnand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
2253{
2254	int i;
2255	struct nand_chip *chip = mtd->priv;
2256	uint8_t chbuf;
2257
2258	for (i = 0; i < len; i++) {
2259		chbuf = chip->read_byte(mtd);
2260		if (buf[i] != chbuf) {
2261			return -EFAULT;
2262		}
2263	}
2264
2265	return 0;
2266}
2267
/**
 * brcmnand_devready - [DEFAULT] Check if nand flash device is ready
 * @mtd:	MTD device structure
 *
 * Return 1 if the nand flash device is ready, 0 if it is busy
 */
2274static int
2275brcmnand_devready(struct mtd_info *mtd)
2276{
2277	si_t *sih = brcmnand_info.sih;
2278	chipcregs_t *cc = brcmnand_info.cc;
2279	osl_t *osh;
2280	int status;
2281
2282	osh = si_osh(sih);
2283
2284	status = (R_REG(osh, &cc->nflashctrl) & NFC_RDYBUSY) ? 1 : 0;
2285
2286	return status;
2287}
2288
/**
 * brcmnand_select_chip - [DEFAULT] select chip
 * @mtd:	MTD device structure
 * @chip:	chip number to be selected
 *
 * For BCM4706 this is a no-op because only one chip is used
 */
2296static void
2297brcmnand_select_chip(struct mtd_info *mtd, int chip)
2298{
2299	return;
2300}
2301
/**
 * brcmnand_init_nandchip - [DEFAULT] initialize mtd_info and nand_chip
 * @mtd:	MTD device structure
 * @chip:	NAND chip structure to initialize
 */
2308static int
2309brcmnand_init_nandchip(struct mtd_info *mtd, struct nand_chip *chip)
2310{
2311	chipcregs_t *cc = brcmnand_info.cc;
2312	int ret = 0;
2313
2314	chip->cmdfunc = brcmnand_command;
2315	chip->read_byte = brcmnand_read_byte;
2316	chip->write_buf = brcmnand_write_buf;
2317	chip->read_buf = brcmnand_read_buf;
2318	chip->verify_buf = brcmnand_verify_buf;
2319	chip->select_chip = brcmnand_select_chip;
2320	chip->cmd_ctrl = brcmnand_hwcontrol;
2321	chip->dev_ready = brcmnand_devready;
2322	chip->get_device = brcmnand_get_device_bcm4706;
2323	chip->release_device = brcmnand_release_device_bcm4706;
2324
2325	chip->numchips = 1;
2326	chip->chip_shift = 0;
2327	chip->chip_delay = 50;
2328	chip->priv = mtd;
2329	chip->options = NAND_USE_FLASH_BBT;
2330
2331	chip->controller = &chip->hwcontrol;
2332	spin_lock_init(&chip->controller->lock);
2333	init_waitqueue_head(&chip->controller->wq);
2334
2335	chip->IO_ADDR_W = (void __iomem *)&cc->nflashdata;
2336	chip->IO_ADDR_R = chip->IO_ADDR_W;
2337
	/* BCM4706 only supports software ECC mode */
2339	chip->ecc.mode = NAND_ECC_SOFT;
2340	chip->ecc.layout = NULL;
2341
2342	mtd->name = "brcmnand";
2343	mtd->priv = chip;
2344	mtd->owner = THIS_MODULE;
2345
2346	mtd->mutex = partitions_mutex_init();
2347	if (!mtd->mutex)
2348		ret = -ENOMEM;
2349
2350	return ret;
2351}
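
/*
 * Note: on BCM4706 only the raw command/data hooks above are provided; ECC
 * is handled by the generic soft-ECC code selected via NAND_ECC_SOFT, and
 * nand_scan() in brcmnand_mtd_init() supplies the remaining nand_base
 * defaults.
 */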
2352
2353static int __init
2354brcmnand_mtd_init(void)
2355{
2356	int ret = 0;
2357	hndnand_t *info;
	struct pci_dev *dev = NULL, *pdev;
2359	struct nand_chip *chip;
2360	struct mtd_info *mtd;
2361#ifdef CONFIG_MTD_PARTITIONS
2362	struct mtd_partition *parts;
2363	int i;
2364#endif
2365
	/* Find the chipcommon core; record the match in 'dev' explicitly */
	list_for_each_entry(pdev, &((pci_find_bus(0, 0))->devices), bus_list) {
		if (pdev->device == CC_CORE_ID) {
			dev = pdev;
			break;
		}
	}
2370
2371	if (dev == NULL) {
2372		printk(KERN_ERR "brcmnand: chipcommon not found\n");
2373		return -ENODEV;
2374	}
2375
2376	memset(&brcmnand_info, 0, sizeof(struct brcmnand_mtd));
2377
2378	/* attach to the backplane */
2379	if (!(brcmnand_info.sih = si_kattach(SI_OSH))) {
2380		printk(KERN_ERR "brcmnand: error attaching to backplane\n");
2381		ret = -EIO;
2382		goto fail;
2383	}
2384
2385	/* Map registers and flash base */
2386	if (!(brcmnand_info.cc = ioremap_nocache(
2387		pci_resource_start(dev, 0),
2388		pci_resource_len(dev, 0)))) {
2389		printk(KERN_ERR "brcmnand: error mapping registers\n");
2390		ret = -EIO;
2391		goto fail;
2392	}
2393
	/* Initialize NAND flash access */
2395	if (!(info = hndnand_init(brcmnand_info.sih))) {
2396		printk(KERN_ERR "brcmnand: found no supported devices\n");
2397		ret = -ENODEV;
2398		goto fail;
2399	}
2400	brcmnand_info.nfl = info;
2401
2402	if (CHIPID(brcmnand_info.sih->chip) == BCM4706_CHIP_ID) {
2403		mtd = &brcmnand_info.mtd;
2404		chip = &brcmnand_info.chip;
2405
2406		if ((ret = brcmnand_init_nandchip(mtd, chip)) != 0) {
2407			printk(KERN_ERR "brcmnand_mtd_init: brcmnand_init_nandchip failed\n");
2408			goto fail;
2409		}
2410
2411		if ((ret = nand_scan(mtd, chip->numchips)) != 0) {
2412			printk(KERN_ERR "brcmnand_mtd_init: nand_scan failed\n");
2413			goto fail;
2414		}
2415
2416		goto init_partitions;
2417	}
2418
2419	page_buffer = kmalloc(sizeof(struct nand_buffers), GFP_KERNEL);
	if (!page_buffer) {
		printk(KERN_ERR "brcmnand: cannot allocate memory for page buffer\n");
		ret = -ENOMEM;
		goto fail;
	}
2424	memset(page_buffer, 0, sizeof(struct nand_buffers));
2425
2426	chip = &brcmnand_info.chip;
2427	mtd = &brcmnand_info.mtd;
2428
2429	chip->ecc.mode = NAND_ECC_HW;
2430
2431	chip->buffers = (struct nand_buffers *)page_buffer;
2432	chip->numchips = 1;
2433	chip->chip_shift = 0;
2434	chip->priv = mtd;
2435	chip->options |= NAND_USE_FLASH_BBT;
2436	/* At most 2GB is supported */
	chip->chipsize = (info->size >= (1 << 11)) ? (1UL << 31) : (info->size << 20);
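	/* e.g. a 128MB part gives chipsize = 128 << 20 = 0x08000000; anything
	 * reported as 2048MB or larger is clamped to 2GB (info->size is
	 * evidently in Mbytes, hence the << 20).
	 */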
2438	chip->ecc.level = info->ecclevel;
2439
2440	/* Register with MTD */
2441	mtd->name = "brcmnand";
2442	mtd->priv = &brcmnand_info.chip;
2443	mtd->owner = THIS_MODULE;
2444	mtd->mutex = partitions_mutex_init();
2445	if (!mtd->mutex) {
2446		ret = -ENOMEM;
2447		goto fail;
2448	}
2449
2450	mtd->size = chip->chipsize;
2451	mtd->erasesize = info->blocksize;
2452	mtd->writesize = info->pagesize;
	/* 16B oob for a 512B page, 64B for a 2KB page, etc. (pagesize / 32) */
	mtd->oobsize = (info->pagesize >> 5);
2455
2456	/* Calculate the address shift from the page size */
2457	chip->page_shift = ffs(mtd->writesize) - 1;
2458	/* Convert chipsize to number of pages per chip -1. */
2459	chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
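	/* e.g. a 128MB chip (0x08000000 bytes) with 2048B pages:
	 * page_shift = 11, pagemask = (0x08000000 >> 11) - 1 = 0xffff
	 */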
2460
2461	chip->bbt_erase_shift = chip->phys_erase_shift =
2462		ffs(mtd->erasesize) - 1;
2463	chip->chip_shift = ffs(chip->chipsize) - 1;
2464
2465	/* Set the bad block position */
2466	chip->badblockpos = (mtd->writesize > 512) ?
2467		NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS;
2468
2469	if (!chip->controller) {
2470		chip->controller = &chip->hwcontrol;
2471		spin_lock_init(&chip->controller->lock);
2472		init_waitqueue_head(&chip->controller->wq);
2473	}
2474
2475	/* Preset the internal oob write buffer */
2476	memset(BRCMNAND_OOBBUF(chip->buffers), 0xff, mtd->oobsize);
2477
2478	/* Set the internal oob buffer location, just after the page data */
2479	chip->oob_poi = BRCMNAND_OOBBUF(chip->buffers);
2480
2481	/*
2482	 * If no default placement scheme is given, select an appropriate one
2483	 */
2484	if (!chip->ecc.layout) {
2485		switch (mtd->oobsize) {
2486		case 16:
2487			if (chip->ecc.level == BRCMNAND_ECC_HAMMING)
2488				chip->ecc.layout = &brcmnand_oob_16;
2489			else
2490				chip->ecc.layout = &brcmnand_oob_bch4_512;
2491			break;
2492		case 64:
2493			if (chip->ecc.level == BRCMNAND_ECC_HAMMING)
2494				chip->ecc.layout = &brcmnand_oob_64;
			else if (chip->ecc.level == BRCMNAND_ECC_BCH_4) {
				if (mtd->writesize == 2048)
					chip->ecc.layout = &brcmnand_oob_bch4_2k;
				else {
					printk(KERN_ERR "Unsupported page size of %d\n",
						mtd->writesize);
					BUG();
				}
			} else {
				/* Any other ECC level would leave ecc.layout NULL
				 * and oops below when oobavail is computed.
				 */
				printk(KERN_ERR "Unsupported ECC level %d for oobsize 64\n",
					chip->ecc.level);
				BUG();
			}
2504			break;
2505		case 128:
2506			if (chip->ecc.level == BRCMNAND_ECC_HAMMING)
2507				chip->ecc.layout = &brcmnand_oob_128;
			else {
				printk(KERN_ERR "Unsupported ECC level %d for oobsize 128\n",
					chip->ecc.level);
				BUG();
			}
2513			break;
2514		default:
2515			printk(KERN_WARNING "No oob scheme defined for "
2516			       "oobsize %d\n", mtd->oobsize);
2517			BUG();
2518		}
2519	}
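	/*
	 * Layouts accepted above: 16B OOB takes Hamming or the BCH-4 layout,
	 * 64B OOB takes Hamming or BCH-4 on 2048B pages, and 128B OOB takes
	 * Hamming only; any other combination hits BUG() before ecc.layout is
	 * dereferenced below.
	 */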
2520
2521	if (!chip->write_page)
2522		chip->write_page = brcmnand_write_page;
2523
2524	switch (chip->ecc.mode) {
2525	case NAND_ECC_HW:
2526		if (!chip->ecc.read_page)
2527			chip->ecc.read_page = brcmnand_read_page_hwecc;
2528		if (!chip->ecc.write_page)
2529			chip->ecc.write_page = brcmnand_write_page_hwecc;
2530		if (!chip->ecc.read_oob)
2531			chip->ecc.read_oob = brcmnand_read_oob_hwecc;
2532		if (!chip->ecc.write_oob)
2533			chip->ecc.write_oob = brcmnand_write_oob_hwecc;
2534		break;
2535	case NAND_ECC_SOFT:
2536		break;
2537	case NAND_ECC_NONE:
2538		break;
2539	default:
2540		printk(KERN_WARNING "Invalid NAND_ECC_MODE %d\n",
2541		       chip->ecc.mode);
2542		BUG();
2543		break;
2544	}
2545
2546	/*
2547	 * The number of bytes available for a client to place data into
2548	 * the out of band area
2549	 */
2550	chip->ecc.layout->oobavail = 0;
2551	for (i = 0; chip->ecc.layout->oobfree[i].length; i++)
2552		chip->ecc.layout->oobavail +=
2553			chip->ecc.layout->oobfree[i].length;
2554	mtd->oobavail = chip->ecc.layout->oobavail;
2555
2556	/*
2557	 * Set the number of read / write steps for one page
2558	 */
2559	chip->ecc.size = NFL_SECTOR_SIZE; /* Fixed for Broadcom controller. */
2560	chip->ecc.oobsize = 16; /* Fixed for Hamming code or 4-bit BCH for now. */
2561	chip->ecc.bytes = brcmnand_eccbytes[chip->ecc.level];
2562	chip->ecc.steps = mtd->writesize / chip->ecc.size;
2563	if (chip->ecc.steps * chip->ecc.size != mtd->writesize) {
2564		printk(KERN_WARNING "Invalid ecc parameters\n");
2565		BUG();
2566	}
2567	chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
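	/* e.g. assuming the usual 512B NFL_SECTOR_SIZE, a 2048B page gives
	 * ecc.steps = 2048 / 512 = 4, so ecc.total = 4 * ecc.bytes per page.
	 */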
2568
2569	/*
2570	 * Allow subpage writes up to ecc.steps. Not possible for MLC
2571	 * FLASH.
2572	 */
2573	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
2574	    !(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
2575		switch (chip->ecc.steps) {
2576		case 2:
2577			mtd->subpage_sft = 1;
2578			break;
2579		case 4:
2580			mtd->subpage_sft = 2;
2581			break;
2582		case 8:
2583			mtd->subpage_sft = 3;
2584			break;
2585		}
2586	}
2587	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
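	/* e.g. with 4 ECC steps subpage_sft = 2, so a 2048B page supports
	 * 2048 >> 2 = 512B subpage writes.
	 */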
2588
2589	/* Initialize state */
2590	chip->state = FL_READY;
2591
2592	/* Invalidate the pagebuffer reference */
2593	chip->pagebuf = -1;
2594
2595	if (!chip->block_markbad)
2596		chip->block_markbad = brcmnand_default_block_markbad;
2597	if (!chip->scan_bbt)
2598		chip->scan_bbt = brcmnand_default_bbt;
2599	if (!chip->erase_bbt)
2600		chip->erase_bbt = brcmnand_erase_bbt;
2601
2602	if (!chip->get_device)
2603		chip->get_device = brcmnand_get_device;
2604	if (!chip->release_device)
2605		chip->release_device = brcmnand_release_device;
2606
2607	mtd->type = MTD_NANDFLASH;
2608	mtd->flags = MTD_CAP_NANDFLASH;
2609	mtd->erase = brcmnand_erase;
2610	mtd->point = NULL;
2611	mtd->unpoint = NULL;
2612	mtd->read = brcmnand_read;
2613	mtd->write = brcmnand_write;
2614	mtd->read_oob = brcmnand_read_oob;
2615	mtd->write_oob = brcmnand_write_oob;
2616	mtd->sync = brcmnand_sync;
2617	mtd->lock = NULL;
2618	mtd->unlock = NULL;
2619	mtd->suspend = brcmnand_suspend;
2620	mtd->resume = brcmnand_resume;
2621	mtd->block_isbad = brcmnand_block_isbad;
2622	mtd->block_markbad = brcmnand_block_markbad;
2623
2624	/* propagate ecc.layout to mtd_info */
2625	mtd->ecclayout = chip->ecc.layout;
2626
2627	/* we're in mtd function now, use get/release device to protect ourselves */
2628	brcmnand_get_device(chip, mtd, FL_WRITING);
2629	ret = chip->scan_bbt(mtd);
2630	brcmnand_release_device(mtd);
2631
2632	if (ret) {
2633		printk(KERN_ERR "brcmnand: scan_bbt failed\n");
2634		goto fail;
2635	}
2636
2637init_partitions:
2638#ifdef CONFIG_MTD_PARTITIONS
	parts = init_brcmnand_mtd_partitions(mtd, mtd->size);
	if (!parts) {
		ret = -ENODEV;
		goto fail;
	}
2642	for (i = 0; parts[i].name; i++)
2643		;
2644
2645	ret = add_mtd_partitions(mtd, parts, i);
2646	if (ret) {
		printk(KERN_ERR "brcmnand: add_mtd_partitions failed\n");
2648		goto fail;
2649	}
2650	brcmnand_info.parts = parts;
2651#endif
2652	return 0;
2653
2654fail:
2655	if (brcmnand_info.cc)
2656		iounmap((void *) brcmnand_info.cc);
2657	if (brcmnand_info.sih)
2658		si_detach(brcmnand_info.sih);
2659	if (page_buffer)
2660		kfree(page_buffer);
2661	return ret;
2662}
2663
2664static void __exit
2665brcmnand_mtd_exit(void)
2666{
2667#ifdef CONFIG_MTD_PARTITIONS
2668	del_mtd_partitions(&brcmnand_info.mtd);
2669#else
2670	del_mtd_device(&brcmnand_info.mtd);
2671#endif
2672	iounmap((void *) brcmnand_info.cc);
2673	si_detach(brcmnand_info.sih);
2674}
2675
2676module_init(brcmnand_mtd_init);
2677module_exit(brcmnand_mtd_exit);
2678