/*
 * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
 * Copyright © 2004 Micron Technology Inc.
 * Copyright © 2004 David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define CONFIG_MTD_NAND_OMAP_HWECC

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/slab.h>

#include <plat/dma.h>
#include <plat/gpmc.h>
#include <plat/nand.h>

#define	DRIVER_NAME	"omap2-nand"

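/*
 * Hamming ECC helpers: NAND_Ecc_PNe/NAND_Ecc_PNo mark the even/odd parity
 * bits within a 32-bit ECC word, and the P*() macros below pick out one of
 * those bits (via TF()) and shift it to the bit position it occupies in a
 * rebuilt ECC byte (see gen_true_ecc()).
 */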
#define NAND_Ecc_P1e		(1 << 0)
#define NAND_Ecc_P2e		(1 << 1)
#define NAND_Ecc_P4e		(1 << 2)
#define NAND_Ecc_P8e		(1 << 3)
#define NAND_Ecc_P16e		(1 << 4)
#define NAND_Ecc_P32e		(1 << 5)
#define NAND_Ecc_P64e		(1 << 6)
#define NAND_Ecc_P128e		(1 << 7)
#define NAND_Ecc_P256e		(1 << 8)
#define NAND_Ecc_P512e		(1 << 9)
#define NAND_Ecc_P1024e		(1 << 10)
#define NAND_Ecc_P2048e		(1 << 11)

#define NAND_Ecc_P1o		(1 << 16)
#define NAND_Ecc_P2o		(1 << 17)
#define NAND_Ecc_P4o		(1 << 18)
#define NAND_Ecc_P8o		(1 << 19)
#define NAND_Ecc_P16o		(1 << 20)
#define NAND_Ecc_P32o		(1 << 21)
#define NAND_Ecc_P64o		(1 << 22)
#define NAND_Ecc_P128o		(1 << 23)
#define NAND_Ecc_P256o		(1 << 24)
#define NAND_Ecc_P512o		(1 << 25)
#define NAND_Ecc_P1024o		(1 << 26)
#define NAND_Ecc_P2048o		(1 << 27)

#define TF(value)	(value ? 1 : 0)

#define P2048e(a)	(TF(a & NAND_Ecc_P2048e)	<< 0)
#define P2048o(a)	(TF(a & NAND_Ecc_P2048o)	<< 1)
#define P1e(a)		(TF(a & NAND_Ecc_P1e)		<< 2)
#define P1o(a)		(TF(a & NAND_Ecc_P1o)		<< 3)
#define P2e(a)		(TF(a & NAND_Ecc_P2e)		<< 4)
#define P2o(a)		(TF(a & NAND_Ecc_P2o)		<< 5)
#define P4e(a)		(TF(a & NAND_Ecc_P4e)		<< 6)
#define P4o(a)		(TF(a & NAND_Ecc_P4o)		<< 7)

#define P8e(a)		(TF(a & NAND_Ecc_P8e)		<< 0)
#define P8o(a)		(TF(a & NAND_Ecc_P8o)		<< 1)
#define P16e(a)		(TF(a & NAND_Ecc_P16e)		<< 2)
#define P16o(a)		(TF(a & NAND_Ecc_P16o)		<< 3)
#define P32e(a)		(TF(a & NAND_Ecc_P32e)		<< 4)
#define P32o(a)		(TF(a & NAND_Ecc_P32o)		<< 5)
#define P64e(a)		(TF(a & NAND_Ecc_P64e)		<< 6)
#define P64o(a)		(TF(a & NAND_Ecc_P64o)		<< 7)

#define P128e(a)	(TF(a & NAND_Ecc_P128e)		<< 0)
#define P128o(a)	(TF(a & NAND_Ecc_P128o)		<< 1)
#define P256e(a)	(TF(a & NAND_Ecc_P256e)		<< 2)
#define P256o(a)	(TF(a & NAND_Ecc_P256o)		<< 3)
#define P512e(a)	(TF(a & NAND_Ecc_P512e)		<< 4)
#define P512o(a)	(TF(a & NAND_Ecc_P512o)		<< 5)
#define P1024e(a)	(TF(a & NAND_Ecc_P1024e)	<< 6)
#define P1024o(a)	(TF(a & NAND_Ecc_P1024o)	<< 7)

#define P8e_s(a)	(TF(a & NAND_Ecc_P8e)		<< 0)
#define P8o_s(a)	(TF(a & NAND_Ecc_P8o)		<< 1)
#define P16e_s(a)	(TF(a & NAND_Ecc_P16e)		<< 2)
#define P16o_s(a)	(TF(a & NAND_Ecc_P16o)		<< 3)
#define P1e_s(a)	(TF(a & NAND_Ecc_P1e)		<< 4)
#define P1o_s(a)	(TF(a & NAND_Ecc_P1o)		<< 5)
#define P2e_s(a)	(TF(a & NAND_Ecc_P2e)		<< 6)
#define P2o_s(a)	(TF(a & NAND_Ecc_P2o)		<< 7)

#define P4e_s(a)	(TF(a & NAND_Ecc_P4e)		<< 0)
#define P4o_s(a)	(TF(a & NAND_Ecc_P4o)		<< 1)

#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL };
#endif

#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH
static int use_prefetch = 1;

/* "modprobe ... use_prefetch=0" etc */
module_param(use_prefetch, bool, 0);
MODULE_PARM_DESC(use_prefetch, "enable/disable use of PREFETCH");

#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
static int use_dma = 1;

/* "modprobe ... use_dma=0" etc */
module_param(use_dma, bool, 0);
MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
#else
const int use_dma;
#endif
#else
const int use_prefetch;
const int use_dma;
#endif

struct omap_nand_info {
	struct nand_hw_control		controller;
	struct omap_nand_platform_data	*pdata;
	struct mtd_info			mtd;
	struct mtd_partition		*parts;
	struct nand_chip		nand;
	struct platform_device		*pdev;

	int				gpmc_cs;
	unsigned long			phys_base;
	struct completion		comp;
	int				dma_ch;
};

/**
 * omap_hwcontrol - hardware specific access to control-lines
 * @mtd: MTD device structure
 * @cmd: command to device
 * @ctrl:
 * NAND_NCE: bit 0 -> don't care
 * NAND_CLE: bit 1 -> Command Latch
 * NAND_ALE: bit 2 -> Address Latch
 *
 * NOTE: boards may use different bits for these!!
 */
static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
	struct omap_nand_info *info = container_of(mtd,
					struct omap_nand_info, mtd);

	if (cmd != NAND_CMD_NONE) {
		if (ctrl & NAND_CLE)
			gpmc_nand_write(info->gpmc_cs, GPMC_NAND_COMMAND, cmd);

		else if (ctrl & NAND_ALE)
			gpmc_nand_write(info->gpmc_cs, GPMC_NAND_ADDRESS, cmd);

		else /* NAND_NCE */
			gpmc_nand_write(info->gpmc_cs, GPMC_NAND_DATA, cmd);
	}
}

/**
 * omap_read_buf8 - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
{
	struct nand_chip *nand = mtd->priv;

	ioread8_rep(nand->IO_ADDR_R, buf, len);
}

/**
 * omap_write_buf8 - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	u_char *p = (u_char *)buf;
	u32	status = 0;

	while (len--) {
		iowrite8(*p++, info->nand.IO_ADDR_W);
		/* wait until buffer is available for write */
		do {
			status = gpmc_read_status(GPMC_STATUS_BUFFER);
		} while (!status);
	}
}

/**
 * omap_read_buf16 - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
{
	struct nand_chip *nand = mtd->priv;

	ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
}

/**
 * omap_write_buf16 - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	u16 *p = (u16 *) buf;
	u32	status = 0;
	len >>= 1;

	while (len--) {
		iowrite16(*p++, info->nand.IO_ADDR_W);
		/* wait until buffer is available for write */
		do {
			status = gpmc_read_status(GPMC_STATUS_BUFFER);
		} while (!status);
	}
}

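/*
 * The two *_pref() helpers below use the GPMC prefetch/write-posting (PFPW)
 * engine: gpmc_prefetch_enable() claims the engine for this chip-select and,
 * if it is already busy, the code simply falls back to the plain CPU copy
 * routines above. Data is then moved through the engine's FIFO, whose fill
 * level is polled via GPMC_PREFETCH_FIFO_CNT.
 */
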
/**
 * omap_read_buf_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	uint32_t r_count = 0;
	int ret = 0;
	u32 *p = (u32 *)buf;

	/* take care of subpage reads */
	if (len % 4) {
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, buf, len % 4);
		else
			omap_read_buf8(mtd, buf, len % 4);
		p = (u32 *) (buf + len % 4);
		len -= len % 4;
	}

	/* configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, buf, len);
		else
			omap_read_buf8(mtd, buf, len);
	} else {
		p = (u32 *) buf;
		do {
			r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
			r_count = r_count >> 2;
			ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
			p += r_count;
			len -= r_count << 2;
		} while (len);
		/* disable and stop the PFPW engine */
		gpmc_prefetch_reset(info->gpmc_cs);
	}
}

/**
 * omap_write_buf_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	uint32_t pref_count = 0, w_count = 0;
	int i = 0, ret = 0;
	u16 *p;

	/* take care of subpage writes */
	if (len % 2 != 0) {
		writeb(*buf, info->nand.IO_ADDR_W);
		p = (u16 *)(buf + 1);
		len--;
	}

	/*  configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x1);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_write_buf16(mtd, buf, len);
		else
			omap_write_buf8(mtd, buf, len);
	} else {
		p = (u16 *) buf;
		while (len) {
			w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
			w_count = w_count >> 1;
			for (i = 0; (i < w_count) && len; i++, len -= 2)
				iowrite16(*p++, info->nand.IO_ADDR_W);
		}
		/* wait for data to be flushed out before resetting the prefetch */
		do {
			pref_count = gpmc_read_status(GPMC_PREFETCH_COUNT);
		} while (pref_count);
		/* disable and stop the PFPW engine */
		gpmc_prefetch_reset(info->gpmc_cs);
	}
}

#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
/*
 * omap_nand_dma_cb: callback on the completion of dma transfer
 * @lch: logical channel
 * @ch_status: channel status
 * @data: pointer to completion data structure
 */
static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
{
	complete((struct completion *) data);
}

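/*
 * DMA geometry used below: the transfer is programmed as 32-bit elements,
 * 0x10 (16) elements per frame, i.e. 64 bytes per frame to match the GPMC
 * FIFO depth, with one sync event per frame; buf_len = len >> 6 is therefore
 * the number of frames.
 */
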
/*
 * omap_nand_dma_transfer: configure and start dma transfer
 * @mtd: MTD device structure
 * @addr: virtual address in RAM of source/destination
 * @len: number of data bytes to be transferred
 * @is_write: flag for read/write operation
 */
static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
					unsigned int len, int is_write)
{
	struct omap_nand_info *info = container_of(mtd,
					struct omap_nand_info, mtd);
	uint32_t prefetch_status = 0;
	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
							DMA_FROM_DEVICE;
	dma_addr_t dma_addr;
	int ret;

	/* The fifo depth is 64 bytes. We have a sync at each frame and frame
	 * length is 64 bytes.
	 */
	int buf_len = len >> 6;

	if (addr >= high_memory) {
		struct page *p1;

		if (((size_t)addr & PAGE_MASK) !=
			((size_t)(addr + len - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(addr);
		if (!p1)
			goto out_copy;
		addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
	}

	dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
	if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
		dev_err(&info->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n", len);
		goto out_copy;
	}

	if (is_write) {
	    omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
						info->phys_base, 0, 0);
	    omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
							dma_addr, 0, 0);
	    omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
					0x10, buf_len, OMAP_DMA_SYNC_FRAME,
					OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC);
	} else {
	    omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
						info->phys_base, 0, 0);
	    omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
							dma_addr, 0, 0);
	    omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
					0x10, buf_len, OMAP_DMA_SYNC_FRAME,
					OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
	}
	/*  configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x1, len, is_write);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	init_completion(&info->comp);

	omap_start_dma(info->dma_ch);

	/* setup and start DMA using dma_addr */
	wait_for_completion(&info->comp);

	do {
		prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT);
	} while (prefetch_status);
	/* disable and stop the PFPW engine */
	gpmc_prefetch_reset(info->gpmc_cs);

	dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
	return 0;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
			: omap_write_buf16(mtd, (u_char *) addr, len);
	else
		is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
			: omap_write_buf8(mtd, (u_char *) addr, len);
	return 0;
}
#else
static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) {}
static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
					unsigned int len, int is_write)
{
	return 0;
}
#endif

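/*
 * In the *_dma_pref() wrappers below, transfers no larger than the OOB area
 * stay on the prefetch-PIO path; only larger (page-sized) transfers are
 * handed to the DMA path.
 */
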
/**
 * omap_read_buf_dma_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	if (len <= mtd->oobsize)
		omap_read_buf_pref(mtd, buf, len);
	else
		/* start transfer in DMA mode */
		omap_nand_dma_transfer(mtd, buf, len, 0x0);
}

/**
 * omap_write_buf_dma_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf_dma_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	if (len <= mtd->oobsize)
		omap_write_buf_pref(mtd, buf, len);
	else
		/* start transfer in DMA mode */
		omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
}

/**
 * omap_verify_buf - Verify chip data against buffer
 * @mtd: MTD device structure
 * @buf: buffer containing the data to compare
 * @len: number of bytes to compare
 */
static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	u16 *p = (u16 *) buf;

	len >>= 1;
	while (len--) {
		if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
			return -EFAULT;
	}

	return 0;
}

#ifdef CONFIG_MTD_NAND_OMAP_HWECC

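/*
 * gen_true_ecc() below packs the three ECC bytes into one 32-bit word and
 * re-derives the parity bits with the P*() macros, so that the ECC read from
 * the spare area and the ECC produced by the GPMC hardware end up in the
 * same bit layout before omap_compare_ecc() diffs them.
 */
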
/**
 * gen_true_ecc - This function will generate true ECC value
 * @ecc_buf: buffer to store ecc code
 *
 * This generated true ECC value can be used when correcting
 * data read from NAND flash memory core
 */
static void gen_true_ecc(u8 *ecc_buf)
{
	u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
		((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);

	ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
			P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
	ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
			P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
	ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
			P1e(tmp) | P2048o(tmp) | P2048e(tmp));
}

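/*
 * In omap_compare_ecc() the two ECC values are expanded to individual bits
 * and XORed; ecc_sum is the number of differing bits. 0 means the codes
 * match, 12 means a single-bit error in the data whose byte and bit position
 * are reassembled from the odd-parity bits, and any other count is reported
 * as uncorrectable (a page whose stored ECC is all 0xFF, i.e. an erased
 * page, is let through in the default case).
 */
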
/**
 * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
 * @ecc_data1:  ecc code from nand spare area
 * @ecc_data2:  ecc code from hardware register obtained from hardware ecc
 * @page_data:  page data
 *
 * This function compares two ECC's and indicates if there is an error.
 * If the error can be corrected it will be corrected in the buffer.
 */
static int omap_compare_ecc(u8 *ecc_data1,	/* read from NAND memory */
			    u8 *ecc_data2,	/* read from register */
			    u8 *page_data)
{
	uint	i;
	u8	tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
	u8	comp0_bit[8], comp1_bit[8], comp2_bit[8];
	u8	ecc_bit[24];
	u8	ecc_sum = 0;
	u8	find_bit = 0;
	uint	find_byte = 0;
	int	isEccFF;

	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);

	gen_true_ecc(ecc_data1);
	gen_true_ecc(ecc_data2);

	for (i = 0; i <= 2; i++) {
		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
		*(ecc_data2 + i) = ~(*(ecc_data2 + i));
	}

	for (i = 0; i < 8; i++) {
		tmp0_bit[i]     = *ecc_data1 % 2;
		*ecc_data1	= *ecc_data1 / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp1_bit[i]	 = *(ecc_data1 + 1) % 2;
		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp2_bit[i]	 = *(ecc_data1 + 2) % 2;
		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp0_bit[i]     = *ecc_data2 % 2;
		*ecc_data2       = *ecc_data2 / 2;
	}

	for (i = 0; i < 8; i++) {
		comp1_bit[i]     = *(ecc_data2 + 1) % 2;
		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp2_bit[i]     = *(ecc_data2 + 2) % 2;
		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
	}

	for (i = 0; i < 6; i++)
		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];

	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];

	for (i = 0; i < 24; i++)
		ecc_sum += ecc_bit[i];

	switch (ecc_sum) {
	case 0:
		/* Not reached because this function is not called if
		 *  ECC values are equal
		 */
		return 0;

	case 1:
		/* Uncorrectable error */
		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
		return -1;

	case 11:
		/* Uncorrectable error */
		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n");
		return -1;

	case 12:
		/* Correctable error */
		find_byte = (ecc_bit[23] << 8) +
			    (ecc_bit[21] << 7) +
			    (ecc_bit[19] << 6) +
			    (ecc_bit[17] << 5) +
			    (ecc_bit[15] << 4) +
			    (ecc_bit[13] << 3) +
			    (ecc_bit[11] << 2) +
			    (ecc_bit[9]  << 1) +
			    ecc_bit[7];

		find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];

		DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at "
				"offset: %d, bit: %d\n", find_byte, find_bit);

		page_data[find_byte] ^= (1 << find_bit);

		return 0;
	default:
		if (isEccFF) {
			if (ecc_data2[0] == 0 &&
			    ecc_data2[1] == 0 &&
			    ecc_data2[2] == 0)
				return 0;
		}
		DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
		return -1;
	}
}

/**
 * omap_correct_data - Compares the ECC read with HW generated ECC
 * @mtd: MTD device structure
 * @dat: page data
 * @read_ecc: ecc read from nand flash
 * @calc_ecc: ecc read from HW ECC registers
 *
 * Compares the ecc read from the nand spare area with the ECC register
 * values and, if they mismatch, calls 'omap_compare_ecc' for error
 * detection and correction.
 */
static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
				u_char *read_ecc, u_char *calc_ecc)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	int blockCnt = 0, i = 0, ret = 0;

	/* Ex NAND_ECC_HW12_2048 */
	if ((info->nand.ecc.mode == NAND_ECC_HW) &&
			(info->nand.ecc.size  == 2048))
		blockCnt = 4;
	else
		blockCnt = 1;

	for (i = 0; i < blockCnt; i++) {
		if (memcmp(read_ecc, calc_ecc, 3) != 0) {
			ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
			if (ret < 0)
				return ret;
		}
		read_ecc += 3;
		calc_ecc += 3;
		dat      += 512;
	}
	return 0;
}

/**
 * omap_calculate_ecc - Generate non-inverted ECC bytes.
 * @mtd: MTD device structure
 * @dat: The pointer to data on which ecc is computed
 * @ecc_code: The ecc_code buffer
 *
 * Using noninverted ECC can be considered ugly since writing a blank
 * page, i.e. padding, will clear the ECC bytes. This is no problem as long
 * as nobody is trying to write data on the seemingly unused page. Reading
 * an erased page will produce an ECC mismatch between generated and read
 * ECC bytes that has to be dealt with separately.
 */
static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
				u_char *ecc_code)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	return gpmc_calculate_ecc(info->gpmc_cs, dat, ecc_code);
}

/**
 * omap_enable_hwecc - This function enables the hardware ecc functionality
 * @mtd: MTD device structure
 * @mode: Read/Write mode
 */
static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	struct nand_chip *chip = mtd->priv;
	unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;

	gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
}

#endif

/**
 * omap_wait - wait until the command is done
 * @mtd: MTD device structure
 * @chip: NAND Chip structure
 *
 * The wait function is called during program and erase operations; given
 * the way it is called from the MTD layer, we should wait until the NAND
 * chip is ready after the program/erase operation has completed.
 *
 * Erase can take up to 400ms and program up to 20ms according to
 * general NAND and SmartMedia specs
 */
static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct nand_chip *this = mtd->priv;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	unsigned long timeo = jiffies;
	int status = NAND_STATUS_FAIL, state = this->state;

	if (state == FL_ERASING)
		timeo += (HZ * 400) / 1000;
	else
		timeo += (HZ * 20) / 1000;

	gpmc_nand_write(info->gpmc_cs,
			GPMC_NAND_COMMAND, (NAND_CMD_STATUS & 0xFF));
	while (time_before(jiffies, timeo)) {
		status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
		if (status & NAND_STATUS_READY)
			break;
		cond_resched();
	}
	return status;
}

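/*
 * omap_dev_ready() below samples bit 8 (0x100) of the GPMC IRQ status as the
 * ready indication. If the bit is set on entry it is written back via
 * GPMC_SET_IRQ_STATUS to clear it; otherwise the status register is polled
 * a bounded number of times (0x1FF reads) before the device is reported
 * ready.
 */
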
/**
 * omap_dev_ready - calls the platform specific dev_ready function
 * @mtd: MTD device structure
 */
static int omap_dev_ready(struct mtd_info *mtd)
{
	unsigned int val = 0;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);

	val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
	if ((val & 0x100) == 0x100) {
		/* Clear IRQ Interrupt */
		val |= 0x100;
		val &= ~(0x0);
		gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, val);
	} else {
		unsigned int cnt = 0;
		while (cnt++ < 0x1FF) {
			if  ((val & 0x100) == 0x100)
				return 0;
			val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
		}
	}

	return 1;
}

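/*
 * Probe wires up one of three transfer paths based on the module parameters:
 * plain PIO (omap_read/write_buf8 or _buf16), prefetch PIO
 * (omap_*_buf_pref), or prefetch plus DMA (omap_*_buf_dma_pref) when a GPMC
 * DMA channel can be obtained; if the DMA request fails it quietly drops
 * back to the prefetch-PIO path.
 */
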
static int __devinit omap_nand_probe(struct platform_device *pdev)
{
	struct omap_nand_info		*info;
	struct omap_nand_platform_data	*pdata;
	int				err;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	platform_set_drvdata(pdev, info);

	spin_lock_init(&info->controller.lock);
	init_waitqueue_head(&info->controller.wq);

	info->pdev = pdev;

	info->gpmc_cs		= pdata->cs;
	info->phys_base		= pdata->phys_base;

	info->mtd.priv		= &info->nand;
	info->mtd.name		= dev_name(&pdev->dev);
	info->mtd.owner		= THIS_MODULE;

	info->nand.options	|= pdata->devsize ? NAND_BUSWIDTH_16 : 0;
	info->nand.options	|= NAND_SKIP_BBTSCAN;

	/* NAND write protect off */
	gpmc_cs_configure(info->gpmc_cs, GPMC_CONFIG_WP, 0);

	if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
				pdev->dev.driver->name)) {
		err = -EBUSY;
		goto out_free_info;
	}

	info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
	if (!info->nand.IO_ADDR_R) {
		err = -ENOMEM;
		goto out_release_mem_region;
	}

	info->nand.controller = &info->controller;

	info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
	info->nand.cmd_ctrl  = omap_hwcontrol;

	/*
	 * If the RDY/BSY line is connected to the OMAP, use the omap ready
	 * function and the generic nand_wait function, which reads the status
	 * register after monitoring the RDY/BSY line. Otherwise use a standard
	 * chip delay, which is slightly more than tR (AC Timing) of the NAND
	 * device, and read the status register until you get a failure or
	 * success.
	 */
	if (pdata->dev_ready) {
		info->nand.dev_ready = omap_dev_ready;
		info->nand.chip_delay = 0;
	} else {
		info->nand.waitfunc = omap_wait;
		info->nand.chip_delay = 50;
	}

	if (use_prefetch) {

		info->nand.read_buf   = omap_read_buf_pref;
		info->nand.write_buf  = omap_write_buf_pref;
		if (use_dma) {
			err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
				omap_nand_dma_cb, &info->comp, &info->dma_ch);
			if (err < 0) {
				info->dma_ch = -1;
				printk(KERN_WARNING "DMA request failed."
					" Non-dma data transfer mode\n");
			} else {
				omap_set_dma_dest_burst_mode(info->dma_ch,
						OMAP_DMA_DATA_BURST_16);
				omap_set_dma_src_burst_mode(info->dma_ch,
						OMAP_DMA_DATA_BURST_16);

				info->nand.read_buf   = omap_read_buf_dma_pref;
				info->nand.write_buf  = omap_write_buf_dma_pref;
			}
		}
	} else {
		if (info->nand.options & NAND_BUSWIDTH_16) {
			info->nand.read_buf   = omap_read_buf16;
			info->nand.write_buf  = omap_write_buf16;
		} else {
			info->nand.read_buf   = omap_read_buf8;
			info->nand.write_buf  = omap_write_buf8;
		}
	}
	info->nand.verify_buf = omap_verify_buf;

#ifdef CONFIG_MTD_NAND_OMAP_HWECC
	info->nand.ecc.bytes		= 3;
	info->nand.ecc.size		= 512;
	info->nand.ecc.calculate	= omap_calculate_ecc;
	info->nand.ecc.hwctl		= omap_enable_hwecc;
	info->nand.ecc.correct		= omap_correct_data;
	info->nand.ecc.mode		= NAND_ECC_HW;

#else
	info->nand.ecc.mode = NAND_ECC_SOFT;
#endif

	/* DIP switches on some boards change between 8 and 16 bit
	 * bus widths for flash.  Try the other width if the first try fails.
	 */
	if (nand_scan(&info->mtd, 1)) {
		info->nand.options ^= NAND_BUSWIDTH_16;
		if (nand_scan(&info->mtd, 1)) {
			err = -ENXIO;
			goto out_release_mem_region;
		}
	}

#ifdef CONFIG_MTD_PARTITIONS
	err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
	if (err > 0)
		add_mtd_partitions(&info->mtd, info->parts, err);
	else if (pdata->parts)
		add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts);
	else
#endif
		add_mtd_device(&info->mtd);

	platform_set_drvdata(pdev, &info->mtd);

	return 0;

out_release_mem_region:
	release_mem_region(info->phys_base, NAND_IO_SIZE);
out_free_info:
	kfree(info);

	return err;
}

static int omap_nand_remove(struct platform_device *pdev)
{
	struct mtd_info *mtd = platform_get_drvdata(pdev);
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);

	platform_set_drvdata(pdev, NULL);
	if (use_dma)
		omap_free_dma(info->dma_ch);

	/* Release NAND device, its internal structures and partitions */
	nand_release(&info->mtd);
	iounmap(info->nand.IO_ADDR_R);
	/* free the omap_nand_info allocated in probe, not a pointer into it */
	kfree(info);
	return 0;
}

static struct platform_driver omap_nand_driver = {
	.probe		= omap_nand_probe,
	.remove		= omap_nand_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init omap_nand_init(void)
{
	printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME);

	/* This check is required if the driver is being
	 * loaded at run time as a module
	 */
	if ((1 == use_dma) && (0 == use_prefetch)) {
		printk(KERN_INFO "Wrong parameters: 'use_dma' cannot be 1 "
				"without 'use_prefetch'. Prefetch will not be"
				" used in either mode (mpu or dma)\n");
	}
	return platform_driver_register(&omap_nand_driver);
}

static void __exit omap_nand_exit(void)
{
	platform_driver_unregister(&omap_nand_driver);
}

module_init(omap_nand_init);
module_exit(omap_nand_exit);

MODULE_ALIAS(DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");