• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/drivers/staging/spectra/
1/*
2 * NAND Flash Controller Device Driver
3 * Copyright (c) 2009, Intel Corporation and its suppliers.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
20#include "lld.h"
21#include "lld_nand.h"
22#include "lld_cdma.h"
23
24#include "spectraswconfig.h"
25#include "flash.h"
26#include "ffsdefs.h"
27
28#include <linux/interrupt.h>
29#include <linux/delay.h>
30#include <linux/wait.h>
31#include <linux/mutex.h>
32
33#include "nand_regs.h"
34
#define SPECTRA_NAND_NAME    "nd"

/* Integer division of X by Y, rounding up */
#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
#define MAX_PAGES_PER_RW        128

/* Operation codes for the current interrupt-driven transfer; presumably
 * consumed by the interrupt path (not visible in this chunk) */
#define INT_IDLE_STATE                 0
#define INT_READ_PAGE_MAIN    0x01
#define INT_WRITE_PAGE_MAIN    0x02
#define INT_PIPELINE_READ_AHEAD    0x04
#define INT_PIPELINE_WRITE_AHEAD    0x08
#define INT_MULTI_PLANE_READ    0x10
/* NOTE(review): 0x11 equals INT_READ_PAGE_MAIN|INT_MULTI_PLANE_READ, so
 * these values only work as plain codes, not OR-able flags -- confirm */
#define INT_MULTI_PLANE_WRITE    0x11

/* Non-zero when hardware ECC is in use; read by the bad-block check and
 * the spare-area read path below */
static u32 enable_ecc;

struct mrst_nand_info info;

/* Count of banks that answered the Read ID probe, and a per-bank flag
 * (filled in by find_valid_banks()) */
int totalUsedBanks;
u32 GLOB_valid_banks[LLD_MAX_FLASH_BANKS];

/* MMIO bases: controller register block and indexed address/data port */
void __iomem *FlashReg;
void __iomem *FlashMem;

/* Default controller configuration words; the register each entry maps
 * to is not shown in this file -- check the consumer of this table
 * before changing any value */
u16 conf_parameters[] = {
	0x0000,
	0x0000,
	0x01F4,
	0x01F4,
	0x01F4,
	0x01F4,
	0x0000,
	0x0000,
	0x0001,
	0x0000,
	0x0000,
	0x0000,
	0x0000,
	0x0040,
	0x0001,
	0x000A,
	0x000A,
	0x000A,
	0x0000,
	0x0000,
	0x0005,
	0x0012,
	0x000C
};
83
84u16   NAND_Get_Bad_Block(u32 block)
85{
86	u32 status = PASS;
87	u32 flag_bytes  = 0;
88	u32 skip_bytes  = DeviceInfo.wSpareSkipBytes;
89	u32 page, i;
90	u8 *pReadSpareBuf = buf_get_bad_block;
91
92	if (enable_ecc)
93		flag_bytes = DeviceInfo.wNumPageSpareFlag;
94
95	for (page = 0; page < 2; page++) {
96		status = NAND_Read_Page_Spare(pReadSpareBuf, block, page, 1);
97		if (status != PASS)
98			return READ_ERROR;
99		for (i = flag_bytes; i < (flag_bytes + skip_bytes); i++)
100			if (pReadSpareBuf[i] != 0xff)
101				return DEFECTIVE_BLOCK;
102	}
103
104	for (page = 1; page < 3; page++) {
105		status = NAND_Read_Page_Spare(pReadSpareBuf, block,
106			DeviceInfo.wPagesPerBlock - page , 1);
107		if (status != PASS)
108			return READ_ERROR;
109		for (i = flag_bytes; i < (flag_bytes + skip_bytes); i++)
110			if (pReadSpareBuf[i] != 0xff)
111				return DEFECTIVE_BLOCK;
112	}
113
114	return GOOD_BLOCK;
115}
116
117
118u16 NAND_Flash_Reset(void)
119{
120	u32 i;
121	u32 intr_status_rst_comp[4] = {INTR_STATUS0__RST_COMP,
122		INTR_STATUS1__RST_COMP,
123		INTR_STATUS2__RST_COMP,
124		INTR_STATUS3__RST_COMP};
125	u32 intr_status_time_out[4] = {INTR_STATUS0__TIME_OUT,
126		INTR_STATUS1__TIME_OUT,
127		INTR_STATUS2__TIME_OUT,
128		INTR_STATUS3__TIME_OUT};
129	u32 intr_status[4] = {INTR_STATUS0, INTR_STATUS1,
130		INTR_STATUS2, INTR_STATUS3};
131	u32 device_reset_banks[4] = {DEVICE_RESET__BANK0,
132		DEVICE_RESET__BANK1,
133		DEVICE_RESET__BANK2,
134		DEVICE_RESET__BANK3};
135
136	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
137		       __FILE__, __LINE__, __func__);
138
139	for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++)
140		iowrite32(intr_status_rst_comp[i] | intr_status_time_out[i],
141		FlashReg + intr_status[i]);
142
143	for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) {
144		iowrite32(device_reset_banks[i], FlashReg + DEVICE_RESET);
145		while (!(ioread32(FlashReg + intr_status[i]) &
146			(intr_status_rst_comp[i] | intr_status_time_out[i])))
147			;
148		if (ioread32(FlashReg + intr_status[i]) &
149			intr_status_time_out[i])
150			nand_dbg_print(NAND_DBG_WARN,
151			"NAND Reset operation timed out on bank %d\n", i);
152	}
153
154	for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
155		iowrite32(intr_status_rst_comp[i] | intr_status_time_out[i],
156			FlashReg + intr_status[i]);
157
158	return PASS;
159}
160
161static void NAND_ONFi_Timing_Mode(u16 mode)
162{
163	u16 Trea[6] = {40, 30, 25, 20, 20, 16};
164	u16 Trp[6] = {50, 25, 17, 15, 12, 10};
165	u16 Treh[6] = {30, 15, 15, 10, 10, 7};
166	u16 Trc[6] = {100, 50, 35, 30, 25, 20};
167	u16 Trhoh[6] = {0, 15, 15, 15, 15, 15};
168	u16 Trloh[6] = {0, 0, 0, 0, 5, 5};
169	u16 Tcea[6] = {100, 45, 30, 25, 25, 25};
170	u16 Tadl[6] = {200, 100, 100, 100, 70, 70};
171	u16 Trhw[6] = {200, 100, 100, 100, 100, 100};
172	u16 Trhz[6] = {200, 100, 100, 100, 100, 100};
173	u16 Twhr[6] = {120, 80, 80, 60, 60, 60};
174	u16 Tcs[6] = {70, 35, 25, 25, 20, 15};
175
176	u16 TclsRising = 1;
177	u16 data_invalid_rhoh, data_invalid_rloh, data_invalid;
178	u16 dv_window = 0;
179	u16 en_lo, en_hi;
180	u16 acc_clks;
181	u16 addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
182
183	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
184		       __FILE__, __LINE__, __func__);
185
186	en_lo = CEIL_DIV(Trp[mode], CLK_X);
187	en_hi = CEIL_DIV(Treh[mode], CLK_X);
188
189#if ONFI_BLOOM_TIME
190	if ((en_hi * CLK_X) < (Treh[mode] + 2))
191		en_hi++;
192#endif
193
194	if ((en_lo + en_hi) * CLK_X < Trc[mode])
195		en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);
196
197	if ((en_lo + en_hi) < CLK_MULTI)
198		en_lo += CLK_MULTI - en_lo - en_hi;
199
200	while (dv_window < 8) {
201		data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];
202
203		data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];
204
205		data_invalid =
206		    data_invalid_rhoh <
207		    data_invalid_rloh ? data_invalid_rhoh : data_invalid_rloh;
208
209		dv_window = data_invalid - Trea[mode];
210
211		if (dv_window < 8)
212			en_lo++;
213	}
214
215	acc_clks = CEIL_DIV(Trea[mode], CLK_X);
216
217	while (((acc_clks * CLK_X) - Trea[mode]) < 3)
218		acc_clks++;
219
220	if ((data_invalid - acc_clks * CLK_X) < 2)
221		nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n",
222			__FILE__, __LINE__);
223
224	addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
225	re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
226	re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
227	we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
228	cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
229	if (!TclsRising)
230		cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
231	if (cs_cnt == 0)
232		cs_cnt = 1;
233
234	if (Tcea[mode]) {
235		while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode])
236			cs_cnt++;
237	}
238
239#if MODE5_WORKAROUND
240	if (mode == 5)
241		acc_clks = 5;
242#endif
243
244	/* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
245	if ((ioread32(FlashReg + MANUFACTURER_ID) == 0) &&
246		(ioread32(FlashReg + DEVICE_ID) == 0x88))
247		acc_clks = 6;
248
249	iowrite32(acc_clks, FlashReg + ACC_CLKS);
250	iowrite32(re_2_we, FlashReg + RE_2_WE);
251	iowrite32(re_2_re, FlashReg + RE_2_RE);
252	iowrite32(we_2_re, FlashReg + WE_2_RE);
253	iowrite32(addr_2_data, FlashReg + ADDR_2_DATA);
254	iowrite32(en_lo, FlashReg + RDWR_EN_LO_CNT);
255	iowrite32(en_hi, FlashReg + RDWR_EN_HI_CNT);
256	iowrite32(cs_cnt, FlashReg + CS_SETUP_CNT);
257}
258
/*
 * Indexed controller write: latch @address into the index port at
 * FlashMem, then write @data to the data port at FlashMem + 0x10.
 */
static void index_addr(u32 address, u32 data)
{
	iowrite32(address, FlashMem);
	iowrite32(data, FlashMem + 0x10);
}
264
/*
 * Indexed controller read: latch @address into the index port at
 * FlashMem, then read the result from the data port at FlashMem + 0x10
 * into *@pdata.
 */
static void index_addr_read_data(u32 address, u32 *pdata)
{
	iowrite32(address, FlashMem);
	*pdata = ioread32(FlashMem + 0x10);
}
270
271static void set_ecc_config(void)
272{
273#if SUPPORT_8BITECC
274	if ((ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE) < 4096) ||
275		(ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE) <= 128))
276		iowrite32(8, FlashReg + ECC_CORRECTION);
277#endif
278
279	if ((ioread32(FlashReg + ECC_CORRECTION) & ECC_CORRECTION__VALUE)
280		== 1) {
281		DeviceInfo.wECCBytesPerSector = 4;
282		DeviceInfo.wECCBytesPerSector *= DeviceInfo.wDevicesConnected;
283		DeviceInfo.wNumPageSpareFlag =
284			DeviceInfo.wPageSpareSize -
285			DeviceInfo.wPageDataSize /
286			(ECC_SECTOR_SIZE * DeviceInfo.wDevicesConnected) *
287			DeviceInfo.wECCBytesPerSector
288			- DeviceInfo.wSpareSkipBytes;
289	} else {
290		DeviceInfo.wECCBytesPerSector =
291			(ioread32(FlashReg + ECC_CORRECTION) &
292			ECC_CORRECTION__VALUE) * 13 / 8;
293		if ((DeviceInfo.wECCBytesPerSector) % 2 == 0)
294			DeviceInfo.wECCBytesPerSector += 2;
295		else
296			DeviceInfo.wECCBytesPerSector += 1;
297
298		DeviceInfo.wECCBytesPerSector *= DeviceInfo.wDevicesConnected;
299		DeviceInfo.wNumPageSpareFlag = DeviceInfo.wPageSpareSize -
300			DeviceInfo.wPageDataSize /
301			(ECC_SECTOR_SIZE * DeviceInfo.wDevicesConnected) *
302			DeviceInfo.wECCBytesPerSector
303			- DeviceInfo.wSpareSkipBytes;
304	}
305}
306
/*
 * Probe an ONFI 1.0 device: reset the four banks in a dependency chain,
 * cache the ONFI capability registers, compute the total block count,
 * and program the fastest timing mode the device advertises.
 * Returns PASS, or FAIL when no valid timing mode is reported.
 */
static u16 get_onfi_nand_para(void)
{
	int i;
	u16 blks_lun_l, blks_lun_h, n_of_luns;
	u32 blockperlun, id;

	/* Reset bank 0 and busy-wait for completion or timeout */
	iowrite32(DEVICE_RESET__BANK0, FlashReg + DEVICE_RESET);

	while (!((ioread32(FlashReg + INTR_STATUS0) &
		INTR_STATUS0__RST_COMP) |
		(ioread32(FlashReg + INTR_STATUS0) &
		INTR_STATUS0__TIME_OUT)))
		;

	/* Each further bank is reset only if the previous one completed;
	 * a timeout stops the chain (note: no message is printed for a
	 * bank-0 or bank-3 timeout) */
	if (ioread32(FlashReg + INTR_STATUS0) & INTR_STATUS0__RST_COMP) {
		iowrite32(DEVICE_RESET__BANK1, FlashReg + DEVICE_RESET);
		while (!((ioread32(FlashReg + INTR_STATUS1) &
			INTR_STATUS1__RST_COMP) |
			(ioread32(FlashReg + INTR_STATUS1) &
			INTR_STATUS1__TIME_OUT)))
			;

		if (ioread32(FlashReg + INTR_STATUS1) &
			INTR_STATUS1__RST_COMP) {
			iowrite32(DEVICE_RESET__BANK2,
				FlashReg + DEVICE_RESET);
			while (!((ioread32(FlashReg + INTR_STATUS2) &
				INTR_STATUS2__RST_COMP) |
				(ioread32(FlashReg + INTR_STATUS2) &
				INTR_STATUS2__TIME_OUT)))
				;

			if (ioread32(FlashReg + INTR_STATUS2) &
				INTR_STATUS2__RST_COMP) {
				iowrite32(DEVICE_RESET__BANK3,
					FlashReg + DEVICE_RESET);
				while (!((ioread32(FlashReg + INTR_STATUS3) &
					INTR_STATUS3__RST_COMP) |
					(ioread32(FlashReg + INTR_STATUS3) &
					INTR_STATUS3__TIME_OUT)))
					;
			} else {
				printk(KERN_ERR "Getting a time out for bank 2!\n");
			}
		} else {
			printk(KERN_ERR "Getting a time out for bank 1!\n");
		}
	}

	/* Clear any timeout bits left behind by the resets */
	iowrite32(INTR_STATUS0__TIME_OUT, FlashReg + INTR_STATUS0);
	iowrite32(INTR_STATUS1__TIME_OUT, FlashReg + INTR_STATUS1);
	iowrite32(INTR_STATUS2__TIME_OUT, FlashReg + INTR_STATUS2);
	iowrite32(INTR_STATUS3__TIME_OUT, FlashReg + INTR_STATUS3);

	/* Cache the controller's view of the ONFI parameter page */
	DeviceInfo.wONFIDevFeatures =
		ioread32(FlashReg + ONFI_DEVICE_FEATURES);
	DeviceInfo.wONFIOptCommands =
		ioread32(FlashReg + ONFI_OPTIONAL_COMMANDS);
	DeviceInfo.wONFITimingMode =
		ioread32(FlashReg + ONFI_TIMING_MODE);
	DeviceInfo.wONFIPgmCacheTimingMode =
		ioread32(FlashReg + ONFI_PGM_CACHE_TIMING_MODE);

	/* Total blocks = LUN count * blocks per LUN (two 16-bit halves) */
	n_of_luns = ioread32(FlashReg + ONFI_DEVICE_NO_OF_LUNS) &
		ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS;
	blks_lun_l = ioread32(FlashReg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L);
	blks_lun_h = ioread32(FlashReg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U);

	blockperlun = (blks_lun_h << 16) | blks_lun_l;

	DeviceInfo.wTotalBlocks = n_of_luns * blockperlun;

	if (!(ioread32(FlashReg + ONFI_TIMING_MODE) &
		ONFI_TIMING_MODE__VALUE))
		return FAIL;

	/* Pick the highest advertised timing mode (bit 5 down to 1) */
	for (i = 5; i > 0; i--) {
		if (ioread32(FlashReg + ONFI_TIMING_MODE) & (0x01 << i))
			break;
	}

	NAND_ONFi_Timing_Mode(i);

	/* Read ID (cmd 0x90, address 0); only the 3rd ID byte is kept */
	index_addr(MODE_11 | 0, 0x90);
	index_addr(MODE_11 | 1, 0);

	for (i = 0; i < 3; i++)
		index_addr_read_data(MODE_11 | 2, &id);

	nand_dbg_print(NAND_DBG_DEBUG, "3rd ID: 0x%x\n", id);

	/* Bits 2-3 of the 3rd ID byte flag an MLC part (non-zero = MLC) */
	DeviceInfo.MLCDevice = id & 0x0C;

	/* By now, all the ONFI devices we know support the page cache */
	/* rw feature. So here we enable the pipeline_rw_ahead feature */
	/* iowrite32(1, FlashReg + CACHE_WRITE_ENABLE); */
	/* iowrite32(1, FlashReg + CACHE_READ_ENABLE);  */

	return PASS;
}
407
408static void get_samsung_nand_para(void)
409{
410	u8 no_of_planes;
411	u32 blk_size;
412	u64 plane_size, capacity;
413	u32 id_bytes[5];
414	int i;
415
416	index_addr((u32)(MODE_11 | 0), 0x90);
417	index_addr((u32)(MODE_11 | 1), 0);
418	for (i = 0; i < 5; i++)
419		index_addr_read_data((u32)(MODE_11 | 2), &id_bytes[i]);
420
421	nand_dbg_print(NAND_DBG_DEBUG,
422		"ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
423		id_bytes[0], id_bytes[1], id_bytes[2],
424		id_bytes[3], id_bytes[4]);
425
426	if ((id_bytes[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */
427		/* Set timing register values according to datasheet */
428		iowrite32(5, FlashReg + ACC_CLKS);
429		iowrite32(20, FlashReg + RE_2_WE);
430		iowrite32(12, FlashReg + WE_2_RE);
431		iowrite32(14, FlashReg + ADDR_2_DATA);
432		iowrite32(3, FlashReg + RDWR_EN_LO_CNT);
433		iowrite32(2, FlashReg + RDWR_EN_HI_CNT);
434		iowrite32(2, FlashReg + CS_SETUP_CNT);
435	}
436
437	no_of_planes = 1 << ((id_bytes[4] & 0x0c) >> 2);
438	plane_size  = (u64)64 << ((id_bytes[4] & 0x70) >> 4);
439	blk_size = 64 << ((ioread32(FlashReg + DEVICE_PARAM_1) & 0x30) >> 4);
440	capacity = (u64)128 * plane_size * no_of_planes;
441
442	DeviceInfo.wTotalBlocks = (u32)GLOB_u64_Div(capacity, blk_size);
443}
444
/*
 * Apply Toshiba-specific fixups.  Parts reporting a 4096-byte main area
 * with only 64 spare bytes get the spare size overridden to 216 bytes
 * and a stronger ECC level.  The chip cannot report its block count,
 * so it is read (log2-encoded) from a scratch register that must be
 * filled in before this driver loads; a default is used otherwise.
 */
static void get_toshiba_nand_para(void)
{
	void __iomem *scratch_reg;
	u32 tmp;

	/* spare area size for some kind of Toshiba NAND device */
	if ((ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
		(ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE) == 64)) {
		iowrite32(216, FlashReg + DEVICE_SPARE_AREA_SIZE);
		/* Logical spare size = spare per device * device count */
		tmp = ioread32(FlashReg + DEVICES_CONNECTED) *
			ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE);
		iowrite32(tmp, FlashReg + LOGICAL_PAGE_SPARE_SIZE);
#if SUPPORT_15BITECC
		iowrite32(15, FlashReg + ECC_CORRECTION);
#elif SUPPORT_8BITECC
		iowrite32(8, FlashReg + ECC_CORRECTION);
#endif
	}

	/* As Toshiba NAND can not provide it's block number, */
	/* so here we need user to provide the correct block */
	/* number in a scratch register before the Linux NAND */
	/* driver is loaded. If no valid value found in the scratch */
	/* register, then we use default block number value */
	scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
	if (!scratch_reg) {
		printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
			__FILE__, __LINE__);
		DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
	} else {
		nand_dbg_print(NAND_DBG_WARN,
			"Spectra: ioremap reg address: 0x%p\n", scratch_reg);
		/* Scratch byte holds log2(total blocks) */
		DeviceInfo.wTotalBlocks = 1 << ioread8(scratch_reg);
		if (DeviceInfo.wTotalBlocks < 512)
			DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
		iounmap(scratch_reg);
	}
}
483
/*
 * Apply Hynix-specific fixups.  Known device IDs get hard-coded page
 * geometry, logical page sizes, 8-bit bus width and a stronger ECC
 * level; unknown IDs fall through with a warning.  The total block
 * count is then read (log2-encoded) from a scratch register, falling
 * back to a default -- same scheme as the Toshiba path above.
 */
static void get_hynix_nand_para(void)
{
	void __iomem *scratch_reg;
	u32 main_size, spare_size;

	switch (DeviceInfo.wDeviceID) {
	case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
	case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
		iowrite32(128, FlashReg + PAGES_PER_BLOCK);
		iowrite32(4096, FlashReg + DEVICE_MAIN_AREA_SIZE);
		iowrite32(224, FlashReg + DEVICE_SPARE_AREA_SIZE);
		/* Logical sizes scale with the number of devices */
		main_size = 4096 * ioread32(FlashReg + DEVICES_CONNECTED);
		spare_size = 224 * ioread32(FlashReg + DEVICES_CONNECTED);
		iowrite32(main_size, FlashReg + LOGICAL_PAGE_DATA_SIZE);
		iowrite32(spare_size, FlashReg + LOGICAL_PAGE_SPARE_SIZE);
		iowrite32(0, FlashReg + DEVICE_WIDTH);
#if SUPPORT_15BITECC
		iowrite32(15, FlashReg + ECC_CORRECTION);
#elif SUPPORT_8BITECC
		iowrite32(8, FlashReg + ECC_CORRECTION);
#endif
		DeviceInfo.MLCDevice  = 1;
		break;
	default:
		nand_dbg_print(NAND_DBG_WARN,
			"Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
			"Will use default parameter values instead.\n",
			DeviceInfo.wDeviceID);
	}

	/* Block count comes from a user-filled scratch register, as the
	 * chip itself is not queried for it */
	scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
	if (!scratch_reg) {
		printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
			__FILE__, __LINE__);
		DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
	} else {
		nand_dbg_print(NAND_DBG_WARN,
			"Spectra: ioremap reg address: 0x%p\n", scratch_reg);
		/* Scratch byte holds log2(total blocks) */
		DeviceInfo.wTotalBlocks = 1 << ioread8(scratch_reg);
		if (DeviceInfo.wTotalBlocks < 512)
			DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
		iounmap(scratch_reg);
	}
}
528
529static void find_valid_banks(void)
530{
531	u32 id[LLD_MAX_FLASH_BANKS];
532	int i;
533
534	totalUsedBanks = 0;
535	for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
536		index_addr((u32)(MODE_11 | (i << 24) | 0), 0x90);
537		index_addr((u32)(MODE_11 | (i << 24) | 1), 0);
538		index_addr_read_data((u32)(MODE_11 | (i << 24) | 2), &id[i]);
539
540		nand_dbg_print(NAND_DBG_DEBUG,
541			"Return 1st ID for bank[%d]: %x\n", i, id[i]);
542
543		if (i == 0) {
544			if (id[i] & 0x0ff)
545				GLOB_valid_banks[i] = 1;
546		} else {
547			if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
548				GLOB_valid_banks[i] = 1;
549		}
550
551		totalUsedBanks += GLOB_valid_banks[i];
552	}
553
554	nand_dbg_print(NAND_DBG_DEBUG,
555		"totalUsedBanks: %d\n", totalUsedBanks);
556}
557
558static void detect_partition_feature(void)
559{
560	if (ioread32(FlashReg + FEATURES) & FEATURES__PARTITION) {
561		if ((ioread32(FlashReg + PERM_SRC_ID_1) &
562			PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) {
563			DeviceInfo.wSpectraStartBlock =
564			    ((ioread32(FlashReg + MIN_MAX_BANK_1) &
565			      MIN_MAX_BANK_1__MIN_VALUE) *
566			     DeviceInfo.wTotalBlocks)
567			    +
568			    (ioread32(FlashReg + MIN_BLK_ADDR_1) &
569			    MIN_BLK_ADDR_1__VALUE);
570
571			DeviceInfo.wSpectraEndBlock =
572			    (((ioread32(FlashReg + MIN_MAX_BANK_1) &
573			       MIN_MAX_BANK_1__MAX_VALUE) >> 2) *
574			     DeviceInfo.wTotalBlocks)
575			    +
576			    (ioread32(FlashReg + MAX_BLK_ADDR_1) &
577			    MAX_BLK_ADDR_1__VALUE);
578
579			DeviceInfo.wTotalBlocks *= totalUsedBanks;
580
581			if (DeviceInfo.wSpectraEndBlock >=
582			    DeviceInfo.wTotalBlocks) {
583				DeviceInfo.wSpectraEndBlock =
584				    DeviceInfo.wTotalBlocks - 1;
585			}
586
587			DeviceInfo.wDataBlockNum =
588				DeviceInfo.wSpectraEndBlock -
589				DeviceInfo.wSpectraStartBlock + 1;
590		} else {
591			DeviceInfo.wTotalBlocks *= totalUsedBanks;
592			DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
593			DeviceInfo.wSpectraEndBlock =
594				DeviceInfo.wTotalBlocks - 1;
595			DeviceInfo.wDataBlockNum =
596				DeviceInfo.wSpectraEndBlock -
597				DeviceInfo.wSpectraStartBlock + 1;
598		}
599	} else {
600		DeviceInfo.wTotalBlocks *= totalUsedBanks;
601		DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
602		DeviceInfo.wSpectraEndBlock = DeviceInfo.wTotalBlocks - 1;
603		DeviceInfo.wDataBlockNum =
604			DeviceInfo.wSpectraEndBlock -
605			DeviceInfo.wSpectraStartBlock + 1;
606	}
607}
608
/* Log every field of the global DeviceInfo structure at debug verbosity. */
static void dump_device_info(void)
{
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceInfo:\n");
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceMaker: 0x%x\n",
		DeviceInfo.wDeviceMaker);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceID: 0x%x\n",
		DeviceInfo.wDeviceID);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceType: 0x%x\n",
		DeviceInfo.wDeviceType);
	nand_dbg_print(NAND_DBG_DEBUG, "SpectraStartBlock: %d\n",
		DeviceInfo.wSpectraStartBlock);
	nand_dbg_print(NAND_DBG_DEBUG, "SpectraEndBlock: %d\n",
		DeviceInfo.wSpectraEndBlock);
	nand_dbg_print(NAND_DBG_DEBUG, "TotalBlocks: %d\n",
		DeviceInfo.wTotalBlocks);
	nand_dbg_print(NAND_DBG_DEBUG, "PagesPerBlock: %d\n",
		DeviceInfo.wPagesPerBlock);
	nand_dbg_print(NAND_DBG_DEBUG, "PageSize: %d\n",
		DeviceInfo.wPageSize);
	nand_dbg_print(NAND_DBG_DEBUG, "PageDataSize: %d\n",
		DeviceInfo.wPageDataSize);
	nand_dbg_print(NAND_DBG_DEBUG, "PageSpareSize: %d\n",
		DeviceInfo.wPageSpareSize);
	nand_dbg_print(NAND_DBG_DEBUG, "NumPageSpareFlag: %d\n",
		DeviceInfo.wNumPageSpareFlag);
	nand_dbg_print(NAND_DBG_DEBUG, "ECCBytesPerSector: %d\n",
		DeviceInfo.wECCBytesPerSector);
	nand_dbg_print(NAND_DBG_DEBUG, "BlockSize: %d\n",
		DeviceInfo.wBlockSize);
	nand_dbg_print(NAND_DBG_DEBUG, "BlockDataSize: %d\n",
		DeviceInfo.wBlockDataSize);
	nand_dbg_print(NAND_DBG_DEBUG, "DataBlockNum: %d\n",
		DeviceInfo.wDataBlockNum);
	nand_dbg_print(NAND_DBG_DEBUG, "PlaneNum: %d\n",
		DeviceInfo.bPlaneNum);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceMainAreaSize: %d\n",
		DeviceInfo.wDeviceMainAreaSize);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceSpareAreaSize: %d\n",
		DeviceInfo.wDeviceSpareAreaSize);
	nand_dbg_print(NAND_DBG_DEBUG, "DevicesConnected: %d\n",
		DeviceInfo.wDevicesConnected);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceWidth: %d\n",
		DeviceInfo.wDeviceWidth);
	nand_dbg_print(NAND_DBG_DEBUG, "HWRevision: 0x%x\n",
		DeviceInfo.wHWRevision);
	nand_dbg_print(NAND_DBG_DEBUG, "HWFeatures: 0x%x\n",
		DeviceInfo.wHWFeatures);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFIDevFeatures: 0x%x\n",
		DeviceInfo.wONFIDevFeatures);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFIOptCommands: 0x%x\n",
		DeviceInfo.wONFIOptCommands);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFITimingMode: 0x%x\n",
		DeviceInfo.wONFITimingMode);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFIPgmCacheTimingMode: 0x%x\n",
		DeviceInfo.wONFIPgmCacheTimingMode);
	nand_dbg_print(NAND_DBG_DEBUG, "MLCDevice: %s\n",
		DeviceInfo.MLCDevice ? "Yes" : "No");
	nand_dbg_print(NAND_DBG_DEBUG, "SpareSkipBytes: %d\n",
		DeviceInfo.wSpareSkipBytes);
	nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageNumber: %d\n",
		DeviceInfo.nBitsInPageNumber);
	nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageDataSize: %d\n",
		DeviceInfo.nBitsInPageDataSize);
	nand_dbg_print(NAND_DBG_DEBUG, "BitsInBlockDataSize: %d\n",
		DeviceInfo.nBitsInBlockDataSize);
}
675
/*
 * Probe the attached NAND and populate the global DeviceInfo structure
 * from the controller's discovered-parameter registers, with
 * vendor-specific fixups (ONFI, Samsung, Toshiba, Hynix).
 * Returns PASS, or FAIL on a failed ONFI probe or an unsupported
 * plane-count encoding.
 */
u16 NAND_Read_Device_ID(void)
{
	u16 status = PASS;
	u8 no_of_planes;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	/* Skip the first 2 spare bytes -- presumably the factory
	 * bad-block marker bytes; confirm against controller spec */
	iowrite32(0x02, FlashReg + SPARE_AREA_SKIP_BYTES);
	iowrite32(0xffff, FlashReg + SPARE_AREA_MARKER);
	DeviceInfo.wDeviceMaker = ioread32(FlashReg + MANUFACTURER_ID);
	DeviceInfo.wDeviceID = ioread32(FlashReg + DEVICE_ID);
	DeviceInfo.MLCDevice = ioread32(FlashReg + DEVICE_PARAM_0) & 0x0c;

	/* Dispatch to a vendor-specific parameter routine */
	if (ioread32(FlashReg + ONFI_DEVICE_NO_OF_LUNS) &
		ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
		if (FAIL == get_onfi_nand_para())
			return FAIL;
	} else if (DeviceInfo.wDeviceMaker == 0xEC) { /* Samsung NAND */
		get_samsung_nand_para();
	} else if (DeviceInfo.wDeviceMaker == 0x98) { /* Toshiba NAND */
		get_toshiba_nand_para();
	} else if (DeviceInfo.wDeviceMaker == 0xAD) { /* Hynix NAND */
		get_hynix_nand_para();
	} else {
		DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
	}

	nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
			"acc_clks: %d, re_2_we: %d, we_2_re: %d,"
			"addr_2_data: %d, rdwr_en_lo_cnt: %d, "
			"rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
			ioread32(FlashReg + ACC_CLKS),
			ioread32(FlashReg + RE_2_WE),
			ioread32(FlashReg + WE_2_RE),
			ioread32(FlashReg + ADDR_2_DATA),
			ioread32(FlashReg + RDWR_EN_LO_CNT),
			ioread32(FlashReg + RDWR_EN_HI_CNT),
			ioread32(FlashReg + CS_SETUP_CNT));

	DeviceInfo.wHWRevision = ioread32(FlashReg + REVISION);
	DeviceInfo.wHWFeatures = ioread32(FlashReg + FEATURES);

	DeviceInfo.wDeviceMainAreaSize =
		ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE);
	DeviceInfo.wDeviceSpareAreaSize =
		ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE);

	DeviceInfo.wPageDataSize =
		ioread32(FlashReg + LOGICAL_PAGE_DATA_SIZE);

	/* Note: When using the Micon 4K NAND device, the controller will report
	 * Page Spare Size as 216 bytes. But Micron's Spec say it's 218 bytes.
	 * And if force set it to 218 bytes, the controller can not work
	 * correctly. So just let it be. But keep in mind that this bug may
	 * cause
	 * other problems in future.       - Yunpeng  2008-10-10
	 */
	DeviceInfo.wPageSpareSize =
		ioread32(FlashReg + LOGICAL_PAGE_SPARE_SIZE);

	DeviceInfo.wPagesPerBlock = ioread32(FlashReg + PAGES_PER_BLOCK);

	/* Derived sizes */
	DeviceInfo.wPageSize =
	    DeviceInfo.wPageDataSize + DeviceInfo.wPageSpareSize;
	DeviceInfo.wBlockSize =
	    DeviceInfo.wPageSize * DeviceInfo.wPagesPerBlock;
	DeviceInfo.wBlockDataSize =
	    DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;

	DeviceInfo.wDeviceWidth = ioread32(FlashReg + DEVICE_WIDTH);
	DeviceInfo.wDeviceType =
		((ioread32(FlashReg + DEVICE_WIDTH) > 0) ? 16 : 8);

	DeviceInfo.wDevicesConnected = ioread32(FlashReg + DEVICES_CONNECTED);

	DeviceInfo.wSpareSkipBytes =
		ioread32(FlashReg + SPARE_AREA_SKIP_BYTES) *
		DeviceInfo.wDevicesConnected;

	DeviceInfo.nBitsInPageNumber =
		(u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
	DeviceInfo.nBitsInPageDataSize =
		(u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
	DeviceInfo.nBitsInBlockDataSize =
		(u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);

	set_ecc_config();

	/* Register values 0/1/3/7 map to 1/2/4/8 planes; anything else
	 * is treated as unsupported */
	no_of_planes = ioread32(FlashReg + NUMBER_OF_PLANES) &
		NUMBER_OF_PLANES__VALUE;

	switch (no_of_planes) {
	case 0:
	case 1:
	case 3:
	case 7:
		DeviceInfo.bPlaneNum = no_of_planes + 1;
		break;
	default:
		status = FAIL;
		break;
	}

	find_valid_banks();

	detect_partition_feature();

	dump_device_info();

	return status;
}
788
789u16 NAND_UnlockArrayAll(void)
790{
791	u64 start_addr, end_addr;
792
793	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
794		       __FILE__, __LINE__, __func__);
795
796	start_addr = 0;
797	end_addr = ((u64)DeviceInfo.wBlockSize *
798		(DeviceInfo.wTotalBlocks - 1)) >>
799		DeviceInfo.nBitsInPageDataSize;
800
801	index_addr((u32)(MODE_10 | (u32)start_addr), 0x10);
802	index_addr((u32)(MODE_10 | (u32)end_addr), 0x11);
803
804	return PASS;
805}
806
807void NAND_LLD_Enable_Disable_Interrupts(u16 INT_ENABLE)
808{
809	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
810		       __FILE__, __LINE__, __func__);
811
812	if (INT_ENABLE)
813		iowrite32(1, FlashReg + GLOBAL_INT_ENABLE);
814	else
815		iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
816}
817
818u16 NAND_Erase_Block(u32 block)
819{
820	u16 status = PASS;
821	u64 flash_add;
822	u16 flash_bank;
823	u32 intr_status = 0;
824	u32 intr_status_addresses[4] = {INTR_STATUS0,
825		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
826
827	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
828		       __FILE__, __LINE__, __func__);
829
830	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
831		* DeviceInfo.wBlockDataSize;
832
833	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
834
835	if (block >= DeviceInfo.wTotalBlocks)
836		status = FAIL;
837
838	if (status == PASS) {
839		intr_status = intr_status_addresses[flash_bank];
840
841		iowrite32(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL,
842			FlashReg + intr_status);
843
844		index_addr((u32)(MODE_10 | (flash_bank << 24) |
845			(flash_add >> DeviceInfo.nBitsInPageDataSize)), 1);
846
847		while (!(ioread32(FlashReg + intr_status) &
848			(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL)))
849			;
850
851		if (ioread32(FlashReg + intr_status) &
852			INTR_STATUS0__ERASE_FAIL)
853			status = FAIL;
854
855		iowrite32(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL,
856			FlashReg + intr_status);
857	}
858
859	return status;
860}
861
862static u32 Boundary_Check_Block_Page(u32 block, u16 page,
863						u16 page_count)
864{
865	u32 status = PASS;
866
867	if (block >= DeviceInfo.wTotalBlocks)
868		status = FAIL;
869
870	if (page + page_count > DeviceInfo.wPagesPerBlock)
871		status = FAIL;
872
873	return status;
874}
875
/*
 * Read the spare (OOB) area of one page into @read_data.
 * Only a single page per call is supported (@page_count must be 1).
 * Returns PASS or FAIL.
 *
 * When ECC is enabled the controller leaves the flag bytes at the end
 * of the spare area; they are moved to the front of @read_data so
 * callers always see the flag bytes first.
 */
u16 NAND_Read_Page_Spare(u8 *read_data, u32 block, u16 page,
			    u16 page_count)
{
	u32 status = PASS;
	u32 i;
	u64 flash_add;
	u32 PageSpareSize = DeviceInfo.wPageSpareSize;
	u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
	u32 flash_bank;
	u32 intr_status = 0;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u8 *page_spare = buf_read_page_spare;

	if (block >= DeviceInfo.wTotalBlocks) {
		printk(KERN_ERR "block too big: %d\n", (int)block);
		status = FAIL;
	}

	if (page >= DeviceInfo.wPagesPerBlock) {
		printk(KERN_ERR "page too big: %d\n", page);
		status = FAIL;
	}

	if (page_count > 1) {
		printk(KERN_ERR "page count too big: %d\n", page_count);
		status = FAIL;
	}

	/* Byte address of the page within its bank */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		* DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	if (status == PASS) {
		intr_status = intr_status_addresses[flash_bank];
		/* Write back the current status bits to clear them */
		iowrite32(ioread32(FlashReg + intr_status),
			FlashReg + intr_status);

		/* 0x41 then 0x2000|count: spare-area read command pair;
		 * exact encoding per controller spec, not shown here */
		index_addr((u32)(MODE_10 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)),
			0x41);
		index_addr((u32)(MODE_10 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)),
			0x2000 | page_count);
		/* Busy-wait until the page load completes */
		while (!(ioread32(FlashReg + intr_status) &
			INTR_STATUS0__LOAD_COMP))
			;

		/* Switch to MODE_01 and stream the spare bytes out of
		 * the data port, one 32-bit word at a time */
		iowrite32((u32)(MODE_01 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)),
			FlashMem);

		for (i = 0; i < (PageSpareSize / 4); i++)
			*((u32 *)page_spare + i) =
					ioread32(FlashMem + 0x10);

		if (enable_ecc) {
			/* Flag bytes live at the end of the raw spare;
			 * present them first, then the rest */
			for (i = 0; i < spareFlagBytes; i++)
				read_data[i] =
					page_spare[PageSpareSize -
						spareFlagBytes + i];
			for (i = 0; i < (PageSpareSize - spareFlagBytes); i++)
				read_data[spareFlagBytes + i] =
							page_spare[i];
		} else {
			for (i = 0; i < PageSpareSize; i++)
				read_data[i] = page_spare[i];
		}

		/* 0x42 ends the spare-area access */
		index_addr((u32)(MODE_10 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
	}

	return status;
}
953
954/* No use function. Should be removed later */
955u16 NAND_Write_Page_Spare(u8 *write_data, u32 block, u16 page,
956			     u16 page_count)
957{
958	printk(KERN_ERR
959	       "Error! This function (NAND_Write_Page_Spare) should never"
960		" be called!\n");
961	return ERR;
962}
963
/* op value:  0 - DDMA read;  1 - DDMA write */
static void ddma_trans(u8 *data, u64 flash_add,
			u32 flash_bank, int op, u32 numPages)
{
	u32 data_addr;

	/* Map virtual address to bus address for DDMA */
	/* NOTE(review): virt_to_bus() assumes a direct bus mapping and
	 * is deprecated in mainline -- confirm no IOMMU on this platform */
	data_addr = virt_to_bus(data);

	/* Four MODE_10 indexed writes build the DMA transaction:
	 * 1) direction + page count at the flash page address,
	 * 2) high 16 bits of the buffer bus address,
	 * 3) low 16 bits of the buffer bus address,
	 * 4) kick-off word.
	 * The (2 << 12) field tags each write as DMA setup; exact
	 * register encoding per controller spec, not shown here. */
	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		(flash_add >> DeviceInfo.nBitsInPageDataSize)),
		(u16)(2 << 12) | (op << 8) | numPages);

	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		((u16)(0x0FFFF & (data_addr >> 16)) << 8)),
		(u16)(2 << 12) | (2 << 8) | 0);

	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		((u16)(0x0FFFF & data_addr) << 8)),
		(u16)(2 << 12) | (3 << 8) | 0);

	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		(1 << 16) | (0x40 << 8)),
		(u16)(2 << 12) | (4 << 8) | 0);
}
989
990/* If data in buf are all 0xff, then return 1; otherwise return 0 */
991static int check_all_1(u8 *buf)
992{
993	int i, j, cnt;
994
995	for (i = 0; i < DeviceInfo.wPageDataSize; i++) {
996		if (buf[i] != 0xff) {
997			cnt = 0;
998			nand_dbg_print(NAND_DBG_WARN,
999				"the first non-0xff data byte is: %d\n", i);
1000			for (j = i; j < DeviceInfo.wPageDataSize; j++) {
1001				nand_dbg_print(NAND_DBG_WARN, "0x%x ", buf[j]);
1002				cnt++;
1003				if (cnt > 8)
1004					break;
1005			}
1006			nand_dbg_print(NAND_DBG_WARN, "\n");
1007			return 0;
1008		}
1009	}
1010
1011	return 1;
1012}
1013
/*
 * Drain the controller's ECC error FIFO for @bank and repair every
 * correctable error in @buf (a read that started at @block/@page).
 * Returns PASS if all reported errors were corrected, FAIL once an
 * uncorrectable error record is seen.
 */
static int do_ecc_new(unsigned long bank, u8 *buf,
				u32 block, u16 page)
{
	int status = PASS;
	u16 err_page = 0;
	u16 err_byte;
	u8 err_sect;
	u8 err_dev;
	u16 err_fix_info;
	u16 err_addr;
	u32 ecc_sect_size;
	u8 *err_pos;
	/* Per-bank error-page register offsets, indexed by @bank */
	u32 err_page_addr[4] = {ERR_PAGE_ADDR0,
		ERR_PAGE_ADDR1, ERR_PAGE_ADDR2, ERR_PAGE_ADDR3};

	ecc_sect_size = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);

	/* Read error records until the controller flags the last one */
	do {
		err_page = ioread32(FlashReg + err_page_addr[bank]);
		err_addr = ioread32(FlashReg + ECC_ERROR_ADDRESS);
		err_byte = err_addr & ECC_ERROR_ADDRESS__OFFSET;
		err_sect = ((err_addr & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12);
		err_fix_info = ioread32(FlashReg + ERR_CORRECTION_INFO);
		err_dev = ((err_fix_info & ERR_CORRECTION_INFO__DEVICE_NR)
			>> 8);
		if (err_fix_info & ERR_CORRECTION_INFO__ERROR_TYPE) {
			/* Uncorrectable: log details and fail the read */
			nand_dbg_print(NAND_DBG_WARN,
				"%s, Line %d Uncorrectable ECC error "
				"when read block %d page %d."
				"PTN_INTR register: 0x%x "
				"err_page: %d, err_sect: %d, err_byte: %d, "
				"err_dev: %d, ecc_sect_size: %d, "
				"err_fix_info: 0x%x\n",
				__FILE__, __LINE__, block, page,
				ioread32(FlashReg + PTN_INTR),
				err_page, err_sect, err_byte, err_dev,
				ecc_sect_size, (u32)err_fix_info);

			if (check_all_1(buf))
				nand_dbg_print(NAND_DBG_WARN, "%s, Line %d"
					       "All 0xff!\n",
					       __FILE__, __LINE__);
			else
				nand_dbg_print(NAND_DBG_WARN, "%s, Line %d"
					       "Not all 0xff!\n",
					       __FILE__, __LINE__);
			status = FAIL;
		} else {
			nand_dbg_print(NAND_DBG_WARN,
				"%s, Line %d Found ECC error "
				"when read block %d page %d."
				"err_page: %d, err_sect: %d, err_byte: %d, "
				"err_dev: %d, ecc_sect_size: %d, "
				"err_fix_info: 0x%x\n",
				__FILE__, __LINE__, block, page,
				err_page, err_sect, err_byte, err_dev,
				ecc_sect_size, (u32)err_fix_info);
			if (err_byte < ECC_SECTOR_SIZE) {
				/*
				 * Locate the faulty byte in the buffer
				 * (page offset + sector offset + byte,
				 * interleaved by device) and XOR in the
				 * controller-provided correction mask.
				 */
				err_pos = buf +
					(err_page - page) *
					DeviceInfo.wPageDataSize +
					err_sect * ecc_sect_size +
					err_byte *
					DeviceInfo.wDevicesConnected +
					err_dev;

				*err_pos ^= err_fix_info &
					ERR_CORRECTION_INFO__BYTEMASK;
			}
		}
	} while (!(err_fix_info & ERR_CORRECTION_INFO__LAST_ERR_INFO));

	return status;
}
1088
1089u16 NAND_Read_Page_Main_Polling(u8 *read_data,
1090		u32 block, u16 page, u16 page_count)
1091{
1092	u32 status = PASS;
1093	u64 flash_add;
1094	u32 intr_status = 0;
1095	u32 flash_bank;
1096	u32 intr_status_addresses[4] = {INTR_STATUS0,
1097		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
1098	u8 *read_data_l;
1099
1100	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
1101		       __FILE__, __LINE__, __func__);
1102
1103	status = Boundary_Check_Block_Page(block, page, page_count);
1104	if (status != PASS)
1105		return status;
1106
1107	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
1108		* DeviceInfo.wBlockDataSize +
1109		(u64)page * DeviceInfo.wPageDataSize;
1110	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
1111
1112	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
1113
1114	intr_status = intr_status_addresses[flash_bank];
1115	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
1116
1117	if (page_count > 1) {
1118		read_data_l = read_data;
1119		while (page_count > MAX_PAGES_PER_RW) {
1120			if (ioread32(FlashReg + MULTIPLANE_OPERATION))
1121				status = NAND_Multiplane_Read(read_data_l,
1122					block, page, MAX_PAGES_PER_RW);
1123			else
1124				status = NAND_Pipeline_Read_Ahead_Polling(
1125					read_data_l, block, page,
1126					MAX_PAGES_PER_RW);
1127
1128			if (status == FAIL)
1129				return status;
1130
1131			read_data_l += DeviceInfo.wPageDataSize *
1132					MAX_PAGES_PER_RW;
1133			page_count -= MAX_PAGES_PER_RW;
1134			page += MAX_PAGES_PER_RW;
1135		}
1136		if (ioread32(FlashReg + MULTIPLANE_OPERATION))
1137			status = NAND_Multiplane_Read(read_data_l,
1138					block, page, page_count);
1139		else
1140			status = NAND_Pipeline_Read_Ahead_Polling(
1141					read_data_l, block, page, page_count);
1142
1143		return status;
1144	}
1145
1146	iowrite32(1, FlashReg + DMA_ENABLE);
1147	while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
1148		;
1149
1150	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
1151	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
1152
1153	ddma_trans(read_data, flash_add, flash_bank, 0, 1);
1154
1155	if (enable_ecc) {
1156		while (!(ioread32(FlashReg + intr_status) &
1157			(INTR_STATUS0__ECC_TRANSACTION_DONE |
1158			INTR_STATUS0__ECC_ERR)))
1159			;
1160
1161		if (ioread32(FlashReg + intr_status) &
1162			INTR_STATUS0__ECC_ERR) {
1163			iowrite32(INTR_STATUS0__ECC_ERR,
1164				FlashReg + intr_status);
1165			status = do_ecc_new(flash_bank, read_data,
1166					block, page);
1167		}
1168
1169		if (ioread32(FlashReg + intr_status) &
1170			INTR_STATUS0__ECC_TRANSACTION_DONE &
1171			INTR_STATUS0__ECC_ERR)
1172			iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE |
1173				INTR_STATUS0__ECC_ERR,
1174				FlashReg + intr_status);
1175		else if (ioread32(FlashReg + intr_status) &
1176			INTR_STATUS0__ECC_TRANSACTION_DONE)
1177			iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE,
1178				FlashReg + intr_status);
1179		else if (ioread32(FlashReg + intr_status) &
1180			INTR_STATUS0__ECC_ERR)
1181			iowrite32(INTR_STATUS0__ECC_ERR,
1182				FlashReg + intr_status);
1183	} else {
1184		while (!(ioread32(FlashReg + intr_status) &
1185			INTR_STATUS0__DMA_CMD_COMP))
1186			;
1187		iowrite32(INTR_STATUS0__DMA_CMD_COMP, FlashReg + intr_status);
1188	}
1189
1190	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
1191
1192	iowrite32(0, FlashReg + DMA_ENABLE);
1193	while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
1194		;
1195
1196	return status;
1197}
1198
/*
 * Polling variant of the pipeline read-ahead: stream @page_count
 * (>= 2) consecutive pages from @block into @read_data via DDMA,
 * spinning on the interrupt-status register instead of sleeping.
 * Returns PASS, or FAIL on bad arguments / uncorrectable ECC.
 */
u16 NAND_Pipeline_Read_Ahead_Polling(u8 *read_data,
			u32 block, u16 page, u16 page_count)
{
	u32 status = PASS;
	u32 NumPages = page_count;
	u64 flash_add;
	u32 flash_bank;
	u32 intr_status = 0;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u32 ecc_done_OR_dma_comp;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);

	/* Pipeline mode needs at least two pages */
	if (page_count < 2)
		status = FAIL;

	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		*DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	if (status == PASS) {
		intr_status = intr_status_addresses[flash_bank];
		/* Clear pending bits by writing them back */
		iowrite32(ioread32(FlashReg + intr_status),
			FlashReg + intr_status);

		iowrite32(1, FlashReg + DMA_ENABLE);
		while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
			;

		iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

		index_addr((u32)(MODE_10 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
		ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);

		/*
		 * The transfer is finished only after BOTH the ECC
		 * transaction and the DMA command complete; track the
		 * first of the two with ecc_done_OR_dma_comp and break
		 * on the second.
		 */
		ecc_done_OR_dma_comp = 0;
		while (1) {
			if (enable_ecc) {
				while (!ioread32(FlashReg + intr_status))
					;

				if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__ECC_ERR) {
					iowrite32(INTR_STATUS0__ECC_ERR,
						FlashReg + intr_status);
					status = do_ecc_new(flash_bank,
						read_data, block, page);
				} else if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__DMA_CMD_COMP) {
					iowrite32(INTR_STATUS0__DMA_CMD_COMP,
						FlashReg + intr_status);

					if (1 == ecc_done_OR_dma_comp)
						break;

					ecc_done_OR_dma_comp = 1;
				} else if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__ECC_TRANSACTION_DONE) {
					iowrite32(
					INTR_STATUS0__ECC_TRANSACTION_DONE,
					FlashReg + intr_status);

					if (1 == ecc_done_OR_dma_comp)
						break;

					ecc_done_OR_dma_comp = 1;
				}
			} else {
				/* No ECC: DMA completion ends the wait */
				while (!(ioread32(FlashReg + intr_status) &
					INTR_STATUS0__DMA_CMD_COMP))
					;

				iowrite32(INTR_STATUS0__DMA_CMD_COMP,
					FlashReg + intr_status);
				break;
			}

			/* Clear any other stray status bits */
			iowrite32((~INTR_STATUS0__ECC_ERR) &
				(~INTR_STATUS0__ECC_TRANSACTION_DONE) &
				(~INTR_STATUS0__DMA_CMD_COMP),
				FlashReg + intr_status);

		}

		iowrite32(ioread32(FlashReg + intr_status),
			FlashReg + intr_status);

		iowrite32(0, FlashReg + DMA_ENABLE);

		while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
			;
	}
	return status;
}
1299
/*
 * Interrupt-driven read of the main (data) area.  Multi-page requests
 * are chunked and delegated to the multi-plane / pipeline helpers; a
 * single page is DMA-transferred here and completion is signalled via
 * info.complete from the ISR.  Returns PASS, FAIL or ERR (timeout).
 */
u16 NAND_Read_Page_Main(u8 *read_data, u32 block, u16 page,
			   u16 page_count)
{
	u32 status = PASS;
	u64 flash_add;
	u32 intr_status = 0;
	u32 flash_bank;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	int ret;
	u8 *read_data_l;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);
	if (status != PASS)
		return status;

	/* Translate (block, page) into a per-bank byte address */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		* DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;
	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

	intr_status = intr_status_addresses[flash_bank];
	/* Clear pending interrupt bits by writing them back */
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	if (page_count > 1) {
		read_data_l = read_data;
		while (page_count > MAX_PAGES_PER_RW) {
			if (ioread32(FlashReg + MULTIPLANE_OPERATION))
				status = NAND_Multiplane_Read(read_data_l,
					block, page, MAX_PAGES_PER_RW);
			else
				status = NAND_Pipeline_Read_Ahead(
					read_data_l, block, page,
					MAX_PAGES_PER_RW);

			if (status == FAIL)
				return status;

			read_data_l += DeviceInfo.wPageDataSize *
					MAX_PAGES_PER_RW;
			page_count -= MAX_PAGES_PER_RW;
			page += MAX_PAGES_PER_RW;
		}
		if (ioread32(FlashReg + MULTIPLANE_OPERATION))
			status = NAND_Multiplane_Read(read_data_l,
					block, page, page_count);
		else
			status = NAND_Pipeline_Read_Ahead(
					read_data_l, block, page, page_count);

		return status;
	}

	iowrite32(1, FlashReg + DMA_ENABLE);
	while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	/* Fill the mrst_nand_info structure (consumed by the ISR) */
	info.state = INT_READ_PAGE_MAIN;
	info.read_data = read_data;
	info.flash_bank = flash_bank;
	info.block = block;
	info.page = page;
	info.ret = PASS;

	ddma_trans(read_data, flash_add, flash_bank, 0, 1);

	iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */

	/* ISR completes info.complete when the transfer finishes */
	ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
	if (!ret) {
		printk(KERN_ERR "Wait for completion timeout "
			"in %s, Line %d\n", __FILE__, __LINE__);
		status = ERR;
	} else {
		status = info.ret;
	}

	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(0, FlashReg + DMA_ENABLE);
	while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	return status;
}
1394
1395void Conv_Spare_Data_Log2Phy_Format(u8 *data)
1396{
1397	int i;
1398	const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
1399	const u32 PageSpareSize  = DeviceInfo.wPageSpareSize;
1400
1401	if (enable_ecc) {
1402		for (i = spareFlagBytes - 1; i >= 0; i++)
1403			data[PageSpareSize - spareFlagBytes + i] = data[i];
1404	}
1405}
1406
1407void Conv_Spare_Data_Phy2Log_Format(u8 *data)
1408{
1409	int i;
1410	const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
1411	const u32 PageSpareSize = DeviceInfo.wPageSpareSize;
1412
1413	if (enable_ecc) {
1414		for (i = 0; i < spareFlagBytes; i++)
1415			data[i] = data[PageSpareSize - spareFlagBytes + i];
1416	}
1417}
1418
1419
/*
 * Convert @page_count pages of main+spare data, in place, from the
 * logical layout (data | flags | ECC bytes) to the controller's
 * physical interleaved layout (sector | ecc | sector | ecc ... ).
 * All moves run from high addresses down because source and
 * destination ranges overlap within the same buffer.
 * No-op when ECC is disabled.
 */
void Conv_Main_Spare_Data_Log2Phy_Format(u8 *data, u16 page_count)
{
	const u32 PageSize = DeviceInfo.wPageSize;
	const u32 PageDataSize = DeviceInfo.wPageDataSize;
	const u32 eccBytes = DeviceInfo.wECCBytesPerSector;
	const u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
	const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
	u32 eccSectorSize;
	u32 page_offset;
	int i, j;

	eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
	if (enable_ecc) {
		/* Process pages back to front */
		while (page_count > 0) {
			page_offset = (page_count - 1) * PageSize;
			j = (DeviceInfo.wPageDataSize / eccSectorSize);
			/* Move the spare flag bytes past the last sector */
			for (i = spareFlagBytes - 1; i >= 0; i--)
				data[page_offset +
					(eccSectorSize + eccBytes) * j + i] =
					data[page_offset + PageDataSize + i];
			/* Spread the data sectors out to interleaved slots */
			for (j--; j >= 1; j--) {
				for (i = eccSectorSize - 1; i >= 0; i--)
					data[page_offset +
					(eccSectorSize + eccBytes) * j + i] =
						data[page_offset +
						eccSectorSize * j + i];
			}
			/* Open a spareSkipBytes gap after the data area */
			for (i = (PageSize - spareSkipBytes) - 1;
				i >= PageDataSize; i--)
				data[page_offset + i + spareSkipBytes] =
					data[page_offset + i];
			page_count--;
		}
	}
}
1455
/*
 * Inverse of Conv_Main_Spare_Data_Log2Phy_Format: convert @page_count
 * pages, in place, from the controller's interleaved physical layout
 * back to the logical layout (data | flags | ECC bytes).  Moves run
 * from low addresses up, the safe direction for these overlaps.
 * No-op when ECC is disabled.
 */
void Conv_Main_Spare_Data_Phy2Log_Format(u8 *data, u16 page_count)
{
	const u32 PageSize = DeviceInfo.wPageSize;
	const u32 PageDataSize = DeviceInfo.wPageDataSize;
	const u32 eccBytes = DeviceInfo.wECCBytesPerSector;
	const u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
	const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
	u32 eccSectorSize;
	u32 page_offset;
	int i, j;

	eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
	if (enable_ecc) {
		while (page_count > 0) {
			page_offset = (page_count - 1) * PageSize;
			/* Close the spareSkipBytes gap after the data */
			for (i = PageDataSize;
				i < PageSize - spareSkipBytes;
				i++)
				data[page_offset + i] =
					data[page_offset + i +
					spareSkipBytes];
			/* Gather interleaved sectors back to contiguous */
			for (j = 1;
			j < DeviceInfo.wPageDataSize / eccSectorSize;
			j++) {
				for (i = 0; i < eccSectorSize; i++)
					data[page_offset +
					eccSectorSize * j + i] =
						data[page_offset +
						(eccSectorSize + eccBytes) * j
						+ i];
			}
			/*
			 * j now equals the sector count; the flag bytes
			 * follow the last interleaved sector.
			 */
			for (i = 0; i < spareFlagBytes; i++)
				data[page_offset + PageDataSize + i] =
					data[page_offset +
					(eccSectorSize + eccBytes) * j + i];
			page_count--;
		}
	}
}
1495
/*
 * Un-tested function.
 * Read @page_count pages using the controller's multi-plane mode:
 * enable MULTIPLANE_OPERATION, stream the pages via DDMA while
 * polling for ECC / DMA completion, then restore single-plane mode.
 * Returns PASS, or FAIL on bad arguments / uncorrectable ECC.
 */
u16 NAND_Multiplane_Read(u8 *read_data, u32 block, u16 page,
			    u16 page_count)
{
	u32 status = PASS;
	u32 NumPages = page_count;
	u64 flash_add;
	u32 flash_bank;
	u32 intr_status = 0;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u32 ecc_done_OR_dma_comp;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);

	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		* DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	if (status == PASS) {
		intr_status = intr_status_addresses[flash_bank];
		/* Clear pending interrupt bits by writing them back */
		iowrite32(ioread32(FlashReg + intr_status),
			FlashReg + intr_status);

		iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
		iowrite32(0x01, FlashReg + MULTIPLANE_OPERATION);

		iowrite32(1, FlashReg + DMA_ENABLE);
		while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
			;
		index_addr((u32)(MODE_10 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
		ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);

		/*
		 * Finished only after BOTH ECC transaction and DMA
		 * command complete; the flag records the first event,
		 * the loop breaks on the second.
		 */
		ecc_done_OR_dma_comp = 0;
		while (1) {
			if (enable_ecc) {
				while (!ioread32(FlashReg + intr_status))
					;

				if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__ECC_ERR) {
					iowrite32(INTR_STATUS0__ECC_ERR,
						FlashReg + intr_status);
					status = do_ecc_new(flash_bank,
						read_data, block, page);
				} else if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__DMA_CMD_COMP) {
					iowrite32(INTR_STATUS0__DMA_CMD_COMP,
						FlashReg + intr_status);

					if (1 == ecc_done_OR_dma_comp)
						break;

					ecc_done_OR_dma_comp = 1;
				} else if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__ECC_TRANSACTION_DONE) {
					iowrite32(
					INTR_STATUS0__ECC_TRANSACTION_DONE,
					FlashReg + intr_status);

					if (1 == ecc_done_OR_dma_comp)
						break;

					ecc_done_OR_dma_comp = 1;
				}
			} else {
				/* No ECC: DMA completion ends the wait */
				while (!(ioread32(FlashReg + intr_status) &
					INTR_STATUS0__DMA_CMD_COMP))
					;
				iowrite32(INTR_STATUS0__DMA_CMD_COMP,
					FlashReg + intr_status);
				break;
			}

			/* Clear any other stray status bits */
			iowrite32((~INTR_STATUS0__ECC_ERR) &
				(~INTR_STATUS0__ECC_TRANSACTION_DONE) &
				(~INTR_STATUS0__DMA_CMD_COMP),
				FlashReg + intr_status);

		}

		iowrite32(ioread32(FlashReg + intr_status),
			FlashReg + intr_status);

		iowrite32(0, FlashReg + DMA_ENABLE);

		while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
			;

		/* Back to single-plane mode */
		iowrite32(0, FlashReg + MULTIPLANE_OPERATION);
	}

	return status;
}
1596
/*
 * Interrupt-driven pipeline read-ahead: stream @page_count (>= 2)
 * consecutive pages from @block into @read_data via DDMA; the ISR
 * completes info.complete when the transfer (and any ECC fix-up)
 * finishes.  Returns PASS, FAIL or ERR (timeout).
 */
u16 NAND_Pipeline_Read_Ahead(u8 *read_data, u32 block,
				u16 page, u16 page_count)
{
	u32 status = PASS;
	u32 NumPages = page_count;
	u64 flash_add;
	u32 flash_bank;
	u32 intr_status = 0;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	int ret;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);

	/* Pipeline mode needs at least two pages */
	if (page_count < 2)
		status = FAIL;

	if (status != PASS)
		return status;

	/* Translate (block, page) into a per-bank byte address */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		*DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	intr_status = intr_status_addresses[flash_bank];
	/* Clear pending interrupt bits by writing them back */
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(1, FlashReg + DMA_ENABLE);
	while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

	/* Fill the mrst_nand_info structure (consumed by the ISR) */
	info.state = INT_PIPELINE_READ_AHEAD;
	info.read_data = read_data;
	info.flash_bank = flash_bank;
	info.block = block;
	info.page = page;
	info.ret = PASS;

	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		(flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);

	ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);

	iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */

	ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
	if (!ret) {
		printk(KERN_ERR "Wait for completion timeout "
			"in %s, Line %d\n", __FILE__, __LINE__);
		status = ERR;
	} else {
		status = info.ret;
	}

	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(0, FlashReg + DMA_ENABLE);

	while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	return status;
}
1668
1669
/*
 * Interrupt-driven write of the main (data) area.  Multi-page
 * requests are chunked into MAX_PAGES_PER_RW and delegated to the
 * multi-plane / pipeline write helpers; a single page is DMA-written
 * here and completion is signalled via info.complete from the ISR.
 * Returns PASS, FAIL or ERR (timeout).
 */
u16 NAND_Write_Page_Main(u8 *write_data, u32 block, u16 page,
			    u16 page_count)
{
	u32 status = PASS;
	u64 flash_add;
	u32 intr_status = 0;
	u32 flash_bank;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	int ret;
	u8 *write_data_l;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);
	if (status != PASS)
		return status;

	/* Translate (block, page) into a per-bank byte address */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		* DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	intr_status = intr_status_addresses[flash_bank];

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

	/* Clear stale program-complete / program-fail bits */
	iowrite32(INTR_STATUS0__PROGRAM_COMP |
		INTR_STATUS0__PROGRAM_FAIL, FlashReg + intr_status);

	if (page_count > 1) {
		write_data_l = write_data;
		while (page_count > MAX_PAGES_PER_RW) {
			if (ioread32(FlashReg + MULTIPLANE_OPERATION))
				status = NAND_Multiplane_Write(write_data_l,
					block, page, MAX_PAGES_PER_RW);
			else
				status = NAND_Pipeline_Write_Ahead(
					write_data_l, block, page,
					MAX_PAGES_PER_RW);
			if (status == FAIL)
				return status;

			write_data_l += DeviceInfo.wPageDataSize *
					MAX_PAGES_PER_RW;
			page_count -= MAX_PAGES_PER_RW;
			page += MAX_PAGES_PER_RW;
		}
		if (ioread32(FlashReg + MULTIPLANE_OPERATION))
			status = NAND_Multiplane_Write(write_data_l,
				block, page, page_count);
		else
			status = NAND_Pipeline_Write_Ahead(write_data_l,
				block, page, page_count);

		return status;
	}

	iowrite32(1, FlashReg + DMA_ENABLE);
	while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	/* Fill the mrst_nand_info structure (consumed by the ISR) */
	info.state = INT_WRITE_PAGE_MAIN;
	info.write_data = write_data;
	info.flash_bank = flash_bank;
	info.block = block;
	info.page = page;
	info.ret = PASS;

	/* op = 1: DDMA write */
	ddma_trans(write_data, flash_add, flash_bank, 1, 1);

	iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable interrupt */

	ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
	if (!ret) {
		printk(KERN_ERR "Wait for completion timeout "
			"in %s, Line %d\n", __FILE__, __LINE__);
		status = ERR;
	} else {
		status = info.ret;
	}

	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(0, FlashReg + DMA_ENABLE);
	while (ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG)
		;

	return status;
}
1767
1768void NAND_ECC_Ctrl(int enable)
1769{
1770	if (enable) {
1771		nand_dbg_print(NAND_DBG_WARN,
1772			"Will enable ECC in %s, Line %d, Function: %s\n",
1773			__FILE__, __LINE__, __func__);
1774		iowrite32(1, FlashReg + ECC_ENABLE);
1775		enable_ecc = 1;
1776	} else {
1777		nand_dbg_print(NAND_DBG_WARN,
1778			"Will disable ECC in %s, Line %d, Function: %s\n",
1779			__FILE__, __LINE__, __func__);
1780		iowrite32(0, FlashReg + ECC_ENABLE);
1781		enable_ecc = 0;
1782	}
1783}
1784
/*
 * Write @page_count pages of main+spare data through the controller's
 * indirect-access FIFO (no DMA).  With ECC enabled, each page is first
 * re-packed from the logical layout (data | flags | ECC bytes) into
 * the physical interleaved layout inside buf_write_page_main_spare,
 * then pushed to the FIFO one 32-bit word at a time.
 * Returns PASS or FAIL (program failure reported by the controller).
 */
u16 NAND_Write_Page_Main_Spare(u8 *write_data, u32 block,
					u16 page, u16 page_count)
{
	u32 status = PASS;
	u32 i, j, page_num = 0;
	u32 PageSize = DeviceInfo.wPageSize;
	u32 PageDataSize = DeviceInfo.wPageDataSize;
	u32 eccBytes = DeviceInfo.wECCBytesPerSector;
	u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
	u32 spareSkipBytes  = DeviceInfo.wSpareSkipBytes;
	u64 flash_add;
	u32 eccSectorSize;
	u32 flash_bank;
	u32 intr_status = 0;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u8 *page_main_spare = buf_write_page_main_spare;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);

	status = Boundary_Check_Block_Page(block, page, page_count);

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	if (status == PASS) {
		intr_status = intr_status_addresses[flash_bank];

		/* Include the spare area in FIFO transfers */
		iowrite32(1, FlashReg + TRANSFER_SPARE_REG);

		while ((status != FAIL) && (page_count > 0)) {
			flash_add = (u64)(block %
			(DeviceInfo.wTotalBlocks / totalUsedBanks)) *
			DeviceInfo.wBlockDataSize +
			(u64)page * DeviceInfo.wPageDataSize;

			/* Clear pending bits by writing them back */
			iowrite32(ioread32(FlashReg + intr_status),
				FlashReg + intr_status);

			iowrite32((u32)(MODE_01 | (flash_bank << 24) |
				(flash_add >>
				DeviceInfo.nBitsInPageDataSize)),
				FlashMem);

			if (enable_ecc) {
				/*
				 * Interleave data sectors with their ECC
				 * bytes into the staging buffer.
				 */
				for (j = 0;
				     j <
				     DeviceInfo.wPageDataSize / eccSectorSize;
				     j++) {
					for (i = 0; i < eccSectorSize; i++)
						page_main_spare[(eccSectorSize +
								 eccBytes) * j +
								i] =
						    write_data[eccSectorSize *
							       j + i];

					for (i = 0; i < eccBytes; i++)
						page_main_spare[(eccSectorSize +
								 eccBytes) * j +
								eccSectorSize +
								i] =
						    write_data[PageDataSize +
							       spareFlagBytes +
							       eccBytes * j +
							       i];
				}

				/* Spare flag bytes follow the last sector */
				for (i = 0; i < spareFlagBytes; i++)
					page_main_spare[(eccSectorSize +
							 eccBytes) * j + i] =
					    write_data[PageDataSize + i];

				/*
				 * Open a spareSkipBytes gap after the data
				 * area (moving high-to-low: regions overlap)
				 * and fill the gap with 0xff.
				 */
				for (i = PageSize - 1; i >= PageDataSize +
							spareSkipBytes; i--)
					page_main_spare[i] = page_main_spare[i -
								spareSkipBytes];

				for (i = PageDataSize; i < PageDataSize +
							spareSkipBytes; i++)
					page_main_spare[i] = 0xff;

				/* Push the re-packed page into the FIFO */
				for (i = 0; i < PageSize / 4; i++)
					iowrite32(
					*((u32 *)page_main_spare + i),
					FlashMem + 0x10);
			} else {

				/* No ECC: write the caller's bytes as-is */
				for (i = 0; i < PageSize / 4; i++)
					iowrite32(*((u32 *)write_data + i),
						FlashMem + 0x10);
			}

			/* Spin until the program completes or fails */
			while (!(ioread32(FlashReg + intr_status) &
				(INTR_STATUS0__PROGRAM_COMP |
				INTR_STATUS0__PROGRAM_FAIL)))
				;

			if (ioread32(FlashReg + intr_status) &
				INTR_STATUS0__PROGRAM_FAIL)
				status = FAIL;

			iowrite32(ioread32(FlashReg + intr_status),
					FlashReg + intr_status);

			page_num++;
			page_count--;
			write_data += PageSize;
		}

		iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
	}

	return status;
}
1901
1902u16 NAND_Read_Page_Main_Spare(u8 *read_data, u32 block, u16 page,
1903				 u16 page_count)
1904{
1905	u32 status = PASS;
1906	u32 i, j;
1907	u64 flash_add = 0;
1908	u32 PageSize = DeviceInfo.wPageSize;
1909	u32 PageDataSize = DeviceInfo.wPageDataSize;
1910	u32 PageSpareSize = DeviceInfo.wPageSpareSize;
1911	u32 eccBytes = DeviceInfo.wECCBytesPerSector;
1912	u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
1913	u32 spareSkipBytes  = DeviceInfo.wSpareSkipBytes;
1914	u32 eccSectorSize;
1915	u32 flash_bank;
1916	u32 intr_status = 0;
1917	u8 *read_data_l = read_data;
1918	u32 intr_status_addresses[4] = {INTR_STATUS0,
1919		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
1920	u8 *page_main_spare = buf_read_page_main_spare;
1921
1922	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
1923		       __FILE__, __LINE__, __func__);
1924
1925	eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
1926
1927	status = Boundary_Check_Block_Page(block, page, page_count);
1928
1929	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
1930
1931	if (status == PASS) {
1932		intr_status = intr_status_addresses[flash_bank];
1933
1934		iowrite32(1, FlashReg + TRANSFER_SPARE_REG);
1935
1936		iowrite32(ioread32(FlashReg + intr_status),
1937				FlashReg + intr_status);
1938
1939		while ((status != FAIL) && (page_count > 0)) {
1940			flash_add = (u64)(block %
1941				(DeviceInfo.wTotalBlocks / totalUsedBanks))
1942				* DeviceInfo.wBlockDataSize +
1943				(u64)page * DeviceInfo.wPageDataSize;
1944
1945			index_addr((u32)(MODE_10 | (flash_bank << 24) |
1946				(flash_add >> DeviceInfo.nBitsInPageDataSize)),
1947				0x43);
1948			index_addr((u32)(MODE_10 | (flash_bank << 24) |
1949				(flash_add >> DeviceInfo.nBitsInPageDataSize)),
1950				0x2000 | page_count);
1951
1952			while (!(ioread32(FlashReg + intr_status) &
1953				INTR_STATUS0__LOAD_COMP))
1954				;
1955
1956			iowrite32((u32)(MODE_01 | (flash_bank << 24) |
1957				(flash_add >>
1958				DeviceInfo.nBitsInPageDataSize)),
1959				FlashMem);
1960
1961			for (i = 0; i < PageSize / 4; i++)
1962				*(((u32 *)page_main_spare) + i) =
1963					ioread32(FlashMem + 0x10);
1964
1965			if (enable_ecc) {
1966				for (i = PageDataSize;  i < PageSize -
1967							spareSkipBytes; i++)
1968					page_main_spare[i] = page_main_spare[i +
1969								spareSkipBytes];
1970
1971				for (j = 0;
1972				j < DeviceInfo.wPageDataSize / eccSectorSize;
1973				j++) {
1974
1975					for (i = 0; i < eccSectorSize; i++)
1976						read_data_l[eccSectorSize * j +
1977							    i] =
1978						    page_main_spare[
1979							(eccSectorSize +
1980							eccBytes) * j + i];
1981
1982					for (i = 0; i < eccBytes; i++)
1983						read_data_l[PageDataSize +
1984							    spareFlagBytes +
1985							    eccBytes * j + i] =
1986						    page_main_spare[
1987							(eccSectorSize +
1988							eccBytes) * j +
1989							eccSectorSize + i];
1990				}
1991
1992				for (i = 0; i < spareFlagBytes; i++)
1993					read_data_l[PageDataSize + i] =
1994					    page_main_spare[(eccSectorSize +
1995							     eccBytes) * j + i];
1996			} else {
1997				for (i = 0; i < (PageDataSize + PageSpareSize);
1998				     i++)
1999					read_data_l[i] = page_main_spare[i];
2000
2001			}
2002
2003			if (enable_ecc) {
2004				while (!(ioread32(FlashReg + intr_status) &
2005					(INTR_STATUS0__ECC_TRANSACTION_DONE |
2006					INTR_STATUS0__ECC_ERR)))
2007					;
2008
2009				if (ioread32(FlashReg + intr_status) &
2010					INTR_STATUS0__ECC_ERR) {
2011					iowrite32(INTR_STATUS0__ECC_ERR,
2012						FlashReg + intr_status);
2013					status = do_ecc_new(flash_bank,
2014						read_data, block, page);
2015				}
2016
2017				if (ioread32(FlashReg + intr_status) &
2018					INTR_STATUS0__ECC_TRANSACTION_DONE &
2019					INTR_STATUS0__ECC_ERR) {
2020					iowrite32(INTR_STATUS0__ECC_ERR |
2021					INTR_STATUS0__ECC_TRANSACTION_DONE,
2022					FlashReg + intr_status);
2023				} else if (ioread32(FlashReg + intr_status) &
2024					INTR_STATUS0__ECC_TRANSACTION_DONE) {
2025					iowrite32(
2026					INTR_STATUS0__ECC_TRANSACTION_DONE,
2027					FlashReg + intr_status);
2028				} else if (ioread32(FlashReg + intr_status) &
2029					INTR_STATUS0__ECC_ERR) {
2030					iowrite32(INTR_STATUS0__ECC_ERR,
2031						FlashReg + intr_status);
2032				}
2033			}
2034
2035			page++;
2036			page_count--;
2037			read_data_l += PageSize;
2038		}
2039	}
2040
2041	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
2042
2043	index_addr((u32)(MODE_10 | (flash_bank << 24) |
2044		(flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
2045
2046	return status;
2047}
2048
2049u16 NAND_Pipeline_Write_Ahead(u8 *write_data, u32 block,
2050			u16 page, u16 page_count)
2051{
2052	u16 status = PASS;
2053	u32 NumPages = page_count;
2054	u64 flash_add;
2055	u32 flash_bank;
2056	u32 intr_status = 0;
2057	u32 intr_status_addresses[4] = {INTR_STATUS0,
2058		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
2059	int ret;
2060
2061	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
2062		       __FILE__, __LINE__, __func__);
2063
2064	status = Boundary_Check_Block_Page(block, page, page_count);
2065
2066	if (page_count < 2)
2067		status = FAIL;
2068
2069	if (status != PASS)
2070		return status;
2071
2072	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
2073		* DeviceInfo.wBlockDataSize +
2074		(u64)page * DeviceInfo.wPageDataSize;
2075
2076	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
2077
2078	intr_status = intr_status_addresses[flash_bank];
2079	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
2080
2081	iowrite32(1, FlashReg + DMA_ENABLE);
2082	while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
2083		;
2084
2085	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
2086
2087	/* Fill the mrst_nand_info structure */
2088	info.state = INT_PIPELINE_WRITE_AHEAD;
2089	info.write_data = write_data;
2090	info.flash_bank = flash_bank;
2091	info.block = block;
2092	info.page = page;
2093	info.ret = PASS;
2094
2095	index_addr((u32)(MODE_10 | (flash_bank << 24) |
2096		(flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
2097
2098	ddma_trans(write_data, flash_add, flash_bank, 1, NumPages);
2099
2100	iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable interrupt */
2101
2102	ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
2103	if (!ret) {
2104		printk(KERN_ERR "Wait for completion timeout "
2105			"in %s, Line %d\n", __FILE__, __LINE__);
2106		status = ERR;
2107	} else {
2108		status = info.ret;
2109	}
2110
2111	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
2112
2113	iowrite32(0, FlashReg + DMA_ENABLE);
2114	while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
2115		;
2116
2117	return status;
2118}
2119
/*
 * Un-tested function.
 *
 * Program 'page_count' pages starting at (block, page) with the
 * controller's multi-plane mode enabled (MULTIPLANE_OPERATION = 1),
 * polling the bank's interrupt status register until the DMA command
 * completes.  Any PROGRAM_FAIL seen while polling forces FAIL even if
 * the DMA itself finishes.
 *
 * Returns PASS on success, FAIL on a bad range or program failure.
 */
u16 NAND_Multiplane_Write(u8 *write_data, u32 block, u16 page,
			     u16 page_count)
{
	u16 status = PASS;
	u32 NumPages = page_count;
	u64 flash_add;
	u32 flash_bank;
	u32 intr_status = 0;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u16 status2 = PASS;	/* latches a PROGRAM_FAIL seen mid-transfer */
	u32 t;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);
	if (status != PASS)
		return status;

	/* Byte offset of the first target page within its bank */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		* DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	/* Ack any stale interrupt status bits for this bank */
	intr_status = intr_status_addresses[flash_bank];
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	/* Main-area only transfer, multi-plane mode on */
	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
	iowrite32(0x01, FlashReg + MULTIPLANE_OPERATION);

	/* Enable the DMA engine and wait for it to report enabled */
	iowrite32(1, FlashReg + DMA_ENABLE);
	while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

	/* Pipeline setup command, then start the DMA transfer */
	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		(flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);

	ddma_trans(write_data, flash_add, flash_bank, 1, NumPages);

	/* Busy-poll until DMA completion, latching program failures */
	while (1) {
		while (!ioread32(FlashReg + intr_status))
			;

		if (ioread32(FlashReg + intr_status) &
			INTR_STATUS0__DMA_CMD_COMP) {
			iowrite32(INTR_STATUS0__DMA_CMD_COMP,
				FlashReg + intr_status);
			status = PASS;
			if (status2 == FAIL)
				status = FAIL;
			break;
		} else if (ioread32(FlashReg + intr_status) &
				INTR_STATUS0__PROGRAM_FAIL) {
			status2 = FAIL;
			status = FAIL;
			t = ioread32(FlashReg + intr_status) &
				INTR_STATUS0__PROGRAM_FAIL;
			iowrite32(t, FlashReg + intr_status);
		} else {
			/* Clear any status bits other than the two above */
			iowrite32((~INTR_STATUS0__PROGRAM_FAIL) &
				(~INTR_STATUS0__DMA_CMD_COMP),
				FlashReg + intr_status);
		}
	}

	/* Ack remaining status bits, disable DMA, leave multi-plane mode */
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(0, FlashReg + DMA_ENABLE);

	while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	iowrite32(0, FlashReg + MULTIPLANE_OPERATION);

	return status;
}
2201
2202
2203#if CMD_DMA
/*
 * Interrupt handler for the CMD_DMA build.  Queries the FTL event
 * status (first_failed_cmd is filled in but not used here) and wakes
 * the thread sleeping on dev->complete.
 */
static irqreturn_t cdma_isr(int irq, void *dev_id)
{
	struct mrst_nand_info *dev = dev_id;
	int first_failed_cmd;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	/* IRQ line is shared (IRQF_SHARED): bail out if it isn't ours */
	if (!is_cdma_interrupt())
		return IRQ_NONE;

	/* Disable controller interrupts */
	iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
	GLOB_FTL_Event_Status(&first_failed_cmd);
	complete(&dev->complete);

	return IRQ_HANDLED;
}
2222#else
/*
 * Completion path for DMA reads (INT_READ_PAGE_MAIN /
 * INT_PIPELINE_READ_AHEAD), called from ddma_isr().
 *
 * Busy-polls this bank's interrupt status register.  With ECC enabled
 * a read is finished only once BOTH the ECC-transaction-done and the
 * DMA-command-complete events have been seen, in either order;
 * ecc_done_OR_dma_comp records that the first of the two occurred.
 * Every ECC_ERR event triggers a software fixup via do_ecc_new(),
 * whose result becomes dev->ret.
 */
static void handle_nand_int_read(struct mrst_nand_info *dev)
{
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u32 intr_status;
	u32 ecc_done_OR_dma_comp = 0;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	dev->ret = PASS;
	intr_status = intr_status_addresses[dev->flash_bank];

	while (1) {
		if (enable_ecc) {
			if (ioread32(FlashReg + intr_status) &
				INTR_STATUS0__ECC_ERR) {
				/* Ack the error, then repair the read data */
				iowrite32(INTR_STATUS0__ECC_ERR,
					FlashReg + intr_status);
				dev->ret = do_ecc_new(dev->flash_bank,
						dev->read_data,
						dev->block, dev->page);
			} else if (ioread32(FlashReg + intr_status) &
				INTR_STATUS0__DMA_CMD_COMP) {
				iowrite32(INTR_STATUS0__DMA_CMD_COMP,
					FlashReg + intr_status);
				if (1 == ecc_done_OR_dma_comp)
					break;
				ecc_done_OR_dma_comp = 1;
			} else if (ioread32(FlashReg + intr_status) &
				INTR_STATUS0__ECC_TRANSACTION_DONE) {
				iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE,
					FlashReg + intr_status);
				if (1 == ecc_done_OR_dma_comp)
					break;
				ecc_done_OR_dma_comp = 1;
			}
		} else {
			/* ECC disabled: DMA completion alone finishes us */
			if (ioread32(FlashReg + intr_status) &
				INTR_STATUS0__DMA_CMD_COMP) {
				iowrite32(INTR_STATUS0__DMA_CMD_COMP,
					FlashReg + intr_status);
				break;
			} else {
				printk(KERN_ERR "Illegal INTS "
					"(offset addr 0x%x) value: 0x%x\n",
					intr_status,
					ioread32(FlashReg + intr_status));
			}
		}

		/* Clear any status bits other than the three handled above */
		iowrite32((~INTR_STATUS0__ECC_ERR) &
		(~INTR_STATUS0__ECC_TRANSACTION_DONE) &
		(~INTR_STATUS0__DMA_CMD_COMP),
		FlashReg + intr_status);
	}
}
2280
/*
 * Completion path for DMA writes (INT_WRITE_PAGE_MAIN /
 * INT_PIPELINE_WRITE_AHEAD), called from ddma_isr().
 *
 * Busy-polls this bank's interrupt status register until the DMA
 * command completes.  Any PROGRAM_FAIL observed on the way is latched
 * in 'status' and reported through dev->ret once the DMA finishes.
 */
static void handle_nand_int_write(struct mrst_nand_info *dev)
{
	u32 intr_status;
	u32 intr[4] = {INTR_STATUS0, INTR_STATUS1,
		INTR_STATUS2, INTR_STATUS3};
	int status = PASS;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	dev->ret = PASS;
	intr_status = intr[dev->flash_bank];

	while (1) {
		/* Wait until any status bit asserts */
		while (!ioread32(FlashReg + intr_status))
			;

		if (ioread32(FlashReg + intr_status) &
			INTR_STATUS0__DMA_CMD_COMP) {
			iowrite32(INTR_STATUS0__DMA_CMD_COMP,
				FlashReg + intr_status);
			if (FAIL == status)
				dev->ret = FAIL;
			break;
		} else if (ioread32(FlashReg + intr_status) &
			INTR_STATUS0__PROGRAM_FAIL) {
			status = FAIL;
			iowrite32(INTR_STATUS0__PROGRAM_FAIL,
				FlashReg + intr_status);
		} else {
			/* Clear any status bits other than the two above */
			iowrite32((~INTR_STATUS0__PROGRAM_FAIL) &
				(~INTR_STATUS0__DMA_CMD_COMP),
				FlashReg + intr_status);
		}
	}
}
2317
/*
 * Interrupt handler for the non-CMD_DMA build.  Checks that one of the
 * events we enabled is pending on the bank recorded in dev->flash_bank,
 * then finishes the transfer (the handle_nand_int_* helpers busy-poll
 * to completion) and wakes the waiter on dev->complete.
 */
static irqreturn_t ddma_isr(int irq, void *dev_id)
{
	struct mrst_nand_info *dev = dev_id;
	u32 int_mask, ints0, ints1, ints2, ints3, ints_offset;
	u32 intr[4] = {INTR_STATUS0, INTR_STATUS1,
		INTR_STATUS2, INTR_STATUS3};

	/* The events enabled in NAND_Flash_Init() for this build */
	int_mask = INTR_STATUS0__DMA_CMD_COMP |
		INTR_STATUS0__ECC_TRANSACTION_DONE |
		INTR_STATUS0__ECC_ERR |
		INTR_STATUS0__PROGRAM_FAIL |
		INTR_STATUS0__ERASE_FAIL;

	ints0 = ioread32(FlashReg + INTR_STATUS0);
	ints1 = ioread32(FlashReg + INTR_STATUS1);
	ints2 = ioread32(FlashReg + INTR_STATUS2);
	ints3 = ioread32(FlashReg + INTR_STATUS3);

	ints_offset = intr[dev->flash_bank];

	nand_dbg_print(NAND_DBG_DEBUG,
		"INTR0: 0x%x, INTR1: 0x%x, INTR2: 0x%x, INTR3: 0x%x, "
		"DMA_INTR: 0x%x, "
		"dev->state: 0x%x, dev->flash_bank: %d\n",
		ints0, ints1, ints2, ints3,
		ioread32(FlashReg + DMA_INTR),
		dev->state, dev->flash_bank);

	/* None of our events pending on the active bank: ack everything
	 * we read above and let other handlers on the shared line run. */
	if (!(ioread32(FlashReg + ints_offset) & int_mask)) {
		iowrite32(ints0, FlashReg + INTR_STATUS0);
		iowrite32(ints1, FlashReg + INTR_STATUS1);
		iowrite32(ints2, FlashReg + INTR_STATUS2);
		iowrite32(ints3, FlashReg + INTR_STATUS3);
		nand_dbg_print(NAND_DBG_WARN,
			"ddma_isr: Invalid interrupt for NAND controller. "
			"Ignore it\n");
		return IRQ_NONE;
	}

	/* Dispatch on the operation recorded before the DMA was started */
	switch (dev->state) {
	case INT_READ_PAGE_MAIN:
	case INT_PIPELINE_READ_AHEAD:
		/* Disable controller interrupts */
		iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
		handle_nand_int_read(dev);
		break;
	case INT_WRITE_PAGE_MAIN:
	case INT_PIPELINE_WRITE_AHEAD:
		iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
		handle_nand_int_write(dev);
		break;
	default:
		printk(KERN_ERR "ddma_isr - Illegal state: 0x%x\n",
			dev->state);
		return IRQ_NONE;
	}

	dev->state = INT_IDLE_STATE;
	complete(&dev->complete);
	return IRQ_HANDLED;
}
2379#endif
2380
/* PCI IDs this driver binds to: Intel (0x8086) NAND controller 0x0809 */
static const struct pci_device_id nand_pci_ids[] = {
	{
	 .vendor = 0x8086,
	 .device = 0x0809,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 },
	{ /* end: all zeroes */ }
};
2390
2391static int nand_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
2392{
2393	int ret = -ENODEV;
2394	unsigned long csr_base;
2395	unsigned long csr_len;
2396	struct mrst_nand_info *pndev = &info;
2397
2398	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
2399		       __FILE__, __LINE__, __func__);
2400
2401	ret = pci_enable_device(dev);
2402	if (ret) {
2403		printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
2404		return ret;
2405	}
2406
2407	pci_set_master(dev);
2408	pndev->dev = dev;
2409
2410	csr_base = pci_resource_start(dev, 0);
2411	if (!csr_base) {
2412		printk(KERN_ERR "Spectra: pci_resource_start failed!\n");
2413		return -ENODEV;
2414	}
2415
2416	csr_len = pci_resource_len(dev, 0);
2417	if (!csr_len) {
2418		printk(KERN_ERR "Spectra: pci_resource_len failed!\n");
2419		return -ENODEV;
2420	}
2421
2422	ret = pci_request_regions(dev, SPECTRA_NAND_NAME);
2423	if (ret) {
2424		printk(KERN_ERR "Spectra: Unable to request "
2425		       "memory region\n");
2426		goto failed_req_csr;
2427	}
2428
2429	pndev->ioaddr = ioremap_nocache(csr_base, csr_len);
2430	if (!pndev->ioaddr) {
2431		printk(KERN_ERR "Spectra: Unable to remap memory region\n");
2432		ret = -ENOMEM;
2433		goto failed_remap_csr;
2434	}
2435	nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08lx -> 0x%p (0x%lx)\n",
2436		       csr_base, pndev->ioaddr, csr_len);
2437
2438	init_completion(&pndev->complete);
2439	nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq);
2440
2441#if CMD_DMA
2442	if (request_irq(dev->irq, cdma_isr, IRQF_SHARED,
2443			SPECTRA_NAND_NAME, &info)) {
2444		printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
2445		ret = -ENODEV;
2446		iounmap(pndev->ioaddr);
2447		goto failed_remap_csr;
2448	}
2449#else
2450	if (request_irq(dev->irq, ddma_isr, IRQF_SHARED,
2451			SPECTRA_NAND_NAME, &info)) {
2452		printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
2453		ret = -ENODEV;
2454		iounmap(pndev->ioaddr);
2455		goto failed_remap_csr;
2456	}
2457#endif
2458
2459	pci_set_drvdata(dev, pndev);
2460
2461	return 0;
2462
2463failed_remap_csr:
2464	pci_release_regions(dev);
2465failed_req_csr:
2466
2467	return ret;
2468}
2469
2470static void nand_pci_remove(struct pci_dev *dev)
2471{
2472	struct mrst_nand_info *pndev = pci_get_drvdata(dev);
2473
2474	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
2475		       __FILE__, __LINE__, __func__);
2476
2477#if CMD_DMA
2478	free_irq(dev->irq, pndev);
2479#endif
2480	iounmap(pndev->ioaddr);
2481	pci_release_regions(dev);
2482	pci_disable_device(dev);
2483}
2484
MODULE_DEVICE_TABLE(pci, nand_pci_ids);

/* PCI glue: binds nand_pci_probe/nand_pci_remove to the IDs above */
static struct pci_driver nand_pci_driver = {
	.name = SPECTRA_NAND_NAME,
	.id_table = nand_pci_ids,
	.probe = nand_pci_probe,
	.remove = nand_pci_remove,
};
2493
2494int NAND_Flash_Init(void)
2495{
2496	int retval;
2497	u32 int_mask;
2498
2499	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2500		       __FILE__, __LINE__, __func__);
2501
2502	FlashReg = ioremap_nocache(GLOB_HWCTL_REG_BASE,
2503			GLOB_HWCTL_REG_SIZE);
2504	if (!FlashReg) {
2505		printk(KERN_ERR "Spectra: ioremap_nocache failed!");
2506		return -ENOMEM;
2507	}
2508	nand_dbg_print(NAND_DBG_WARN,
2509		"Spectra: Remapped reg base address: "
2510		"0x%p, len: %d\n",
2511		FlashReg, GLOB_HWCTL_REG_SIZE);
2512
2513	FlashMem = ioremap_nocache(GLOB_HWCTL_MEM_BASE,
2514			GLOB_HWCTL_MEM_SIZE);
2515	if (!FlashMem) {
2516		printk(KERN_ERR "Spectra: ioremap_nocache failed!");
2517		iounmap(FlashReg);
2518		return -ENOMEM;
2519	}
2520	nand_dbg_print(NAND_DBG_WARN,
2521		"Spectra: Remapped flash base address: "
2522		"0x%p, len: %d\n",
2523		(void *)FlashMem, GLOB_HWCTL_MEM_SIZE);
2524
2525	nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
2526			"acc_clks: %d, re_2_we: %d, we_2_re: %d,"
2527			"addr_2_data: %d, rdwr_en_lo_cnt: %d, "
2528			"rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
2529			ioread32(FlashReg + ACC_CLKS),
2530			ioread32(FlashReg + RE_2_WE),
2531			ioread32(FlashReg + WE_2_RE),
2532			ioread32(FlashReg + ADDR_2_DATA),
2533			ioread32(FlashReg + RDWR_EN_LO_CNT),
2534			ioread32(FlashReg + RDWR_EN_HI_CNT),
2535			ioread32(FlashReg + CS_SETUP_CNT));
2536
2537	NAND_Flash_Reset();
2538
2539	iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
2540
2541#if CMD_DMA
2542	info.pcmds_num = 0;
2543	info.flash_bank = 0;
2544	info.cdma_num = 0;
2545	int_mask = (DMA_INTR__DESC_COMP_CHANNEL0 |
2546		DMA_INTR__DESC_COMP_CHANNEL1 |
2547		DMA_INTR__DESC_COMP_CHANNEL2 |
2548		DMA_INTR__DESC_COMP_CHANNEL3 |
2549		DMA_INTR__MEMCOPY_DESC_COMP);
2550	iowrite32(int_mask, FlashReg + DMA_INTR_EN);
2551	iowrite32(0xFFFF, FlashReg + DMA_INTR);
2552
2553	int_mask = (INTR_STATUS0__ECC_ERR |
2554		INTR_STATUS0__PROGRAM_FAIL |
2555		INTR_STATUS0__ERASE_FAIL);
2556#else
2557	int_mask = INTR_STATUS0__DMA_CMD_COMP |
2558		INTR_STATUS0__ECC_TRANSACTION_DONE |
2559		INTR_STATUS0__ECC_ERR |
2560		INTR_STATUS0__PROGRAM_FAIL |
2561		INTR_STATUS0__ERASE_FAIL;
2562#endif
2563	iowrite32(int_mask, FlashReg + INTR_EN0);
2564	iowrite32(int_mask, FlashReg + INTR_EN1);
2565	iowrite32(int_mask, FlashReg + INTR_EN2);
2566	iowrite32(int_mask, FlashReg + INTR_EN3);
2567
2568	/* Clear all status bits */
2569	iowrite32(0xFFFF, FlashReg + INTR_STATUS0);
2570	iowrite32(0xFFFF, FlashReg + INTR_STATUS1);
2571	iowrite32(0xFFFF, FlashReg + INTR_STATUS2);
2572	iowrite32(0xFFFF, FlashReg + INTR_STATUS3);
2573
2574	iowrite32(0x0F, FlashReg + RB_PIN_ENABLED);
2575	iowrite32(CHIP_EN_DONT_CARE__FLAG, FlashReg + CHIP_ENABLE_DONT_CARE);
2576
2577	/* Should set value for these registers when init */
2578	iowrite32(0, FlashReg + TWO_ROW_ADDR_CYCLES);
2579	iowrite32(1, FlashReg + ECC_ENABLE);
2580	enable_ecc = 1;
2581
2582	retval = pci_register_driver(&nand_pci_driver);
2583	if (retval)
2584		return -ENOMEM;
2585
2586	return PASS;
2587}
2588
/*
 * Tear down what NAND_Flash_Init() set up: unregister the PCI driver
 * first (so probe/remove can no longer run), then release the static
 * register and data-window mappings.
 */
int nand_release_spectra(void)
{
	pci_unregister_driver(&nand_pci_driver);
	iounmap(FlashMem);
	iounmap(FlashReg);

	return 0;
}
2598