/* drivers/mtd/chips/cfi_cmdset_0002.c
 * (from asuswrt-rt-n18u-9.0.0.4.380.2695, release/src-rt-6.x.4708/linux/linux-2.6.36)
 */
/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define SST49LF004B	        0x0060
#define SST49LF040B	        0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6
static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};


/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);


	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	/* The version fields are ASCII digits: '1' is 0x31, so 0x3131
	 * corresponds to extension version 1.1. */
	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		DEBUG(MTD_DEBUG_LEVEL1,
			"%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			map->name, cfi->mfr, cfi->id);

		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected as they have the 0x80 bit set
		 * so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

			/* Macronix added CFI to their 2nd generation
			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
			 * Fujitsu, Spansion, EON, ESI and older Macronix)
			 * has CFI.
			 *
			 * Therefore also check the manufacturer.
			 * This reduces the risk of false detection due to
			 * the 8-bit device ID.
			 */
			(cfi->mfr == CFI_MFR_MACRONIX)) {
			DEBUG(MTD_DEBUG_LEVEL1,
				"%s: Macronix MX29LV400C with bottom boot block"
				" detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		DEBUG(MTD_DEBUG_LEVEL1,
			"%s: AMD CFI PRI V%c.%c has no boot block field;"
			" deduced %s from Device ID\n", map->name, major, minor,
			extp->TopBottom == 2 ? "bottom" : "top");
	}
}
#endif

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
		mtd->write = cfi_amdstd_write_buffers;
	}
}

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	/* Some chips got it backwards... */
	if (cfi->id == AT49BV6416) {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 3;
		else
			extp->TopBottom = 2;
	} else {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 2;
		else
			extp->TopBottom = 3;
	}

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
	/* Setup for chips with a secsi area */
	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
		((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->erase = cfi_amdstd_erase_chip;
	}

}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
{
	mtd->lock = cfi_atmel_lock;
	mtd->unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * These flashes report two separate eraseblock regions based on the
	 * sector_erase-size and block_erase-size, although they both operate on the
	 * same memory. This is not allowed according to CFI, so we just pick the
	 * sector_erase-size.
	 */
	cfi->cfiq->NumEraseRegions = 1;
}

static void fixup_sst39vf(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x5555;
	cfi->addr_unlock2 = 0x2AAA;
}

static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x555;
	cfi->addr_unlock2 = 0x2AA;
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
	}
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
	}
}

/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
	{ CFI_MFR_SST, 0x234A, fixup_sst39vf, NULL, }, // SST39VF1602
	{ CFI_MFR_SST, 0x234B, fixup_sst39vf, NULL, }, // SST39VF1601
	{ CFI_MFR_SST, 0x235A, fixup_sst39vf, NULL, }, // SST39VF3202
	{ CFI_MFR_SST, 0x235B, fixup_sst39vf, NULL, }, // SST39VF3201
	{ CFI_MFR_SST, 0x235C, fixup_sst39vf_rev_b, NULL, }, // SST39VF3202B
	{ CFI_MFR_SST, 0x235D, fixup_sst39vf_rev_b, NULL, }, // SST39VF3201B
	{ CFI_MFR_SST, 0x236C, fixup_sst39vf_rev_b, NULL, }, // SST39VF6402B
	{ CFI_MFR_SST, 0x236D, fixup_sst39vf_rev_b, NULL, }, // SST39VF6401B
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear to be
	 * common, and it looks like the device IDs are as well.  This
	 * table covers the cases where we know that to be true.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
	{ 0, 0, NULL, NULL }
};
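
/*
 * How the tables above are applied: cfi_fixup() (implemented in
 * drivers/mtd/chips/cfi_util.c) walks a table and runs every entry whose
 * manufacturer and device IDs match the probed chip, with CFI_MFR_ANY /
 * CFI_ID_ANY acting as wildcards.  Conceptually (illustrative sketch, not
 * the exact upstream code):
 *
 *	for (f = fixups; f->fixup; f++)
 *		if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
 *		    (f->id  == CFI_ID_ANY  || f->id  == cfi->id))
 *			f->fixup(mtd, f->param);
 */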


static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG && cfi->id == 0x257e &&
	    extp->MajorVersion == '0')
		extp->MajorVersion = '1';
}

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_amdstd_erase_varsize;
	mtd->write   = cfi_amdstd_write_words;
	mtd->read    = cfi_amdstd_read;
	mtd->sync    = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

	if (cfi->cfi_mode==CFI_MODE_CFI){
		unsigned char bootloc;
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (extp) {
			/*
			 * It's a real CFI chip, not one for which the probe
			 * routine faked a CFI structure.
			 */
			cfi_fixup_major_minor(cfi, extp);

			/*
			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4
			 * see: http://www.amd.com/us-en/assets/content_type/DownloadableAssets/cfi_r20.pdf, page 19
			 *      http://www.amd.com/us-en/assets/content_type/DownloadableAssets/cfi_100_20011201.pdf
			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
			 */
			if (extp->MajorVersion != '1' ||
			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '4'))) {
				printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c (%#02x/%#02x).\n",
				       extp->MajorVersion, extp->MinorVersion,
				       extp->MajorVersion, extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}

			printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);

			/* Install our own private info structure */
			cfi->cmdset_priv = extp;

			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
#endif

			bootloc = extp->TopBottom;
			if ((bootloc < 2) || (bootloc > 5)) {
				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
				       "bank location (%d). Assuming bottom.\n",
				       map->name, bootloc);
				bootloc = 2;
			}

			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions-1)-i;
					__u32 swap;

					swap = cfi->cfiq->EraseRegionInfo[i];
					cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
					cfi->cfiq->EraseRegionInfo[j] = swap;
				}
			}
			/* Set the default CFI lock/unlock addresses */
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
		}
		cfi_fixup(mtd, cfi_nopri_fixup_table);

		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
			kfree(mtd);
			return NULL;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
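
/*
 * Map drivers normally do not call cfi_cmdset_0002() directly: they call
 * do_map_probe("cfi_probe", ...) and the generic CFI probe dispatches here
 * when the chip reports primary vendor command set 0x0002 (or 0x0006/0x0701
 * via the aliases above).  A minimal sketch of a caller, assuming a
 * hypothetical board map my_map that has already been ioremapped:
 *
 *	struct mtd_info *mtd = do_map_probe("cfi_probe", &my_map);
 *	if (mtd)
 *		add_mtd_device(mtd);
 */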

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips,
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips,
 * as each chip must be checked independently of the others).
 *
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}
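
/*
 * Background for the double-read above: while an AMD-style embedded
 * program/erase algorithm is running, the chip drives status on the data
 * bus instead of array data, and DQ6 toggles on every consecutive read
 * (DQ2 behaves similarly for erase-suspended sectors).  Two back-to-back
 * reads returning identical data therefore mean the chip is back in (or
 * never left) array mode.
 */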

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we're trying
				 * to use the erase-in-progress sector. */
				map_write(map, CMD(0x30), chip->in_progress_block_addr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting */
		return -EIO;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}


static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the
 * remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0x30), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why no special care is
 * taken over the add_wait_queue() or schedule() calls that appear within a
 * couple of xip_disable()'d areas of code, like in do_erase_oneblock for
 * example.  The queueing and scheduling always happen within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#endif
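
/*
 * Note on the non-XIP UDELAY() above: chip->mutex is deliberately dropped
 * around cfi_udelay() so that other users (e.g. a reader wanting to
 * erase-suspend the chip, or the suspend/resume paths) can take the lock
 * and make progress while this thread is merely waiting for the embedded
 * algorithm to complete.
 */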

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */

	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);


	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}


static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	mutex_lock(&chip->mutex);

	if (chip->state != FL_READY){
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		mutex_unlock(&chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	/* Enter the SecSi (security silicon) sector... */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	/* ...and exit it again */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;


	/* ofs: offset within the first chip that the first read should start */

	/* 8 secsi bytes per chip */
	chipnum=from>>3;
	ofs=from & 7;


	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}


static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundred usecs).  However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip does not have a field for
	 * the maximum timeout, only for the typical one, which can be far
	 * too short depending on the conditions.  The ' + 1' is to avoid
	 * having a timeout of 0 jiffies if HZ is smaller than 1000.
	 */
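	/*
	 * Worked example: with HZ=1000, uWriteTimeout below is
	 * (1000/1000)+1 = 2 jiffies (2 ms); with HZ=100 the integer
	 * division yields 0, so the '+ 1' leaves 1 jiffy (10 ms).
	 */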
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
		       __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2);
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}


static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout. */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come, encoded as (word count - 1) */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2);
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
			break;

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* reset on all failures. */
	map_write( map, CMD(0xF0), chip->start );
	xip_enable(map, chip, adr);

	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__ );

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
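
/*
 * The 0x25 / word-count / data / 0x29 sequence above is the standard
 * AMD-style "Write to Buffer" flow.  The count written after 0x25 is
 * (number of words - 1), so e.g. filling a 16-word buffer means writing
 * 0x0F, and the 0x29 confirm then starts a single embedded program
 * operation for the whole buffer.
 */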


static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}
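
/*
 * wbufsize above comes straight from the CFI query: the chip reports the
 * log2 of its per-chip write-buffer size in MaxBufWriteSize, scaled by
 * the interleave.  For example, a single x16 chip reporting
 * MaxBufWriteSize = 5 gives wbufsize = 1 << 5 = 32 bytes, so writes are
 * chopped at every 32-byte buffer boundary.
 */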


/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, chip->start );

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20);
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
				__func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}


static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, adr );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, CMD(0x30), adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20);
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
				__func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}


static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}


static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}

static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}


static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			mutex_unlock(&chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}


static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}


static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		mutex_unlock(&chip->mutex);
	}
}


/*
 * Ensure that the flash device is put back into read array mode before
 * unloading the driver or rebooting.  On some systems, rebooting while
 * the flash is in query/program/erase mode will prevent the CPU from
 * fetching the bootloader code, requiring a hard reset or power cycle.
 */
static int cfi_amdstd_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xF0), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}

		mutex_unlock(&chip->mutex);
	}

	return 0;
}


static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_amdstd_reset(mtd);
	return NOTIFY_DONE;
}


static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_amdstd_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
MODULE_ALIAS("cfi_cmdset_0006");
MODULE_ALIAS("cfi_cmdset_0701");