/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 *
 * $Id: cfi_cmdset_0002.c,v 1.1.1.1 2007/08/03 18:52:43 Exp $
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

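/*
 * AMD_BOOTLOC_BUG enables fixup_amd_bootblock() below, which overrides the
 * boot-block location reported by chips with pre-1.1 extended query tables,
 * since those CFI tables cannot be trusted.
 */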
#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define MANUFACTURER_AMD	0x0001
#define MANUFACTURER_ATMEL	0x001F
#define MANUFACTURER_SST	0x00BF
#define SST49LF004B	        0x0060
#define SST49LF040B	        0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
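/* fwh_lock.h builds its locking on top of get_chip()/put_chip(), so it is
 * included here, after their prototypes, rather than with the headers at
 * the top of the file. */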
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};


/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);


	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}
	}
}
#endif

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
		mtd->write = cfi_amdstd_write_buffers;
	}
}

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
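	/* Keep the first 5 bytes (the "PRI" signature plus major/minor
	 * version); zero the rest of the AMD-format structure before
	 * translating the Atmel-specific fields into it. */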
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	if (atmel_pri.BottomBoot)
		extp->TopBottom = 2;
	else
		extp->TopBottom = 3;
}

static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
	/* Setup for chips with a secsi area */
	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
		((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->erase = cfi_amdstd_erase_chip;
	}

}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
{
	mtd->lock = cfi_atmel_lock;
	mtd->unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_STUPID_LOCK;
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear to be
	 * common, and it looks like the device IDs are as well.  This
	 * table picks up all the cases where we know that to be so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
	{ 0, 0, NULL, NULL }
};


struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_amdstd_erase_varsize;
	mtd->write   = cfi_amdstd_write_words;
	mtd->read    = cfi_amdstd_read;
	mtd->sync    = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	if (cfi->cfi_mode==CFI_MODE_CFI){
		unsigned char bootloc;
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
			printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
			       "version %c.%c.\n",  extp->MajorVersion,
			       extp->MinorVersion);
			kfree(extp);
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		/* Apply cfi device specific fixups */
		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		bootloc = extp->TopBottom;
		if ((bootloc != 2) && (bootloc != 3)) {
			printk(KERN_WARNING "%s: CFI does not contain boot "
			       "bank location. Assuming bottom.\n", map->name);
			bootloc = 2;
		}

		if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
			printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);

			for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
				int j = (cfi->cfiq->NumEraseRegions-1)-i;
				__u32 swap;

				swap = cfi->cfiq->EraseRegionInfo[i];
				cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
				cfi->cfiq->EraseRegionInfo[j] = swap;
			}
		}
		/* Set the default CFI lock/unlock addresses */
		cfi->addr_unlock1 = 0x555;
		cfi->addr_unlock2 = 0x2aa;
		/* Modify the unlock address if we are in compatibility mode */
		if (	/* x16 in x8 mode */
			((cfi->device_type == CFI_DEVICETYPE_X8) &&
				(cfi->cfiq->InterfaceDesc == 2)) ||
			/* x32 in x16 mode */
			((cfi->device_type == CFI_DEVICETYPE_X16) &&
				(cfi->cfiq->InterfaceDesc == 4)))
		{
			cfi->addr_unlock1 = 0xaaa;
			cfi->addr_unlock2 = 0x555;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if(mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 *
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				spin_unlock(chip->mutex);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (mode == FL_WRITING) /* erase-suspend-program is broken; see cfi_amdstd_setup() */
			goto sleep;

		if (!(   mode == FL_READY
		      || mode == FL_POINT
		      || !cfip
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we're
				 * trying to use the erase-in-progress
				 * sector. */
				map_write(map, CMD(0x30), chip->in_progress_block_addr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}


static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * Within an XIP-disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
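	/* CMD(0x80) masks DQ7, the Data# Polling bit: it should read as 1 on
	 * every interleaved chip once the erase has completed or has been
	 * successfully suspended (per the usual AMD status-bit conventions). */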
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0x30), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why no special care is
 * taken for the add_wait_queue() or schedule() calls that appear within a
 * couple of xip_disable()'d areas of code, like in do_erase_oneblock for
 * example.  The queueing and scheduling always happen within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*), i.e. with the flash
 * in array mode, so many of the cases therein are never executed and cause
 * no problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */

	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);


	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}


static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	spin_lock(chip->mutex);

	if (chip->state != FL_READY){
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

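	/* Enter the SecSi (security silicon) sector region: two unlock
	 * cycles followed by command 0x88.  The SecSi area is then mapped
	 * in place of the normal array until we exit below. */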
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

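	/* Exit the SecSi sector region: unlock cycles, then 0x90 followed
	 * by 0x00 returns the chip to normal array read mode. */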
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	spin_unlock(chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;


	/* ofs: offset within the first chip that the first read should start */

	/* 8 secsi bytes per chip */
	chipnum=from>>3;
	ofs=from & 7;


	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}


static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundred usecs). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip doesn't have a field for
	 * the maximum timeout, only for the typical one, which can be far
	 * too short depending on the conditions.  The ' + 1' is to avoid
	 * having a timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
		       __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
 retry:
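	/* Standard AMD word-program sequence: two unlock cycles
	 * (0xAA @ 0x555, 0x55 @ 0x2AA), the Program command (0xA0),
	 * then the datum itself written to the target address. */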
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2);
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout. */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2);
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
			break;

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* reset on all failures. */
	map_write( map, CMD(0xF0), chip->start );
	xip_enable(map, chip, adr);

	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__ );

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}


/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, chip->start );

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

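	/* AMD Chip Erase sequence: two unlock cycles, Erase setup (0x80),
	 * two more unlock cycles, then Chip Erase (0x10) at the unlock
	 * address. */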
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20);
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
				__func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, adr );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

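	/* Sector Erase sequence: same unlock/0x80 setup as chip erase, but
	 * finished with 0x30 written directly to the sector address. */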
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, CMD(0x30), adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20);
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
				__func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}


static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}


static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	spin_unlock(chip->mutex);
	return ret;
}

static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}


static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			spin_unlock(chip->mutex);
			break;

		default:
			/* Not an idle state */
			add_wait_queue(&chip->wq, &wait);

			spin_unlock(chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}


static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}


static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		spin_unlock(chip->mutex);
	}
}

static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");