• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/mtd/chips/
1/*
2 * Common Flash Interface support:
3 *   ST Advanced Architecture Command Set (ID 0x0020)
4 *
5 * (C) 2000 Red Hat. GPL'd
6 *
7 * 10/10/2000	Nicolas Pitre <nico@fluxnic.net>
8 * 	- completely revamped method functions so they are aware and
9 * 	  independent of the flash geometry (buswidth, interleave, etc.)
10 * 	- scalability vs code size is completely set at compile-time
11 * 	  (see include/linux/mtd/cfi.h for selection)
12 *	- optimized write buffer method
13 * 06/21/2002	Joern Engel <joern@wh.fh-wedel.de> and others
14 *	- modified Intel Command Set 0x0001 to support ST Advanced Architecture
15 *	  (command set 0x0020)
16 *	- added a writev function
17 * 07/13/2005	Joern Engel <joern@wh.fh-wedel.de>
18 * 	- Plugged memory leak in cfi_staa_writev().
19 */
20
21#include <linux/module.h>
22#include <linux/types.h>
23#include <linux/kernel.h>
24#include <linux/sched.h>
25#include <linux/init.h>
26#include <asm/io.h>
27#include <asm/byteorder.h>
28
29#include <linux/errno.h>
30#include <linux/slab.h>
31#include <linux/delay.h>
32#include <linux/interrupt.h>
33#include <linux/mtd/map.h>
34#include <linux/mtd/cfi.h>
35#include <linux/mtd/mtd.h>
36
37
38static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
39static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
40static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
41		unsigned long count, loff_t to, size_t *retlen);
42static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
43static void cfi_staa_sync (struct mtd_info *);
44static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
45static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
46static int cfi_staa_suspend (struct mtd_info *);
47static void cfi_staa_resume (struct mtd_info *);
48
49static void cfi_staa_destroy(struct mtd_info *);
50
51struct mtd_info *cfi_cmdset_0020(struct map_info *, int);
52
53static struct mtd_info *cfi_staa_setup (struct map_info *);
54
/* Chip-driver registration record for the ST Advanced Architecture
 * command set.  There is deliberately no probe function: chips are
 * attached only via cfi_cmdset_0020(), called from the generic CFI
 * probe code.  .destroy releases our private data when the map goes. */
static struct mtd_chip_driver cfi_staa_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_staa_destroy,
	.name		= "cfi_cmdset_0020",
	.module		= THIS_MODULE
};
61
62/* #define DEBUG_LOCK_BITS */
63//#define DEBUG_CFI_FEATURES
64
#ifdef DEBUG_CFI_FEATURES
/* Debug aid: dump the chip's extended-query feature bits, suspend
 * capabilities, block-status-register mask and programming voltages
 * in human-readable form.  Compiled only with DEBUG_CFI_FEATURES. */
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:         %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:      %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:    %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:       %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:    %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:     %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:   %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	/* Bits 9..31 are not named by the spec version this was written for */
	for (i=9; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	/* Voltages are BCD-ish: integer part in the high byte, tenths in
	 * the low nibble */
	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 8, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 8, extp->VppOptimal & 0xf);
}
#endif
106
107/* This routine is made available to other mtd code via
108 * inter_module_register.  It must only be accessed through
109 * inter_module_get which will bump the use count of this module.  The
110 * addresses passed back in cfi are valid as long as the use count of
111 * this module is non-zero, i.e. between inter_module_get and
112 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
113 */
struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int i;

	if (cfi->cfi_mode) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		/* Choose the primary or the alternate vendor table address */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
		if (!extp)
			return NULL;

		/* Only extended-query versions 1.0 .. 1.3 are understood */
		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
			printk(KERN_ERR "  Unknown ST Microelectronics"
			       " Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);
			kfree(extp);
			return NULL;
		}

		/* Do some byteswapping if necessary */
		extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
		extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;
	}

	/* Seed per-chip timing defaults (consumed by cfi_udelay(), so
	 * effectively microseconds) and initialize the wait queues used
	 * to serialize access to each chip. */
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 128;
		cfi->chips[i].buffer_write_time = 128;
		cfi->chips[i].erase_time = 1024;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	return cfi_staa_setup(map);
}
164EXPORT_SYMBOL_GPL(cfi_cmdset_0020);
165
/* Build and populate the mtd_info for an interleaved set of 0x0020
 * chips: size, erase-region geometry and the method table.
 * Returns the new mtd_info, or NULL on allocation failure or if the
 * CFI erase regions do not exactly tile the device. */
static struct mtd_info *cfi_staa_setup(struct map_info *map)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	unsigned long offset = 0;
	int i,j;
	/* Size in bytes of one interleaved set of chips */
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		kfree(cfi->cmdset_priv);
		return NULL;
	}

	mtd->priv = map;
	mtd->type = MTD_NORFLASH;
	mtd->size = devsize * cfi->numchips;

	/* One erase-region descriptor per CFI region per chip */
	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		/* CFI EraseRegionInfo: high 16 bits encode block size in
		 * 256-byte units, low 16 bits encode block count - 1 */
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		/* mtd->erasesize reports the largest region's block size */
		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		/* Replicate this region's layout at each chip's offset */
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	/* Sanity check: the regions must exactly cover one chip */
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		kfree(mtd->eraseregions);
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n",
		       i, (unsigned long long)mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

	/* Also select the correct geometry setup too */
	mtd->erase = cfi_staa_erase_varsize;
	mtd->read = cfi_staa_read;
	mtd->write = cfi_staa_write_buffers;
	mtd->writev = cfi_staa_writev;
	mtd->sync = cfi_staa_sync;
	mtd->lock = cfi_staa_lock;
	mtd->unlock = cfi_staa_unlock;
	mtd->suspend = cfi_staa_suspend;
	mtd->resume = cfi_staa_resume;
	mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
	mtd->writesize = 8;	/* ECC buffering unit used by cfi_staa_writev() */
	map->fldrv = &cfi_staa_chipdrv;
	__module_get(THIS_MODULE);
	mtd->name = map->name;
	return mtd;
}
246
247
/* Read 'len' bytes at offset 'adr' of a single chip into 'buf'.
 *
 * Waits (polling, or sleeping on chip->wq) until the chip is ready.
 * If the chip is mid-erase and the feature table advertises erase
 * suspend (FeatureSupport bit 1), the erase is suspended for the
 * duration of the read and resumed afterwards.
 * Returns 0 on success, -EIO if the chip never becomes ready.
 */
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	map_word status, status_OK;
	unsigned long timeo;
	DECLARE_WAITQUEUE(wait, current);
	int suspended = 0;
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * If it's in FL_ERASING state, suspend it and make it talk now.
	 */
	switch (chip->state) {
	case FL_ERASING:
		if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
			goto sleep; /* We don't support erase suspend */

		map_write (map, CMD(0xb0), cmd_addr);	/* Erase Suspend */
		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), cmd_addr);	/* Read Status Register */
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		//		printk("Erase suspending at 0x%lx\n", cmd_addr);
		/* Poll until the ready bit (0x80) is set in the status */
		for (;;) {
			status = map_read(map, cmd_addr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh */
				map_write(map, CMD(0xd0), cmd_addr);	/* resume the erase */
				/* make sure we're in 'read status' mode */
				map_write(map, CMD(0x70), cmd_addr);
				chip->state = FL_ERASING;
				mutex_unlock(&chip->mutex);
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			/* Drop the lock while busy-waiting so others can run */
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
		}

		suspended = 1;
		map_write(map, CMD(0xff), cmd_addr);	/* back to Read Array */
		chip->state = FL_READY;
		break;


	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_addr);
		chip->state = FL_STATUS;
		/* fall through: now poll the status like FL_STATUS */

	case FL_STATUS:
		status = map_read(map, cmd_addr);
		if (map_word_andequal(map, status, status_OK, status_OK)) {
			map_write(map, CMD(0xff), cmd_addr);
			chip->state = FL_READY;
			break;
		}

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
	sleep:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	/* Chip is in read-array mode now; do the actual copy */
	map_copy_from(map, buf, adr, len);

	if (suspended) {
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), cmd_addr);	/* Erase Resume */
		map_write(map, CMD(0x70), cmd_addr);
	}

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}
377
378static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
379{
380	struct map_info *map = mtd->priv;
381	struct cfi_private *cfi = map->fldrv_priv;
382	unsigned long ofs;
383	int chipnum;
384	int ret = 0;
385
386	/* ofs: offset within the first chip that the first read should start */
387	chipnum = (from >> cfi->chipshift);
388	ofs = from - (chipnum <<  cfi->chipshift);
389
390	*retlen = 0;
391
392	while (len) {
393		unsigned long thislen;
394
395		if (chipnum >= cfi->numchips)
396			break;
397
398		if ((len + ofs -1) >> cfi->chipshift)
399			thislen = (1<<cfi->chipshift) - ofs;
400		else
401			thislen = len;
402
403		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
404		if (ret)
405			break;
406
407		*retlen += thislen;
408		len -= thislen;
409		buf += thislen;
410
411		ofs = 0;
412		chipnum++;
413	}
414	return ret;
415}
416
/* Program 'len' bytes at 'adr' of one chip using the write buffer.
 *
 * Waits for the chip to be idle, issues Write-to-Buffer (0xE8), polls
 * until the chip accepts the buffer, feeds word count and data,
 * confirms with 0xD0 and polls for completion.  The initial post-write
 * delay (chip->buffer_write_time) is tuned adaptively from observed
 * latency.
 * Returns 0 on success, -EINVAL if 'adr' is not bus-aligned, -EROFS if
 * the status reports a locked block, -EIO on other failures.
 */
static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
				  unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	DECLARE_WAITQUEUE(wait, current);
	int wbufsize, z;

	/* M58LW064A requires bus alignment for buffer writes -- saw */
	if (adr & (map_bankwidth(map)-1))
		return -EINVAL;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	/* Commands go to the base of the write-buffer-aligned region */
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:

#ifdef DEBUG_CFI_FEATURES
	printk("%s: chip->state[%d]\n", __func__, chip->state);
#endif
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * Later, we can actually think about interrupting it
	 * if it's in FL_ERASING state.
	 * Not just yet, though.
	 */
	switch (chip->state) {
	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_adr);	/* Read Status Register */
		chip->state = FL_STATUS;
#ifdef DEBUG_CFI_FEATURES
	printk("%s: 1 status[%x]\n", __func__, map_read(map, cmd_adr));
#endif
		/* fall through: now poll the status like FL_STATUS */

	case FL_STATUS:
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;
		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
			       status.x[0], map_read(map, cmd_adr).x[0]);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	map_write(map, CMD(0xe8), cmd_adr);	/* Write to Buffer */
	chip->state = FL_WRITING_TO_BUFFER;

	/* Bounded busy-wait until the chip is ready to accept the buffer */
	z = 0;
	for (;;) {
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);

		if (++z > 100) {
			/* Argh. Not ready for write to buffer */
			DISABLE_VPP(map);
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
			return -EIO;
		}
	}

	/* Write length of data to come */
	map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr );

	/* Write data */
	for (z = 0; z < len;
	     z += map_bankwidth(map), buf += map_bankwidth(map)) {
		map_word d;
		d = map_word_load(map, buf);
		map_write(map, d, adr+z);
	}
	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);	/* Confirm */
	chip->state = FL_WRITING;

	mutex_unlock(&chip->mutex);
	cfi_udelay(chip->buffer_write_time);
	mutex_lock(&chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2);
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			/* clear status */
			map_write(map, CMD(0x50), cmd_adr);
			/* put back into read status register mode */
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		z++;	/* count extra polls, used for tuning below */
		mutex_lock(&chip->mutex);
	}
	/* Adaptive tuning: if the first poll already succeeded the delay
	 * was too long; if we polled more than once extra it was too
	 * short.  Never let it drop to zero. */
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	DISABLE_VPP(map);
	chip->state = FL_STATUS;

	/* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 2 status[%lx]\n", __func__, status.x[0]);
#endif
		/* clear status */
		map_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		wake_up(&chip->wq);
		mutex_unlock(&chip->mutex);
		/* status bit 1 => block locked => treat as read-only */
		return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
	}
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}
602
603static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
604				       size_t len, size_t *retlen, const u_char *buf)
605{
606	struct map_info *map = mtd->priv;
607	struct cfi_private *cfi = map->fldrv_priv;
608	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
609	int ret = 0;
610	int chipnum;
611	unsigned long ofs;
612
613	*retlen = 0;
614	if (!len)
615		return 0;
616
617	chipnum = to >> cfi->chipshift;
618	ofs = to  - (chipnum << cfi->chipshift);
619
620#ifdef DEBUG_CFI_FEATURES
621	printk("%s: map_bankwidth(map)[%x]\n", __func__, map_bankwidth(map));
622	printk("%s: chipnum[%x] wbufsize[%x]\n", __func__, chipnum, wbufsize);
623	printk("%s: ofs[%x] len[%x]\n", __func__, ofs, len);
624#endif
625
626        /* Write buffer is worth it only if more than one word to write... */
627        while (len > 0) {
628		/* We must not cross write block boundaries */
629		int size = wbufsize - (ofs & (wbufsize-1));
630
631                if (size > len)
632                    size = len;
633
634                ret = do_write_buffer(map, &cfi->chips[chipnum],
635				      ofs, buf, size);
636		if (ret)
637			return ret;
638
639		ofs += size;
640		buf += size;
641		(*retlen) += size;
642		len -= size;
643
644		if (ofs >> cfi->chipshift) {
645			chipnum ++;
646			ofs = 0;
647			if (chipnum == cfi->numchips)
648				return 0;
649		}
650	}
651
652	return 0;
653}
654
655#define ECCBUF_SIZE (mtd->writesize)
656#define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
657#define ECCBUF_MOD(x) ((x) &  (ECCBUF_SIZE - 1))
658static int
659cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
660		unsigned long count, loff_t to, size_t *retlen)
661{
662	unsigned long i;
663	size_t	 totlen = 0, thislen;
664	int	 ret = 0;
665	size_t	 buflen = 0;
666	static char *buffer;
667
668	if (!ECCBUF_SIZE) {
669		/* We should fall back to a general writev implementation.
670		 * Until that is written, just break.
671		 */
672		return -EIO;
673	}
674	buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
675	if (!buffer)
676		return -ENOMEM;
677
678	for (i=0; i<count; i++) {
679		size_t elem_len = vecs[i].iov_len;
680		void *elem_base = vecs[i].iov_base;
681		if (!elem_len)
682			continue;
683		if (buflen) { /* cut off head */
684			if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate */
685				memcpy(buffer+buflen, elem_base, elem_len);
686				buflen += elem_len;
687				continue;
688			}
689			memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
690			ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer);
691			totlen += thislen;
692			if (ret || thislen != ECCBUF_SIZE)
693				goto write_error;
694			elem_len -= thislen-buflen;
695			elem_base += thislen-buflen;
696			to += ECCBUF_SIZE;
697		}
698		if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
699			ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base);
700			totlen += thislen;
701			if (ret || thislen != ECCBUF_DIV(elem_len))
702				goto write_error;
703			to += thislen;
704		}
705		buflen = ECCBUF_MOD(elem_len); /* cut off tail */
706		if (buflen) {
707			memset(buffer, 0xff, ECCBUF_SIZE);
708			memcpy(buffer, elem_base + thislen, buflen);
709		}
710	}
711	if (buflen) { /* flush last page, even if not full */
712		/* This is sometimes intended behaviour, really */
713		ret = mtd->write(mtd, to, buflen, &thislen, buffer);
714		totlen += thislen;
715		if (ret || thislen != ECCBUF_SIZE)
716			goto write_error;
717	}
718write_error:
719	if (retlen)
720		*retlen = totlen;
721	kfree(buffer);
722	return ret;
723}
724
725
/* Erase the single block at offset 'adr' of one chip.
 *
 * Waits for the chip to be idle, clears the status register (0x50),
 * issues Block Erase (0x20) + Confirm (0xD0), sleeps ~1s and then
 * polls for completion (up to 20s).  On an erase-error status the
 * operation is retried up to 3 times.
 * Returns 0 on success, -EROFS if the block is locked, -EIO on
 * timeout, command-sequence error, low VPP or persistent erase
 * failure.
 */
static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);	/* Read Status Register */
		chip->state = FL_STATUS;
		/* fall through: now poll the status like FL_STATUS */

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);	/* Block Erase */
	map_write(map, CMD(0xD0), adr);	/* Confirm */
	chip->state = FL_ERASING;

	/* Erase takes a long time; sleep rather than poll immediately */
	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ*20);
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	DISABLE_VPP(map);
	ret = 0;

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned char chipstatus = status.x[0];
		if (!map_word_equal(map, status, CMD(chipstatus))) {
			int i, w;
			/* NOTE(review): this merge ORs the same shifted value
			 * cfi_interleave times per word rather than isolating
			 * each chip's byte — looks dubious for wide
			 * interleaves; verify against upstream before relying
			 * on the merged value. */
			for (w=0; w<map_words(map); w++) {
				for (i = 0; i<cfi_interleave(cfi); i++) {
					chipstatus |= status.x[w] >> (cfi->device_type * 8);
				}
			}
			printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
			       status.x[0], chipstatus);
		}
		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			/* Erase error: retry the whole block a few times */
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
				timeo = jiffies + HZ;
				chip->state = FL_STATUS;
				mutex_unlock(&chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
			ret = -EIO;
		}
	}

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return ret;
}
882
/* Erase the range described by 'instr', which may span several erase
 * regions (of different block sizes) and several chips.  Both ends of
 * the range must be aligned to the erase size in effect at that
 * address.  Returns 0 on success (and fires the erase callback), or a
 * negative errno. */
static int cfi_staa_erase_varsize(struct mtd_info *mtd,
				  struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	/* Reject ranges extending beyond the device */
	if (instr->addr > mtd->size)
		return -EINVAL;

	if ((instr->len + instr->addr) > mtd->size)
		return -EINVAL;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/

	while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
	       i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (instr->addr & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((instr->addr + instr->len) & (regions[i].erasesize-1))
		return -EINVAL;

	/* Translate the device offset into (chip, chip-local address) */
	chipnum = instr->addr >> cfi->chipshift;
	adr = instr->addr - (chipnum << cfi->chipshift);
	len = instr->len;

	i=first;

	while(len) {
		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);

		if (ret)
			return ret;

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		/* Advance to the next erase region when we cross its end
		 * (compared modulo the chip size, since region offsets are
		 * device-absolute but 'adr' is chip-relative) */
		if (adr % (1<< cfi->chipshift) == (((unsigned long)regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
			break;
		}
	}

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
973
/* Wait for every chip to reach an idle state, park each one in
 * FL_SYNCING so no new operation starts, then restore the previous
 * states and wake any waiters.  Chips a concurrent sync has already
 * parked are left for that sync to restore. */
static void cfi_staa_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* fall through: chip is now (or was already) syncing */
		case FL_SYNCING:
			mutex_unlock(&chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);
			schedule();
		        remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}
1031
/*
 * Lock (write-protect) the single erase block at @adr on @chip.
 *
 * Waits until the chip is idle, issues the Set Block Lock-Bit command
 * pair (0x60, 0x01), then polls the status register until the chip
 * reports ready (bit 7, CMD(0x80)) or a 2*HZ timeout expires.
 *
 * Returns 0 on success, -EIO on timeout waiting for the chip either
 * before or after issuing the command.  Sleeps; process context only.
 */
static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		/* Put the chip in Read Status Register mode first. */
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		/* fall through */

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	/* Issue the Set Block Lock-Bit command sequence. */
	ENABLE_VPP(map);
	map_write(map, CMD(0x60), adr);
	map_write(map, CMD(0x01), adr);
	chip->state = FL_LOCKING;

	/* Give the chip time to act before polling; lock operations are
	 * slow, so sleep rather than busy-wait. */
	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}
1126static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1127{
1128	struct map_info *map = mtd->priv;
1129	struct cfi_private *cfi = map->fldrv_priv;
1130	unsigned long adr;
1131	int chipnum, ret = 0;
1132#ifdef DEBUG_LOCK_BITS
1133	int ofs_factor = cfi->interleave * cfi->device_type;
1134#endif
1135
1136	if (ofs & (mtd->erasesize - 1))
1137		return -EINVAL;
1138
1139	if (len & (mtd->erasesize -1))
1140		return -EINVAL;
1141
1142	if ((len + ofs) > mtd->size)
1143		return -EINVAL;
1144
1145	chipnum = ofs >> cfi->chipshift;
1146	adr = ofs - (chipnum << cfi->chipshift);
1147
1148	while(len) {
1149
1150#ifdef DEBUG_LOCK_BITS
1151		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1152		printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1153		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1154#endif
1155
1156		ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);
1157
1158#ifdef DEBUG_LOCK_BITS
1159		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1160		printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1161		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1162#endif
1163
1164		if (ret)
1165			return ret;
1166
1167		adr += mtd->erasesize;
1168		len -= mtd->erasesize;
1169
1170		if (adr >> cfi->chipshift) {
1171			adr = 0;
1172			chipnum++;
1173
1174			if (chipnum >= cfi->numchips)
1175			break;
1176		}
1177	}
1178	return 0;
1179}
/*
 * Unlock (clear write-protection on) the erase block at @adr on @chip.
 *
 * Mirror image of do_lock_oneblock(): waits for the chip to go idle,
 * issues the Clear Block Lock-Bits command pair (0x60, 0xD0), then
 * polls the status register for ready (bit 7, CMD(0x80)) with a 2*HZ
 * timeout.
 *
 * Returns 0 on success, -EIO on timeout.  Sleeps; process context only.
 */
static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		/* Put the chip in Read Status Register mode first. */
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		/* fall through */

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	/* Issue the Clear Block Lock-Bits command sequence. */
	ENABLE_VPP(map);
	map_write(map, CMD(0x60), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_UNLOCKING;

	/* Give the chip time to act before polling; unlock operations are
	 * slow, so sleep rather than busy-wait. */
	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the unlock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}
1274static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1275{
1276	struct map_info *map = mtd->priv;
1277	struct cfi_private *cfi = map->fldrv_priv;
1278	unsigned long adr;
1279	int chipnum, ret = 0;
1280#ifdef DEBUG_LOCK_BITS
1281	int ofs_factor = cfi->interleave * cfi->device_type;
1282#endif
1283
1284	chipnum = ofs >> cfi->chipshift;
1285	adr = ofs - (chipnum << cfi->chipshift);
1286
1287#ifdef DEBUG_LOCK_BITS
1288	{
1289		unsigned long temp_adr = adr;
1290		unsigned long temp_len = len;
1291
1292		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1293                while (temp_len) {
1294			printk("before unlock %x: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
1295			temp_adr += mtd->erasesize;
1296			temp_len -= mtd->erasesize;
1297		}
1298		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1299	}
1300#endif
1301
1302	ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);
1303
1304#ifdef DEBUG_LOCK_BITS
1305	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1306	printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1307	cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1308#endif
1309
1310	return ret;
1311}
1312
/*
 * Power-management suspend: park every idle chip in FL_PM_SUSPENDED.
 *
 * If any chip is busy (not in an idle state) the whole suspend is
 * aborted with -EAGAIN and every chip already parked is rolled back to
 * its previous state.  Returns 0 when all chips were suspended.
 */
static int cfi_staa_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	/* Pass 1: try to park every chip; stop at the first busy one. */
	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			/* Idle: remember the old state so we can roll back on failure. */
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* fall through */
		case FL_PM_SUSPENDED:
			break;

		default:
			/* Chip is busy; abort the suspend. */
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	/* On failure, roll back every chip we already suspended. */
	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}
1368
1369static void cfi_staa_resume(struct mtd_info *mtd)
1370{
1371	struct map_info *map = mtd->priv;
1372	struct cfi_private *cfi = map->fldrv_priv;
1373	int i;
1374	struct flchip *chip;
1375
1376	for (i=0; i<cfi->numchips; i++) {
1377
1378		chip = &cfi->chips[i];
1379
1380		mutex_lock(&chip->mutex);
1381
1382		/* Go to known state. Chip may have been power cycled */
1383		if (chip->state == FL_PM_SUSPENDED) {
1384			map_write(map, CMD(0xFF), 0);
1385			chip->state = FL_READY;
1386			wake_up(&chip->wq);
1387		}
1388
1389		mutex_unlock(&chip->mutex);
1390	}
1391}
1392
1393static void cfi_staa_destroy(struct mtd_info *mtd)
1394{
1395	struct map_info *map = mtd->priv;
1396	struct cfi_private *cfi = map->fldrv_priv;
1397	kfree(cfi->cmdset_priv);
1398	kfree(cfi);
1399}
1400
1401MODULE_LICENSE("GPL");
1402