/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.1.1.1 2008/10/15 03:26:35 james26_jang Exp $
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/compatmac.h>

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct map_info *);

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	probe: NULL, /* Not usable directly */
	destroy: cfi_intelext_destroy,
	name: "cfi_cmdset_0001",
	module: THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;

	printk("  Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:         %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:      %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:    %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:       %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:    %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:     %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:   %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	for (i=9; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	/* VccOptimal/VppOptimal are one-byte BCD: volts in the high nibble,
	   tenths of a volt in the low nibble */
	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	__u32 base = cfi->chips[0].start;

	if (cfi->cfi_mode) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;
		int ofs_factor = cfi->interleave * cfi->device_type;

		//printk(" Intel/Sharp Extended Query Table at 0x%4.4X\n", adr);
		if (!adr)
			return NULL;

		/* Switch it into Query Mode */
		cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);

		extp = kmalloc(sizeof(*extp), GFP_KERNEL);
		if (!extp) {
			printk(KERN_ERR "Failed to allocate memory\n");
			return NULL;
		}

		/* Read in the Extended Query Table */
		for (i=0; i<sizeof(*extp); i++) {
			((unsigned char *)extp)[i] =
				cfi_read_query(map, (base+((adr+i)*ofs_factor)));
		}

		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '2')) {
			printk(KERN_WARNING "  Unknown IntelExt Extended Query "
			       "version %c.%c.\n",  extp->MajorVersion,
			       extp->MinorVersion);
			kfree(extp);
			return NULL;
		}

		/* Do some byteswapping if necessary */
		extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
		extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;
	}

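	/* Conservative initial timing estimates; the write paths below
	 * adapt word_write_time and buffer_write_time as operations
	 * complete faster or slower than expected. */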
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 128;
		cfi->chips[i].buffer_write_time = 128;
		cfi->chips[i].erase_time = 1024;
	}

	/* Make sure it's in read mode. cfi_intelext_setup() installs
	   map->fldrv and bumps the module use count exactly once. */
	cfi_send_gen_cmd(0xff, 0x55, base, map, cfi, cfi->device_type, NULL);
	return cfi_intelext_setup(map);
}


static struct mtd_info *cfi_intelext_setup(struct map_info *map)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		kfree(cfi->cmdset_priv);
		return NULL;
	}

	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		kfree(mtd->eraseregions);
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i=0; i<mtd->numeraseregions; i++) {
		printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i, mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

	/* Also select the correct geometry setup */
	mtd->erase = cfi_intelext_erase_varsize;
	mtd->read = cfi_intelext_read;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		//printk(KERN_INFO "Using buffer write method\n" );
		mtd->write = cfi_intelext_write_buffers;
	} else {
		//printk(KERN_INFO "Using word write method\n" );
		mtd->write = cfi_intelext_write_words;
	}
	mtd->sync = cfi_intelext_sync;
	mtd->lock = cfi_intelext_lock;
	mtd->unlock = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume = cfi_intelext_resume;
	mtd->flags = MTD_CAP_NORFLASH;
	map->fldrv = &cfi_intelext_chipdrv;
	MOD_INC_USE_COUNT;
	mtd->name = map->name;
	return mtd;
}


static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	__u32 status, status_OK;
	unsigned long timeo;
	DECLARE_WAITQUEUE(wait, current);
	int suspended = 0;
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(CFIDEV_BUSWIDTH-1);

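	/* Throughout this driver, 0x80 is the WSM (Write State Machine)
	 * ready bit of the Intel status register, and CMD() replicates a
	 * one-chip value across every chip in the interleave. */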
	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * If it's in FL_ERASING state, suspend it and make it talk now.
	 */
	switch (chip->state) {
	case FL_ERASING:
		if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
			goto sleep; /* We don't support erase suspend */

		cfi_write (map, CMD(0xb0), cmd_addr);
		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		cfi_write(map, CMD(0x70), cmd_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		//		printk("Erase suspending at 0x%lx\n", cmd_addr);
		for (;;) {
			status = cfi_read(map, cmd_addr);
			if ((status & status_OK) == status_OK)
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh */
				cfi_write(map, CMD(0xd0), cmd_addr);
				/* make sure we're in 'read status' mode */
				cfi_write(map, CMD(0x70), cmd_addr);
				chip->state = FL_ERASING;
				spin_unlock_bh(chip->mutex);
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%x\n", status);
				return -EIO;
			}

			spin_unlock_bh(chip->mutex);
			cfi_udelay(1);
			spin_lock_bh(chip->mutex);
		}

		suspended = 1;
		cfi_write(map, CMD(0xff), cmd_addr);
		chip->state = FL_READY;
		break;


	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		cfi_write(map, CMD(0x70), cmd_addr);
		chip->state = FL_STATUS;
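		/* fall through */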

	case FL_STATUS:
		status = cfi_read(map, cmd_addr);
		if ((status & status_OK) == status_OK) {
			cfi_write(map, CMD(0xff), cmd_addr);
			chip->state = FL_READY;
			break;
		}

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %x\n", status);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
	sleep:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	map->copy_from(map, buf, adr, len);

	if (suspended) {
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		cfi_write(map, CMD(0xd0), cmd_addr);
		cfi_write(map, CMD(0x70), cmd_addr);
	}

	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, __u32 datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	__u32 status, status_OK;
	unsigned long timeo;
	DECLARE_WAITQUEUE(wait, current);
	int z;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * Later, we can actually think about interrupting it
	 * if it's in FL_ERASING state.
	 * Not just yet, though.
	 */
	switch (chip->state) {
	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		cfi_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
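		/* fall through */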

	case FL_STATUS:
		status = cfi_read(map, adr);
		if ((status & status_OK) == status_OK)
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
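	/* 0x40 = word program setup; the data word itself follows at the
	 * target address. */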
	cfi_write(map, CMD(0x40), adr);
	cfi_write(map, datum, adr);
	chip->state = FL_WRITING;

	spin_unlock_bh(chip->mutex);
	cfi_udelay(chip->word_write_time);
	spin_lock_bh(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock_bh(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2);
			spin_lock_bh(chip->mutex);
			continue;
		}

		status = cfi_read(map, adr);
		if ((status & status_OK) == status_OK)
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		z++;
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);
	}
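	/* Adapt the programming delay: if the first status poll already
	 * showed completion (z == 0), shave a microsecond off next time
	 * (but never below 1); if we had to poll more than once, add one. */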
	if (!z) {
		chip->word_write_time--;
		if (!chip->word_write_time)
			chip->word_write_time++;
	}
	if (z > 1)
		chip->word_write_time++;

	/* Done and happy. */
	DISABLE_VPP(map);
	chip->state = FL_STATUS;
	/* check for lock bit */
	if (status & CMD(0x02)) {
		/* clear status */
		cfi_write(map, CMD(0x50), adr);
		/* put back into read status register mode */
		cfi_write(map, CMD(0x70), adr);
		wake_up(&chip->wq);
		spin_unlock_bh(chip->mutex);
		return -EROFS;
	}
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return 0;
}


static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
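	/* (Padding with 0xff is safe: programming a 1 bit leaves the
	 * corresponding flash cell unchanged.) */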
	if (ofs & (CFIDEV_BUSWIDTH-1)) {
		unsigned long bus_ofs = ofs & ~(CFIDEV_BUSWIDTH-1);
		int gap = ofs - bus_ofs;
		int i = 0, n = 0;
		u_char tmp_buf[4];
		__u32 datum;

		while (gap--)
			tmp_buf[i++] = 0xff;
		while (len && i < CFIDEV_BUSWIDTH) {
			tmp_buf[i++] = buf[n++];
			len--;
		}
		while (i < CFIDEV_BUSWIDTH)
			tmp_buf[i++] = 0xff;

		if (cfi_buswidth_is_2()) {
			datum = *(__u16*)tmp_buf;
		} else if (cfi_buswidth_is_4()) {
			datum = *(__u32*)tmp_buf;
		} else {
			return -EINVAL;  /* should never happen, but be safe */
		}

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= CFIDEV_BUSWIDTH) {
		__u32 datum;

		if (cfi_buswidth_is_1()) {
			datum = *(__u8*)buf;
		} else if (cfi_buswidth_is_2()) {
			datum = *(__u16*)buf;
		} else if (cfi_buswidth_is_4()) {
			datum = *(__u32*)buf;
		} else {
			return -EINVAL;
		}

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += CFIDEV_BUSWIDTH;
		buf += CFIDEV_BUSWIDTH;
		(*retlen) += CFIDEV_BUSWIDTH;
		len -= CFIDEV_BUSWIDTH;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (CFIDEV_BUSWIDTH-1)) {
		int i = 0, n = 0;
		u_char tmp_buf[4];
		__u32 datum;

		while (len--)
			tmp_buf[i++] = buf[n++];
		while (i < CFIDEV_BUSWIDTH)
			tmp_buf[i++] = 0xff;

		if (cfi_buswidth_is_2()) {
			datum = *(__u16*)tmp_buf;
		} else if (cfi_buswidth_is_4()) {
			datum = *(__u32*)tmp_buf;
		} else {
			return -EINVAL;  /* should never happen, but be safe */
		}

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		(*retlen) += n;
	}

	return 0;
}


static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
				  unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	__u32 status, status_OK;
	unsigned long cmd_adr, timeo;
	DECLARE_WAITQUEUE(wait, current);
	int wbufsize, z;

	wbufsize = CFIDEV_INTERLEAVE << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * Later, we can actually think about interrupting it
	 * if it's in FL_ERASING state.
	 * Not just yet, though.
	 */
	switch (chip->state) {
	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		cfi_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
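		/* fall through */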

	case FL_STATUS:
		status = cfi_read(map, cmd_adr);
		if ((status & status_OK) == status_OK)
			break;
		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in buffer write\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
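	/* 0xe8 = Write to Buffer; the chip reports buffer availability in
	 * the status register, which we poll below. */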
	cfi_write(map, CMD(0xe8), cmd_adr);
	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		status = cfi_read(map, cmd_adr);
		if ((status & status_OK) == status_OK)
			break;

		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);

		if (++z > 20) {
			/* Argh. Not ready for write to buffer */
			cfi_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %x, status = %x\n", status, cfi_read(map, cmd_adr));
			return -EIO;
		}
	}
	/* Write the length of data to come, encoded as (bus-width words - 1) */
	cfi_write(map, CMD(len/CFIDEV_BUSWIDTH-1), cmd_adr);

	/* Write data */
	for (z = 0; z < len; z += CFIDEV_BUSWIDTH) {
		if (cfi_buswidth_is_1()) {
			map->write8 (map, buf[z], adr+z);
		} else if (cfi_buswidth_is_2()) {
			map->write16 (map, *(__u16 *)(buf+z), adr+z);
		} else if (cfi_buswidth_is_4()) {
			map->write32 (map, *(__u32 *)(buf+z), adr+z);
		} else {
			DISABLE_VPP(map);
			return -EINVAL;
		}
	}
	/* GO GO GO */
	cfi_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	spin_unlock_bh(chip->mutex);
	cfi_udelay(chip->buffer_write_time);
	spin_lock_bh(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock_bh(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2);
			spin_lock_bh(chip->mutex);
			continue;
		}

		status = cfi_read(map, cmd_adr);
		if ((status & status_OK) == status_OK)
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		z++;
		spin_lock_bh(chip->mutex);
	}
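	/* Same adaptive delay tuning as in do_write_oneword */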
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	DISABLE_VPP(map);
	chip->state = FL_STATUS;
	/* check for lock bit */
	if (status & CMD(0x02)) {
		/* clear status */
		cfi_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		cfi_write(map, CMD(0x70), cmd_adr);
		wake_up(&chip->wq);
		spin_unlock_bh(chip->mutex);
		return -EROFS;
	}
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return 0;
}

static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = CFIDEV_INTERLEAVE << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (CFIDEV_BUSWIDTH-1)) {
		size_t local_len = (-ofs)&(CFIDEV_BUSWIDTH-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_intelext_write_words(mtd, to, local_len,
					       retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while(len > CFIDEV_BUSWIDTH) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len & ~(CFIDEV_BUSWIDTH-1);
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* ... and write the remaining bytes */
	if (len > 0) {
		size_t local_retlen;
		ret = cfi_intelext_write_words(mtd, ofs + (chipnum << cfi->chipshift),
					       len, &local_retlen, buf);
		if (ret)
			return ret;
		(*retlen) += local_retlen;
	}

	return 0;
}


static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	__u32 status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		cfi_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
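		/* fall through */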

	case FL_STATUS:
		status = cfi_read(map, adr);
		if ((status & status_OK) == status_OK)
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	/* Clear the status register first */
	cfi_write(map, CMD(0x50), adr);
	/* Now erase: 0x20 is the Block Erase setup command, 0xD0 the confirm */
	cfi_write(map, CMD(0x20), adr);
	cfi_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;

	spin_unlock_bh(chip->mutex);
	/* schedule_timeout() only sleeps if the task state is set first */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(HZ);
	spin_lock_bh(chip->mutex);

	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock_bh(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ*20);
			spin_lock_bh(chip->mutex);
			continue;
		}

		status = cfi_read(map, adr);
		if ((status & status_OK) == status_OK)
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			cfi_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %x, status = %x.\n", status, cfi_read(map, adr));
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);
	}

	DISABLE_VPP(map);
	ret = 0;

	/* We've broken this before. It doesn't hurt to be safe */
	cfi_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = cfi_read(map, adr);

	/* check for errors: lock bit, VPP low, command sequence, erase failure */
	if (status & CMD(0x3a)) {
		unsigned char chipstatus = status;
		if (status != CMD(status & 0xff)) {
			int i;
			for (i = 1; i<CFIDEV_INTERLEAVE; i++) {
				chipstatus |= status >> (i * cfi->device_type * 8);
			}
			printk(KERN_WARNING "Status is not identical for all chips: 0x%x. Merging to give 0x%02x\n", status, chipstatus);
		}
		/* Reset the error bits */
		cfi_write(map, CMD(0x50), adr);
		cfi_write(map, CMD(0x70), adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", status);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", status);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, status);
				timeo = jiffies + HZ;
				chip->state = FL_STATUS;
				spin_unlock_bh(chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, status);
			ret = -EIO;
		}
	}

	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return ret;
}

static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if (instr->addr > mtd->size)
		return -EINVAL;

	if ((instr->len + instr->addr) > mtd->size)
		return -EINVAL;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/

	while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
	       i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (instr->addr & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((instr->addr + instr->len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = instr->addr >> cfi->chipshift;
	adr = instr->addr - (chipnum << cfi->chipshift);
	len = instr->len;

	i = first;

	while(len) {
		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);

		if (ret)
			return ret;

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) % (1<< cfi->chipshift)))
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	instr->state = MTD_ERASE_DONE;
	if (instr->callback)
		instr->callback(instr);

	return 0;
}

static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		spin_lock_bh(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
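			/* fall through */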
		case FL_SYNCING:
			spin_unlock_bh(chip->mutex);
			break;

		default:
			/* Not an idle state */
			add_wait_queue(&chip->wq, &wait);

			spin_unlock_bh(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		spin_lock_bh(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}

		/* make absolutely sure that chip is out of lock/suspend state */
		cfi_write(map, CMD(0xFF), chip->start);
		spin_unlock_bh(chip->mutex);
	}
}

static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	__u32 status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		cfi_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
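		/* fall through */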

	case FL_STATUS:
		status = cfi_read(map, adr);
		if ((status & status_OK) == status_OK)
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
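	/* 0x60 = Set/Clear Block Lock-Bit setup; 0x01 confirms "set lock bit" */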
	cfi_write(map, CMD(0x60), adr);
	cfi_write(map, CMD(0x01), adr);
	chip->state = FL_LOCKING;

	spin_unlock_bh(chip->mutex);
	schedule_timeout(HZ);
	spin_lock_bh(chip->mutex);

	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = cfi_read(map, adr);
		if ((status & status_OK) == status_OK)
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			cfi_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %x, status = %x.\n", status, cfi_read(map, adr));
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);
	}

	/* Done and happy. */
	cfi_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return 0;
}

static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	if (ofs & (mtd->erasesize - 1))
		return -EINVAL;

	if (len & (mtd->erasesize - 1))
		return -EINVAL;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	while(len) {

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("before lock: block status register is %x\n", cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("after lock: block status register is %x\n", cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		if (ret)
			return ret;

		adr += mtd->erasesize;
		len -= mtd->erasesize;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}
	return 0;
}

static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	__u32 status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		cfi_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
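		/* fall through */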

	case FL_STATUS:
		status = cfi_read(map, adr);
		if ((status & status_OK) == status_OK)
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
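	/* 0x60 = Set/Clear Block Lock-Bit setup; 0xD0 confirms "clear lock bits" */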
	cfi_write(map, CMD(0x60), adr);
	cfi_write(map, CMD(0xD0), adr);
	chip->state = FL_UNLOCKING;

	spin_unlock_bh(chip->mutex);
	schedule_timeout(HZ);
	spin_lock_bh(chip->mutex);

	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = cfi_read(map, adr);
		if ((status & status_OK) == status_OK)
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			cfi_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %x, status = %x.\n", status, cfi_read(map, adr));
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);
	}

	/* Done and happy. */
	cfi_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return 0;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if (ofs > mtd->size)
		return -EINVAL;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	/* Check that both start and end of the requested unlock are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested unlock. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested unlock, and then go back one.
	*/

	while (i < mtd->numeraseregions && ofs >= regions[i].offset)
	       i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   unlock request starts. Check the start of the requested
	   unlock range is aligned with the erase size which is in
	   effect here.
	*/

	if (ofs & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested unlock is aligned
	 * with the erase region at that address.
	 */

	while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((ofs + len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);
	i = first;

	while(len) {

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("before unlock %lx: block status register is %x\n", adr, cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("after unlock %lx: block status register is %x\n", adr, cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		if (ret)
			return ret;

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) % (1<< cfi->chipshift)))
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}
	return 0;
}

static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock_bh(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
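			/* fall through */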
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		spin_unlock_bh(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			spin_lock_bh(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			spin_unlock_bh(chip->mutex);
		}
	}

	return ret;
}

static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock_bh(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			cfi_write(map, CMD(0xFF), chip->start);
			chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock_bh(chip->mutex);
	}
}

static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	kfree(cfi->cmdset_priv);
	kfree(mtd->eraseregions);
	kfree(cfi);
}

static char im_name_1[]="cfi_cmdset_0001";
static char im_name_3[]="cfi_cmdset_0003";

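/* The Intel Standard command set (ID 0x0003) is close enough to the
 * extended set that this driver is registered for it as well. */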
int __init cfi_intelext_init(void)
{
	inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
	inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
	return 0;
}

static void __exit cfi_intelext_exit(void)
{
	inter_module_unregister(im_name_1);
	inter_module_unregister(im_name_3);
}

module_init(cfi_intelext_init);
module_exit(cfi_intelext_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
