1/*
2 * Common Flash Interface support:
3 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
4 *
5 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
6 *
7 * 2_by_8 routines added by Simon Munton
8 *
9 * This code is GPL
10 *
11 * $Id: cfi_cmdset_0002.c,v 1.1.1.1 2008/10/15 03:26:35 james26_jang Exp $
12 *
13 */
14
15#include <linux/module.h>
16#include <linux/types.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <asm/io.h>
20#include <asm/byteorder.h>
21
22#include <linux/errno.h>
23#include <linux/slab.h>
24#include <linux/delay.h>
25#include <linux/interrupt.h>
26#include <linux/mtd/map.h>
27#include <linux/mtd/cfi.h>
28
29#define AMD_BOOTLOC_BUG
30
31static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
32static int cfi_amdstd_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
33static int cfi_amdstd_erase_onesize(struct mtd_info *, struct erase_info *);
34static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
35static void cfi_amdstd_sync (struct mtd_info *);
36static int cfi_amdstd_suspend (struct mtd_info *);
37static void cfi_amdstd_resume (struct mtd_info *);
38
39static void cfi_amdstd_destroy(struct mtd_info *);
40
41struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
42static struct mtd_info *cfi_amdstd_setup (struct map_info *);
43
44
45static struct mtd_chip_driver cfi_amdstd_chipdrv = {
46	probe: NULL, /* Not usable directly */
47	destroy: cfi_amdstd_destroy,
48	name: "cfi_cmdset_0002",
49	module: THIS_MODULE
50};
51
/* Entry point for the AMD/Fujitsu standard command set (ID 0x0002).
 *
 * In CFI mode this reads the Amd/Fujitsu extended query table version
 * and the autoselect manufacturer/device IDs, applies the
 * AMD_BOOTLOC_BUG workaround for pre-1.1 CFI tables (guessing
 * top/bottom boot from the JEDEC ID and swapping the erase-region
 * list if needed), and derives the unlock cycle addresses from the
 * device type.  It then fills in default per-chip timings and hands
 * over to cfi_amdstd_setup().  Returns the new mtd_info, or NULL for
 * an unknown device type.
 */
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned char bootloc;
	/* Byte spacing between successive query locations on the bus */
	int ofs_factor = cfi->interleave * cfi->device_type;
	int i;
	__u8 major, minor;
	__u32 base = cfi->chips[0].start;

	if (cfi->cfi_mode==1){ /* probed via true CFI query, not JEDEC */
		/* Offset of the primary or alternate vendor-specific table */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;

		/* Enter CFI query mode */
		cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);

		/* Extended table version: two ASCII characters */
		major = cfi_read_query(map, base + (adr+3)*ofs_factor);
		minor = cfi_read_query(map, base + (adr+4)*ofs_factor);

		printk(KERN_NOTICE " Amd/Fujitsu Extended Query Table v%c.%c at 0x%4.4X\n",
		       major, minor, adr);
		/* Reset back to read-array mode before autoselect */
				cfi_send_gen_cmd(0xf0, 0x55, base, map, cfi, cfi->device_type, NULL);

		/* Autoselect sequence: read manufacturer and device IDs */
		cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL);
		cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL);
		cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL);
		cfi->mfr = cfi_read_query(map, base);
		cfi->id = cfi_read_query(map, base + ofs_factor);

		printk(KERN_NOTICE " MFR: %x ID %x\n", cfi->mfr, cfi->id);

		/* Wheee. Bring me the head of someone at AMD. */
#ifdef AMD_BOOTLOC_BUG
		/* Table versions before ASCII "1.1" (0x3131) carry a
		 * bootloc byte that cannot be trusted; derive top/bottom
		 * boot from the JEDEC device ID instead. */
		if (((major << 8) | minor) < 0x3131) {
			/* CFI version 1.0 => don't trust bootloc */
			if (cfi->id & 0x80) {
				printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
				bootloc = 3;	/* top boot */
			} else {
				bootloc = 2;	/* bottom boot */
			}
		} else
#endif
			{
				/* Trust the table: re-enter query mode and
				 * read the boot-block location byte. */
				cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
				bootloc = cfi_read_query(map, base + (adr+15)*ofs_factor);
			}
		if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
			/* Top-boot part with a broken table: the region
			 * list is apparently stored in reverse, so flip it
			 * end-for-end. */
			printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);

			for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
				int j = (cfi->cfiq->NumEraseRegions-1)-i;
				__u32 swap;

				swap = cfi->cfiq->EraseRegionInfo[i];
				cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
				cfi->cfiq->EraseRegionInfo[j] = swap;
			}
		}
		/* Unlock cycle addresses depend on the device data width */
		switch (cfi->device_type) {
		case CFI_DEVICETYPE_X8:
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
			break;
		case CFI_DEVICETYPE_X16:
			cfi->addr_unlock1 = 0xaaa;
			if (map->buswidth == cfi->interleave) {
				/* X16 chip(s) in X8 mode */
				cfi->addr_unlock2 = 0x555;
			} else {
				cfi->addr_unlock2 = 0x554;
			}
			break;
		case CFI_DEVICETYPE_X32:
			cfi->addr_unlock1 = 0x1555;
			cfi->addr_unlock2 = 0xaaa;
			break;
		default:
			printk(KERN_NOTICE "Eep. Unknown cfi_cmdset_0002 device type %d\n", cfi->device_type);
			return NULL;
		}
	} /* CFI mode */

	/* Default timings: the CFI timeout fields are log2 of the
	 * typical time, hence the 1<<x conversions. */
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		// 2006/06/07, reduce write time for SYN only, by Chen-I
		/* Vendor tweak: cap the word-programming delay for
		 * mfr 0x01 / id 0x7e parts (see note above). */
		if(cfi->mfr==0x01 && cfi->id==0x7e && cfi->chips[i].word_write_time>16)
		{
			cfi->chips[i].word_write_time=16;
		}
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
	}

	map->fldrv = &cfi_amdstd_chipdrv;
	MOD_INC_USE_COUNT;

	/* Final reset: leave the chip in read-array mode */
	cfi_send_gen_cmd(0xf0, 0x55, base, map, cfi, cfi->device_type, NULL);
	return cfi_amdstd_setup(map);
}
150
151static struct mtd_info *cfi_amdstd_setup(struct map_info *map)
152{
153	struct cfi_private *cfi = map->fldrv_priv;
154	struct mtd_info *mtd;
155	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
156
157	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
158	printk(KERN_NOTICE "number of %s chips: %d\n", (cfi->cfi_mode)?"CFI":"JEDEC",cfi->numchips);
159
160	if (!mtd) {
161	  printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
162	  kfree(cfi->cmdset_priv);
163	  return NULL;
164	}
165
166	memset(mtd, 0, sizeof(*mtd));
167	mtd->priv = map;
168	mtd->type = MTD_NORFLASH;
169	/* Also select the correct geometry setup too */
170	mtd->size = devsize * cfi->numchips;
171
172	if (cfi->cfiq->NumEraseRegions == 1) {
173		/* No need to muck about with multiple erase sizes */
174		mtd->erasesize = ((cfi->cfiq->EraseRegionInfo[0] >> 8) & ~0xff) * cfi->interleave;
175	} else {
176		unsigned long offset = 0;
177		int i,j;
178
179		mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
180		mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) * mtd->numeraseregions, GFP_KERNEL);
181		if (!mtd->eraseregions) {
182			printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
183			kfree(cfi->cmdset_priv);
184			return NULL;
185		}
186
187		for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
188			unsigned long ernum, ersize;
189			ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
190			ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
191
192			if (mtd->erasesize < ersize) {
193				mtd->erasesize = ersize;
194			}
195			for (j=0; j<cfi->numchips; j++) {
196				mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
197				mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
198				mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
199			}
200			offset += (ersize * ernum);
201		}
202		if (offset != devsize) {
203			/* Argh */
204			printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
205			kfree(mtd->eraseregions);
206			kfree(cfi->cmdset_priv);
207			return NULL;
208		}
209	}
210
211	switch (CFIDEV_BUSWIDTH)
212	{
213	case 1:
214	case 2:
215	case 4:
216		if (mtd->numeraseregions > 1)
217			mtd->erase = cfi_amdstd_erase_varsize;
218		else
219			mtd->erase = cfi_amdstd_erase_onesize;
220		mtd->read = cfi_amdstd_read;
221		mtd->write = cfi_amdstd_write;
222		break;
223
224	default:
225	        printk(KERN_WARNING "Unsupported buswidth\n");
226		kfree(mtd);
227		kfree(cfi->cmdset_priv);
228		return NULL;
229		break;
230	}
231	mtd->sync = cfi_amdstd_sync;
232	mtd->suspend = cfi_amdstd_suspend;
233	mtd->resume = cfi_amdstd_resume;
234	mtd->flags = MTD_CAP_NORFLASH;
235	map->fldrv = &cfi_amdstd_chipdrv;
236	mtd->name = map->name;
237	MOD_INC_USE_COUNT;
238	return mtd;
239}
240
/* Read `len' bytes at chip-relative offset `adr' from one chip into
 * `buf'.
 *
 * Sleeps (uninterruptibly) on the chip's waitqueue until the chip is
 * FL_READY, then performs the copy with the chip mutex held so no
 * command sequence can start against the chip mid-read.  Always
 * returns 0.
 */
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;

 retry:
	cfi_spin_lock(chip->mutex);

	if (chip->state != FL_READY){
		/* Chip busy (write/erase in progress): queue ourselves,
		 * drop the lock, sleep until woken, then re-check from
		 * the top. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		cfi_spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
		/* timeo is refreshed here but never actually checked
		 * in this function */
		timeo = jiffies + HZ;

		goto retry;
	}

	/* Translate chip-relative offset to a map-wide address */
	adr += chip->start;

	chip->state = FL_READY;

	map->copy_from(map, buf, adr, len);

	wake_up(&chip->wq);
	cfi_spin_unlock(chip->mutex);

	return 0;
}
273
274static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
275{
276	struct map_info *map = mtd->priv;
277	struct cfi_private *cfi = map->fldrv_priv;
278	unsigned long ofs;
279	int chipnum;
280	int ret = 0;
281
282	/* ofs: offset within the first chip that the first read should start */
283
284	chipnum = (from >> cfi->chipshift);
285	ofs = from - (chipnum <<  cfi->chipshift);
286
287
288	*retlen = 0;
289
290	while (len) {
291		unsigned long thislen;
292
293		if (chipnum >= cfi->numchips)
294			break;
295
296		if ((len + ofs -1) >> cfi->chipshift)
297			thislen = (1<<cfi->chipshift) - ofs;
298		else
299			thislen = len;
300
301		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
302		if (ret)
303			break;
304
305		*retlen += thislen;
306		len -= thislen;
307		buf += thislen;
308
309		ofs = 0;
310		chipnum++;
311	}
312	return ret;
313}
314
/* Program one bus word `datum' at chip-relative address `adr'.
 *
 * With `fast' set the chip is assumed to already be in AMD unlock
 * bypass mode, so only the 0xA0 program command is sent; otherwise
 * the full 3-cycle unlock + program sequence is issued.
 *
 * Completion is detected by re-reading the address until two
 * consecutive reads return the same value (consistent with AMD DQ6
 * toggle-bit polling), bounded to 10000 iterations, rather than by a
 * jiffies timeout.  Returns 0 on success, -EIO if the stable value
 * does not match `datum'.
 */
static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, __u32 datum, int fast)
{
	unsigned long timeo = jiffies + HZ;
	/* Small ring buffer of recent reads for the poll loop below */
	unsigned int Last[4];
	unsigned long Count = 0;
	struct cfi_private *cfi = map->fldrv_priv;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

 retry:
	cfi_spin_lock(chip->mutex);

	if (chip->state != FL_READY){
		/* Chip busy: sleep on its waitqueue and retry */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		cfi_spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;

		goto retry;
	}

	chip->state = FL_WRITING;

	adr += chip->start;
	ENABLE_VPP(map);
	if (fast) { /* Unlock bypass */
		cfi_send_gen_cmd(0xA0, 0, chip->start, map, cfi, cfi->device_type, NULL);
	}
	else {
	        cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	        cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	        cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	}

	cfi_write(map, datum, adr);

	/* Drop the lock across the typical programming delay */
	cfi_spin_unlock(chip->mutex);
	cfi_udelay(chip->word_write_time);
	cfi_spin_lock(chip->mutex);

	/* Prime the read history with three samples */
	Last[0] = cfi_read(map, adr);
	//	printk("Last[0] is %x\n", Last[0]);
	Last[1] = cfi_read(map, adr);
	//	printk("Last[1] is %x\n", Last[1]);
	Last[2] = cfi_read(map, adr);
	//	printk("Last[2] is %x\n", Last[2]);

	/* Poll until two consecutive reads agree (programming done)
	 * or 10000 iterations have passed. */
	for (Count = 3; Last[(Count - 1) % 4] != Last[(Count - 2) % 4] && Count < 10000; Count++){
		cfi_spin_unlock(chip->mutex);
		cfi_udelay(10);
		cfi_spin_lock(chip->mutex);

	        Last[Count % 4] = cfi_read(map, adr);
		//		printk("Last[%d%%4] is %x\n", Count, Last[Count%4]);
	}

	if (Last[(Count - 1) % 4] != datum){
		/* The settled value is not what we wrote: programming
		 * failed.  0xF0 resets the chip to read-array mode. */
		printk(KERN_WARNING "Last[%ld] is %x, datum is %x\n",(Count - 1) % 4,Last[(Count - 1) % 4],datum);
	        cfi_send_gen_cmd(0xF0, 0, chip->start, map, cfi, cfi->device_type, NULL);
		DISABLE_VPP(map);
		ret = -EIO;
	}
	DISABLE_VPP(map);
	chip->state = FL_READY;
	wake_up(&chip->wq);
	cfi_spin_unlock(chip->mutex);

	return ret;
}
388
389static int cfi_amdstd_write (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
390{
391	struct map_info *map = mtd->priv;
392	struct cfi_private *cfi = map->fldrv_priv;
393	int ret = 0;
394	int chipnum;
395	unsigned long ofs, chipstart;
396
397	*retlen = 0;
398	if (!len)
399		return 0;
400
401	chipnum = to >> cfi->chipshift;
402	ofs = to  - (chipnum << cfi->chipshift);
403	chipstart = cfi->chips[chipnum].start;
404
405	/* If it's not bus-aligned, do the first byte write */
406	if (ofs & (CFIDEV_BUSWIDTH-1)) {
407		unsigned long bus_ofs = ofs & ~(CFIDEV_BUSWIDTH-1);
408		int i = ofs - bus_ofs;
409		int n = 0;
410		u_char tmp_buf[4];
411		__u32 datum;
412
413		map->copy_from(map, tmp_buf, bus_ofs + cfi->chips[chipnum].start, CFIDEV_BUSWIDTH);
414		while (len && i < CFIDEV_BUSWIDTH)
415			tmp_buf[i++] = buf[n++], len--;
416
417		if (cfi_buswidth_is_2()) {
418			datum = *(__u16*)tmp_buf;
419		} else if (cfi_buswidth_is_4()) {
420			datum = *(__u32*)tmp_buf;
421		} else {
422			return -EINVAL;  /* should never happen, but be safe */
423		}
424
425		ret = do_write_oneword(map, &cfi->chips[chipnum],
426				bus_ofs, datum, 0);
427		if (ret)
428			return ret;
429
430		ofs += n;
431		buf += n;
432		(*retlen) += n;
433
434		if (ofs >> cfi->chipshift) {
435			chipnum ++;
436			ofs = 0;
437			if (chipnum == cfi->numchips)
438				return 0;
439		}
440	}
441
442	/* Go into unlock bypass mode */
443	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
444	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
445	cfi_send_gen_cmd(0x20, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
446
447	/* We are now aligned, write as much as possible */
448	while(len >= CFIDEV_BUSWIDTH) {
449		__u32 datum;
450
451		if (cfi_buswidth_is_1()) {
452			datum = *(__u8*)buf;
453		} else if (cfi_buswidth_is_2()) {
454			datum = *(__u16*)buf;
455		} else if (cfi_buswidth_is_4()) {
456			datum = *(__u32*)buf;
457		} else {
458			return -EINVAL;
459		}
460		ret = do_write_oneword(map, &cfi->chips[chipnum],
461				       ofs, datum, cfi->fast_prog);
462		if (ret) {
463			if (cfi->fast_prog){
464				/* Get out of unlock bypass mode */
465				cfi_send_gen_cmd(0x90, 0, chipstart, map, cfi, cfi->device_type, NULL);
466				cfi_send_gen_cmd(0x00, 0, chipstart, map, cfi, cfi->device_type, NULL);
467			}
468			return ret;
469		}
470
471		ofs += CFIDEV_BUSWIDTH;
472		buf += CFIDEV_BUSWIDTH;
473		(*retlen) += CFIDEV_BUSWIDTH;
474		len -= CFIDEV_BUSWIDTH;
475
476		if (ofs >> cfi->chipshift) {
477			if (cfi->fast_prog){
478				/* Get out of unlock bypass mode */
479				cfi_send_gen_cmd(0x90, 0, chipstart, map, cfi, cfi->device_type, NULL);
480				cfi_send_gen_cmd(0x00, 0, chipstart, map, cfi, cfi->device_type, NULL);
481			}
482
483			chipnum ++;
484			ofs = 0;
485			if (chipnum == cfi->numchips)
486				return 0;
487			chipstart = cfi->chips[chipnum].start;
488			if (cfi->fast_prog){
489				/* Go into unlock bypass mode for next set of chips */
490				cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
491				cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
492				cfi_send_gen_cmd(0x20, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
493			}
494		}
495	}
496
497	if (cfi->fast_prog){
498		/* Get out of unlock bypass mode */
499		cfi_send_gen_cmd(0x90, 0, chipstart, map, cfi, cfi->device_type, NULL);
500		cfi_send_gen_cmd(0x00, 0, chipstart, map, cfi, cfi->device_type, NULL);
501	}
502
503	if (len & (CFIDEV_BUSWIDTH-1)) {
504		int i = 0, n = 0;
505		u_char tmp_buf[4];
506		__u32 datum;
507
508		map->copy_from(map, tmp_buf, ofs + cfi->chips[chipnum].start, CFIDEV_BUSWIDTH);
509		while (len--)
510			tmp_buf[i++] = buf[n++];
511
512		if (cfi_buswidth_is_2()) {
513			datum = *(__u16*)tmp_buf;
514		} else if (cfi_buswidth_is_4()) {
515			datum = *(__u32*)tmp_buf;
516		} else {
517			return -EINVAL;  /* should never happen, but be safe */
518		}
519
520		ret = do_write_oneword(map, &cfi->chips[chipnum],
521				ofs, datum, 0);
522		if (ret)
523			return ret;
524
525		(*retlen) += n;
526	}
527
528	return 0;
529}
530
/* Erase the single erase block at chip-relative address `adr'.
 *
 * Sleeps until the chip is FL_READY, issues the 6-cycle AMD sector
 * erase sequence, then polls until the 0x80 status bit reads back set
 * (DQ7 data polling; CMD() presumably widens the mask across
 * interleaved chips -- confirm against cfi.h).  Honours erase-suspend
 * by sleeping whenever chip->state stops being FL_ERASING.  Returns 0
 * on success or -EIO after a ~20 second timeout.
 */
static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	unsigned int status;
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned int rdy_mask;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	cfi_spin_lock(chip->mutex);

	if (chip->state != FL_READY){
		/* Chip busy: sleep on its waitqueue and retry */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		cfi_spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;

		goto retry;
	}

	chip->state = FL_ERASING;

	adr += chip->start;
	ENABLE_VPP(map);
	/* AMD sector erase: unlock, 0x80, unlock again, 0x30 to the
	 * sector address itself. */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
	cfi_write(map, CMD(0x30), adr);

	timeo = jiffies + (HZ*20);

	/* Erases take a long time: give up the CPU for a second
	 * before starting to poll. */
	cfi_spin_unlock(chip->mutex);
	schedule_timeout(HZ);
	cfi_spin_lock(chip->mutex);

	rdy_mask = CMD(0x80);

	/* Once the state machine's known to be working I'll do that */

	while ( ( (status = cfi_read(map,adr)) & rdy_mask ) != rdy_mask ) {
		static int z=0;

		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			cfi_spin_unlock(chip->mutex);
			printk(KERN_DEBUG "erase suspended. Sleeping\n");

			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ*2);
			cfi_spin_lock(chip->mutex);
			continue;
		}

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_READY;
			cfi_spin_unlock(chip->mutex);
			printk(KERN_WARNING "waiting for erase to complete timed out.");
			DISABLE_VPP(map);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		cfi_spin_unlock(chip->mutex);

		z++;
		/* Debug chatter, compiled out via the `0 &&' */
		if ( 0 && !(z % 100 ))
			printk(KERN_WARNING "chip not ready yet after erase. looping\n");

		cfi_udelay(1);

		cfi_spin_lock(chip->mutex);
		continue;
	}

	/* Done and happy. */
	DISABLE_VPP(map);
	chip->state = FL_READY;
	wake_up(&chip->wq);
	cfi_spin_unlock(chip->mutex);
	return 0;
}
623
/* Erase handler for layouts with more than one erase-block geometry.
 *
 * Validates that both ends of [instr->addr, instr->addr + instr->len)
 * are aligned to the erase size of whichever region they fall in,
 * then erases block by block, advancing the region index whenever the
 * address walks past the end of the current region.  Returns 0 on
 * success (after invoking instr->callback), or -EINVAL / a chip
 * error.
 */
static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if (instr->addr > mtd->size)
		return -EINVAL;

	if ((instr->len + instr->addr) > mtd->size)
		return -EINVAL;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/

	while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
	       i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (instr->addr & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((instr->addr + instr->len) & (regions[i].erasesize-1))
		return -EINVAL;

	/* Translate into a chip number and chip-relative address */
	chipnum = instr->addr >> cfi->chipshift;
	adr = instr->addr - (chipnum << cfi->chipshift);
	len = instr->len;

	/* Restart from the region the erase begins in */
	i=first;

	while(len) {
		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);

		if (ret)
			return ret;

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		/* Step to the next region when we pass the end of the
		 * current one (comparison done modulo chip size). */
		if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
			i++;

		/* Wrapped past the end of this chip: move to the next */
		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
			break;
		}
	}

	instr->state = MTD_ERASE_DONE;
	if (instr->callback)
		instr->callback(instr);

	return 0;
}
715
716static int cfi_amdstd_erase_onesize(struct mtd_info *mtd, struct erase_info *instr)
717{
718	struct map_info *map = mtd->priv;
719	struct cfi_private *cfi = map->fldrv_priv;
720	unsigned long adr, len;
721	int chipnum, ret = 0;
722
723	if (instr->addr & (mtd->erasesize - 1))
724		return -EINVAL;
725
726	if (instr->len & (mtd->erasesize -1))
727		return -EINVAL;
728
729	if ((instr->len + instr->addr) > mtd->size)
730		return -EINVAL;
731
732	chipnum = instr->addr >> cfi->chipshift;
733	adr = instr->addr - (chipnum << cfi->chipshift);
734	len = instr->len;
735
736	while(len) {
737		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
738
739		if (ret)
740			return ret;
741
742		adr += mtd->erasesize;
743		len -= mtd->erasesize;
744
745		if (adr >> cfi->chipshift) {
746			adr = 0;
747			chipnum++;
748
749			if (chipnum >= cfi->numchips)
750			break;
751		}
752	}
753
754	instr->state = MTD_ERASE_DONE;
755	if (instr->callback)
756		instr->callback(instr);
757
758	return 0;
759}
760
/* MTD sync method: wait for every chip to become idle, park each one
 * in FL_SYNCING so no new operation can start, then release them all
 * back to their previous state.
 */
static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		cfi_spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			/* Idle: remember the state and claim the chip */
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* fall through */
		case FL_SYNCING:
			cfi_spin_unlock(chip->mutex);
			break;

		default:
			/* Not an idle state */
			add_wait_queue(&chip->wq, &wait);

			cfi_spin_unlock(chip->mutex);

			schedule();

		        remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		cfi_spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		cfi_spin_unlock(chip->mutex);
	}
}
819
820
/* MTD power-management suspend method: put every idle chip into
 * FL_PM_SUSPENDED.  If any chip is busy, give up with -EAGAIN and
 * roll back the chips already suspended.
 */
static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
//printk("suspend\n");

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		cfi_spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			/* Idle: remember the state and suspend the chip */
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* fall through */
		case FL_PM_SUSPENDED:
			break;

		default:
			/* Operation in flight: cannot suspend now */
			ret = -EAGAIN;
			break;
		}
		cfi_spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
    		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			cfi_spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			cfi_spin_unlock(chip->mutex);
		}
	}

	return ret;
}
874
875static void cfi_amdstd_resume(struct mtd_info *mtd)
876{
877	struct map_info *map = mtd->priv;
878	struct cfi_private *cfi = map->fldrv_priv;
879	int i;
880	struct flchip *chip;
881//printk("resume\n");
882
883	for (i=0; i<cfi->numchips; i++) {
884
885		chip = &cfi->chips[i];
886
887		cfi_spin_lock(chip->mutex);
888
889		if (chip->state == FL_PM_SUSPENDED) {
890			chip->state = FL_READY;
891			cfi_write(map, CMD(0xF0), chip->start);
892			wake_up(&chip->wq);
893		}
894		else
895			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
896
897		cfi_spin_unlock(chip->mutex);
898	}
899}
900
901static void cfi_amdstd_destroy(struct mtd_info *mtd)
902{
903	struct map_info *map = mtd->priv;
904	struct cfi_private *cfi = map->fldrv_priv;
905	kfree(cfi->cmdset_priv);
906	kfree(cfi);
907}
908
/* Name under which cfi_cmdset_0002() is registered with the
 * inter_module mechanism so cfi_probe can look it up at runtime. */
static char im_name[]="cfi_cmdset_0002";
910
/* Module init: export cfi_cmdset_0002() under im_name via the 2.4
 * inter_module registry.  Always succeeds. */
int __init cfi_amdstd_init(void)
{
	inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0002);
	return 0;
}
916
/* Module exit: withdraw the inter_module registration made at init. */
static void __exit cfi_amdstd_exit(void)
{
	inter_module_unregister(im_name);
}
921
922module_init(cfi_amdstd_init);
923module_exit(cfi_amdstd_exit);
924
925MODULE_LICENSE("GPL");
926MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
927MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
928