/*
 * Common Flash Interface support:
 *   SST Standard Vendor Command Set (ID 0x0701)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * This code is GPL
 *
 * $Id: cfi_cmdset_0701.c,v 1.1.1.1 2008/10/15 03:26:35 james26_jang Exp $
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>

static int cfi_sststd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_sststd_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_sststd_erase_onesize(struct mtd_info *, struct erase_info *);
static int cfi_sststd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_sststd_sync (struct mtd_info *);
static int cfi_sststd_suspend (struct mtd_info *);
static void cfi_sststd_resume (struct mtd_info *);

static void cfi_sststd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0701(struct map_info *, int);
static struct mtd_info *cfi_sststd_setup (struct map_info *);

static struct mtd_chip_driver cfi_sststd_chipdrv = {
	probe: NULL, /* Not usable directly */
	destroy: cfi_sststd_destroy,
	name: "cfi_cmdset_0701",
	module: THIS_MODULE
};

struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ofs_factor = cfi->interleave * cfi->device_type;
	int i;
	__u8 major, minor;
	__u32 base = cfi->chips[0].start;

	if (cfi->cfi_mode == 1) {
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;

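		/*
		 * SST parts take their unlock cycles at 0x5555/0x2AAA;
		 * the two unlock writes followed by 0x98 enter CFI Query
		 * mode, and 0xF0 (sent below) returns the chip to read
		 * array mode.
		 */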
		cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
		cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
		cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL);

		major = cfi_read_query(map, base + (adr+3)*ofs_factor);
		minor = cfi_read_query(map, base + (adr+4)*ofs_factor);

		printk(" SST Query Table v%c.%c at 0x%4.4X\n",
		       major, minor, adr);
		cfi_send_gen_cmd(0xf0, 0x5555, base, map, cfi, cfi->device_type, NULL);

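		/*
		 * Autoselect mode: read out the manufacturer and device
		 * IDs, then re-enter CFI Query mode.
		 */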
		cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
		cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
		cfi_send_gen_cmd(0x90, 0x5555, base, map, cfi, cfi->device_type, NULL);
		cfi->mfr = cfi_read_query(map, base);
		cfi->id = cfi_read_query(map, base + ofs_factor);

		cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
		cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
		cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL);

		switch (cfi->device_type) {
		case CFI_DEVICETYPE_X16:
			cfi->addr_unlock1 = 0x5555;
			cfi->addr_unlock2 = 0x2AAA;
			break;
		default:
			printk(KERN_NOTICE "Eep. Unknown cfi_cmdset_0701 device type %d\n", cfi->device_type);
			return NULL;
		}
	} /* CFI mode */

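	/*
	 * The CFI timeout fields are log2-encoded: the typical word
	 * write time is 2^WordWriteTimeoutTyp microseconds and the
	 * typical block erase time is 2^BlockEraseTimeoutTyp
	 * milliseconds, so 1<<n recovers each value in its native unit.
	 */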
	for (i=0; i<cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
	}

	map->fldrv = &cfi_sststd_chipdrv;
	MOD_INC_USE_COUNT;

	cfi_send_gen_cmd(0xf0, 0x5555, base, map, cfi, cfi->device_type, NULL);
	return cfi_sststd_setup(map);
}

static struct mtd_info *cfi_sststd_setup(struct map_info *map)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	printk("number of %s chips: %d\n", (cfi->cfi_mode) ? "CFI" : "JEDEC", cfi->numchips);

	if (!mtd) {
		printk("Failed to allocate memory for MTD device\n");
		kfree(cfi->cmdset_priv);
		return NULL;
	}

	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

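	/*
	 * Each EraseRegionInfo word packs the region geometry: bits
	 * 31-16 hold the block size in units of 256 bytes (hence the
	 * (x >> 8) & ~0xff below), and bits 15-0 hold the number of
	 * blocks in the region minus one.
	 */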
	if (cfi->cfiq->NumEraseRegions == 1) {
		/* No need to muck about with multiple erase sizes */
		mtd->erasesize = ((cfi->cfiq->EraseRegionInfo[0] >> 8) & ~0xff) * cfi->interleave;
	} else {
		unsigned long offset = 0;
		int i,j;

		mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
		mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) * mtd->numeraseregions, GFP_KERNEL);
		if (!mtd->eraseregions) {
			printk("Failed to allocate memory for MTD erase region info\n");
			kfree(cfi->cmdset_priv);
			kfree(mtd);
			return NULL;
		}

		for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
			unsigned long ernum, ersize;
			ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
			ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

			if (mtd->erasesize < ersize) {
				mtd->erasesize = ersize;
			}
			for (j=0; j<cfi->numchips; j++) {
				mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
				mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
				mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
			}
			offset += (ersize * ernum);
		}

		// debug
		for (i=0; i<mtd->numeraseregions; i++) {
			printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
			       i, mtd->eraseregions[i].offset,
			       mtd->eraseregions[i].erasesize,
			       mtd->eraseregions[i].numblocks);
		}
	}

	switch (CFIDEV_BUSWIDTH)
	{
	case 1:
	case 2:
	case 4:
		if (mtd->numeraseregions > 1)
			mtd->erase = cfi_sststd_erase_varsize;
		else
			mtd->erase = cfi_sststd_erase_onesize;
		mtd->read = cfi_sststd_read;
		mtd->write = cfi_sststd_write;
		break;

	default:
		printk("Unsupported buswidth\n");
		kfree(mtd);
		kfree(cfi->cmdset_priv);
		return NULL;
	}
	mtd->sync = cfi_sststd_sync;
	mtd->suspend = cfi_sststd_suspend;
	mtd->resume = cfi_sststd_resume;
	mtd->flags = MTD_CAP_NORFLASH;
	map->fldrv = &cfi_sststd_chipdrv;
	mtd->name = map->name;
	MOD_INC_USE_COUNT;
	return mtd;
}

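/*
 * All chip operations below follow the same locking pattern: take the
 * chip mutex, and if the chip is busy in some other state, sleep on
 * the chip's wait queue and retry once woken.  Whoever completes an
 * operation sets the state back to FL_READY and calls wake_up().
 */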
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;

 retry:
	cfi_spin_lock(chip->mutex);

	if (chip->state != FL_READY){
		printk("Waiting for chip to read, status = %d\n", chip->state);
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		cfi_spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	map->copy_from(map, buf, adr, len);

	wake_up(&chip->wq);
	cfi_spin_unlock(chip->mutex);

	return 0;
}

static int cfi_sststd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, __u32 datum, int fast)
{
	unsigned long timeo = jiffies + HZ;
	unsigned int Last[4];
	unsigned long Count = 0;
	struct cfi_private *cfi = map->fldrv_priv;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

 retry:
	cfi_spin_lock(chip->mutex);

	if (chip->state != FL_READY){
		printk("Waiting for chip to write, status = %d\n", chip->state);
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		cfi_spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
		printk("Wake up to write:\n");
		timeo = jiffies + HZ;

		goto retry;
	}

	chip->state = FL_WRITING;

	adr += chip->start;
	ENABLE_VPP(map);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X16, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X16, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X16, NULL);

	cfi_write(map, datum, adr);

	cfi_spin_unlock(chip->mutex);
	cfi_udelay(chip->word_write_time);
	cfi_spin_lock(chip->mutex);

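	/*
	 * Poll by repeated reads: while the program operation is in
	 * progress the part returns toggling status bits, so two
	 * consecutive reads returning the same value indicate that the
	 * embedded algorithm has finished.  Last[] is a small ring
	 * buffer of the most recent reads.
	 */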
	Last[0] = cfi_read(map, adr);
	Last[1] = cfi_read(map, adr);
	Last[2] = cfi_read(map, adr);

	for (Count = 3; Last[(Count - 1) % 4] != Last[(Count - 2) % 4] && Count < 10000; Count++){
		cfi_spin_unlock(chip->mutex);
		cfi_udelay(10);
		cfi_spin_lock(chip->mutex);

		Last[Count % 4] = cfi_read(map, adr);
	}

	if (Last[(Count - 1) % 4] != datum){
		printk("Last[%lu] is %x, datum is %x\n", (Count - 1) % 4, Last[(Count - 1) % 4], datum);
		cfi_send_gen_cmd(0xF0, 0, chip->start, map, cfi, cfi->device_type, NULL);
		ret = -EIO;
	}
	DISABLE_VPP(map);
	chip->state = FL_READY;
	wake_up(&chip->wq);
	cfi_spin_unlock(chip->mutex);

	return ret;
}

static int cfi_sststd_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

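	/*
	 * Unaligned head: read back the full bus word containing the
	 * start offset, merge in the new bytes, and program the merged
	 * word back (read-modify-write).  The unaligned tail below is
	 * handled the same way.
	 */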
	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (CFIDEV_BUSWIDTH-1)) {
		unsigned long bus_ofs = ofs & ~(CFIDEV_BUSWIDTH-1);
		int i = ofs - bus_ofs;
		int n = 0;
		u_char tmp_buf[4];
		__u32 datum;

		map->copy_from(map, tmp_buf, bus_ofs + cfi->chips[chipnum].start, CFIDEV_BUSWIDTH);
		while (len && i < CFIDEV_BUSWIDTH) {
			tmp_buf[i++] = buf[n++];
			len--;
		}

		if (cfi_buswidth_is_2()) {
			datum = *(__u16*)tmp_buf;
		} else if (cfi_buswidth_is_4()) {
			datum = *(__u32*)tmp_buf;
		} else {
			return -EINVAL;  /* should never happen, but be safe */
		}

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum, 0);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while (len >= CFIDEV_BUSWIDTH) {
		__u32 datum;

		if (cfi_buswidth_is_1()) {
			datum = *(__u8*)buf;
		} else if (cfi_buswidth_is_2()) {
			datum = *(__u16*)buf;
		} else if (cfi_buswidth_is_4()) {
			datum = *(__u32*)buf;
		} else {
			return -EINVAL;
		}
		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, cfi->fast_prog);
		if (ret) {
			return ret;
		}

		ofs += CFIDEV_BUSWIDTH;
		buf += CFIDEV_BUSWIDTH;
		(*retlen) += CFIDEV_BUSWIDTH;
		len -= CFIDEV_BUSWIDTH;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	if (len & (CFIDEV_BUSWIDTH-1)) {
		int i = 0, n = 0;
		u_char tmp_buf[4];
		__u32 datum;

		map->copy_from(map, tmp_buf, ofs + cfi->chips[chipnum].start, CFIDEV_BUSWIDTH);
		while (len--)
			tmp_buf[i++] = buf[n++];

		if (cfi_buswidth_is_2()) {
			datum = *(__u16*)tmp_buf;
		} else if (cfi_buswidth_is_4()) {
			datum = *(__u32*)tmp_buf;
		} else {
			return -EINVAL;  /* should never happen, but be safe */
		}

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, 0);
		if (ret)
			return ret;

		(*retlen) += n;
	}

	return 0;
}

static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	unsigned int status;
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned int rdy_mask;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	cfi_spin_lock(chip->mutex);

	if (chip->state != FL_READY){
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		cfi_spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;

		goto retry;
	}

	chip->state = FL_ERASING;

	adr += chip->start;
	ENABLE_VPP(map);
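	/*
	 * Standard six-cycle sector erase sequence: two unlock cycles,
	 * erase setup (0x80), two more unlock cycles, then the sector
	 * erase command (0x30) written to an address within the block
	 * to be erased.
	 */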
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X16, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X16, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X16, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X16, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X16, NULL);
	cfi_write(map, CMD(0x30), adr);

	timeo = jiffies + (HZ*20);

	cfi_spin_unlock(chip->mutex);
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(HZ);
	cfi_spin_lock(chip->mutex);

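	/*
	 * DQ7 data polling: while the erase is in progress DQ7 reads
	 * back 0; once the block is erased it reads 1, the true
	 * (erased) data.  CMD(0x80) replicates the DQ7 mask across all
	 * interleaved chips on the bus.
	 */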
	rdy_mask = CMD(0x80);

	/* FIXME: poll with a timer and sleep instead of busy-waiting;
	   once the state machine's known to be working I'll do that. */

	while ( ( (status = cfi_read(map,adr)) & rdy_mask ) != rdy_mask ) {
		static int z=0;

		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			cfi_spin_unlock(chip->mutex);
			printk("erase suspended. Sleeping\n");

			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ*2);
			cfi_spin_lock(chip->mutex);
			continue;
		}

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_READY;
			cfi_spin_unlock(chip->mutex);
			printk("waiting for erase to complete timed out.\n");
			DISABLE_VPP(map);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		cfi_spin_unlock(chip->mutex);

		z++;
		if (0 && !(z % 100))
			printk("chip not ready yet after erase. looping\n");

		cfi_udelay(1);

		cfi_spin_lock(chip->mutex);
		continue;
	}

	/* Done and happy. */
	DISABLE_VPP(map);
	chip->state = FL_READY;
	wake_up(&chip->wq);
	cfi_spin_unlock(chip->mutex);
	return 0;
}

static int cfi_sststd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if (instr->addr > mtd->size)
		return -EINVAL;

	if ((instr->len + instr->addr) > mtd->size)
		return -EINVAL;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which end before the start of the
	   requested erase. Actually, to save on the calculations, we
	   skip to the first erase region which starts after the start
	   of the requested erase, and then go back one.
	*/

	while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
		i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check that the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (instr->addr & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i < mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((instr->addr + instr->len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = instr->addr >> cfi->chipshift;
	adr = instr->addr - (chipnum << cfi->chipshift);
	len = instr->len;

	i = first;

	while (len) {
		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);

		if (ret)
			return ret;

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		if (adr % (1<<cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) % (1<<cfi->chipshift)))
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	instr->state = MTD_ERASE_DONE;
	if (instr->callback)
		instr->callback(instr);

	return 0;
}

static int cfi_sststd_erase_onesize(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum, ret = 0;

	if (instr->addr & (mtd->erasesize - 1))
		return -EINVAL;

	if (instr->len & (mtd->erasesize - 1))
		return -EINVAL;

	if ((instr->len + instr->addr) > mtd->size)
		return -EINVAL;

	chipnum = instr->addr >> cfi->chipshift;
	adr = instr->addr - (chipnum << cfi->chipshift);
	len = instr->len;

	while (len) {
		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);

		if (ret)
			return ret;

		adr += mtd->erasesize;
		len -= mtd->erasesize;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	instr->state = MTD_ERASE_DONE;
	if (instr->callback)
		instr->callback(instr);

	return 0;
}

static void cfi_sststd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		cfi_spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* fall through */
		case FL_SYNCING:
			cfi_spin_unlock(chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			cfi_spin_unlock(chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		cfi_spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		cfi_spin_unlock(chip->mutex);
	}
}

static int cfi_sststd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		cfi_spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* fall through */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		cfi_spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			cfi_spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			cfi_spin_unlock(chip->mutex);
		}
	}

	return ret;
}

static void cfi_sststd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		cfi_spin_lock(chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			cfi_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk("Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		cfi_spin_unlock(chip->mutex);
	}
}

static void cfi_sststd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	kfree(cfi->cmdset_priv);
	kfree(cfi);
}

#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
#define cfi_sststd_init init_module
#define cfi_sststd_exit cleanup_module
#endif

static char im_name[] = "cfi_cmdset_0701";

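/*
 * The generic CFI probe resolves command-set handlers by name at run
 * time through the inter_module registry, so map drivers never link
 * against this module directly.  A minimal sketch of the lookup side
 * (hypothetical caller, assuming the 2.4 inter_module API):
 *
 *	struct mtd_info *(*cmdset)(struct map_info *, int);
 *
 *	cmdset = (struct mtd_info *(*)(struct map_info *, int))
 *			inter_module_get("cfi_cmdset_0701");
 *	if (cmdset)
 *		mtd = cmdset(map, 1);	(passing 1 selects the primary table)
 */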
mod_init_t cfi_sststd_init(void)
{
	inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0701);
	return 0;
}

mod_exit_t cfi_sststd_exit(void)
{
	inter_module_unregister(im_name);
}

module_init(cfi_sststd_init);
module_exit(cfi_sststd_exit);