/*
 * MTD map driver for AMD compatible flash chips (non-CFI)
 *
 * Author: Jonas Holmberg <jonas.holmberg@axis.com>
 *
 * $Id: amd_flash.c,v 1.1.1.1 2008/10/15 03:26:35 james26_jang Exp $
 *
 * Copyright (c) 2001 Axis Communications AB
 *
 * This file is under GPL.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/flashchip.h>

/* There's no limit. It exists only to avoid realloc. */
#define MAX_AMD_CHIPS 8

#define DEVICE_TYPE_X8	(8 / 8)
#define DEVICE_TYPE_X16	(16 / 8)
#define DEVICE_TYPE_X32	(32 / 8)

/* Addresses */
#define ADDR_MANUFACTURER		0x0000
#define ADDR_DEVICE_ID			0x0001
#define ADDR_SECTOR_LOCK		0x0002
#define ADDR_HANDSHAKE			0x0003
#define ADDR_UNLOCK_1			0x0555
#define ADDR_UNLOCK_2			0x02AA

/* Commands */
#define CMD_UNLOCK_DATA_1		0x00AA
#define CMD_UNLOCK_DATA_2		0x0055
#define CMD_MANUFACTURER_UNLOCK_DATA	0x0090
#define CMD_UNLOCK_BYPASS_MODE		0x0020
#define CMD_PROGRAM_UNLOCK_DATA		0x00A0
#define CMD_RESET_DATA			0x00F0
#define CMD_SECTOR_ERASE_UNLOCK_DATA	0x0080
#define CMD_SECTOR_ERASE_UNLOCK_DATA_2	0x0030

#define CMD_UNLOCK_SECTOR		0x0060

/* Manufacturers */
#define MANUFACTURER_AMD	0x0001
#define MANUFACTURER_ATMEL	0x001F
#define MANUFACTURER_FUJITSU	0x0004
#define MANUFACTURER_ST		0x0020
#define MANUFACTURER_SST	0x00BF
#define MANUFACTURER_TOSHIBA	0x0098

/* AMD */
#define AM29F800BB	0x2258
#define AM29F800BT	0x22D6
#define AM29LV800BB	0x225B
#define AM29LV800BT	0x22DA
#define AM29LV160DT	0x22C4
#define AM29LV160DB	0x2249
#define AM29BDS323D	0x22D1
#define AM29BDS643D	0x227E

/* Atmel */
#define AT49xV16x	0x00C0
#define AT49xV16xT	0x00C2

/* Fujitsu */
#define MBM29LV160TE	0x22C4
#define MBM29LV160BE	0x2249

/* ST - www.st.com */
#define M29W800T	0x00D7
#define M29W160DT	0x22C4
#define M29W160DB	0x2249

/* SST */
#define SST39LF800	0x2781
#define SST39LF160	0x2782

/* Toshiba */
#define TC58FVT160	0x00C2
#define TC58FVB160	0x0043

#define D6_MASK	0x40

struct amd_flash_private {
	int device_type;
	int interleave;
	int numchips;
	unsigned long chipshift;
//	const char *im_name;
	struct flchip chips[0];
};

struct amd_flash_info {
	const __u16 mfr_id;
	const __u16 dev_id;
	const char *name;
	const u_long size;
	const int numeraseregions;
	const struct mtd_erase_region_info regions[4];
};



static int amd_flash_read(struct mtd_info *, loff_t, size_t, size_t *,
			  u_char *);
static int amd_flash_write(struct mtd_info *, loff_t, size_t, size_t *,
			   const u_char *);
static int amd_flash_erase(struct mtd_info *, struct erase_info *);
static void amd_flash_sync(struct mtd_info *);
static int amd_flash_suspend(struct mtd_info *);
static void amd_flash_resume(struct mtd_info *);
static void amd_flash_destroy(struct mtd_info *);
static struct mtd_info *amd_flash_probe(struct map_info *map);


static struct mtd_chip_driver amd_flash_chipdrv = {
	probe: amd_flash_probe,
	destroy: amd_flash_destroy,
	name: "amd_flash",
	module: THIS_MODULE
};
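
/*
 * Typical use from a board map driver (a sketch only, not part of the
 * original file; the map fields and names below are illustrative):
 *
 *	static struct map_info board_map = {
 *		name:     "Board flash",
 *		size:     0x00200000,
 *		buswidth: 2,
 *		// plus the read8/read16/copy_from/write8/... accessors
 *	};
 *
 *	struct mtd_info *mtd = do_map_probe("amd_flash", &board_map);
 *	if (mtd)
 *		add_mtd_device(mtd);
 */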



static const char im_name[] = "amd_flash";



static inline __u32 wide_read(struct map_info *map, __u32 addr)
{
	if (map->buswidth == 1) {
		return map->read8(map, addr);
	} else if (map->buswidth == 2) {
		return map->read16(map, addr);
	} else if (map->buswidth == 4) {
		return map->read32(map, addr);
	}

	return 0;
}

static inline void wide_write(struct map_info *map, __u32 val, __u32 addr)
{
	if (map->buswidth == 1) {
		map->write8(map, val, addr);
	} else if (map->buswidth == 2) {
		map->write16(map, val, addr);
	} else if (map->buswidth == 4) {
		map->write32(map, val, addr);
	}
}

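/*
 * Widen a command for interleaved chips: with two x16 devices on a 32-bit
 * bus, both chips must see the command at once, so the 16-bit command is
 * mirrored into both halves of the bus word (e.g. 0x00F0 -> 0x00F000F0).
 */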
static inline __u32 make_cmd(struct map_info *map, __u32 cmd)
{
	const struct amd_flash_private *private = map->fldrv_priv;
	if ((private->interleave == 2) &&
	    (private->device_type == DEVICE_TYPE_X16)) {
		cmd |= (cmd << 16);
	}

	return cmd;
}

static inline void send_unlock(struct map_info *map, unsigned long base)
{
	wide_write(map, (CMD_UNLOCK_DATA_1 << 16) | CMD_UNLOCK_DATA_1,
		   base + (map->buswidth * ADDR_UNLOCK_1));
	wide_write(map, (CMD_UNLOCK_DATA_2 << 16) | CMD_UNLOCK_DATA_2,
		   base + (map->buswidth * ADDR_UNLOCK_2));
}

static inline void send_cmd(struct map_info *map, unsigned long base, __u32 cmd)
{
	send_unlock(map, base);
	wide_write(map, make_cmd(map, cmd),
		   base + (map->buswidth * ADDR_UNLOCK_1));
}

static inline void send_cmd_to_addr(struct map_info *map, unsigned long base,
				    __u32 cmd, unsigned long addr)
{
	send_unlock(map, base);
	wide_write(map, make_cmd(map, cmd), addr);
}

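/*
 * AMD-style devices toggle DQ6 on every read while an embedded program or
 * erase algorithm is running, so two successive reads returning the same
 * DQ6 value mean the operation has finished. With two interleaved x16
 * chips on a 32-bit bus, each 16-bit half is checked separately.
 */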
static inline int flash_is_busy(struct map_info *map, unsigned long addr,
				int interleave)
{
	if ((interleave == 2) && (map->buswidth == 4)) {
		__u32 read1, read2;

		read1 = wide_read(map, addr);
		read2 = wide_read(map, addr);

		return (((read1 >> 16) & D6_MASK) !=
			((read2 >> 16) & D6_MASK)) ||
		       (((read1 & 0xffff) & D6_MASK) !=
			((read2 & 0xffff) & D6_MASK));
	}

	return ((wide_read(map, addr) & D6_MASK) !=
		(wide_read(map, addr) & D6_MASK));
}

static inline void unlock_sector(struct map_info *map, unsigned long sect_addr,
				 int unlock)
{
	/* Sector lock address. A6 = 1 for unlock, A6 = 0 for lock */
	int SLA = unlock ?
		(sect_addr |  (0x40 * map->buswidth)) :
		(sect_addr & ~(0x40 * map->buswidth));

	__u32 cmd = make_cmd(map, CMD_UNLOCK_SECTOR);

	wide_write(map, make_cmd(map, CMD_RESET_DATA), 0);
	wide_write(map, cmd, SLA); /* 1st cycle: write cmd to any address */
	wide_write(map, cmd, SLA); /* 2nd cycle: write cmd to any address */
	wide_write(map, cmd, SLA); /* 3rd cycle: write cmd to SLA */
}

static inline int is_sector_locked(struct map_info *map,
				   unsigned long sect_addr)
{
	int status;

	wide_write(map, CMD_RESET_DATA, 0);
	send_cmd(map, sect_addr, CMD_MANUFACTURER_UNLOCK_DATA);

	/* status is 0x0000 for unlocked and 0x0001 for locked */
	status = wide_read(map, sect_addr + (map->buswidth * ADDR_SECTOR_LOCK));
	wide_write(map, CMD_RESET_DATA, 0);
	return status;
}
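
/*
 * Walk all erase regions of the device and lock or unlock every sector
 * that overlaps the interval [ofs, ofs + len), verifying the resulting
 * lock state of each sector.
 */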
static int amd_flash_do_unlock(struct mtd_info *mtd, loff_t ofs, size_t len,
			       int is_unlock)
{
	struct map_info *map;
	struct mtd_erase_region_info *merip;
	int eraseoffset, erasesize, eraseblocks;
	int i;
	int retval = 0;
	int lock_status;

	map = mtd->priv;

	/* Pass the whole chip through sector by sector and check for each
	   sector if the sector and the given interval overlap */
	for (i = 0; i < mtd->numeraseregions; i++) {
		merip = &mtd->eraseregions[i];

		eraseoffset = merip->offset;
		erasesize = merip->erasesize;
		eraseblocks = merip->numblocks;

		if (ofs > eraseoffset + erasesize)
			continue;

		while (eraseblocks > 0) {
			if (ofs < eraseoffset + erasesize && ofs + len > eraseoffset) {
				unlock_sector(map, eraseoffset, is_unlock);

				lock_status = is_sector_locked(map, eraseoffset);

				if (is_unlock && lock_status) {
					printk(KERN_WARNING
					       "Cannot unlock sector at address %x length %x\n",
					       eraseoffset, merip->erasesize);
					retval = -1;
				} else if (!is_unlock && !lock_status) {
					printk(KERN_WARNING
					       "Cannot lock sector at address %x length %x\n",
					       eraseoffset, merip->erasesize);
					retval = -1;
				}
			}
			eraseoffset += erasesize;
			eraseblocks--;
		}
	}
	return retval;
}

static int amd_flash_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return amd_flash_do_unlock(mtd, ofs, len, 1);
}

static int amd_flash_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return amd_flash_do_unlock(mtd, ofs, len, 0);
}
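
/*
 * A client would typically unlock a region before modifying it and may
 * re-lock it afterwards (a sketch; offsets are illustrative):
 *
 *	mtd->unlock(mtd, ofs, len);
 *	ret = mtd->write(mtd, ofs, len, &retlen, buf);
 *	mtd->lock(mtd, ofs, len);
 */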


/*
 * Reads JEDEC manufacturer ID and device ID and returns the index of the first
 * matching table entry (-1 if not found or alias for already found chip).
 */
static int probe_new_chip(struct mtd_info *mtd, __u32 base,
			  struct flchip *chips,
			  struct amd_flash_private *private,
			  const struct amd_flash_info *table, int table_size)
{
	__u32 mfr_id;
	__u32 dev_id;
	struct map_info *map = mtd->priv;
	struct amd_flash_private temp;
	int i;

	temp.device_type = DEVICE_TYPE_X16;	// Assume X16 (FIXME)
	temp.interleave = 2;
	map->fldrv_priv = &temp;

	/* Enter autoselect mode. */
	send_cmd(map, base, CMD_RESET_DATA);
	send_cmd(map, base, CMD_MANUFACTURER_UNLOCK_DATA);

	mfr_id = wide_read(map, base + (map->buswidth * ADDR_MANUFACTURER));
	dev_id = wide_read(map, base + (map->buswidth * ADDR_DEVICE_ID));

	if ((map->buswidth == 4) && ((mfr_id >> 16) == (mfr_id & 0xffff)) &&
	    ((dev_id >> 16) == (dev_id & 0xffff))) {
		mfr_id &= 0xffff;
		dev_id &= 0xffff;
	} else {
		temp.interleave = 1;
	}

	for (i = 0; i < table_size; i++) {
		if ((mfr_id == table[i].mfr_id) &&
		    (dev_id == table[i].dev_id)) {
			if (chips) {
				int j;

				/* Is this an alias for an already found chip?
				 * In that case that chip should be in
				 * autoselect mode now.
				 */
				for (j = 0; j < private->numchips; j++) {
					__u32 mfr_id_other;
					__u32 dev_id_other;

					mfr_id_other =
						wide_read(map, chips[j].start +
							       (map->buswidth *
								ADDR_MANUFACTURER));
					dev_id_other =
						wide_read(map, chips[j].start +
							       (map->buswidth *
								ADDR_DEVICE_ID));
					if (temp.interleave == 2) {
						mfr_id_other &= 0xffff;
						dev_id_other &= 0xffff;
					}
					if ((mfr_id_other == mfr_id) &&
					    (dev_id_other == dev_id)) {

						/* Exit autoselect mode. */
						send_cmd(map, base,
							 CMD_RESET_DATA);

						return -1;
					}
				}

				if (private->numchips == MAX_AMD_CHIPS) {
					printk(KERN_WARNING
					       "%s: Too many flash chips "
					       "detected. Increase "
					       "MAX_AMD_CHIPS from %d.\n",
					       map->name, MAX_AMD_CHIPS);

					return -1;
				}

				chips[private->numchips].start = base;
				chips[private->numchips].state = FL_READY;
				chips[private->numchips].mutex =
					&chips[private->numchips]._spinlock;
				private->numchips++;
			}

			printk("%s: Found %d x %ldMiB %s at 0x%x\n", map->name,
			       temp.interleave, (table[i].size)/(1024*1024),
			       table[i].name, base);

			mtd->size += table[i].size * temp.interleave;
			mtd->numeraseregions += table[i].numeraseregions;

			break;
		}
	}

	/* Exit autoselect mode. */
	send_cmd(map, base, CMD_RESET_DATA);

	if (i == table_size) {
		printk(KERN_DEBUG "%s: unknown flash device at 0x%x, "
		       "mfr id 0x%x, dev id 0x%x\n", map->name,
		       base, mfr_id, dev_id);
		map->fldrv_priv = NULL;

		return -1;
	}

	private->device_type = temp.device_type;
	private->interleave = temp.interleave;

	return i;
}


static struct mtd_info *amd_flash_probe(struct map_info *map)
{
	/* Keep this table on the stack so that it gets deallocated after the
	 * probe is done.
	 */
	const struct amd_flash_info table[] = {
	{
		mfr_id: MANUFACTURER_AMD,
		dev_id: AM29LV160DT,
		name: "AMD AM29LV160DT",
		size: 0x00200000,
		numeraseregions: 4,
		regions: {
			{ offset: 0x000000, erasesize: 0x10000, numblocks: 31 },
			{ offset: 0x1F0000, erasesize: 0x08000, numblocks:  1 },
			{ offset: 0x1F8000, erasesize: 0x02000, numblocks:  2 },
			{ offset: 0x1FC000, erasesize: 0x04000, numblocks:  1 }
		}
	}, {
		mfr_id: MANUFACTURER_AMD,
		dev_id: AM29LV160DB,
		name: "AMD AM29LV160DB",
		size: 0x00200000,
		numeraseregions: 4,
		regions: {
			{ offset: 0x000000, erasesize: 0x04000, numblocks:  1 },
			{ offset: 0x004000, erasesize: 0x02000, numblocks:  2 },
			{ offset: 0x008000, erasesize: 0x08000, numblocks:  1 },
			{ offset: 0x010000, erasesize: 0x10000, numblocks: 31 }
		}
	}, {
		mfr_id: MANUFACTURER_TOSHIBA,
		dev_id: TC58FVT160,
		name: "Toshiba TC58FVT160",
		size: 0x00200000,
		numeraseregions: 4,
		regions: {
			{ offset: 0x000000, erasesize: 0x10000, numblocks: 31 },
			{ offset: 0x1F0000, erasesize: 0x08000, numblocks:  1 },
			{ offset: 0x1F8000, erasesize: 0x02000, numblocks:  2 },
			{ offset: 0x1FC000, erasesize: 0x04000, numblocks:  1 }
		}
	}, {
		mfr_id: MANUFACTURER_FUJITSU,
		dev_id: MBM29LV160TE,
		name: "Fujitsu MBM29LV160TE",
		size: 0x00200000,
		numeraseregions: 4,
		regions: {
			{ offset: 0x000000, erasesize: 0x10000, numblocks: 31 },
			{ offset: 0x1F0000, erasesize: 0x08000, numblocks:  1 },
			{ offset: 0x1F8000, erasesize: 0x02000, numblocks:  2 },
			{ offset: 0x1FC000, erasesize: 0x04000, numblocks:  1 }
		}
	}, {
		mfr_id: MANUFACTURER_TOSHIBA,
		dev_id: TC58FVB160,
		name: "Toshiba TC58FVB160",
		size: 0x00200000,
		numeraseregions: 4,
		regions: {
			{ offset: 0x000000, erasesize: 0x04000, numblocks:  1 },
			{ offset: 0x004000, erasesize: 0x02000, numblocks:  2 },
			{ offset: 0x008000, erasesize: 0x08000, numblocks:  1 },
			{ offset: 0x010000, erasesize: 0x10000, numblocks: 31 }
		}
	}, {
		mfr_id: MANUFACTURER_FUJITSU,
		dev_id: MBM29LV160BE,
		name: "Fujitsu MBM29LV160BE",
		size: 0x00200000,
		numeraseregions: 4,
		regions: {
			{ offset: 0x000000, erasesize: 0x04000, numblocks:  1 },
			{ offset: 0x004000, erasesize: 0x02000, numblocks:  2 },
			{ offset: 0x008000, erasesize: 0x08000, numblocks:  1 },
			{ offset: 0x010000, erasesize: 0x10000, numblocks: 31 }
		}
	}, {
		mfr_id: MANUFACTURER_AMD,
		dev_id: AM29LV800BB,
		name: "AMD AM29LV800BB",
		size: 0x00100000,
		numeraseregions: 4,
		regions: {
			{ offset: 0x000000, erasesize: 0x04000, numblocks:  1 },
			{ offset: 0x004000, erasesize: 0x02000, numblocks:  2 },
			{ offset: 0x008000, erasesize: 0x08000, numblocks:  1 },
			{ offset: 0x010000, erasesize: 0x10000, numblocks: 15 }
		}
	}, {
		mfr_id: MANUFACTURER_AMD,
		dev_id: AM29F800BB,
		name: "AMD AM29F800BB",
		size: 0x00100000,
		numeraseregions: 4,
		regions: {
			{ offset: 0x000000, erasesize: 0x04000, numblocks:  1 },
			{ offset: 0x004000, erasesize: 0x02000, numblocks:  2 },
			{ offset: 0x008000, erasesize: 0x08000, numblocks:  1 },
			{ offset: 0x010000, erasesize: 0x10000, numblocks: 15 }
		}
	}, {
		mfr_id: MANUFACTURER_AMD,
		dev_id: AM29LV800BT,
		name: "AMD AM29LV800BT",
		size: 0x00100000,
		numeraseregions: 4,
		regions: {
			{ offset: 0x000000, erasesize: 0x10000, numblocks: 15 },
			{ offset: 0x0F0000, erasesize: 0x08000, numblocks:  1 },
			{ offset: 0x0F8000, erasesize: 0x02000, numblocks:  2 },
			{ offset: 0x0FC000, erasesize: 0x04000, numblocks:  1 }
		}
	}, {
		mfr_id: MANUFACTURER_AMD,
		dev_id: AM29F800BT,
		name: "AMD AM29F800BT",
		size: 0x00100000,
		numeraseregions: 4,
		regions: {
			{ offset: 0x000000, erasesize: 0x10000, numblocks: 15 },
			{ offset: 0x0F0000, erasesize: 0x08000, numblocks:  1 },
			{ offset: 0x0F8000, erasesize: 0x02000, numblocks:  2 },
			{ offset: 0x0FC000, erasesize: 0x04000, numblocks:  1 }
		}
	}, {
		/* Note: duplicate dev_id; this top-boot entry is shadowed by
		 * the bottom-boot AM29LV800BB entry above and never matches.
		 */
		mfr_id: MANUFACTURER_AMD,
		dev_id: AM29LV800BB,
		name: "AMD AM29LV800BB",
		size: 0x00100000,
		numeraseregions: 4,
		regions: {
			{ offset: 0x000000, erasesize: 0x10000, numblocks: 15 },
			{ offset: 0x0F0000, erasesize: 0x08000, numblocks:  1 },
			{ offset: 0x0F8000, erasesize: 0x02000, numblocks:  2 },
			{ offset: 0x0FC000, erasesize: 0x04000, numblocks:  1 }
		}
	}, {
		mfr_id: MANUFACTURER_ST,
		dev_id: M29W800T,
		name: "ST M29W800T",
		size: 0x00100000,
		numeraseregions: 4,
		regions: {
			{ offset: 0x000000, erasesize: 0x10000, numblocks: 15 },
			{ offset: 0x0F0000, erasesize: 0x08000, numblocks:  1 },
			{ offset: 0x0F8000, erasesize: 0x02000, numblocks:  2 },
			{ offset: 0x0FC000, erasesize: 0x04000, numblocks:  1 }
		}
	}, {
		mfr_id: MANUFACTURER_ST,
		dev_id: M29W160DT,
		name: "ST M29W160DT",
		size: 0x00200000,
		numeraseregions: 4,
		regions: {
			{ offset: 0x000000, erasesize: 0x10000, numblocks: 31 },
			{ offset: 0x1F0000, erasesize: 0x08000, numblocks:  1 },
			{ offset: 0x1F8000, erasesize: 0x02000, numblocks:  2 },
			{ offset: 0x1FC000, erasesize: 0x04000, numblocks:  1 }
		}
	}, {
		mfr_id: MANUFACTURER_ST,
		dev_id: M29W160DB,
		name: "ST M29W160DB",
		size: 0x00200000,
		numeraseregions: 4,
		regions: {
			{ offset: 0x000000, erasesize: 0x04000, numblocks:  1 },
			{ offset: 0x004000, erasesize: 0x02000, numblocks:  2 },
			{ offset: 0x008000, erasesize: 0x08000, numblocks:  1 },
			{ offset: 0x010000, erasesize: 0x10000, numblocks: 31 }
		}
	}, {
		mfr_id: MANUFACTURER_AMD,
		dev_id: AM29BDS323D,
		name: "AMD AM29BDS323D",
		size: 0x00400000,
		numeraseregions: 3,
		regions: {
			{ offset: 0x000000, erasesize: 0x10000, numblocks: 48 },
			{ offset: 0x300000, erasesize: 0x10000, numblocks: 15 },
			{ offset: 0x3f0000, erasesize: 0x02000, numblocks:  8 },
		}
	}, {
		mfr_id: MANUFACTURER_AMD,
		dev_id: AM29BDS643D,
		name: "AMD AM29BDS643D",
		size: 0x00800000,
		numeraseregions: 3,
		regions: {
			{ offset: 0x000000, erasesize: 0x10000, numblocks: 96 },
			{ offset: 0x600000, erasesize: 0x10000, numblocks: 31 },
			{ offset: 0x7f0000, erasesize: 0x02000, numblocks:  8 },
		}
	}, {
		mfr_id: MANUFACTURER_ATMEL,
		dev_id: AT49xV16x,
		name: "Atmel AT49xV16x",
		size: 0x00200000,
		numeraseregions: 2,
		regions: {
			{ offset: 0x000000, erasesize: 0x02000, numblocks:  8 },
			{ offset: 0x010000, erasesize: 0x10000, numblocks: 31 }
		}
	}, {
		mfr_id: MANUFACTURER_ATMEL,
		dev_id: AT49xV16xT,
		name: "Atmel AT49xV16xT",
		size: 0x00200000,
		numeraseregions: 2,
		regions: {
			{ offset: 0x000000, erasesize: 0x10000, numblocks: 31 },
			{ offset: 0x1F0000, erasesize: 0x02000, numblocks:  8 }
		}
	}
	};

	struct mtd_info *mtd;
	struct flchip chips[MAX_AMD_CHIPS];
	int table_pos[MAX_AMD_CHIPS];
	struct amd_flash_private temp;
	struct amd_flash_private *private;
	u_long size;
	unsigned long base;
	int i;
	int reg_idx;
	int offset;

	mtd = (struct mtd_info*)kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING
		       "%s: kmalloc failed for info structure\n", map->name);
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;

	memset(&temp, 0, sizeof(temp));

	printk("%s: Probing for AMD compatible flash...\n", map->name);

	if ((table_pos[0] = probe_new_chip(mtd, 0, NULL, &temp, table,
					   sizeof(table)/sizeof(table[0])))
	    == -1) {
		printk(KERN_WARNING
		       "%s: Found no AMD compatible device at location zero\n",
		       map->name);
		kfree(mtd);

		return NULL;
	}

	chips[0].start = 0;
	chips[0].state = FL_READY;
	chips[0].mutex = &chips[0]._spinlock;
	temp.numchips = 1;
	for (size = mtd->size; size > 1; size >>= 1) {
		temp.chipshift++;
	}
	switch (temp.interleave) {
		case 2:
			temp.chipshift += 1;
			break;
		case 4:
			temp.chipshift += 2;
			break;
	}
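
	/* chipshift is now log2 of the address range attributed to one
	 * (interleaved) chip position. It is used below and in the I/O
	 * paths to split a linear offset into a chip number
	 * (ofs >> chipshift) and an offset within that chip.
	 */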

	/* Find out if there are any more chips in the map. */
	for (base = (1 << temp.chipshift);
	     base < map->size;
	     base += (1 << temp.chipshift)) {
		int numchips = temp.numchips;
		table_pos[numchips] = probe_new_chip(mtd, base, chips,
			&temp, table, sizeof(table)/sizeof(table[0]));
	}

	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) *
				    mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "%s: Failed to allocate "
		       "memory for MTD erase region info\n", map->name);
		kfree(mtd);
		map->fldrv_priv = NULL;
		return NULL;
	}

	reg_idx = 0;
	offset = 0;
	for (i = 0; i < temp.numchips; i++) {
		int dev_size;
		int j;

		dev_size = 0;
		for (j = 0; j < table[table_pos[i]].numeraseregions; j++) {
			mtd->eraseregions[reg_idx].offset = offset +
				(table[table_pos[i]].regions[j].offset *
				 temp.interleave);
			mtd->eraseregions[reg_idx].erasesize =
				table[table_pos[i]].regions[j].erasesize *
				temp.interleave;
			mtd->eraseregions[reg_idx].numblocks =
				table[table_pos[i]].regions[j].numblocks;
			if (mtd->erasesize <
			    mtd->eraseregions[reg_idx].erasesize) {
				mtd->erasesize =
					mtd->eraseregions[reg_idx].erasesize;
			}
			dev_size += mtd->eraseregions[reg_idx].erasesize *
				    mtd->eraseregions[reg_idx].numblocks;
			reg_idx++;
		}
		offset += dev_size;
	}
	mtd->type = MTD_NORFLASH;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->name = map->name;
	mtd->erase = amd_flash_erase;
	mtd->read = amd_flash_read;
	mtd->write = amd_flash_write;
	mtd->sync = amd_flash_sync;
	mtd->suspend = amd_flash_suspend;
	mtd->resume = amd_flash_resume;
	mtd->lock = amd_flash_lock;
	mtd->unlock = amd_flash_unlock;

	private = kmalloc(sizeof(*private) + (sizeof(struct flchip) *
					      temp.numchips), GFP_KERNEL);
	if (!private) {
		printk(KERN_WARNING
		       "%s: kmalloc failed for private structure\n", map->name);
		kfree(mtd->eraseregions);
		kfree(mtd);
		map->fldrv_priv = NULL;
		return NULL;
	}
	memcpy(private, &temp, sizeof(temp));
	memcpy(private->chips, chips,
	       sizeof(struct flchip) * private->numchips);
	for (i = 0; i < private->numchips; i++) {
		init_waitqueue_head(&private->chips[i].wq);
		spin_lock_init(&private->chips[i]._spinlock);
	}

	map->fldrv_priv = private;

	map->fldrv = &amd_flash_chipdrv;
	MOD_INC_USE_COUNT;

	return mtd;
}



static inline int read_one_chip(struct map_info *map, struct flchip *chip,
				loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;

retry:
	spin_lock_bh(chip->mutex);

	if (chip->state != FL_READY) {
		printk(KERN_INFO "%s: waiting for chip to read, state = %d\n",
		       map->name, chip->state);
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock_bh(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);

		if (signal_pending(current)) {
			return -EINTR;
		}

		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	map->copy_from(map, buf, adr, len);

	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);

	return 0;
}



static int amd_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
			  size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct amd_flash_private *private = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if ((from + len) > mtd->size) {
		printk(KERN_WARNING "%s: read request past end of device "
		       "(0x%lx)\n", map->name, (unsigned long)from + len);

		return -EINVAL;
	}

	/* Offset within the first chip at which the first read should start. */
	chipnum = (from >> private->chipshift);
	ofs = from - (chipnum << private->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long this_len;

		if (chipnum >= private->numchips) {
			break;
		}

		if ((len + ofs - 1) >> private->chipshift) {
			this_len = (1 << private->chipshift) - ofs;
		} else {
			this_len = len;
		}

		ret = read_one_chip(map, &private->chips[chipnum], ofs,
				    this_len, buf);
		if (ret) {
			break;
		}

		*retlen += this_len;
		len -= this_len;
		buf += this_len;

		ofs = 0;
		chipnum++;
	}

	return ret;
}


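/*
 * Program one bus word with the standard AMD embedded-program sequence:
 * the two unlock cycles (0xAA to 0x555, 0x55 to 0x2AA), the program
 * command 0xA0, then a single write of the data to the target address.
 * Completion is detected by polling the DQ6 toggle bit.
 */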
static int write_one_word(struct map_info *map, struct flchip *chip,
			  unsigned long adr, __u32 datum)
{
	unsigned long timeo = jiffies + HZ;
	struct amd_flash_private *private = map->fldrv_priv;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;
	int times_left;

retry:
	spin_lock_bh(chip->mutex);

	if (chip->state != FL_READY) {
		printk(KERN_INFO "%s: waiting for chip to write, state = %d\n",
		       map->name, chip->state);
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock_bh(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
		printk(KERN_INFO "%s: woke up to write\n", map->name);
		if (signal_pending(current))
			return -EINTR;

		timeo = jiffies + HZ;

		goto retry;
	}

	chip->state = FL_WRITING;

	adr += chip->start;
	ENABLE_VPP(map);
	send_cmd(map, chip->start, CMD_PROGRAM_UNLOCK_DATA);
	wide_write(map, datum, adr);

	times_left = 500000;
	while (times_left-- && flash_is_busy(map, adr, private->interleave)) {
		if (current->need_resched) {
			spin_unlock_bh(chip->mutex);
			schedule();
			spin_lock_bh(chip->mutex);
		}
	}

	if (!times_left) {
		printk(KERN_WARNING "%s: write to 0x%lx timed out!\n",
		       map->name, adr);
		ret = -EIO;
	} else {
		__u32 verify;
		if ((verify = wide_read(map, adr)) != datum) {
			printk(KERN_WARNING "%s: write to 0x%lx failed. "
			       "datum = %x, verify = %x\n",
			       map->name, adr, datum, verify);
			ret = -EIO;
		}
	}

	DISABLE_VPP(map);
	chip->state = FL_READY;
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);

	return ret;
}



static int amd_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
			   size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct amd_flash_private *private = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;
	unsigned long chipstart;

	*retlen = 0;
	if (!len) {
		return 0;
	}

	chipnum = to >> private->chipshift;
	ofs = to - (chipnum << private->chipshift);
	chipstart = private->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write. */
	if (ofs & (map->buswidth - 1)) {
		unsigned long bus_ofs = ofs & ~(map->buswidth - 1);
		int i = ofs - bus_ofs;
		int n = 0;
		u_char tmp_buf[4];
		__u32 datum;

		map->copy_from(map, tmp_buf,
			       bus_ofs + private->chips[chipnum].start,
			       map->buswidth);
		while (len && i < map->buswidth) {
			tmp_buf[i++] = buf[n++];
			len--;
		}

		if (map->buswidth == 2) {
			datum = *(__u16*)tmp_buf;
		} else if (map->buswidth == 4) {
			datum = *(__u32*)tmp_buf;
		} else {
			return -EINVAL;  /* should never happen, but be safe */
		}

		ret = write_one_word(map, &private->chips[chipnum], bus_ofs,
				     datum);
		if (ret) {
			return ret;
		}

		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> private->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == private->numchips) {
				return 0;
			}
		}
	}

	/* We are now aligned, write as much as possible. */
	while (len >= map->buswidth) {
		__u32 datum;

		if (map->buswidth == 1) {
			datum = *(__u8*)buf;
		} else if (map->buswidth == 2) {
			datum = *(__u16*)buf;
		} else if (map->buswidth == 4) {
			datum = *(__u32*)buf;
		} else {
			return -EINVAL;
		}

		ret = write_one_word(map, &private->chips[chipnum], ofs, datum);

		if (ret) {
			return ret;
		}

		ofs += map->buswidth;
		buf += map->buswidth;
		(*retlen) += map->buswidth;
		len -= map->buswidth;

		if (ofs >> private->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == private->numchips) {
				return 0;
			}
			chipstart = private->chips[chipnum].start;
		}
	}

	/* Write any trailing bytes with a read-modify-write. */
	if (len & (map->buswidth - 1)) {
		int i = 0, n = 0;
		u_char tmp_buf[4];
		__u32 datum;

		map->copy_from(map, tmp_buf,
			       ofs + private->chips[chipnum].start,
			       map->buswidth);
		while (len--) {
			tmp_buf[i++] = buf[n++];
		}

		if (map->buswidth == 2) {
			datum = *(__u16*)tmp_buf;
		} else if (map->buswidth == 4) {
			datum = *(__u32*)tmp_buf;
		} else {
			return -EINVAL;  /* should never happen, but be safe */
		}

		ret = write_one_word(map, &private->chips[chipnum], ofs, datum);

		if (ret) {
			return ret;
		}

		(*retlen) += n;
	}

	return 0;
}



static inline int erase_one_block(struct map_info *map, struct flchip *chip,
				  unsigned long adr, u_long size)
{
	unsigned long timeo = jiffies + HZ;
	struct amd_flash_private *private = map->fldrv_priv;
	DECLARE_WAITQUEUE(wait, current);

retry:
	spin_lock_bh(chip->mutex);

	if (chip->state != FL_READY) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock_bh(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);

		if (signal_pending(current)) {
			return -EINTR;
		}

		timeo = jiffies + HZ;

		goto retry;
	}

	chip->state = FL_ERASING;

	adr += chip->start;
	ENABLE_VPP(map);
	send_cmd(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA);
	send_cmd_to_addr(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA_2, adr);

	timeo = jiffies + (HZ * 20);

	spin_unlock_bh(chip->mutex);
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(HZ);
	spin_lock_bh(chip->mutex);

	while (flash_is_busy(map, adr, private->interleave)) {

		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			spin_unlock_bh(chip->mutex);
			printk(KERN_INFO "%s: erase suspended. Sleeping\n",
			       map->name);
			schedule();
			remove_wait_queue(&chip->wq, &wait);

			if (signal_pending(current)) {
				return -EINTR;
			}

			timeo = jiffies + (HZ * 2);
			spin_lock_bh(chip->mutex);
			continue;
		}

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_READY;
			spin_unlock_bh(chip->mutex);
			printk(KERN_WARNING "%s: waiting for erase to complete "
			       "timed out.\n", map->name);
			DISABLE_VPP(map);

			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);

		if (current->need_resched)
			schedule();
		else
			udelay(1);

		spin_lock_bh(chip->mutex);
	}

	/* Verify every single word */
	{
		unsigned long address;
		int error = 0;
		__u8 verify;

		for (address = adr; address < (adr + size); address++) {
			if ((verify = map->read8(map, address)) != 0xFF) {
				error = 1;
				break;
			}
		}
		if (error) {
			chip->state = FL_READY;
			spin_unlock_bh(chip->mutex);
			printk(KERN_WARNING
			       "%s: verify error at 0x%lx, size %ld.\n",
			       map->name, address, size);
			DISABLE_VPP(map);

			return -EIO;
		}
	}

	DISABLE_VPP(map);
	chip->state = FL_READY;
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);

	return 0;
}



static int amd_flash_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct amd_flash_private *private = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum;
	int ret = 0;
	int i;
	int first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if (instr->addr > mtd->size) {
		return -EINVAL;
	}

	if ((instr->len + instr->addr) > mtd->size) {
		return -EINVAL;
	}

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	 * the requested erase. Actually, to save on the calculations,
	 * we skip to the first erase region which starts after the
	 * start of the requested erase, and then go back one.
	 */

	while ((i < mtd->numeraseregions) &&
	       (instr->addr >= regions[i].offset)) {
		i++;
	}
	i--;

	/* OK, now i is pointing at the erase region in which this
	 * erase request starts. Check the start of the requested
	 * erase range is aligned with the erase size which is in
	 * effect here.
	 */

	if (instr->addr & (regions[i].erasesize-1)) {
		return -EINVAL;
	}

	/* Remember the erase region we start on. */

	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while ((i < mtd->numeraseregions) &&
	       ((instr->addr + instr->len) >= regions[i].offset)) {
		i++;
	}

	/* As before, drop back one to point at the region in which
	 * the address actually falls.
	 */

	i--;

	if ((instr->addr + instr->len) & (regions[i].erasesize-1)) {
		return -EINVAL;
	}

	chipnum = instr->addr >> private->chipshift;
	adr = instr->addr - (chipnum << private->chipshift);
	len = instr->len;

	i = first;

	while (len) {
		ret = erase_one_block(map, &private->chips[chipnum], adr,
				      regions[i].erasesize);

		if (ret) {
			return ret;
		}

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		if ((adr % (1 << private->chipshift)) ==
		    ((regions[i].offset + (regions[i].erasesize *
					   regions[i].numblocks))
		     % (1 << private->chipshift))) {
			i++;
		}

		if (adr >> private->chipshift) {
			adr = 0;
			chipnum++;
			if (chipnum >= private->numchips) {
				break;
			}
		}
	}

	instr->state = MTD_ERASE_DONE;
	if (instr->callback) {
		instr->callback(instr);
	}

	return 0;
}
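
/*
 * The erase above completes before returning, so a client may call the
 * erase hook synchronously (a sketch; address and length are illustrative):
 *
 *	struct erase_info ei;
 *
 *	memset(&ei, 0, sizeof(ei));
 *	ei.mtd  = mtd;
 *	ei.addr = 0;
 *	ei.len  = mtd->erasesize;
 *	if (mtd->erase(mtd, &ei) == 0) {
 *		// the block now reads back as all 0xFF
 *	}
 */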


static void amd_flash_sync(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct amd_flash_private *private = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i = 0; !ret && (i < private->numchips); i++) {
		chip = &private->chips[i];

	retry:
		spin_lock_bh(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
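			/* fall through */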
		case FL_SYNCING:
			spin_unlock_bh(chip->mutex);
			break;

		default:
			/* Not an idle state */
			add_wait_queue(&chip->wq, &wait);

			spin_unlock_bh(chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */
	for (i--; i >= 0; i--) {
		chip = &private->chips[i];

		spin_lock_bh(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock_bh(chip->mutex);
	}
}



static int amd_flash_suspend(struct mtd_info *mtd)
{
	printk("amd_flash_suspend(): not implemented!\n");
	return -EINVAL;
}



static void amd_flash_resume(struct mtd_info *mtd)
{
	printk("amd_flash_resume(): not implemented!\n");
}



static void amd_flash_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct amd_flash_private *private = map->fldrv_priv;
	kfree(private);
}

int __init amd_flash_init(void)
{
	register_mtd_chip_driver(&amd_flash_chipdrv);
	return 0;
}

void __exit amd_flash_exit(void)
{
	unregister_mtd_chip_driver(&amd_flash_chipdrv);
}

module_init(amd_flash_init);
module_exit(amd_flash_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jonas Holmberg <jonas.holmberg@axis.com>");
MODULE_DESCRIPTION("Old MTD chip driver for AMD flash chips");