/*
 * drivers/staging/spectra/ffsport.c
 * (export from asuswrt-rt-n18u-9.0.0.4.380.2695 linux-2.6 tree;
 * code-browser navigation chrome removed)
 */
/*
 * NAND Flash Controller Device Driver
 * Copyright (c) 2009, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
19
#include "ffsport.h"
#include "flash.h"
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/log2.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
32
33/**** Helper functions used for Div, Remainder operation on u64 ****/
34
35/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
36* Function:     GLOB_Calc_Used_Bits
37* Inputs:       Power of 2 number
38* Outputs:      Number of Used Bits
39*               0, if the argument is 0
40* Description:  Calculate the number of bits used by a given power of 2 number
41*               Number can be upto 32 bit
42*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
43int GLOB_Calc_Used_Bits(u32 n)
44{
45	int tot_bits = 0;
46
47	if (n >= 1 << 16) {
48		n >>= 16;
49		tot_bits += 16;
50	}
51
52	if (n >= 1 << 8) {
53		n >>=  8;
54		tot_bits +=  8;
55	}
56
57	if (n >= 1 << 4) {
58		n >>=  4;
59		tot_bits +=  4;
60	}
61
62	if (n >= 1 << 2) {
63		n >>=  2;
64		tot_bits +=  2;
65	}
66
67	if (n >= 1 << 1)
68		tot_bits +=  1;
69
70	return ((n == 0) ? (0) : tot_bits);
71}
72
73/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
74* Function:     GLOB_u64_Div
75* Inputs:       Number of u64
76*               A power of 2 number as Division
77* Outputs:      Quotient of the Divisor operation
78* Description:  It divides the address by divisor by using bit shift operation
79*               (essentially without explicitely using "/").
80*               Divisor is a power of 2 number and Divided is of u64
81*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
82u64 GLOB_u64_Div(u64 addr, u32 divisor)
83{
84	return  (u64)(addr >> GLOB_Calc_Used_Bits(divisor));
85}
86
87/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
88* Function:     GLOB_u64_Remainder
89* Inputs:       Number of u64
90*               Divisor Type (1 -PageAddress, 2- BlockAddress)
91* Outputs:      Remainder of the Division operation
92* Description:  It calculates the remainder of a number (of u64) by
93*               divisor(power of 2 number ) by using bit shifting and multiply
94*               operation(essentially without explicitely using "/").
95*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
96u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type)
97{
98	u64 result = 0;
99
100	if (divisor_type == 1) { /* Remainder -- Page */
101		result = (addr >> DeviceInfo.nBitsInPageDataSize);
102		result = result * DeviceInfo.wPageDataSize;
103	} else if (divisor_type == 2) { /* Remainder -- Block */
104		result = (addr >> DeviceInfo.nBitsInBlockDataSize);
105		result = result * DeviceInfo.wBlockDataSize;
106	}
107
108	result = addr - result;
109
110	return result;
111}
112
#define NUM_DEVICES             1	/* number of NAND devices this driver serves */
#define PARTITIONS              8	/* minor numbers (partitions) per gendisk */

#define GLOB_SBD_NAME          "nd"	/* block device name prefix: /dev/nda... */
#define GLOB_SBD_IRQ_NUM       (29)	/* NOTE(review): not referenced in this file */

/* Private ioctl command numbers understood by GLOB_SBD_ioctl(). */
#define GLOB_SBD_IOCTL_GC                        (0x7701)
#define GLOB_SBD_IOCTL_WL                        (0x7702)
#define GLOB_SBD_IOCTL_FORMAT                    (0x7703)
#define GLOB_SBD_IOCTL_ERASE_FLASH               (0x7704)
#define GLOB_SBD_IOCTL_FLUSH_CACHE               (0x7705)
#define GLOB_SBD_IOCTL_COPY_BLK_TABLE            (0x7706)
#define GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE  (0x7707)
#define GLOB_SBD_IOCTL_GET_NAND_INFO             (0x7708)
#define GLOB_SBD_IOCTL_WRITE_DATA                (0x7709)
#define GLOB_SBD_IOCTL_READ_DATA                 (0x770A)
129
/* MiB of flash reserved at the front of the device for an OS image.
 * NOTE(review): the description string says "default 25 MiB" but the
 * initializer here is 0 -- the text looks stale; confirm intended default. */
static int reserved_mb = 0;
module_param(reserved_mb, int, 0);
MODULE_PARM_DESC(reserved_mb, "Reserved space for OS image, in MiB (default 25 MiB)");

/* Verbosity for nand_dbg_print(); runtime-writable via sysfs (mode 0644). */
int nand_debug_level;
module_param(nand_debug_level, int, 0644);
MODULE_PARM_DESC(nand_debug_level, "debug level value: 1-3");

MODULE_LICENSE("GPL");
139
/* Per-device state for one Spectra virtual block device. */
struct spectra_nand_dev {
	struct pci_dev *dev;		/* backing PCI device; not used in this file */
	u64 size;			/* usable capacity in bytes (reserved blocks excluded) */
	u16 users;			/* open count; not maintained in this file */
	spinlock_t qlock;		/* request-queue lock handed to blk_init_queue() */
	void __iomem *ioaddr;  /* Mapped address */
	struct request_queue *queue;	/* block-layer request queue */
	struct task_struct *thread;	/* transfer kthread (spectra_trans_thread) */
	struct gendisk *gd;		/* gendisk registered with add_disk() */
	u8 *tmp_buf;			/* one-NAND-page bounce buffer for partial pages */
};
151

/* Major number obtained from register_blkdev() at module init. */
static int GLOB_SBD_majornum;

/* Driver version string; GLOB_VERSION comes from the project headers. */
static char *GLOB_version = GLOB_VERSION;

/* One slot per supported device (NUM_DEVICES == 1). */
static struct spectra_nand_dev nand_device[NUM_DEVICES];

/* Serializes all FTL access: transfer thread, ioctls, and cache flushes. */
static struct mutex spectra_lock;

/* Blocks reserved for the OS image; recomputed in SBD_setup_device(). */
static int res_blks_os = 1;

/* Device geometry filled by GLOB_FTL_IdentifyDevice().
 * ("indentfy" typo is in the project-wide struct tag.) */
struct spectra_indentfy_dev_tag IdentifyDeviceData;
164
165static int force_flush_cache(void)
166{
167	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
168		__FILE__, __LINE__, __func__);
169
170	if (ERR == GLOB_FTL_Flush_Cache()) {
171		printk(KERN_ERR "Fail to Flush FTL Cache!\n");
172		return -EFAULT;
173	}
174#if CMD_DMA
175		if (glob_ftl_execute_cmds())
176			return -EIO;
177		else
178			return 0;
179#endif
180	return 0;
181}
182
/* User<->kernel descriptor for the READ_DATA/WRITE_DATA page ioctls. */
struct ioctl_rw_page_info {
	u8 *data;		/* userspace buffer, PageDataSize bytes */
	unsigned int page;	/* page index within the data area */
};
187
188static int ioctl_read_page_data(unsigned long arg)
189{
190	u8 *buf;
191	struct ioctl_rw_page_info info;
192	int result = PASS;
193
194	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
195		return -EFAULT;
196
197	buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
198	if (!buf) {
199		printk(KERN_ERR "ioctl_read_page_data: "
200		       "failed to allocate memory\n");
201		return -ENOMEM;
202	}
203
204	mutex_lock(&spectra_lock);
205	result = GLOB_FTL_Page_Read(buf,
206		(u64)info.page * IdentifyDeviceData.PageDataSize);
207	mutex_unlock(&spectra_lock);
208
209	if (copy_to_user((void __user *)info.data, buf,
210			   IdentifyDeviceData.PageDataSize)) {
211		printk(KERN_ERR "ioctl_read_page_data: "
212		       "failed to copy user data\n");
213		kfree(buf);
214		return -EFAULT;
215	}
216
217	kfree(buf);
218	return result;
219}
220
221static int ioctl_write_page_data(unsigned long arg)
222{
223	u8 *buf;
224	struct ioctl_rw_page_info info;
225	int result = PASS;
226
227	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
228		return -EFAULT;
229
230	buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
231	if (!buf) {
232		printk(KERN_ERR "ioctl_write_page_data: "
233		       "failed to allocate memory\n");
234		return -ENOMEM;
235	}
236
237	if (copy_from_user(buf, (void __user *)info.data,
238			   IdentifyDeviceData.PageDataSize)) {
239		printk(KERN_ERR "ioctl_write_page_data: "
240		       "failed to copy user data\n");
241		kfree(buf);
242		return -EFAULT;
243	}
244
245	mutex_lock(&spectra_lock);
246	result = GLOB_FTL_Page_Write(buf,
247		(u64)info.page * IdentifyDeviceData.PageDataSize);
248	mutex_unlock(&spectra_lock);
249
250	kfree(buf);
251	return result;
252}
253
254/* Return how many blocks should be reserved for bad block replacement */
255static int get_res_blk_num_bad_blk(void)
256{
257	return IdentifyDeviceData.wDataBlockNum / 10;
258}
259
260/* Return how many blocks should be reserved for OS image */
261static int get_res_blk_num_os(void)
262{
263	u32 res_blks, blk_size;
264
265	blk_size = IdentifyDeviceData.PageDataSize *
266		IdentifyDeviceData.PagesPerBlock;
267
268	res_blks = (reserved_mb * 1024 * 1024) / blk_size;
269
270	if ((res_blks < 1) || (res_blks >= IdentifyDeviceData.wDataBlockNum))
271		res_blks = 1; /* Reserved 1 block for block table */
272
273	return res_blks;
274}
275
276/* Transfer a full request. */
277static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
278{
279	u64 start_addr, addr;
280	u32 logical_start_sect, hd_start_sect;
281	u32 nsect, hd_sects;
282	u32 rsect, tsect = 0;
283	char *buf;
284	u32 ratio = IdentifyDeviceData.PageDataSize >> 9;
285
286	start_addr = (u64)(blk_rq_pos(req)) << 9;
287	/* Add a big enough offset to prevent the OS Image from
288	*  being accessed or damaged by file system */
289	start_addr += IdentifyDeviceData.PageDataSize *
290			IdentifyDeviceData.PagesPerBlock *
291			res_blks_os;
292
293	if (req->cmd_type & REQ_FLUSH) {
294		if (force_flush_cache()) /* Fail to flush cache */
295			return -EIO;
296		else
297			return 0;
298	}
299
300	if (req->cmd_type != REQ_TYPE_FS)
301		return -EIO;
302
303	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(tr->gd)) {
304		printk(KERN_ERR "Spectra error: request over the NAND "
305			"capacity!sector %d, current_nr_sectors %d, "
306			"while capacity is %d\n",
307			(int)blk_rq_pos(req),
308			blk_rq_cur_sectors(req),
309			(int)get_capacity(tr->gd));
310		return -EIO;
311	}
312
313	logical_start_sect = start_addr >> 9;
314	hd_start_sect = logical_start_sect / ratio;
315	rsect = logical_start_sect - hd_start_sect * ratio;
316
317	addr = (u64)hd_start_sect * ratio * 512;
318	buf = req->buffer;
319	nsect = blk_rq_cur_sectors(req);
320
321	if (rsect)
322		tsect =  (ratio - rsect) < nsect ? (ratio - rsect) : nsect;
323
324	switch (rq_data_dir(req)) {
325	case READ:
326		/* Read the first NAND page */
327		if (rsect) {
328			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
329				printk(KERN_ERR "Error in %s, Line %d\n",
330					__FILE__, __LINE__);
331				return -EIO;
332			}
333			memcpy(buf, tr->tmp_buf + (rsect << 9), tsect << 9);
334			addr += IdentifyDeviceData.PageDataSize;
335			buf += tsect << 9;
336			nsect -= tsect;
337		}
338
339		/* Read the other NAND pages */
340		for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
341			if (GLOB_FTL_Page_Read(buf, addr)) {
342				printk(KERN_ERR "Error in %s, Line %d\n",
343					__FILE__, __LINE__);
344				return -EIO;
345			}
346			addr += IdentifyDeviceData.PageDataSize;
347			buf += IdentifyDeviceData.PageDataSize;
348		}
349
350		/* Read the last NAND pages */
351		if (nsect % ratio) {
352			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
353				printk(KERN_ERR "Error in %s, Line %d\n",
354					__FILE__, __LINE__);
355				return -EIO;
356			}
357			memcpy(buf, tr->tmp_buf, (nsect % ratio) << 9);
358		}
359#if CMD_DMA
360		if (glob_ftl_execute_cmds())
361			return -EIO;
362		else
363			return 0;
364#endif
365		return 0;
366
367	case WRITE:
368		/* Write the first NAND page */
369		if (rsect) {
370			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
371				printk(KERN_ERR "Error in %s, Line %d\n",
372					__FILE__, __LINE__);
373				return -EIO;
374			}
375			memcpy(tr->tmp_buf + (rsect << 9), buf, tsect << 9);
376			if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
377				printk(KERN_ERR "Error in %s, Line %d\n",
378					__FILE__, __LINE__);
379				return -EIO;
380			}
381			addr += IdentifyDeviceData.PageDataSize;
382			buf += tsect << 9;
383			nsect -= tsect;
384		}
385
386		/* Write the other NAND pages */
387		for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
388			if (GLOB_FTL_Page_Write(buf, addr)) {
389				printk(KERN_ERR "Error in %s, Line %d\n",
390					__FILE__, __LINE__);
391				return -EIO;
392			}
393			addr += IdentifyDeviceData.PageDataSize;
394			buf += IdentifyDeviceData.PageDataSize;
395		}
396
397		/* Write the last NAND pages */
398		if (nsect % ratio) {
399			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
400				printk(KERN_ERR "Error in %s, Line %d\n",
401					__FILE__, __LINE__);
402				return -EIO;
403			}
404			memcpy(tr->tmp_buf, buf, (nsect % ratio) << 9);
405			if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
406				printk(KERN_ERR "Error in %s, Line %d\n",
407					__FILE__, __LINE__);
408				return -EIO;
409			}
410		}
411#if CMD_DMA
412		if (glob_ftl_execute_cmds())
413			return -EIO;
414		else
415			return 0;
416#endif
417		return 0;
418
419	default:
420		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
421		return -EIO;
422	}
423}
424
425/* This function is copied from drivers/mtd/mtd_blkdevs.c */
/* This function is copied from drivers/mtd/mtd_blkdevs.c */
/*
 * Transfer kthread: sleeps until GLOB_SBD_request() wakes it, then
 * drains the request queue, passing each request to do_transfer() under
 * spectra_lock.  queue_lock must be held to fetch/end requests but is
 * dropped around the (sleeping) transfer itself.
 */
static int spectra_trans_thread(void *arg)
{
	struct spectra_nand_dev *tr = arg;
	struct request_queue *rq = tr->queue;
	struct request *req = NULL;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC;

	spin_lock_irq(rq->queue_lock);
	while (!kthread_should_stop()) {
		int res;

		if (!req) {
			req = blk_fetch_request(rq);
			if (!req) {
				/* Queue empty: sleep until the request
				 * callback wakes us. */
				set_current_state(TASK_INTERRUPTIBLE);
				spin_unlock_irq(rq->queue_lock);
				schedule();
				spin_lock_irq(rq->queue_lock);
				continue;
			}
		}

		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&spectra_lock);
		res = do_transfer(tr, req);
		mutex_unlock(&spectra_lock);

		spin_lock_irq(rq->queue_lock);

		/* Non-zero return: more segments remain in this request,
		 * keep working on it; zero: fetch a fresh one. */
		if (!__blk_end_request_cur(req, res))
			req = NULL;
	}

	/* Stopping with a request in flight: fail it completely. */
	if (req)
		__blk_end_request_all(req, -EIO);

	spin_unlock_irq(rq->queue_lock);

	return 0;
}
469
470
471/* Request function that "handles clustering". */
472static void GLOB_SBD_request(struct request_queue *rq)
473{
474	struct spectra_nand_dev *pdev = rq->queuedata;
475	wake_up_process(pdev->thread);
476}
477
478static int GLOB_SBD_open(struct block_device *bdev, fmode_t mode)
479
480{
481	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
482		       __FILE__, __LINE__, __func__);
483	return 0;
484}
485
486static int GLOB_SBD_release(struct gendisk *disk, fmode_t mode)
487{
488	int ret;
489
490	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
491		       __FILE__, __LINE__, __func__);
492
493	mutex_lock(&spectra_lock);
494	ret = force_flush_cache();
495	mutex_unlock(&spectra_lock);
496
497	return 0;
498}
499
500static int GLOB_SBD_getgeo(struct block_device *bdev, struct hd_geometry *geo)
501{
502	geo->heads = 4;
503	geo->sectors = 16;
504	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
505
506	nand_dbg_print(NAND_DBG_DEBUG,
507		"heads: %d, sectors: %d, cylinders: %d\n",
508		geo->heads, geo->sectors, geo->cylinders);
509
510	return 0;
511}
512
513int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode,
514		unsigned int cmd, unsigned long arg)
515{
516	int ret;
517
518	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
519		       __FILE__, __LINE__, __func__);
520
521	switch (cmd) {
522	case GLOB_SBD_IOCTL_GC:
523		nand_dbg_print(NAND_DBG_DEBUG,
524			       "Spectra IOCTL: Garbage Collection "
525			       "being performed\n");
526		if (PASS != GLOB_FTL_Garbage_Collection())
527			return -EFAULT;
528		return 0;
529
530	case GLOB_SBD_IOCTL_WL:
531		nand_dbg_print(NAND_DBG_DEBUG,
532			       "Spectra IOCTL: Static Wear Leveling "
533			       "being performed\n");
534		if (PASS != GLOB_FTL_Wear_Leveling())
535			return -EFAULT;
536		return 0;
537
538	case GLOB_SBD_IOCTL_FORMAT:
539		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Flash format "
540			       "being performed\n");
541		if (PASS != GLOB_FTL_Flash_Format())
542			return -EFAULT;
543		return 0;
544
545	case GLOB_SBD_IOCTL_FLUSH_CACHE:
546		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Cache flush "
547			       "being performed\n");
548		mutex_lock(&spectra_lock);
549		ret = force_flush_cache();
550		mutex_unlock(&spectra_lock);
551		return ret;
552
553	case GLOB_SBD_IOCTL_COPY_BLK_TABLE:
554		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
555			       "Copy block table\n");
556		if (copy_to_user((void __user *)arg,
557			get_blk_table_start_addr(),
558			get_blk_table_len()))
559			return -EFAULT;
560		return 0;
561
562	case GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE:
563		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
564			       "Copy wear leveling table\n");
565		if (copy_to_user((void __user *)arg,
566			get_wear_leveling_table_start_addr(),
567			get_wear_leveling_table_len()))
568			return -EFAULT;
569		return 0;
570
571	case GLOB_SBD_IOCTL_GET_NAND_INFO:
572		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
573			       "Get NAND info\n");
574		if (copy_to_user((void __user *)arg, &IdentifyDeviceData,
575			sizeof(IdentifyDeviceData)))
576			return -EFAULT;
577		return 0;
578
579	case GLOB_SBD_IOCTL_WRITE_DATA:
580		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
581			       "Write one page data\n");
582		return ioctl_write_page_data(arg);
583
584	case GLOB_SBD_IOCTL_READ_DATA:
585		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
586			       "Read one page data\n");
587		return ioctl_read_page_data(arg);
588	}
589
590	return -ENOTTY;
591}
592
/*
 * ioctl entry point wired into GLOB_SBD_ops.  Despite the name it is
 * registered as the plain .ioctl handler and simply wraps
 * GLOB_SBD_ioctl() in the Big Kernel Lock (lock_kernel/unlock_kernel
 * from <linux/smp_lock.h>).
 */
int GLOB_SBD_unlocked_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	int ret;

	lock_kernel();
	ret = GLOB_SBD_ioctl(bdev, mode, cmd, arg);
	unlock_kernel();

	return ret;
}
604
605static struct block_device_operations GLOB_SBD_ops = {
606	.owner = THIS_MODULE,
607	.open = GLOB_SBD_open,
608	.release = GLOB_SBD_release,
609	.ioctl = GLOB_SBD_unlocked_ioctl,
610	.getgeo = GLOB_SBD_getgeo,
611};
612
613static int SBD_setup_device(struct spectra_nand_dev *dev, int which)
614{
615	int res_blks;
616	u32 sects;
617
618	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
619		       __FILE__, __LINE__, __func__);
620
621	memset(dev, 0, sizeof(struct spectra_nand_dev));
622
623	nand_dbg_print(NAND_DBG_WARN, "Reserved %d blocks "
624		"for OS image, %d blocks for bad block replacement.\n",
625		get_res_blk_num_os(),
626		get_res_blk_num_bad_blk());
627
628	res_blks = get_res_blk_num_bad_blk() + get_res_blk_num_os();
629
630	dev->size = (u64)IdentifyDeviceData.PageDataSize *
631		IdentifyDeviceData.PagesPerBlock *
632		(IdentifyDeviceData.wDataBlockNum - res_blks);
633
634	res_blks_os = get_res_blk_num_os();
635
636	spin_lock_init(&dev->qlock);
637
638	dev->tmp_buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
639	if (!dev->tmp_buf) {
640		printk(KERN_ERR "Failed to kmalloc memory in %s Line %d, exit.\n",
641			__FILE__, __LINE__);
642		goto out_vfree;
643	}
644
645	dev->queue = blk_init_queue(GLOB_SBD_request, &dev->qlock);
646	if (dev->queue == NULL) {
647		printk(KERN_ERR
648		       "Spectra: Request queue could not be initialized."
649			" Aborting\n ");
650		goto out_vfree;
651	}
652	dev->queue->queuedata = dev;
653
654	/* As Linux block layer doens't support >4KB hardware sector,  */
655	/* Here we force report 512 byte hardware sector size to Kernel */
656	blk_queue_logical_block_size(dev->queue, 512);
657
658	blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH);
659
660	dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd");
661	if (IS_ERR(dev->thread)) {
662		blk_cleanup_queue(dev->queue);
663		unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
664		return PTR_ERR(dev->thread);
665	}
666
667	dev->gd = alloc_disk(PARTITIONS);
668	if (!dev->gd) {
669		printk(KERN_ERR
670		       "Spectra: Could not allocate disk. Aborting \n ");
671		goto out_vfree;
672	}
673	dev->gd->major = GLOB_SBD_majornum;
674	dev->gd->first_minor = which * PARTITIONS;
675	dev->gd->fops = &GLOB_SBD_ops;
676	dev->gd->queue = dev->queue;
677	dev->gd->private_data = dev;
678	snprintf(dev->gd->disk_name, 32, "%s%c", GLOB_SBD_NAME, which + 'a');
679
680	sects = dev->size >> 9;
681	nand_dbg_print(NAND_DBG_WARN, "Capacity sects: %d\n", sects);
682	set_capacity(dev->gd, sects);
683
684	add_disk(dev->gd);
685
686	return 0;
687out_vfree:
688	return -ENOMEM;
689}
690
691/*
692static ssize_t show_nand_block_num(struct device *dev,
693	struct device_attribute *attr, char *buf)
694{
695	return snprintf(buf, PAGE_SIZE, "%d\n",
696		(int)IdentifyDeviceData.wDataBlockNum);
697}
698
699static ssize_t show_nand_pages_per_block(struct device *dev,
700	struct device_attribute *attr, char *buf)
701{
702	return snprintf(buf, PAGE_SIZE, "%d\n",
703		(int)IdentifyDeviceData.PagesPerBlock);
704}
705
706static ssize_t show_nand_page_size(struct device *dev,
707	struct device_attribute *attr, char *buf)
708{
709	return snprintf(buf, PAGE_SIZE, "%d\n",
710		(int)IdentifyDeviceData.PageDataSize);
711}
712
713static DEVICE_ATTR(nand_block_num, 0444, show_nand_block_num, NULL);
714static DEVICE_ATTR(nand_pages_per_block, 0444, show_nand_pages_per_block, NULL);
715static DEVICE_ATTR(nand_page_size, 0444, show_nand_page_size, NULL);
716
717static void create_sysfs_entry(struct device *dev)
718{
719	if (device_create_file(dev, &dev_attr_nand_block_num))
720		printk(KERN_ERR "Spectra: "
721			"failed to create sysfs entry nand_block_num.\n");
722	if (device_create_file(dev, &dev_attr_nand_pages_per_block))
723		printk(KERN_ERR "Spectra: "
724		"failed to create sysfs entry nand_pages_per_block.\n");
725	if (device_create_file(dev, &dev_attr_nand_page_size))
726		printk(KERN_ERR "Spectra: "
727		"failed to create sysfs entry nand_page_size.\n");
728}
729*/
730
731static int GLOB_SBD_init(void)
732{
733	int i;
734
735	/* Set debug output level (0~3) here. 3 is most verbose */
736	printk(KERN_ALERT "Spectra: %s\n", GLOB_version);
737
738	mutex_init(&spectra_lock);
739
740	GLOB_SBD_majornum = register_blkdev(0, GLOB_SBD_NAME);
741	if (GLOB_SBD_majornum <= 0) {
742		printk(KERN_ERR "Unable to get the major %d for Spectra",
743		       GLOB_SBD_majornum);
744		return -EBUSY;
745	}
746
747	if (PASS != GLOB_FTL_Flash_Init()) {
748		printk(KERN_ERR "Spectra: Unable to Initialize Flash Device. "
749		       "Aborting\n");
750		goto out_flash_register;
751	}
752
753	/* create_sysfs_entry(&dev->dev); */
754
755	if (PASS != GLOB_FTL_IdentifyDevice(&IdentifyDeviceData)) {
756		printk(KERN_ERR "Spectra: Unable to Read Flash Device. "
757		       "Aborting\n");
758		goto out_flash_register;
759	} else {
760		nand_dbg_print(NAND_DBG_WARN, "In GLOB_SBD_init: "
761			       "Num blocks=%d, pagesperblock=%d, "
762			       "pagedatasize=%d, ECCBytesPerSector=%d\n",
763		       (int)IdentifyDeviceData.NumBlocks,
764		       (int)IdentifyDeviceData.PagesPerBlock,
765		       (int)IdentifyDeviceData.PageDataSize,
766		       (int)IdentifyDeviceData.wECCBytesPerSector);
767	}
768
769	printk(KERN_ALERT "Spectra: searching block table, please wait ...\n");
770	if (GLOB_FTL_Init() != PASS) {
771		printk(KERN_ERR "Spectra: Unable to Initialize FTL Layer. "
772		       "Aborting\n");
773		goto out_ftl_flash_register;
774	}
775	printk(KERN_ALERT "Spectra: block table has been found.\n");
776
777	for (i = 0; i < NUM_DEVICES; i++)
778		if (SBD_setup_device(&nand_device[i], i) == -ENOMEM)
779			goto out_ftl_flash_register;
780
781	nand_dbg_print(NAND_DBG_DEBUG,
782		       "Spectra: module loaded with major number %d\n",
783		       GLOB_SBD_majornum);
784
785	return 0;
786
787out_ftl_flash_register:
788	GLOB_FTL_Cache_Release();
789out_flash_register:
790	GLOB_FTL_Flash_Release();
791	unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
792	printk(KERN_ERR "Spectra: Module load failed.\n");
793
794	return -ENOMEM;
795}
796
797static void __exit GLOB_SBD_exit(void)
798{
799	int i;
800
801	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
802		       __FILE__, __LINE__, __func__);
803
804	for (i = 0; i < NUM_DEVICES; i++) {
805		struct spectra_nand_dev *dev = &nand_device[i];
806		if (dev->gd) {
807			del_gendisk(dev->gd);
808			put_disk(dev->gd);
809		}
810		if (dev->queue)
811			blk_cleanup_queue(dev->queue);
812		kfree(dev->tmp_buf);
813	}
814
815	unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
816
817	mutex_lock(&spectra_lock);
818	force_flush_cache();
819	mutex_unlock(&spectra_lock);
820
821	GLOB_FTL_Cache_Release();
822
823	GLOB_FTL_Flash_Release();
824
825	nand_dbg_print(NAND_DBG_DEBUG,
826		       "Spectra FTL module (major number %d) unloaded.\n",
827		       GLOB_SBD_majornum);
828}
829
/* Module entry/exit hooks. */
module_init(GLOB_SBD_init);
module_exit(GLOB_SBD_exit);
832