• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/ide/
1/*
2 *  Copyright (C) 1994-1998	   Linus Torvalds & authors (see below)
3 *  Copyright (C) 1998-2002	   Linux ATA Development
4 *				      Andre Hedrick <andre@linux-ide.org>
5 *  Copyright (C) 2003		   Red Hat
6 *  Copyright (C) 2003-2005, 2007  Bartlomiej Zolnierkiewicz
7 */
8
9/*
10 *  Mostly written by Mark Lord <mlord@pobox.com>
11 *                and Gadi Oxman <gadio@netvision.net.il>
12 *                and Andre Hedrick <andre@linux-ide.org>
13 *
14 * This is the IDE/ATA disk driver, as evolved from hd.c and ide.c.
15 */
16
17#include <linux/types.h>
18#include <linux/string.h>
19#include <linux/kernel.h>
20#include <linux/timer.h>
21#include <linux/mm.h>
22#include <linux/interrupt.h>
23#include <linux/major.h>
24#include <linux/errno.h>
25#include <linux/genhd.h>
26#include <linux/slab.h>
27#include <linux/delay.h>
28#include <linux/mutex.h>
29#include <linux/leds.h>
30#include <linux/ide.h>
31
32#include <asm/byteorder.h>
33#include <asm/irq.h>
34#include <asm/uaccess.h>
35#include <asm/io.h>
36#include <asm/div64.h>
37
38#include "ide-disk.h"
39
/*
 * Read/write command opcodes, indexed by ide_tf_set_cmd():
 *   base 0: multi-sector PIO, base 4: single-sector PIO, base 8: DMA;
 *   +2 selects the LBA48 (EXT) variant, +1 selects write over read.
 */
static const u8 ide_rw_cmds[] = {
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
};
54
55static void ide_tf_set_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 dma)
56{
57	u8 index, lba48, write;
58
59	lba48 = (cmd->tf_flags & IDE_TFLAG_LBA48) ? 2 : 0;
60	write = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 1 : 0;
61
62	if (dma) {
63		cmd->protocol = ATA_PROT_DMA;
64		index = 8;
65	} else {
66		cmd->protocol = ATA_PROT_PIO;
67		if (drive->mult_count) {
68			cmd->tf_flags |= IDE_TFLAG_MULTI_PIO;
69			index = 0;
70		} else
71			index = 4;
72	}
73
74	cmd->tf.command = ide_rw_cmds[index + lba48 + write];
75}
76
/*
 * __ide_do_rw_disk() issues READ and WRITE commands to a disk,
 * using LBA if supported, or CHS otherwise, to address sectors.
 */
static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
					sector_t block)
{
	ide_hwif_t *hwif	= drive->hwif;
	u16 nsectors		= (u16)blk_rq_sectors(rq);
	u8 lba48		= !!(drive->dev_flags & IDE_DFLAG_LBA48);
	u8 dma			= !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
	struct ide_cmd		cmd;
	struct ide_taskfile	*tf = &cmd.tf;
	ide_startstop_t		rc;

	/*
	 * Hosts that cannot do LBA48 DMA: use PIO for accesses crossing
	 * the 28-bit boundary, otherwise fall back to LBA28 DMA.
	 */
	if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) {
		if (block + blk_rq_sectors(rq) > 1ULL << 28)
			dma = 0;
		else
			lba48 = 0;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;

	if (drive->dev_flags & IDE_DFLAG_LBA) {
		if (lba48) {
			pr_debug("%s: LBA=0x%012llx\n", drive->name,
					(unsigned long long)block);

			/* low-order taskfile: LBA bits 0-23, count bits 0-7 */
			tf->nsect  = nsectors & 0xff;
			tf->lbal   = (u8) block;
			tf->lbam   = (u8)(block >>  8);
			tf->lbah   = (u8)(block >> 16);
			tf->device = ATA_LBA;

			/* high-order bytes go into the HOB taskfile */
			tf = &cmd.hob;
			tf->nsect = (nsectors >> 8) & 0xff;
			tf->lbal  = (u8)(block >> 24);
			if (sizeof(block) != 4) {
				/* 64-bit sector_t: LBA bits 32-47 as well */
				tf->lbam = (u8)((u64)block >> 32);
				tf->lbah = (u8)((u64)block >> 40);
			}

			cmd.valid.out.hob = IDE_VALID_OUT_HOB;
			cmd.valid.in.hob  = IDE_VALID_IN_HOB;
			cmd.tf_flags |= IDE_TFLAG_LBA48;
		} else {
			/*
			 * LBA28: note that "block" is shifted in place while
			 * the address bytes are peeled off; bits 24-27 end
			 * up in the device register.
			 */
			tf->nsect  = nsectors & 0xff;
			tf->lbal   = block;
			tf->lbam   = block >>= 8;
			tf->lbah   = block >>= 8;
			tf->device = ((block >> 8) & 0xf) | ATA_LBA;
		}
	} else {
		/* CHS addressing for pre-LBA drives */
		unsigned int sect, head, cyl, track;

		track = (int)block / drive->sect;
		sect  = (int)block % drive->sect + 1;
		head  = track % drive->head;
		cyl   = track / drive->head;

		pr_debug("%s: CHS=%u/%u/%u\n", drive->name, cyl, head, sect);

		tf->nsect  = nsectors & 0xff;
		tf->lbal   = sect;
		tf->lbam   = cyl;
		tf->lbah   = cyl >> 8;
		tf->device = head;
	}

	cmd.tf_flags |= IDE_TFLAG_FS;

	if (rq_data_dir(rq))
		cmd.tf_flags |= IDE_TFLAG_WRITE;

	ide_tf_set_cmd(drive, &cmd, dma);
	cmd.rq = rq;

	if (dma == 0) {
		/* PIO transfers need the sg table mapped up front */
		ide_init_sg_cmd(&cmd, nsectors << 9);
		ide_map_sg(drive, &cmd);
	}

	rc = do_rw_taskfile(drive, &cmd);

	if (rc == ide_stopped && dma) {
		/* fallback to PIO */
		cmd.tf_flags |= IDE_TFLAG_DMA_PIO_FALLBACK;
		ide_tf_set_cmd(drive, &cmd, 0);
		ide_init_sg_cmd(&cmd, nsectors << 9);
		rc = do_rw_taskfile(drive, &cmd);
	}

	return rc;
}
174
/*
 * 268435455  == 137439 MB or 28bit limit
 * 320173056  == 163929 MB or 48bit addressing
 * 1073741822 == 549756 MB or 48bit addressing fake drive
 */

/*
 * do_request handler for ATA disks: sanity-check the request, trigger
 * the disk-activity LED, give the host driver a chance to intercept the
 * request (hwif->rw_disk), then issue it via __ide_do_rw_disk().
 */
static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
				      sector_t block)
{
	ide_hwif_t *hwif = drive->hwif;

	/* only unblocked drives and filesystem requests belong here */
	BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED);
	BUG_ON(rq->cmd_type != REQ_TYPE_FS);

	ledtrig_ide_activity();

	pr_debug("%s: %sing: block=%llu, sectors=%u, buffer=0x%08lx\n",
		 drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
		 (unsigned long long)block, blk_rq_sectors(rq),
		 (unsigned long)rq->buffer);

	/* optional host-specific hook run before every disk r/w */
	if (hwif->rw_disk)
		hwif->rw_disk(drive, rq);

	return __ide_do_rw_disk(drive, rq, block);
}
201
202/*
203 * Queries for true maximum capacity of the drive.
204 * Returns maximum LBA address (> 0) of the drive, 0 if failed.
205 */
206static u64 idedisk_read_native_max_address(ide_drive_t *drive, int lba48)
207{
208	struct ide_cmd cmd;
209	struct ide_taskfile *tf = &cmd.tf;
210	u64 addr = 0;
211
212	memset(&cmd, 0, sizeof(cmd));
213	if (lba48)
214		tf->command = ATA_CMD_READ_NATIVE_MAX_EXT;
215	else
216		tf->command = ATA_CMD_READ_NATIVE_MAX;
217	tf->device  = ATA_LBA;
218
219	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
220	cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
221	if (lba48) {
222		cmd.valid.out.hob = IDE_VALID_OUT_HOB;
223		cmd.valid.in.hob  = IDE_VALID_IN_HOB;
224		cmd.tf_flags = IDE_TFLAG_LBA48;
225	}
226
227	ide_no_data_taskfile(drive, &cmd);
228
229	/* if OK, compute maximum address value */
230	if (!(tf->status & ATA_ERR))
231		addr = ide_get_lba_addr(&cmd, lba48) + 1;
232
233	return addr;
234}
235
/*
 * Sets maximum virtual LBA address of the drive.
 * Returns new maximum virtual LBA address (> 0) or 0 on failure.
 */
static u64 idedisk_set_max_address(ide_drive_t *drive, u64 addr_req, int lba48)
{
	struct ide_cmd cmd;
	struct ide_taskfile *tf = &cmd.tf;
	u64 addr_set = 0;

	/* the taskfile takes the highest addressable LBA, not a count */
	addr_req--;

	memset(&cmd, 0, sizeof(cmd));
	/* note: addr_req is shifted in place as the bytes are peeled off */
	tf->lbal     = (addr_req >>  0) & 0xff;
	tf->lbam     = (addr_req >>= 8) & 0xff;
	tf->lbah     = (addr_req >>= 8) & 0xff;
	if (lba48) {
		cmd.hob.lbal = (addr_req >>= 8) & 0xff;
		cmd.hob.lbam = (addr_req >>= 8) & 0xff;
		cmd.hob.lbah = (addr_req >>= 8) & 0xff;
		tf->command  = ATA_CMD_SET_MAX_EXT;
	} else {
		/* LBA28: address bits 24-27 live in the device register */
		tf->device   = (addr_req >>= 8) & 0x0f;
		tf->command  = ATA_CMD_SET_MAX;
	}
	tf->device |= ATA_LBA;

	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
	if (lba48) {
		cmd.valid.out.hob = IDE_VALID_OUT_HOB;
		cmd.valid.in.hob  = IDE_VALID_IN_HOB;
		cmd.tf_flags = IDE_TFLAG_LBA48;
	}

	ide_no_data_taskfile(drive, &cmd);

	/* if OK, compute maximum address value */
	if (!(tf->status & ATA_ERR))
		addr_set = ide_get_lba_addr(&cmd, lba48) + 1;

	return addr_set;
}
279
/*
 * Convert a sector count to decimal megabytes for log output.
 * do_div() is the kernel's 64-bit division helper: it divides its first
 * argument in place.
 */
static unsigned long long sectors_to_MB(unsigned long long n)
{
	n <<= 9;		/* make it bytes */
	do_div(n, 1000000);	/* make it MB */
	return n;
}
286
/*
 * Some disks report total number of sectors instead of
 * maximum sector address.  We list them here.
 */
static const struct drive_list_entry hpa_list[] = {
	{ "ST340823A",	NULL },
	{ "ST320413A",	NULL },
	{ "ST310211A",	NULL },
	{ NULL,		NULL }	/* terminator for ide_in_drive_list() */
};
297
298static u64 ide_disk_hpa_get_native_capacity(ide_drive_t *drive, int lba48)
299{
300	u64 capacity, set_max;
301
302	capacity = drive->capacity64;
303	set_max  = idedisk_read_native_max_address(drive, lba48);
304
305	if (ide_in_drive_list(drive->id, hpa_list)) {
306		if (set_max == capacity + 1)
307			set_max--;
308	}
309
310	return set_max;
311}
312
313static u64 ide_disk_hpa_set_capacity(ide_drive_t *drive, u64 set_max, int lba48)
314{
315	set_max = idedisk_set_max_address(drive, set_max, lba48);
316	if (set_max)
317		drive->capacity64 = set_max;
318
319	return set_max;
320}
321
/*
 * Detect a Host Protected Area by comparing the drive's native capacity
 * with its currently reported one; log it and, when HPA is configured
 * to be disabled, lift the capacity to the native maximum.
 */
static void idedisk_check_hpa(ide_drive_t *drive)
{
	u64 capacity, set_max;
	int lba48 = ata_id_lba48_enabled(drive->id);

	capacity = drive->capacity64;
	set_max  = ide_disk_hpa_get_native_capacity(drive, lba48);

	/* no HPA unless the native capacity exceeds the current one */
	if (set_max <= capacity)
		return;

	/* remember the real capacity for later unlock_native_capacity */
	drive->probed_capacity = set_max;

	printk(KERN_INFO "%s: Host Protected Area detected.\n"
			 "\tcurrent capacity is %llu sectors (%llu MB)\n"
			 "\tnative  capacity is %llu sectors (%llu MB)\n",
			 drive->name,
			 capacity, sectors_to_MB(capacity),
			 set_max, sectors_to_MB(set_max));

	/*
	 * Only actually disable the HPA when IDE_DFLAG_NOHPA is set.
	 * NOTE(review): presumably set from a user "nohpa" option --
	 * confirm where the flag originates.
	 */
	if ((drive->dev_flags & IDE_DFLAG_NOHPA) == 0)
		return;

	set_max = ide_disk_hpa_set_capacity(drive, set_max, lba48);
	if (set_max)
		printk(KERN_INFO "%s: Host Protected Area disabled.\n",
				 drive->name);
}
350
/*
 * Determine the drive's capacity (in sectors) from its IDENTIFY data,
 * set IDE_DFLAG_LBA when LBA addressing is usable, and apply the
 * 28-bit clamp / no-LBA48-DMA host workarounds.  Always returns 0.
 */
static int ide_disk_get_capacity(ide_drive_t *drive)
{
	u16 *id = drive->id;
	int lba;

	if (ata_id_lba48_enabled(id)) {
		/* drive speaks 48-bit LBA */
		lba = 1;
		drive->capacity64 = ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
	} else if (ata_id_has_lba(id) && ata_id_is_lba_capacity_ok(id)) {
		/* drive speaks 28-bit LBA */
		lba = 1;
		drive->capacity64 = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	} else {
		/* drive speaks boring old 28-bit CHS */
		lba = 0;
		drive->capacity64 = drive->cyl * drive->head * drive->sect;
	}

	drive->probed_capacity = drive->capacity64;

	if (lba) {
		drive->dev_flags |= IDE_DFLAG_LBA;

		/*
		* If this device supports the Host Protected Area feature set,
		* then we may need to change our opinion about its capacity.
		*/
		if (ata_id_hpa_enabled(id))
			idedisk_check_hpa(drive);
	}

	/* limit drive capacity to 137GB if LBA48 cannot be used */
	if ((drive->dev_flags & IDE_DFLAG_LBA48) == 0 &&
	    drive->capacity64 > 1ULL << 28) {
		printk(KERN_WARNING "%s: cannot use LBA48 - full capacity "
		       "%llu sectors (%llu MB)\n",
		       drive->name, (unsigned long long)drive->capacity64,
		       sectors_to_MB(drive->capacity64));
		drive->probed_capacity = drive->capacity64 = 1ULL << 28;
	}

	/*
	 * Hosts that cannot DMA past the 28-bit boundary keep LBA48 only
	 * if the capacity actually requires it (PIO is used beyond 2^28).
	 */
	if ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) &&
	    (drive->dev_flags & IDE_DFLAG_LBA48)) {
		if (drive->capacity64 > 1ULL << 28) {
			printk(KERN_INFO "%s: cannot use LBA48 DMA - PIO mode"
					 " will be used for accessing sectors "
					 "> %u\n", drive->name, 1 << 28);
		} else
			drive->dev_flags &= ~IDE_DFLAG_LBA48;
	}

	return 0;
}
405
406static void ide_disk_unlock_native_capacity(ide_drive_t *drive)
407{
408	u16 *id = drive->id;
409	int lba48 = ata_id_lba48_enabled(id);
410
411	if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 ||
412	    ata_id_hpa_enabled(id) == 0)
413		return;
414
415	/*
416	 * according to the spec the SET MAX ADDRESS command shall be
417	 * immediately preceded by a READ NATIVE MAX ADDRESS command
418	 */
419	if (!ide_disk_hpa_get_native_capacity(drive, lba48))
420		return;
421
422	if (ide_disk_hpa_set_capacity(drive, drive->probed_capacity, lba48))
423		drive->dev_flags |= IDE_DFLAG_NOHPA; /* disable HPA on resume */
424}
425
426static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
427{
428	ide_drive_t *drive = q->queuedata;
429	struct ide_cmd *cmd;
430
431	if (!(rq->cmd_flags & REQ_FLUSH))
432		return BLKPREP_OK;
433
434	cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
435
436	BUG_ON(cmd == NULL);
437
438	memset(cmd, 0, sizeof(*cmd));
439	if (ata_id_flush_ext_enabled(drive->id) &&
440	    (drive->capacity64 >= (1UL << 28)))
441		cmd->tf.command = ATA_CMD_FLUSH_EXT;
442	else
443		cmd->tf.command = ATA_CMD_FLUSH;
444	cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
445	cmd->tf_flags = IDE_TFLAG_DYN;
446	cmd->protocol = ATA_PROT_NODATA;
447
448	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
449	rq->special = cmd;
450	cmd->rq = rq;
451
452	return BLKPREP_OK;
453}
454
/* getter for the "multcount" setting (reads drive->mult_count) */
ide_devset_get(multcount, mult_count);
456
457/*
458 * This is tightly woven into the driver->do_special can not touch.
459 * DON'T do it again until a total personality rewrite is committed.
460 */
461static int set_multcount(ide_drive_t *drive, int arg)
462{
463	struct request *rq;
464	int error;
465
466	if (arg < 0 || arg > (drive->id[ATA_ID_MAX_MULTSECT] & 0xff))
467		return -EINVAL;
468
469	if (drive->special_flags & IDE_SFLAG_SET_MULTMODE)
470		return -EBUSY;
471
472	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
473	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
474
475	drive->mult_req = arg;
476	drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
477	error = blk_execute_rq(drive->queue, NULL, rq, 0);
478	blk_put_request(rq);
479
480	return (drive->mult_count == arg) ? 0 : -EIO;
481}
482
/* getter for the "nowerr" setting (reports IDE_DFLAG_NOWERR) */
ide_devset_get_flag(nowerr, IDE_DFLAG_NOWERR);
484
485static int set_nowerr(ide_drive_t *drive, int arg)
486{
487	if (arg < 0 || arg > 1)
488		return -EINVAL;
489
490	if (arg)
491		drive->dev_flags |= IDE_DFLAG_NOWERR;
492	else
493		drive->dev_flags &= ~IDE_DFLAG_NOWERR;
494
495	drive->bad_wstat = arg ? BAD_R_STAT : BAD_W_STAT;
496
497	return 0;
498}
499
500static int ide_do_setfeature(ide_drive_t *drive, u8 feature, u8 nsect)
501{
502	struct ide_cmd cmd;
503
504	memset(&cmd, 0, sizeof(cmd));
505	cmd.tf.feature = feature;
506	cmd.tf.nsect   = nsect;
507	cmd.tf.command = ATA_CMD_SET_FEATURES;
508	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
509	cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
510
511	return ide_no_data_taskfile(drive, &cmd);
512}
513
/*
 * Choose the block-layer ordered (barrier/flush) mode for the queue,
 * based on whether the write cache is enabled and whether the drive
 * supports the flush commands needed to honor barriers.
 */
static void update_ordered(ide_drive_t *drive)
{
	u16 *id = drive->id;
	unsigned ordered = QUEUE_ORDERED_NONE;

	if (drive->dev_flags & IDE_DFLAG_WCACHE) {
		unsigned long long capacity;
		int barrier;
		/*
		 * We must avoid issuing commands a drive does not
		 * understand or we may crash it. We check flush cache
		 * is supported. We also check we have the LBA48 flush
		 * cache if the drive capacity is too large. By this
		 * time we have trimmed the drive capacity if LBA48 is
		 * not available so we don't need to recheck that.
		 */
		capacity = ide_gd_capacity(drive);
		barrier = ata_id_flush_enabled(id) &&
			(drive->dev_flags & IDE_DFLAG_NOFLUSH) == 0 &&
			((drive->dev_flags & IDE_DFLAG_LBA48) == 0 ||
			 capacity <= (1ULL << 28) ||
			 ata_id_flush_ext_enabled(id));

		printk(KERN_INFO "%s: cache flushes %ssupported\n",
		       drive->name, barrier ? "" : "not ");

		if (barrier) {
			ordered = QUEUE_ORDERED_DRAIN_FLUSH;
			/* flush requests become taskfiles in the prep fn */
			blk_queue_prep_rq(drive->queue, idedisk_prep_fn);
		}
	} else
		/* no write cache: draining the queue is ordering enough */
		ordered = QUEUE_ORDERED_DRAIN;

	blk_queue_ordered(drive->queue, ordered);
}
549
/* getter for the "wcache" setting (reports IDE_DFLAG_WCACHE) */
ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE);
551
/*
 * Enable/disable the drive's write cache via SET FEATURES, keep
 * IDE_DFLAG_WCACHE in sync, then recompute the queue's ordered mode.
 *
 * Returns 0 on success, -EINVAL for an out-of-range argument.  If the
 * drive cannot flush its cache the feature is never issued and the
 * initial err value of 1 is returned (non-zero = not done).
 */
static int set_wcache(ide_drive_t *drive, int arg)
{
	int err = 1;

	if (arg < 0 || arg > 1)
		return -EINVAL;

	if (ata_id_flush_enabled(drive->id)) {
		err = ide_do_setfeature(drive,
			arg ? SETFEATURES_WC_ON : SETFEATURES_WC_OFF, 0);
		if (err == 0) {
			/* only flip the flag once the drive accepted it */
			if (arg)
				drive->dev_flags |= IDE_DFLAG_WCACHE;
			else
				drive->dev_flags &= ~IDE_DFLAG_WCACHE;
		}
	}

	/* the barrier/flush policy depends on the new wcache state */
	update_ordered(drive);

	return err;
}
574
575static int do_idedisk_flushcache(ide_drive_t *drive)
576{
577	struct ide_cmd cmd;
578
579	memset(&cmd, 0, sizeof(cmd));
580	if (ata_id_flush_ext_enabled(drive->id))
581		cmd.tf.command = ATA_CMD_FLUSH_EXT;
582	else
583		cmd.tf.command = ATA_CMD_FLUSH;
584	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
585	cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
586
587	return ide_no_data_taskfile(drive, &cmd);
588}
589
/* getter for the "acoustic" setting (reads drive->acoustic) */
ide_devset_get(acoustic, acoustic);
591
592static int set_acoustic(ide_drive_t *drive, int arg)
593{
594	if (arg < 0 || arg > 254)
595		return -EINVAL;
596
597	ide_do_setfeature(drive,
598		arg ? SETFEATURES_AAM_ON : SETFEATURES_AAM_OFF, arg);
599
600	drive->acoustic = arg;
601
602	return 0;
603}
604
/* getter for the "address" setting (reports IDE_DFLAG_LBA48) */
ide_devset_get_flag(addressing, IDE_DFLAG_LBA48);
606
607/*
608 * drive->addressing:
609 *	0: 28-bit
610 *	1: 48-bit
611 *	2: 48-bit capable doing 28-bit
612 */
613static int set_addressing(ide_drive_t *drive, int arg)
614{
615	if (arg < 0 || arg > 2)
616		return -EINVAL;
617
618	if (arg && ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48) ||
619	    ata_id_lba48_enabled(drive->id) == 0))
620		return -EIO;
621
622	if (arg == 2)
623		arg = 0;
624
625	if (arg)
626		drive->dev_flags |= IDE_DFLAG_LBA48;
627	else
628		drive->dev_flags &= ~IDE_DFLAG_LBA48;
629
630	return 0;
631}
632
/* register the sysfs/ioctl-visible device settings (get/set pairs) */
ide_ext_devset_rw(acoustic, acoustic);
ide_ext_devset_rw(address, addressing);
ide_ext_devset_rw(multcount, multcount);
ide_ext_devset_rw(wcache, wcache);

/* "nowerr" takes effect synchronously */
ide_ext_devset_rw_sync(nowerr, nowerr);
639
/*
 * ide-gd "check" hook: unconditionally reports 1.
 * NOTE(review): presumably means "device usable/present" -- confirm
 * against the ide_disk_ops contract in the ide-gd driver.
 */
static int ide_disk_check(ide_drive_t *drive, const char *s)
{
	return 1;
}
644
/*
 * Per-device setup for ATA disks: register with /proc, enable LBA48 if
 * possible, size the request queue, probe capacity/geometry, report the
 * drive in the log, and switch on the write cache.
 */
static void ide_disk_setup(ide_drive_t *drive)
{
	struct ide_disk_obj *idkp = drive->driver_data;
	struct request_queue *q = drive->queue;
	ide_hwif_t *hwif = drive->hwif;
	u16 *id = drive->id;
	char *m = (char *)&id[ATA_ID_PROD];	/* model string from IDENTIFY */
	unsigned long long capacity;

	ide_proc_register_driver(drive, idkp->driver);

	/* nothing more can be configured without valid IDENTIFY data */
	if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0)
		return;

	if (drive->dev_flags & IDE_DFLAG_REMOVABLE) {
		/*
		 * Removable disks (eg. SYQUEST); ignore 'WD' drives
		 */
		if (m[0] != 'W' || m[1] != 'D')
			drive->dev_flags |= IDE_DFLAG_DOORLOCKING;
	}

	/* try to enable 48-bit addressing (a no-op if unsupported) */
	(void)set_addressing(drive, 1);

	if (drive->dev_flags & IDE_DFLAG_LBA48) {
		int max_s = 2048;

		/* never exceed what the host interface takes per request */
		if (max_s > hwif->rqsize)
			max_s = hwif->rqsize;

		blk_queue_max_hw_sectors(q, max_s);
	}

	printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
	       queue_max_sectors(q) / 2);

	if (ata_id_is_ssd(id))
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);

	/* calculate drive capacity, and select LBA if possible */
	ide_disk_get_capacity(drive);

	/*
	 * if possible, give fdisk access to more of the drive,
	 * by correcting bios_cyls:
	 */
	capacity = ide_gd_capacity(drive);

	if ((drive->dev_flags & IDE_DFLAG_FORCED_GEOM) == 0) {
		if (ata_id_lba48_enabled(drive->id)) {
			/* compatibility */
			drive->bios_sect = 63;
			drive->bios_head = 255;
		}

		if (drive->bios_sect && drive->bios_head) {
			unsigned int cap0 = capacity; /* truncate to 32 bits */
			unsigned int cylsz, cyl;

			if (cap0 != capacity)
				drive->bios_cyl = 65535;
			else {
				cylsz = drive->bios_sect * drive->bios_head;
				cyl = cap0 / cylsz;
				if (cyl > 65535)
					cyl = 65535;
				if (cyl > drive->bios_cyl)
					drive->bios_cyl = cyl;
			}
		}
	}
	printk(KERN_INFO "%s: %llu sectors (%llu MB)",
			 drive->name, capacity, sectors_to_MB(capacity));

	/* Only print cache size when it was specified */
	if (id[ATA_ID_BUF_SIZE])
		printk(KERN_CONT " w/%dKiB Cache", id[ATA_ID_BUF_SIZE] / 2);

	printk(KERN_CONT ", CHS=%d/%d/%d\n",
			 drive->bios_cyl, drive->bios_head, drive->bios_sect);

	/* write cache enabled? */
	if ((id[ATA_ID_CSFO] & 1) || ata_id_wcache_enabled(id))
		drive->dev_flags |= IDE_DFLAG_WCACHE;

	set_wcache(drive, 1);

	/* refuse to attach drives with nonsensical CHS geometry */
	if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 &&
	    (drive->head == 0 || drive->head > 16)) {
		printk(KERN_ERR "%s: invalid geometry: %d physical heads?\n",
			drive->name, drive->head);
		drive->dev_flags &= ~IDE_DFLAG_ATTACH;
	} else
		drive->dev_flags |= IDE_DFLAG_ATTACH;
}
740
741static void ide_disk_flush(ide_drive_t *drive)
742{
743	if (ata_id_flush_enabled(drive->id) == 0 ||
744	    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0)
745		return;
746
747	if (do_idedisk_flushcache(drive))
748		printk(KERN_INFO "%s: wcache flush failed!\n", drive->name);
749}
750
/* ide-gd init_media hook: nothing to do for ATA disks, report success */
static int ide_disk_init_media(ide_drive_t *drive, struct gendisk *disk)
{
	return 0;
}
755
756static int ide_disk_set_doorlock(ide_drive_t *drive, struct gendisk *disk,
757				 int on)
758{
759	struct ide_cmd cmd;
760	int ret;
761
762	if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0)
763		return 0;
764
765	memset(&cmd, 0, sizeof(cmd));
766	cmd.tf.command = on ? ATA_CMD_MEDIA_LOCK : ATA_CMD_MEDIA_UNLOCK;
767	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
768	cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
769
770	ret = ide_no_data_taskfile(drive, &cmd);
771
772	if (ret)
773		drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
774
775	return ret;
776}
777
/* ide-gd operations vector for regular ATA disks */
const struct ide_disk_ops ide_ata_disk_ops = {
	.check			= ide_disk_check,
	.unlock_native_capacity	= ide_disk_unlock_native_capacity,
	.get_capacity		= ide_disk_get_capacity,
	.setup			= ide_disk_setup,
	.flush			= ide_disk_flush,
	.init_media		= ide_disk_init_media,
	.set_doorlock		= ide_disk_set_doorlock,
	.do_request		= ide_do_rw_disk,
	.ioctl			= ide_disk_ioctl,
};
789