1/*
2 *      sd.c Copyright (C) 1992 Drew Eckhardt
3 *           Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
4 *
5 *      Linux scsi disk driver
6 *              Initial versions: Drew Eckhardt
7 *              Subsequent revisions: Eric Youngdale
8 *	Modification history:
9 *       - Drew Eckhardt <drew@colorado.edu> original
10 *       - Eric Youngdale <eric@andante.org> add scatter-gather, multiple
11 *         outstanding request, and other enhancements.
12 *         Support loadable low-level scsi drivers.
13 *       - Jirka Hanika <geo@ff.cuni.cz> support more scsi disks using
14 *         eight major numbers.
15 *       - Richard Gooch <rgooch@atnf.csiro.au> support devfs.
16 *	 - Torben Mathiasen <tmm@image.dk> Resource allocation fixes in
17 *	   sd_init and cleanups.
18 *	 - Alex Davis <letmein@erols.com> Fix problem where partition info
19 *	   was not being read in sd_open. Fix problem where removable media
20 *	   could be ejected after sd_open.
21 *	 - Douglas Gilbert <dgilbert@interlog.com> cleanup for lk 2.5.x
22 *	 - Badari Pulavarty <pbadari@us.ibm.com>, Matthew Wilcox
23 *	   <willy@debian.org>, Kurt Garloff <garloff@suse.de>:
24 *	   Support 32k/1M disks.
25 *
26 *	Logging policy (needs CONFIG_SCSI_LOGGING defined):
27 *	 - setting up transfer: SCSI_LOG_HLQUEUE levels 1 and 2
28 *	 - end of transfer (bh + scsi_lib): SCSI_LOG_HLCOMPLETE level 1
29 *	 - entering sd_ioctl: SCSI_LOG_IOCTL level 1
30 *	 - entering other commands: SCSI_LOG_HLQUEUE level 3
31 *	Note: when the logging level is set by the user, it must be greater
32 *	than the level indicated above to trigger output.
33 */
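
/*
 * Example of the rule above, assuming the stock logging interface of
 * scsi_mod (the scsi_logging_level module parameter, also exposed as
 * /proc/sys/dev/scsi/logging_level when CONFIG_SCSI_LOGGING=y): to see
 * the level-2 HLQUEUE "setting up transfer" messages, the hlqueue bits
 * of that value must be set to something greater than 2.
 */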
34
35#include <linux/module.h>
36#include <linux/fs.h>
37#include <linux/kernel.h>
38#include <linux/mm.h>
39#include <linux/bio.h>
40#include <linux/genhd.h>
41#include <linux/hdreg.h>
42#include <linux/errno.h>
43#include <linux/idr.h>
44#include <linux/interrupt.h>
45#include <linux/init.h>
46#include <linux/blkdev.h>
47#include <linux/blkpg.h>
48#include <linux/delay.h>
49#include <linux/smp_lock.h>
50#include <linux/mutex.h>
51#include <linux/string_helpers.h>
52#include <linux/async.h>
53#include <linux/slab.h>
54#include <asm/uaccess.h>
55#include <asm/unaligned.h>
56
57#include <scsi/scsi.h>
58#include <scsi/scsi_cmnd.h>
59#include <scsi/scsi_dbg.h>
60#include <scsi/scsi_device.h>
61#include <scsi/scsi_driver.h>
62#include <scsi/scsi_eh.h>
63#include <scsi/scsi_host.h>
64#include <scsi/scsi_ioctl.h>
65#include <scsi/scsicam.h>
66
67#include "sd.h"
68#include "scsi_logging.h"
69
70MODULE_AUTHOR("Eric Youngdale");
71MODULE_DESCRIPTION("SCSI disk (sd) driver");
72MODULE_LICENSE("GPL");
73
74MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK0_MAJOR);
75MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK1_MAJOR);
76MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK2_MAJOR);
77MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK3_MAJOR);
78MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK4_MAJOR);
79MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK5_MAJOR);
80MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK6_MAJOR);
81MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK7_MAJOR);
82MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK8_MAJOR);
83MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK9_MAJOR);
84MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK10_MAJOR);
85MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK11_MAJOR);
86MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR);
87MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR);
88MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR);
89MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR);
90MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
91MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
92MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
93
94#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
95#define SD_MINORS	16
96#else
97#define SD_MINORS	0
98#endif
99
100static int  sd_revalidate_disk(struct gendisk *);
101static void sd_unlock_native_capacity(struct gendisk *disk);
102static int  sd_probe(struct device *);
103static int  sd_remove(struct device *);
104static void sd_shutdown(struct device *);
105static int sd_suspend(struct device *, pm_message_t state);
106static int sd_resume(struct device *);
107static void sd_rescan(struct device *);
108static int sd_done(struct scsi_cmnd *);
109static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
110static void scsi_disk_release(struct device *cdev);
111static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
112static void sd_print_result(struct scsi_disk *, int);
113
114static DEFINE_SPINLOCK(sd_index_lock);
115static DEFINE_IDA(sd_index_ida);
116
117/* This mutex is used to mediate the 0->1 reference get in the
118 * face of object destruction (i.e. we can't allow a get on an
119 * object after last put). */
120static DEFINE_MUTEX(sd_ref_mutex);
121
122static struct kmem_cache *sd_cdb_cache;
123static mempool_t *sd_cdb_pool;
124
125static const char *sd_cache_types[] = {
126	"write through", "none", "write back",
127	"write back, no read (daft)"
128};
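
/*
 * The array index doubles as the cache mode encoding used by
 * sd_store_cache_type() below: bit 0 of the index is RCD (read cache
 * disable) and bit 1 is WCE (write cache enable).  "write back", for
 * example, is index 2, i.e. WCE=1 RCD=0.
 */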
129
130static ssize_t
131sd_store_cache_type(struct device *dev, struct device_attribute *attr,
132		    const char *buf, size_t count)
133{
134	int i, ct = -1, rcd, wce, sp;
135	struct scsi_disk *sdkp = to_scsi_disk(dev);
136	struct scsi_device *sdp = sdkp->device;
137	char buffer[64];
138	char *buffer_data;
139	struct scsi_mode_data data;
140	struct scsi_sense_hdr sshdr;
141	int len;
142
143	if (sdp->type != TYPE_DISK)
144		/* no cache control on RBC devices; theoretically they
145		 * can do it, but there are probably so many exceptions
146		 * that it's not worth the risk */
147		return -EINVAL;
148
149	for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) {
150		len = strlen(sd_cache_types[i]);
151		if (strncmp(sd_cache_types[i], buf, len) == 0 &&
152		    buf[len] == '\n') {
153			ct = i;
154			break;
155		}
156	}
157	if (ct < 0)
158		return -EINVAL;
159	rcd = ct & 0x01 ? 1 : 0;
160	wce = ct & 0x02 ? 1 : 0;
161	if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
162			    SD_MAX_RETRIES, &data, NULL))
163		return -EINVAL;
164	len = min_t(size_t, sizeof(buffer), data.length - data.header_length -
165		  data.block_descriptor_length);
166	buffer_data = buffer + data.header_length +
167		data.block_descriptor_length;
168	buffer_data[2] &= ~0x05;
169	buffer_data[2] |= wce << 2 | rcd;
170	sp = buffer_data[0] & 0x80 ? 1 : 0;
171
172	if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
173			     SD_MAX_RETRIES, &data, &sshdr)) {
174		if (scsi_sense_valid(&sshdr))
175			sd_print_sense_hdr(sdkp, &sshdr);
176		return -EINVAL;
177	}
178	revalidate_disk(sdkp->disk);
179	return count;
180}
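
/*
 * A minimal usage sketch, assuming the usual sysfs layout of the
 * "scsi_disk" class declared below:
 *
 *	echo "write back" > /sys/class/scsi_disk/0:0:0:0/cache_type
 *
 * The newline appended by echo is required, because the matching above
 * only accepts "<cache type>\n".
 */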
181
182static ssize_t
183sd_store_manage_start_stop(struct device *dev, struct device_attribute *attr,
184			   const char *buf, size_t count)
185{
186	struct scsi_disk *sdkp = to_scsi_disk(dev);
187	struct scsi_device *sdp = sdkp->device;
188
189	if (!capable(CAP_SYS_ADMIN))
190		return -EACCES;
191
192	sdp->manage_start_stop = simple_strtoul(buf, NULL, 10);
193
194	return count;
195}
196
197static ssize_t
198sd_store_allow_restart(struct device *dev, struct device_attribute *attr,
199		       const char *buf, size_t count)
200{
201	struct scsi_disk *sdkp = to_scsi_disk(dev);
202	struct scsi_device *sdp = sdkp->device;
203
204	if (!capable(CAP_SYS_ADMIN))
205		return -EACCES;
206
207	if (sdp->type != TYPE_DISK)
208		return -EINVAL;
209
210	sdp->allow_restart = simple_strtoul(buf, NULL, 10);
211
212	return count;
213}
214
215static ssize_t
216sd_show_cache_type(struct device *dev, struct device_attribute *attr,
217		   char *buf)
218{
219	struct scsi_disk *sdkp = to_scsi_disk(dev);
220	int ct = sdkp->RCD + 2*sdkp->WCE;
221
222	return snprintf(buf, 40, "%s\n", sd_cache_types[ct]);
223}
224
225static ssize_t
226sd_show_fua(struct device *dev, struct device_attribute *attr, char *buf)
227{
228	struct scsi_disk *sdkp = to_scsi_disk(dev);
229
230	return snprintf(buf, 20, "%u\n", sdkp->DPOFUA);
231}
232
233static ssize_t
234sd_show_manage_start_stop(struct device *dev, struct device_attribute *attr,
235			  char *buf)
236{
237	struct scsi_disk *sdkp = to_scsi_disk(dev);
238	struct scsi_device *sdp = sdkp->device;
239
240	return snprintf(buf, 20, "%u\n", sdp->manage_start_stop);
241}
242
243static ssize_t
244sd_show_allow_restart(struct device *dev, struct device_attribute *attr,
245		      char *buf)
246{
247	struct scsi_disk *sdkp = to_scsi_disk(dev);
248
249	return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart);
250}
251
252static ssize_t
253sd_show_protection_type(struct device *dev, struct device_attribute *attr,
254			char *buf)
255{
256	struct scsi_disk *sdkp = to_scsi_disk(dev);
257
258	return snprintf(buf, 20, "%u\n", sdkp->protection_type);
259}
260
261static ssize_t
262sd_show_app_tag_own(struct device *dev, struct device_attribute *attr,
263		    char *buf)
264{
265	struct scsi_disk *sdkp = to_scsi_disk(dev);
266
267	return snprintf(buf, 20, "%u\n", sdkp->ATO);
268}
269
270static ssize_t
271sd_show_thin_provisioning(struct device *dev, struct device_attribute *attr,
272			  char *buf)
273{
274	struct scsi_disk *sdkp = to_scsi_disk(dev);
275
276	return snprintf(buf, 20, "%u\n", sdkp->thin_provisioning);
277}
278
279static struct device_attribute sd_disk_attrs[] = {
280	__ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
281	       sd_store_cache_type),
282	__ATTR(FUA, S_IRUGO, sd_show_fua, NULL),
283	__ATTR(allow_restart, S_IRUGO|S_IWUSR, sd_show_allow_restart,
284	       sd_store_allow_restart),
285	__ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop,
286	       sd_store_manage_start_stop),
287	__ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL),
288	__ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
289	__ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
290	__ATTR_NULL,
291};
292
293static struct class sd_disk_class = {
294	.name		= "scsi_disk",
295	.owner		= THIS_MODULE,
296	.dev_release	= scsi_disk_release,
297	.dev_attrs	= sd_disk_attrs,
298};
299
300static struct scsi_driver sd_template = {
301	.owner			= THIS_MODULE,
302	.gendrv = {
303		.name		= "sd",
304		.probe		= sd_probe,
305		.remove		= sd_remove,
306		.suspend	= sd_suspend,
307		.resume		= sd_resume,
308		.shutdown	= sd_shutdown,
309	},
310	.rescan			= sd_rescan,
311	.done			= sd_done,
312};
313
314/*
315 * Device no to disk mapping:
316 *
317 *       major         disc2     disc  p1
318 *   |............|.............|....|....| <- dev_t
319 *    31        20 19          8 7  4 3  0
320 *
321 * Inside a major we have 16k disks, but they are mapped non-
322 * contiguously. The first 16 disks belong to major0, the next
323 * 16 to major1, ... Disk 256 is on major0 again, disk 272
324 * on major1, ...
325 * As we stay compatible with our numbering scheme, we can reuse
326 * the well-known SCSI majors 8, 65--71, 128--135.
327 */
328static int sd_major(int major_idx)
329{
330	switch (major_idx) {
331	case 0:
332		return SCSI_DISK0_MAJOR;
333	case 1 ... 7:
334		return SCSI_DISK1_MAJOR + major_idx - 1;
335	case 8 ... 15:
336		return SCSI_DISK8_MAJOR + major_idx - 8;
337	default:
338		BUG();
339		return 0;	/* shut up gcc */
340	}
341}
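
/*
 * A worked example, assuming the usual values from include/linux/major.h
 * (SCSI_DISK0_MAJOR == 8, SCSI_DISK1_MAJOR == 65, SCSI_DISK8_MAJOR == 128):
 *
 *	sd_major(0)  ==   8
 *	sd_major(1)  ==  65
 *	sd_major(7)  ==  71
 *	sd_major(8)  == 128
 *	sd_major(15) == 135
 */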
342
343static struct scsi_disk *__scsi_disk_get(struct gendisk *disk)
344{
345	struct scsi_disk *sdkp = NULL;
346
347	if (disk->private_data) {
348		sdkp = scsi_disk(disk);
349		if (scsi_device_get(sdkp->device) == 0)
350			get_device(&sdkp->dev);
351		else
352			sdkp = NULL;
353	}
354	return sdkp;
355}
356
357static struct scsi_disk *scsi_disk_get(struct gendisk *disk)
358{
359	struct scsi_disk *sdkp;
360
361	mutex_lock(&sd_ref_mutex);
362	sdkp = __scsi_disk_get(disk);
363	mutex_unlock(&sd_ref_mutex);
364	return sdkp;
365}
366
367static struct scsi_disk *scsi_disk_get_from_dev(struct device *dev)
368{
369	struct scsi_disk *sdkp;
370
371	mutex_lock(&sd_ref_mutex);
372	sdkp = dev_get_drvdata(dev);
373	if (sdkp)
374		sdkp = __scsi_disk_get(sdkp->disk);
375	mutex_unlock(&sd_ref_mutex);
376	return sdkp;
377}
378
379static void scsi_disk_put(struct scsi_disk *sdkp)
380{
381	struct scsi_device *sdev = sdkp->device;
382
383	mutex_lock(&sd_ref_mutex);
384	put_device(&sdkp->dev);
385	scsi_device_put(sdev);
386	mutex_unlock(&sd_ref_mutex);
387}
388
389static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif)
390{
391	unsigned int prot_op = SCSI_PROT_NORMAL;
392	unsigned int dix = scsi_prot_sg_count(scmd);
393
394	if (scmd->sc_data_direction == DMA_FROM_DEVICE) {
395		if (dif && dix)
396			prot_op = SCSI_PROT_READ_PASS;
397		else if (dif && !dix)
398			prot_op = SCSI_PROT_READ_STRIP;
399		else if (!dif && dix)
400			prot_op = SCSI_PROT_READ_INSERT;
401	} else {
402		if (dif && dix)
403			prot_op = SCSI_PROT_WRITE_PASS;
404		else if (dif && !dix)
405			prot_op = SCSI_PROT_WRITE_INSERT;
406		else if (!dif && dix)
407			prot_op = SCSI_PROT_WRITE_STRIP;
408	}
409
410	scsi_set_prot_op(scmd, prot_op);
411	scsi_set_prot_type(scmd, dif);
412}
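
/*
 * Summary of the mapping above, where dif means the target is formatted
 * with protection information and dix means the block layer attached
 * integrity metadata to the request:
 *
 *	direction	dif	dix	prot_op
 *	READ		1	1	SCSI_PROT_READ_PASS
 *	READ		1	0	SCSI_PROT_READ_STRIP
 *	READ		0	1	SCSI_PROT_READ_INSERT
 *	WRITE		1	1	SCSI_PROT_WRITE_PASS
 *	WRITE		1	0	SCSI_PROT_WRITE_INSERT
 *	WRITE		0	1	SCSI_PROT_WRITE_STRIP
 */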
413
414/**
415 * scsi_setup_discard_cmnd - unmap blocks on thinly provisioned device
416 * @sdp: scsi device to operate on
417 * @rq: request to prepare
418 *
419 * Will issue either UNMAP or WRITE SAME(16) depending on the preference
420 * indicated by the target device.
421 **/
422static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
423{
424	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
425	struct bio *bio = rq->bio;
426	sector_t sector = bio->bi_sector;
427	unsigned int nr_sectors = bio_sectors(bio);
428	unsigned int len;
429	int ret;
430	struct page *page;
431
432	if (sdkp->device->sector_size == 4096) {
433		sector >>= 3;
434		nr_sectors >>= 3;
435	}
436
437	rq->timeout = SD_TIMEOUT;
438
439	memset(rq->cmd, 0, rq->cmd_len);
440
441	page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
442	if (!page)
443		return BLKPREP_DEFER;
444
445	if (sdkp->unmap) {
446		char *buf = page_address(page);
447
448		rq->cmd_len = 10;
449		rq->cmd[0] = UNMAP;
450		rq->cmd[8] = 24;
451
452		put_unaligned_be16(6 + 16, &buf[0]);
453		put_unaligned_be16(16, &buf[2]);
454		put_unaligned_be64(sector, &buf[8]);
455		put_unaligned_be32(nr_sectors, &buf[16]);
456
457		len = 24;
458	} else {
459		rq->cmd_len = 16;
460		rq->cmd[0] = WRITE_SAME_16;
461		rq->cmd[1] = 0x8; /* UNMAP */
462		put_unaligned_be64(sector, &rq->cmd[2]);
463		put_unaligned_be32(nr_sectors, &rq->cmd[10]);
464
465		len = sdkp->device->sector_size;
466	}
467
468	blk_add_request_payload(rq, page, len);
469	ret = scsi_setup_blk_pc_cmnd(sdp, rq);
470	rq->buffer = page_address(page);
471	if (ret != BLKPREP_OK) {
472		__free_page(page);
473		rq->buffer = NULL;
474	}
475	return ret;
476}
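
/*
 * Layout of the 24-byte UNMAP parameter list built above (one block
 * descriptor, all fields big-endian):
 *
 *	bytes  0..1	UNMAP data length (22)
 *	bytes  2..3	block descriptor data length (16)
 *	bytes  8..15	first LBA to unmap
 *	bytes 16..19	number of logical blocks to unmap
 */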
477
478static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
479{
480	rq->timeout = SD_TIMEOUT;
481	rq->retries = SD_MAX_RETRIES;
482	rq->cmd[0] = SYNCHRONIZE_CACHE;
483	rq->cmd_len = 10;
484
485	return scsi_setup_blk_pc_cmnd(sdp, rq);
486}
487
488static void sd_unprep_fn(struct request_queue *q, struct request *rq)
489{
490	if (rq->cmd_flags & REQ_DISCARD) {
491		free_page((unsigned long)rq->buffer);
492		rq->buffer = NULL;
493	}
494}
495
496/**
497 *	sd_prep_fn - build a scsi (read or write) command from
498 *	information in the request structure.
499 *	@q: request queue that @rq was submitted on
500 *	@rq: request to prepare; the built scsi command ends up in rq->special
501 *
502 *	Returns a BLKPREP_* value (BLKPREP_OK if the command was built).
503 **/
504static int sd_prep_fn(struct request_queue *q, struct request *rq)
505{
506	struct scsi_cmnd *SCpnt;
507	struct scsi_device *sdp = q->queuedata;
508	struct gendisk *disk = rq->rq_disk;
509	struct scsi_disk *sdkp;
510	sector_t block = blk_rq_pos(rq);
511	sector_t threshold;
512	unsigned int this_count = blk_rq_sectors(rq);
513	int ret, host_dif;
514	unsigned char protect;
515
516	/*
517	 * Discard requests come in as REQ_TYPE_FS but we turn them into
518	 * block PC requests to make life easier.
519	 */
520	if (rq->cmd_flags & REQ_DISCARD) {
521		ret = scsi_setup_discard_cmnd(sdp, rq);
522		goto out;
523	} else if (rq->cmd_flags & REQ_FLUSH) {
524		ret = scsi_setup_flush_cmnd(sdp, rq);
525		goto out;
526	} else if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
527		ret = scsi_setup_blk_pc_cmnd(sdp, rq);
528		goto out;
529	} else if (rq->cmd_type != REQ_TYPE_FS) {
530		ret = BLKPREP_KILL;
531		goto out;
532	}
533	ret = scsi_setup_fs_cmnd(sdp, rq);
534	if (ret != BLKPREP_OK)
535		goto out;
536	SCpnt = rq->special;
537	sdkp = scsi_disk(disk);
538
539	/* from here on until we're complete, any goto out
540	 * is used for a killable error condition */
541	ret = BLKPREP_KILL;
542
543	SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
544					"sd_init_command: block=%llu, "
545					"count=%d\n",
546					(unsigned long long)block,
547					this_count));
548
549	if (!sdp || !scsi_device_online(sdp) ||
550	    block + blk_rq_sectors(rq) > get_capacity(disk)) {
551		SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
552						"Finishing %u sectors\n",
553						blk_rq_sectors(rq)));
554		SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
555						"Retry with 0x%p\n", SCpnt));
556		goto out;
557	}
558
559	if (sdp->changed) {
560		/*
561		 * quietly refuse to do anything to a changed disc until
562		 * the changed bit has been reset
563		 */
564		/* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */
565		goto out;
566	}
567
568	/*
569	 * Some SD card readers can't handle multi-sector accesses which touch
570	 * the last one or two hardware sectors.  Split accesses as needed.
571	 */
572	threshold = get_capacity(disk) - SD_LAST_BUGGY_SECTORS *
573		(sdp->sector_size / 512);
574
575	if (unlikely(sdp->last_sector_bug && block + this_count > threshold)) {
576		if (block < threshold) {
577			/* Access up to the threshold but not beyond */
578			this_count = threshold - block;
579		} else {
580			/* Access only a single hardware sector */
581			this_count = sdp->sector_size / 512;
582		}
583	}
584
585	SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, "block=%llu\n",
586					(unsigned long long)block));
587
588	/*
589	 * If we have a 1K hardware sectorsize, prevent access to single
590	 * 512 byte sectors.  In theory we could handle this - in fact
591	 * the scsi cdrom driver must be able to handle this because
592	 * we typically use 1K blocksizes, and cdroms typically have
593	 * 2K hardware sectorsizes.  Of course, things are simpler
594	 * with the cdrom, since it is read-only.  For performance
595	 * reasons, the filesystems should be able to handle this
596	 * and not force the scsi disk driver to use bounce buffers
597	 * for this.
598	 */
599	if (sdp->sector_size == 1024) {
600		if ((block & 1) || (blk_rq_sectors(rq) & 1)) {
601			scmd_printk(KERN_ERR, SCpnt,
602				    "Bad block number requested\n");
603			goto out;
604		} else {
605			block = block >> 1;
606			this_count = this_count >> 1;
607		}
608	}
609	if (sdp->sector_size == 2048) {
610		if ((block & 3) || (blk_rq_sectors(rq) & 3)) {
611			scmd_printk(KERN_ERR, SCpnt,
612				    "Bad block number requested\n");
613			goto out;
614		} else {
615			block = block >> 2;
616			this_count = this_count >> 2;
617		}
618	}
619	if (sdp->sector_size == 4096) {
620		if ((block & 7) || (blk_rq_sectors(rq) & 7)) {
621			scmd_printk(KERN_ERR, SCpnt,
622				    "Bad block number requested\n");
623			goto out;
624		} else {
625			block = block >> 3;
626			this_count = this_count >> 3;
627		}
628	}
629	if (rq_data_dir(rq) == WRITE) {
630		if (!sdp->writeable) {
631			goto out;
632		}
633		SCpnt->cmnd[0] = WRITE_6;
634		SCpnt->sc_data_direction = DMA_TO_DEVICE;
635
636		if (blk_integrity_rq(rq) &&
637		    sd_dif_prepare(rq, block, sdp->sector_size) == -EIO)
638			goto out;
639
640	} else if (rq_data_dir(rq) == READ) {
641		SCpnt->cmnd[0] = READ_6;
642		SCpnt->sc_data_direction = DMA_FROM_DEVICE;
643	} else {
644		scmd_printk(KERN_ERR, SCpnt, "Unknown command %x\n", rq->cmd_flags);
645		goto out;
646	}
647
648	SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
649					"%s %d/%u 512 byte blocks.\n",
650					(rq_data_dir(rq) == WRITE) ?
651					"writing" : "reading", this_count,
652					blk_rq_sectors(rq)));
653
654	/* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
655	host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
656	if (host_dif)
657		protect = 1 << 5;
658	else
659		protect = 0;
660
661	if (host_dif == SD_DIF_TYPE2_PROTECTION) {
662		SCpnt->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC);
663
664		if (unlikely(SCpnt->cmnd == NULL)) {
665			ret = BLKPREP_DEFER;
666			goto out;
667		}
668
669		SCpnt->cmd_len = SD_EXT_CDB_SIZE;
670		memset(SCpnt->cmnd, 0, SCpnt->cmd_len);
671		SCpnt->cmnd[0] = VARIABLE_LENGTH_CMD;
672		SCpnt->cmnd[7] = 0x18;
673		SCpnt->cmnd[9] = (rq_data_dir(rq) == READ) ? READ_32 : WRITE_32;
674		SCpnt->cmnd[10] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
675
676		/* LBA */
677		SCpnt->cmnd[12] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
678		SCpnt->cmnd[13] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0;
679		SCpnt->cmnd[14] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0;
680		SCpnt->cmnd[15] = sizeof(block) > 4 ? (unsigned char) (block >> 32) & 0xff : 0;
681		SCpnt->cmnd[16] = (unsigned char) (block >> 24) & 0xff;
682		SCpnt->cmnd[17] = (unsigned char) (block >> 16) & 0xff;
683		SCpnt->cmnd[18] = (unsigned char) (block >> 8) & 0xff;
684		SCpnt->cmnd[19] = (unsigned char) block & 0xff;
685
686		/* Expected Indirect LBA */
687		SCpnt->cmnd[20] = (unsigned char) (block >> 24) & 0xff;
688		SCpnt->cmnd[21] = (unsigned char) (block >> 16) & 0xff;
689		SCpnt->cmnd[22] = (unsigned char) (block >> 8) & 0xff;
690		SCpnt->cmnd[23] = (unsigned char) block & 0xff;
691
692		/* Transfer length */
693		SCpnt->cmnd[28] = (unsigned char) (this_count >> 24) & 0xff;
694		SCpnt->cmnd[29] = (unsigned char) (this_count >> 16) & 0xff;
695		SCpnt->cmnd[30] = (unsigned char) (this_count >> 8) & 0xff;
696		SCpnt->cmnd[31] = (unsigned char) this_count & 0xff;
697	} else if (sdp->use_16_for_rw) {
698		SCpnt->cmnd[0] += READ_16 - READ_6;
699		SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
700		SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
701		SCpnt->cmnd[3] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0;
702		SCpnt->cmnd[4] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0;
703		SCpnt->cmnd[5] = sizeof(block) > 4 ? (unsigned char) (block >> 32) & 0xff : 0;
704		SCpnt->cmnd[6] = (unsigned char) (block >> 24) & 0xff;
705		SCpnt->cmnd[7] = (unsigned char) (block >> 16) & 0xff;
706		SCpnt->cmnd[8] = (unsigned char) (block >> 8) & 0xff;
707		SCpnt->cmnd[9] = (unsigned char) block & 0xff;
708		SCpnt->cmnd[10] = (unsigned char) (this_count >> 24) & 0xff;
709		SCpnt->cmnd[11] = (unsigned char) (this_count >> 16) & 0xff;
710		SCpnt->cmnd[12] = (unsigned char) (this_count >> 8) & 0xff;
711		SCpnt->cmnd[13] = (unsigned char) this_count & 0xff;
712		SCpnt->cmnd[14] = SCpnt->cmnd[15] = 0;
713	} else if ((this_count > 0xff) || (block > 0x1fffff) ||
714		   scsi_device_protection(SCpnt->device) ||
715		   SCpnt->device->use_10_for_rw) {
716		if (this_count > 0xffff)
717			this_count = 0xffff;
718
719		SCpnt->cmnd[0] += READ_10 - READ_6;
720		SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
721		SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
722		SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
723		SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
724		SCpnt->cmnd[5] = (unsigned char) block & 0xff;
725		SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0;
726		SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff;
727		SCpnt->cmnd[8] = (unsigned char) this_count & 0xff;
728	} else {
729		if (unlikely(rq->cmd_flags & REQ_FUA)) {
730			/*
731			 * This happens only if this drive failed
732			 * 10byte rw command with ILLEGAL_REQUEST
733			 * during operation and thus turned off
734			 * use_10_for_rw.
735			 */
736			scmd_printk(KERN_ERR, SCpnt,
737				    "FUA write on READ/WRITE(6) drive\n");
738			goto out;
739		}
740
741		SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f);
742		SCpnt->cmnd[2] = (unsigned char) ((block >> 8) & 0xff);
743		SCpnt->cmnd[3] = (unsigned char) block & 0xff;
744		SCpnt->cmnd[4] = (unsigned char) this_count;
745		SCpnt->cmnd[5] = 0;
746	}
747	SCpnt->sdb.length = this_count * sdp->sector_size;
748
749	/* If DIF or DIX is enabled, tell HBA how to handle request */
750	if (host_dif || scsi_prot_sg_count(SCpnt))
751		sd_prot_op(SCpnt, host_dif);
752
753	/*
754	 * We shouldn't disconnect in the middle of a sector, so with a dumb
755	 * host adapter, it's safe to assume that we can at least transfer
756	 * this many bytes between each connect / disconnect.
757	 */
758	SCpnt->transfersize = sdp->sector_size;
759	SCpnt->underflow = this_count << 9;
760	SCpnt->allowed = SD_MAX_RETRIES;
761
762	/*
763	 * This indicates that the command is ready from our end to be
764	 * queued.
765	 */
766	ret = BLKPREP_OK;
767 out:
768	return scsi_prep_return(q, rq, ret);
769}
770
771/**
772 *	sd_open - open a scsi disk device
773 *	@bdev: the block device being opened
774 *	@mode: FMODE_* mask describing how the device is opened
775 *
776 *	Returns 0 if successful. Returns a negated errno value in case
777 *	of error.
778 *
779 *	Note: This can be called from a user context (e.g. fsck(1) )
780 *	or from within the kernel (e.g. as a result of a mount(1) ).
781 *	In the latter case @bdev and @mode carry an abridged amount
782 *	of information as noted above.
783 *
784 *	Locking: called with bdev->bd_mutex held.
785 **/
786static int sd_open(struct block_device *bdev, fmode_t mode)
787{
788	struct scsi_disk *sdkp = scsi_disk_get(bdev->bd_disk);
789	struct scsi_device *sdev;
790	int retval;
791
792	if (!sdkp)
793		return -ENXIO;
794
795	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));
796
797	sdev = sdkp->device;
798
799	retval = scsi_autopm_get_device(sdev);
800	if (retval)
801		goto error_autopm;
802
803	/*
804	 * If the device is in error recovery, wait until it is done.
805	 * If the device is offline, then disallow any access to it.
806	 */
807	retval = -ENXIO;
808	if (!scsi_block_when_processing_errors(sdev))
809		goto error_out;
810
811	if (sdev->removable || sdkp->write_prot)
812		check_disk_change(bdev);
813
814	/*
815	 * If the drive is empty, just let the open fail.
816	 */
817	retval = -ENOMEDIUM;
818	if (sdev->removable && !sdkp->media_present && !(mode & FMODE_NDELAY))
819		goto error_out;
820
821	/*
822	 * If the device has the write protect tab set, have the open fail
823	 * if the user expects to be able to write to the thing.
824	 */
825	retval = -EROFS;
826	if (sdkp->write_prot && (mode & FMODE_WRITE))
827		goto error_out;
828
829	/*
830	 * It is possible that the disk changing stuff resulted in
831	 * the device being taken offline.  If this is the case,
832	 * report this to the user, and don't pretend that the
833	 * open actually succeeded.
834	 */
835	retval = -ENXIO;
836	if (!scsi_device_online(sdev))
837		goto error_out;
838
839	if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) {
840		if (scsi_block_when_processing_errors(sdev))
841			scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
842	}
843
844	return 0;
845
846error_out:
847	scsi_autopm_put_device(sdev);
848error_autopm:
849	scsi_disk_put(sdkp);
850	return retval;
851}
852
853/**
854 *	sd_release - invoked when the (last) close(2) is called on this
855 *	scsi disk.
856 *	@disk: disk being released
857 *	@mode: FMODE_* mask the device was opened with
858 *
859 *	Returns 0.
860 *
861 *	Note: may block (uninterruptible) if error recovery is underway
862 *	on this disk.
863 *
864 *	Locking: called with bdev->bd_mutex held.
865 **/
866static int sd_release(struct gendisk *disk, fmode_t mode)
867{
868	struct scsi_disk *sdkp = scsi_disk(disk);
869	struct scsi_device *sdev = sdkp->device;
870
871	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));
872
873	if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
874		if (scsi_block_when_processing_errors(sdev))
875			scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
876	}
877
878
879	scsi_autopm_put_device(sdev);
880	scsi_disk_put(sdkp);
881	return 0;
882}
883
884static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
885{
886	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
887	struct scsi_device *sdp = sdkp->device;
888	struct Scsi_Host *host = sdp->host;
889	int diskinfo[4];
890
891	/* default to most commonly used values */
892	diskinfo[0] = 0x40;	/* 1 << 6 */
893	diskinfo[1] = 0x20;	/* 1 << 5 */
894	diskinfo[2] = sdkp->capacity >> 11;
895
896	/* override with calculated, extended default, or driver values */
897	if (host->hostt->bios_param)
898		host->hostt->bios_param(sdp, bdev, sdkp->capacity, diskinfo);
899	else
900		scsicam_bios_param(bdev, sdkp->capacity, diskinfo);
901
902	geo->heads = diskinfo[0];
903	geo->sectors = diskinfo[1];
904	geo->cylinders = diskinfo[2];
905	return 0;
906}
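
/*
 * With the default fake geometry of 64 heads and 32 sectors per track,
 * cylinders = capacity / (64 * 32) = capacity >> 11.  For example, a
 * disk with 2097152 512-byte sectors (1 GiB) is reported as 64/32/1024.
 */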
907
908/**
909 *	sd_ioctl - process an ioctl
910 *	@bdev: target block device
911 *	@mode: FMODE_* mask the device was opened with
912 *	@cmd: ioctl command number
913 *	@arg: this is third argument given to ioctl(2) system call.
914 *	Often contains a pointer.
915 *
916 *	Returns 0 if successful (some ioctls return positive numbers on
917 *	success as well). Returns a negated errno value in case of error.
918 *
919 *	Note: most ioctls are forwarded to the block subsystem or further
920 *	down in the scsi subsystem.
921 **/
922static int sd_ioctl(struct block_device *bdev, fmode_t mode,
923		    unsigned int cmd, unsigned long arg)
924{
925	struct gendisk *disk = bdev->bd_disk;
926	struct scsi_device *sdp = scsi_disk(disk)->device;
927	void __user *p = (void __user *)arg;
928	int error;
929
930	SCSI_LOG_IOCTL(1, printk("sd_ioctl: disk=%s, cmd=0x%x\n",
931						disk->disk_name, cmd));
932
933	/*
934	 * If we are in the middle of error recovery, don't let anyone
935	 * else try and use this device.  Also, if error recovery fails, it
936	 * may try and take the device offline, in which case all further
937	 * access to the device is prohibited.
938	 */
939	error = scsi_nonblockable_ioctl(sdp, cmd, p,
940					(mode & FMODE_NDELAY) != 0);
941	if (!scsi_block_when_processing_errors(sdp) || !error)
942		goto out;
943
944	/*
945	 * Send SCSI addressing ioctls directly to mid level, send other
946	 * ioctls to block level and then onto mid level if they can't be
947	 * resolved.
948	 */
949	switch (cmd) {
950		case SCSI_IOCTL_GET_IDLUN:
951		case SCSI_IOCTL_GET_BUS_NUMBER:
952			error = scsi_ioctl(sdp, cmd, p);
953			break;
954		default:
955			error = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, p);
956			if (error != -ENOTTY)
957				break;
958			error = scsi_ioctl(sdp, cmd, p);
959			break;
960	}
961out:
962	return error;
963}
964
965static void set_media_not_present(struct scsi_disk *sdkp)
966{
967	sdkp->media_present = 0;
968	sdkp->capacity = 0;
969	sdkp->device->changed = 1;
970}
971
972/**
973 *	sd_media_changed - check if our medium changed
974 *	@disk: kernel device descriptor
975 *
976 *	Returns 0 if not applicable or no change; 1 if change
977 *
978 *	Note: this function is invoked from the block subsystem.
979 **/
980static int sd_media_changed(struct gendisk *disk)
981{
982	struct scsi_disk *sdkp = scsi_disk(disk);
983	struct scsi_device *sdp = sdkp->device;
984	struct scsi_sense_hdr *sshdr = NULL;
985	int retval;
986
987	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_media_changed\n"));
988
989	if (!sdp->removable)
990		return 0;
991
992	/*
993	 * If the device is offline, don't send any commands - just pretend as
994	 * if the command failed.  If the device ever comes back online, we
995	 * can deal with it then.  It is only because of unrecoverable errors
996	 * that we would ever take a device offline in the first place.
997	 */
998	if (!scsi_device_online(sdp)) {
999		set_media_not_present(sdkp);
1000		retval = 1;
1001		goto out;
1002	}
1003
1004	/*
1005	 * Using TEST_UNIT_READY enables differentiation between a drive with
1006	 * no cartridge loaded (NOT READY), a drive with a changed cartridge
1007	 * (UNIT ATTENTION), and one with the same cartridge (GOOD STATUS).
1008	 *
1009	 * Drives that auto spin down, e.g. the iomega jaz 1G, will be started
1010	 * by sd_spinup_disk() from sd_revalidate_disk(), which happens whenever
1011	 * sd_revalidate() is called.
1012	 */
1013	retval = -ENODEV;
1014
1015	if (scsi_block_when_processing_errors(sdp)) {
1016		sshdr  = kzalloc(sizeof(*sshdr), GFP_KERNEL);
1017		retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
1018					      sshdr);
1019	}
1020
1021	/*
1022	 * Unable to test, unit probably not ready.   This usually
1023	 * means there is no disc in the drive.  Mark as changed,
1024	 * and we will figure it out later once the drive is
1025	 * available again.
1026	 */
1027	if (retval || (scsi_sense_valid(sshdr) &&
1028		       /* 0x3a is medium not present */
1029		       sshdr->asc == 0x3a)) {
1030		set_media_not_present(sdkp);
1031		retval = 1;
1032		goto out;
1033	}
1034
1035	/*
1036	 * For a removable scsi disk we have to recognise the presence
1037	 * of a disk in the drive.  This is kept in struct scsi_disk
1038	 * and tested at open!  Daniel Roche (dan@lectra.fr)
1039	 */
1040	sdkp->media_present = 1;
1041
1042	retval = sdp->changed;
1043	sdp->changed = 0;
1044out:
1045	if (retval != sdkp->previous_state)
1046		sdev_evt_send_simple(sdp, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
1047	sdkp->previous_state = retval;
1048	kfree(sshdr);
1049	return retval;
1050}
1051
1052static int sd_sync_cache(struct scsi_disk *sdkp)
1053{
1054	int retries, res;
1055	struct scsi_device *sdp = sdkp->device;
1056	struct scsi_sense_hdr sshdr;
1057
1058	if (!scsi_device_online(sdp))
1059		return -ENODEV;
1060
1061
1062	for (retries = 3; retries > 0; --retries) {
1063		unsigned char cmd[10] = { 0 };
1064
1065		cmd[0] = SYNCHRONIZE_CACHE;
1066		/*
1067		 * Leave the rest of the command zero to indicate
1068		 * flush everything.
1069		 */
1070		res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
1071				       SD_TIMEOUT, SD_MAX_RETRIES, NULL);
1072		if (res == 0)
1073			break;
1074	}
1075
1076	if (res) {
1077		sd_print_result(sdkp, res);
1078		if (driver_byte(res) & DRIVER_SENSE)
1079			sd_print_sense_hdr(sdkp, &sshdr);
1080	}
1081
1082	if (res)
1083		return -EIO;
1084	return 0;
1085}
1086
1087static void sd_rescan(struct device *dev)
1088{
1089	struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
1090
1091	if (sdkp) {
1092		revalidate_disk(sdkp->disk);
1093		scsi_disk_put(sdkp);
1094	}
1095}
1096
1097
1098#ifdef CONFIG_COMPAT
1099/*
1100 * This gets called directly from the VFS. When the ioctl
1101 * is not recognized we fall back to the other translation paths.
1102 */
1103static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
1104			   unsigned int cmd, unsigned long arg)
1105{
1106	struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
1107
1108	/*
1109	 * If we are in the middle of error recovery, don't let anyone
1110	 * else try and use this device.  Also, if error recovery fails, it
1111	 * may try and take the device offline, in which case all further
1112	 * access to the device is prohibited.
1113	 */
1114	if (!scsi_block_when_processing_errors(sdev))
1115		return -ENODEV;
1116
1117	if (sdev->host->hostt->compat_ioctl) {
1118		int ret;
1119
1120		ret = sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
1121
1122		return ret;
1123	}
1124
1125	/*
1126	 * Let the static ioctl translation table take care of it.
1127	 */
1128	return -ENOIOCTLCMD;
1129}
1130#endif
1131
1132static const struct block_device_operations sd_fops = {
1133	.owner			= THIS_MODULE,
1134	.open			= sd_open,
1135	.release		= sd_release,
1136	.ioctl			= sd_ioctl,
1137	.getgeo			= sd_getgeo,
1138#ifdef CONFIG_COMPAT
1139	.compat_ioctl		= sd_compat_ioctl,
1140#endif
1141	.media_changed		= sd_media_changed,
1142	.revalidate_disk	= sd_revalidate_disk,
1143	.unlock_native_capacity	= sd_unlock_native_capacity,
1144};
1145
1146static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
1147{
1148	u64 start_lba = blk_rq_pos(scmd->request);
1149	u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
1150	u64 bad_lba;
1151	int info_valid;
1152	/*
1153	 * resid is optional but mostly filled in.  When it's unused,
1154	 * its value is zero, so we assume the whole buffer was transferred.
1155	 */
1156	unsigned int transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);
1157	unsigned int good_bytes;
1158
1159	if (scmd->request->cmd_type != REQ_TYPE_FS)
1160		return 0;
1161
1162	info_valid = scsi_get_sense_info_fld(scmd->sense_buffer,
1163					     SCSI_SENSE_BUFFERSIZE,
1164					     &bad_lba);
1165	if (!info_valid)
1166		return 0;
1167
1168	if (scsi_bufflen(scmd) <= scmd->device->sector_size)
1169		return 0;
1170
1171	if (scmd->device->sector_size < 512) {
1172		/* only legitimate sector_size here is 256 */
1173		start_lba <<= 1;
1174		end_lba <<= 1;
1175	} else {
1176		/* be careful ... don't want any overflows */
1177		u64 factor = scmd->device->sector_size / 512;
1178		do_div(start_lba, factor);
1179		do_div(end_lba, factor);
1180	}
1181
1182	/* If the reported bad lba falls outside the request, it was reported
1183	 * incorrectly and we have no idea where the error is.
1184	 */
1185	if (bad_lba < start_lba  || bad_lba >= end_lba)
1186		return 0;
1187
1188	/* This computation should always be done in terms of
1189	 * the resolution of the device's medium.
1190	 */
1191	good_bytes = (bad_lba - start_lba) * scmd->device->sector_size;
1192	return min(good_bytes, transferred);
1193}
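
/*
 * Example: a 64-sector read starting at LBA 1000 on a 512-byte-sector
 * disk fails with a MEDIUM ERROR whose sense data reports bad_lba 1010.
 * The blocks before the bad one are usable, so good_bytes is
 * (1010 - 1000) * 512 = 5120, capped at the number of bytes actually
 * transferred.
 */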
1194
1195/**
1196 *	sd_done - bottom half handler: called when the lower level
1197 *	driver has completed (successfully or otherwise) a scsi command.
1198 *	@SCpnt: mid-level's per command structure.
1199 *
1200 *	Note: potentially run from within an ISR. Must not block.
1201 **/
1202static int sd_done(struct scsi_cmnd *SCpnt)
1203{
1204	int result = SCpnt->result;
1205	unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
1206	struct scsi_sense_hdr sshdr;
1207	struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
1208	int sense_valid = 0;
1209	int sense_deferred = 0;
1210
1211	if (SCpnt->request->cmd_flags & REQ_DISCARD) {
1212		if (!result)
1213			scsi_set_resid(SCpnt, 0);
1214		return good_bytes;
1215	}
1216
1217	if (result) {
1218		sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
1219		if (sense_valid)
1220			sense_deferred = scsi_sense_is_deferred(&sshdr);
1221	}
1222#ifdef CONFIG_SCSI_LOGGING
1223	SCSI_LOG_HLCOMPLETE(1, scsi_print_result(SCpnt));
1224	if (sense_valid) {
1225		SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
1226						   "sd_done: sb[respc,sk,asc,"
1227						   "ascq]=%x,%x,%x,%x\n",
1228						   sshdr.response_code,
1229						   sshdr.sense_key, sshdr.asc,
1230						   sshdr.ascq));
1231	}
1232#endif
1233	if (driver_byte(result) != DRIVER_SENSE &&
1234	    (!sense_valid || sense_deferred))
1235		goto out;
1236
1237	switch (sshdr.sense_key) {
1238	case HARDWARE_ERROR:
1239	case MEDIUM_ERROR:
1240		good_bytes = sd_completed_bytes(SCpnt);
1241		break;
1242	case RECOVERED_ERROR:
1243		good_bytes = scsi_bufflen(SCpnt);
1244		break;
1245	case NO_SENSE:
1246		/* This indicates a false check condition, so ignore it.  An
1247		 * unknown amount of data was transferred so treat it as an
1248		 * error.
1249		 */
1250		scsi_print_sense("sd", SCpnt);
1251		SCpnt->result = 0;
1252		memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1253		break;
1254	case ABORTED_COMMAND: /* DIF: Target detected corruption */
1255	case ILLEGAL_REQUEST: /* DIX: Host detected corruption */
1256		if (sshdr.asc == 0x10)
1257			good_bytes = sd_completed_bytes(SCpnt);
1258		break;
1259	default:
1260		break;
1261	}
1262 out:
1263	if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
1264		sd_dif_complete(SCpnt, good_bytes);
1265
1266	if (scsi_host_dif_capable(sdkp->device->host, sdkp->protection_type)
1267	    == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd) {
1268
1269		/* We have to print a failed command here as the
1270		 * extended CDB gets freed before scsi_io_completion()
1271		 * is called.
1272		 */
1273		if (result)
1274			scsi_print_command(SCpnt);
1275
1276		mempool_free(SCpnt->cmnd, sd_cdb_pool);
1277		SCpnt->cmnd = NULL;
1278		SCpnt->cmd_len = 0;
1279	}
1280
1281	return good_bytes;
1282}
1283
1284static int media_not_present(struct scsi_disk *sdkp,
1285			     struct scsi_sense_hdr *sshdr)
1286{
1287
1288	if (!scsi_sense_valid(sshdr))
1289		return 0;
1290	/* not invoked for commands that could return deferred errors */
1291	if (sshdr->sense_key != NOT_READY &&
1292	    sshdr->sense_key != UNIT_ATTENTION)
1293		return 0;
1294	if (sshdr->asc != 0x3A) /* medium not present */
1295		return 0;
1296
1297	set_media_not_present(sdkp);
1298	return 1;
1299}
1300
1301/*
1302 * spinup disk - called only in sd_revalidate_disk()
1303 */
1304static void
1305sd_spinup_disk(struct scsi_disk *sdkp)
1306{
1307	unsigned char cmd[10];
1308	unsigned long spintime_expire = 0;
1309	int retries, spintime;
1310	unsigned int the_result;
1311	struct scsi_sense_hdr sshdr;
1312	int sense_valid = 0;
1313
1314	spintime = 0;
1315
1316	/* Spin up drives, as required.  Only do this at boot time */
1317	/* Spinup needs to be done for module loads too. */
1318	do {
1319		retries = 0;
1320
1321		do {
1322			cmd[0] = TEST_UNIT_READY;
1323			memset((void *) &cmd[1], 0, 9);
1324
1325			the_result = scsi_execute_req(sdkp->device, cmd,
1326						      DMA_NONE, NULL, 0,
1327						      &sshdr, SD_TIMEOUT,
1328						      SD_MAX_RETRIES, NULL);
1329
1330			/*
1331			 * If the drive has indicated to us that it
1332			 * doesn't have any media in it, don't bother
1333			 * with any more polling.
1334			 */
1335			if (media_not_present(sdkp, &sshdr))
1336				return;
1337
1338			if (the_result)
1339				sense_valid = scsi_sense_valid(&sshdr);
1340			retries++;
1341		} while (retries < 3 &&
1342			 (!scsi_status_is_good(the_result) ||
1343			  ((driver_byte(the_result) & DRIVER_SENSE) &&
1344			  sense_valid && sshdr.sense_key == UNIT_ATTENTION)));
1345
1346		if ((driver_byte(the_result) & DRIVER_SENSE) == 0) {
1347			/* no sense, TUR either succeeded or failed
1348			 * with a status error */
1349			if(!spintime && !scsi_status_is_good(the_result)) {
1350				sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n");
1351				sd_print_result(sdkp, the_result);
1352			}
1353			break;
1354		}
1355
1356		/*
1357		 * The device does not want the automatic start to be issued.
1358		 */
1359		if (sdkp->device->no_start_on_add)
1360			break;
1361
1362		if (sense_valid && sshdr.sense_key == NOT_READY) {
1363			if (sshdr.asc == 4 && sshdr.ascq == 3)
1364				break;	/* manual intervention required */
1365			if (sshdr.asc == 4 && sshdr.ascq == 0xb)
1366				break;	/* standby */
1367			if (sshdr.asc == 4 && sshdr.ascq == 0xc)
1368				break;	/* unavailable */
1369			/*
1370			 * Issue command to spin up drive when not ready
1371			 */
1372			if (!spintime) {
1373				sd_printk(KERN_NOTICE, sdkp, "Spinning up disk...");
1374				cmd[0] = START_STOP;
1375				cmd[1] = 1;	/* Return immediately */
1376				memset((void *) &cmd[2], 0, 8);
1377				cmd[4] = 1;	/* Start spin cycle */
1378				if (sdkp->device->start_stop_pwr_cond)
1379					cmd[4] |= 1 << 4;
1380				scsi_execute_req(sdkp->device, cmd, DMA_NONE,
1381						 NULL, 0, &sshdr,
1382						 SD_TIMEOUT, SD_MAX_RETRIES,
1383						 NULL);
1384				spintime_expire = jiffies + 100 * HZ;
1385				spintime = 1;
1386			}
1387			/* Wait 1 second for next try */
1388			msleep(1000);
1389			printk(".");
1390
1391		/*
1392		 * Wait for USB flash devices with slow firmware.
1393		 * Yes, this sense key/ASC combination shouldn't
1394		 * occur here.  It's characteristic of these devices.
1395		 */
1396		} else if (sense_valid &&
1397				sshdr.sense_key == UNIT_ATTENTION &&
1398				sshdr.asc == 0x28) {
1399			if (!spintime) {
1400				spintime_expire = jiffies + 5 * HZ;
1401				spintime = 1;
1402			}
1403			/* Wait 1 second for next try */
1404			msleep(1000);
1405		} else {
1406			/* we don't understand the sense code, so it's
1407			 * probably pointless to loop */
1408			if(!spintime) {
1409				sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n");
1410				sd_print_sense_hdr(sdkp, &sshdr);
1411			}
1412			break;
1413		}
1414
1415	} while (spintime && time_before_eq(jiffies, spintime_expire));
1416
1417	if (spintime) {
1418		if (scsi_status_is_good(the_result))
1419			printk("ready\n");
1420		else
1421			printk("not responding...\n");
1422	}
1423}
1424
1425
1426/*
1427 * Determine whether the disk supports the Data Integrity Field (DIF).
1428 */
1429static void sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
1430{
1431	struct scsi_device *sdp = sdkp->device;
1432	u8 type;
1433
1434	if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0)
1435		return;
1436
1437	type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
1438
1439	if (type == sdkp->protection_type || !sdkp->first_scan)
1440		return;
1441
1442	sdkp->protection_type = type;
1443
1444	if (type > SD_DIF_TYPE3_PROTECTION) {
1445		sd_printk(KERN_ERR, sdkp, "formatted with unsupported "	\
1446			  "protection type %u. Disabling disk!\n", type);
1447		sdkp->capacity = 0;
1448		return;
1449	}
1450
1451	if (scsi_host_dif_capable(sdp->host, type))
1452		sd_printk(KERN_NOTICE, sdkp,
1453			  "Enabling DIF Type %u protection\n", type);
1454	else
1455		sd_printk(KERN_NOTICE, sdkp,
1456			  "Disabling DIF Type %u protection\n", type);
1457}
1458
1459static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
1460			struct scsi_sense_hdr *sshdr, int sense_valid,
1461			int the_result)
1462{
1463	sd_print_result(sdkp, the_result);
1464	if (driver_byte(the_result) & DRIVER_SENSE)
1465		sd_print_sense_hdr(sdkp, sshdr);
1466	else
1467		sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");
1468
1469	/*
1470	 * Set dirty bit for removable devices if not ready -
1471	 * sometimes drives will not report this properly.
1472	 */
1473	if (sdp->removable &&
1474	    sense_valid && sshdr->sense_key == NOT_READY)
1475		sdp->changed = 1;
1476
1477	/*
1478	 * We used to set media_present to 0 here to indicate no media
1479	 * in the drive, but some drives fail read capacity even with
1480	 * media present, so we can't do that.
1481	 */
1482	sdkp->capacity = 0; /* unknown mapped to zero - as usual */
1483}
1484
1485#define RC16_LEN 32
1486#if RC16_LEN > SD_BUF_SIZE
1487#error RC16_LEN must not be more than SD_BUF_SIZE
1488#endif
1489
1490#define READ_CAPACITY_RETRIES_ON_RESET	10
1491
1492static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
1493						unsigned char *buffer)
1494{
1495	unsigned char cmd[16];
1496	struct scsi_sense_hdr sshdr;
1497	int sense_valid = 0;
1498	int the_result;
1499	int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
1500	unsigned int alignment;
1501	unsigned long long lba;
1502	unsigned sector_size;
1503
1504	do {
1505		memset(cmd, 0, 16);
1506		cmd[0] = SERVICE_ACTION_IN;
1507		cmd[1] = SAI_READ_CAPACITY_16;
1508		cmd[13] = RC16_LEN;
1509		memset(buffer, 0, RC16_LEN);
1510
1511		the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
1512					buffer, RC16_LEN, &sshdr,
1513					SD_TIMEOUT, SD_MAX_RETRIES, NULL);
1514
1515		if (media_not_present(sdkp, &sshdr))
1516			return -ENODEV;
1517
1518		if (the_result) {
1519			sense_valid = scsi_sense_valid(&sshdr);
1520			if (sense_valid &&
1521			    sshdr.sense_key == ILLEGAL_REQUEST &&
1522			    (sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
1523			    sshdr.ascq == 0x00)
1524				/* Invalid Command Operation Code or
1525				 * Invalid Field in CDB, just retry
1526				 * silently with RC10 */
1527				return -EINVAL;
1528			if (sense_valid &&
1529			    sshdr.sense_key == UNIT_ATTENTION &&
1530			    sshdr.asc == 0x29 && sshdr.ascq == 0x00)
1531				/* Device reset might occur several times,
1532				 * give it one more chance */
1533				if (--reset_retries > 0)
1534					continue;
1535		}
1536		retries--;
1537
1538	} while (the_result && retries);
1539
1540	if (the_result) {
1541		sd_printk(KERN_NOTICE, sdkp, "READ CAPACITY(16) failed\n");
1542		read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
1543		return -EINVAL;
1544	}
1545
1546	sector_size = get_unaligned_be32(&buffer[8]);
1547	lba = get_unaligned_be64(&buffer[0]);
1548
1549	sd_read_protection_type(sdkp, buffer);
1550
1551	if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
1552		sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
1553			"kernel compiled with support for large block "
1554			"devices.\n");
1555		sdkp->capacity = 0;
1556		return -EOVERFLOW;
1557	}
1558
1559	/* Logical blocks per physical block exponent */
1560	sdkp->hw_sector_size = (1 << (buffer[13] & 0xf)) * sector_size;
1561
1562	/* Lowest aligned logical block */
1563	alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
1564	blk_queue_alignment_offset(sdp->request_queue, alignment);
1565	if (alignment && sdkp->first_scan)
1566		sd_printk(KERN_NOTICE, sdkp,
1567			  "physical block alignment offset: %u\n", alignment);
1568
1569	if (buffer[14] & 0x80) { /* TPE */
1570		struct request_queue *q = sdp->request_queue;
1571
1572		sdkp->thin_provisioning = 1;
1573		q->limits.discard_granularity = sdkp->hw_sector_size;
1574		q->limits.max_discard_sectors = 0xffffffff;
1575
1576		if (buffer[14] & 0x40) /* TPRZ */
1577			q->limits.discard_zeroes_data = 1;
1578
1579		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
1580	}
1581
1582	sdkp->capacity = lba + 1;
1583	return sector_size;
1584}
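
/*
 * Bytes of the READ CAPACITY(16) response consumed above:
 *
 *	bytes  0..7	returned logical block address (last LBA)
 *	bytes  8..11	logical block length in bytes
 *	byte  12	PROT_EN / P_TYPE (see sd_read_protection_type())
 *	byte  13, bits 0..3	logical blocks per physical block exponent
 *	byte  14, bit 7	TPE (thin provisioning enabled)
 *	byte  14, bit 6	TPRZ (unmapped blocks read back as zero)
 *	bytes 14..15, bits 0..13	lowest aligned logical block address
 */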
1585
1586static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
1587						unsigned char *buffer)
1588{
1589	unsigned char cmd[16];
1590	struct scsi_sense_hdr sshdr;
1591	int sense_valid = 0;
1592	int the_result;
1593	int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
1594	sector_t lba;
1595	unsigned sector_size;
1596
1597	do {
1598		cmd[0] = READ_CAPACITY;
1599		memset(&cmd[1], 0, 9);
1600		memset(buffer, 0, 8);
1601
1602		the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
1603					buffer, 8, &sshdr,
1604					SD_TIMEOUT, SD_MAX_RETRIES, NULL);
1605
1606		if (media_not_present(sdkp, &sshdr))
1607			return -ENODEV;
1608
1609		if (the_result) {
1610			sense_valid = scsi_sense_valid(&sshdr);
1611			if (sense_valid &&
1612			    sshdr.sense_key == UNIT_ATTENTION &&
1613			    sshdr.asc == 0x29 && sshdr.ascq == 0x00)
1614				/* Device reset might occur several times,
1615				 * give it one more chance */
1616				if (--reset_retries > 0)
1617					continue;
1618		}
1619		retries--;
1620
1621	} while (the_result && retries);
1622
1623	if (the_result) {
1624		sd_printk(KERN_NOTICE, sdkp, "READ CAPACITY failed\n");
1625		read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
1626		return -EINVAL;
1627	}
1628
1629	sector_size = get_unaligned_be32(&buffer[4]);
1630	lba = get_unaligned_be32(&buffer[0]);
1631
1632	if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
1633		sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
1634			"kernel compiled with support for large block "
1635			"devices.\n");
1636		sdkp->capacity = 0;
1637		return -EOVERFLOW;
1638	}
1639
1640	sdkp->capacity = lba + 1;
1641	sdkp->hw_sector_size = sector_size;
1642	return sector_size;
1643}
1644
1645static int sd_try_rc16_first(struct scsi_device *sdp)
1646{
1647	if (sdp->host->max_cmd_len < 16)
1648		return 0;
1649	if (sdp->scsi_level > SCSI_SPC_2)
1650		return 1;
1651	if (scsi_device_protection(sdp))
1652		return 1;
1653	return 0;
1654}
1655
1656/*
1657 * read disk capacity
1658 */
1659static void
1660sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
1661{
1662	int sector_size;
1663	struct scsi_device *sdp = sdkp->device;
1664	sector_t old_capacity = sdkp->capacity;
1665
1666	if (sd_try_rc16_first(sdp)) {
1667		sector_size = read_capacity_16(sdkp, sdp, buffer);
1668		if (sector_size == -EOVERFLOW)
1669			goto got_data;
1670		if (sector_size == -ENODEV)
1671			return;
1672		if (sector_size < 0)
1673			sector_size = read_capacity_10(sdkp, sdp, buffer);
1674		if (sector_size < 0)
1675			return;
1676	} else {
1677		sector_size = read_capacity_10(sdkp, sdp, buffer);
1678		if (sector_size == -EOVERFLOW)
1679			goto got_data;
1680		if (sector_size < 0)
1681			return;
1682		if ((sizeof(sdkp->capacity) > 4) &&
1683		    (sdkp->capacity > 0xffffffffULL)) {
1684			int old_sector_size = sector_size;
1685			sd_printk(KERN_NOTICE, sdkp, "Very big device. "
1686					"Trying to use READ CAPACITY(16).\n");
1687			sector_size = read_capacity_16(sdkp, sdp, buffer);
1688			if (sector_size < 0) {
1689				sd_printk(KERN_NOTICE, sdkp,
1690					"Using 0xffffffff as device size\n");
1691				sdkp->capacity = 1 + (sector_t) 0xffffffff;
1692				sector_size = old_sector_size;
1693				goto got_data;
1694			}
1695		}
1696	}
1697
1698	/* Some devices are known to return the total number of blocks,
1699	 * not the highest block number.  Some devices have versions
1700	 * which do this and others which do not.  Some devices we might
1701	 * suspect of doing this but we don't know for certain.
1702	 *
1703	 * If we know the reported capacity is wrong, decrement it.  If
1704	 * we can only guess, then assume the number of blocks is even
1705	 * (usually true but not always) and err on the side of lowering
1706	 * the capacity.
1707	 */
1708	if (sdp->fix_capacity ||
1709	    (sdp->guess_capacity && (sdkp->capacity & 0x01))) {
1710		sd_printk(KERN_INFO, sdkp, "Adjusting the sector count "
1711				"from its reported value: %llu\n",
1712				(unsigned long long) sdkp->capacity);
1713		--sdkp->capacity;
1714	}
1715
1716got_data:
1717	if (sector_size == 0) {
1718		sector_size = 512;
1719		sd_printk(KERN_NOTICE, sdkp, "Sector size 0 reported, "
1720			  "assuming 512.\n");
1721	}
1722
1723	if (sector_size != 512 &&
1724	    sector_size != 1024 &&
1725	    sector_size != 2048 &&
1726	    sector_size != 4096 &&
1727	    sector_size != 256) {
1728		sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
1729			  sector_size);
1730		/*
1731		 * The user might want to re-format the drive with
1732		 * a supported sectorsize.  Once this happens, it
1733		 * would be relatively trivial to set the thing up.
1734		 * For this reason, we leave the thing in the table.
1735		 */
1736		sdkp->capacity = 0;
1737		/*
1738		 * set a bogus sector size so the normal read/write
1739		 * logic in the block layer will eventually refuse any
1740		 * request on this device without tripping over power
1741		 * of two sector size assumptions
1742		 */
1743		sector_size = 512;
1744	}
1745	blk_queue_logical_block_size(sdp->request_queue, sector_size);
1746
1747	{
1748		char cap_str_2[10], cap_str_10[10];
1749		u64 sz = (u64)sdkp->capacity << ilog2(sector_size);
1750
1751		string_get_size(sz, STRING_UNITS_2, cap_str_2,
1752				sizeof(cap_str_2));
1753		string_get_size(sz, STRING_UNITS_10, cap_str_10,
1754				sizeof(cap_str_10));
1755
1756		if (sdkp->first_scan || old_capacity != sdkp->capacity) {
1757			sd_printk(KERN_NOTICE, sdkp,
1758				  "%llu %d-byte logical blocks: (%s/%s)\n",
1759				  (unsigned long long)sdkp->capacity,
1760				  sector_size, cap_str_10, cap_str_2);
1761
1762			if (sdkp->hw_sector_size != sector_size)
1763				sd_printk(KERN_NOTICE, sdkp,
1764					  "%u-byte physical blocks\n",
1765					  sdkp->hw_sector_size);
1766		}
1767	}
1768
1769	sdp->use_16_for_rw = (sdkp->capacity > 0xffffffff);
1770
1771	/* Rescale capacity to 512-byte units */
1772	if (sector_size == 4096)
1773		sdkp->capacity <<= 3;
1774	else if (sector_size == 2048)
1775		sdkp->capacity <<= 2;
1776	else if (sector_size == 1024)
1777		sdkp->capacity <<= 1;
1778	else if (sector_size == 256)
1779		sdkp->capacity >>= 1;
1780
1781	blk_queue_physical_block_size(sdp->request_queue, sdkp->hw_sector_size);
1782	sdkp->device->sector_size = sector_size;
1783}
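
/*
 * Example of the rescaling above: a drive reporting 268435456 blocks of
 * 4096 bytes (1 TiB) ends up with sdkp->capacity == 268435456 << 3 ==
 * 2147483648 512-byte units, while sector_size stays 4096 for the
 * request queue limits and for command setup.
 */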
1784
1785/* called with buffer of length 512 */
1786static inline int
1787sd_do_mode_sense(struct scsi_device *sdp, int dbd, int modepage,
1788		 unsigned char *buffer, int len, struct scsi_mode_data *data,
1789		 struct scsi_sense_hdr *sshdr)
1790{
1791	return scsi_mode_sense(sdp, dbd, modepage, buffer, len,
1792			       SD_TIMEOUT, SD_MAX_RETRIES, data,
1793			       sshdr);
1794}
1795
1796/*
1797 * read write protect setting, if possible - called only in sd_revalidate_disk()
1798 * called with buffer of length SD_BUF_SIZE
1799 */
1800static void
1801sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
1802{
1803	int res;
1804	struct scsi_device *sdp = sdkp->device;
1805	struct scsi_mode_data data;
1806	int old_wp = sdkp->write_prot;
1807
1808	set_disk_ro(sdkp->disk, 0);
1809	if (sdp->skip_ms_page_3f) {
1810		sd_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
1811		return;
1812	}
1813
1814	if (sdp->use_192_bytes_for_3f) {
1815		res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 192, &data, NULL);
1816	} else {
1817		/*
1818		 * First attempt: ask for all pages (0x3F), but only 4 bytes.
1819		 * We have to start carefully: some devices hang if we ask
1820		 * for more than is available.
1821		 */
1822		res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 4, &data, NULL);
1823
1824		/*
1825		 * Second attempt: ask for page 0.  When only page 0 is
1826		 * implemented, a request for page 3F may return Sense Key
1827		 * 5: Illegal Request, Sense Code 24: Invalid field in
1828		 * CDB.
1829		 */
1830		if (!scsi_status_is_good(res))
1831			res = sd_do_mode_sense(sdp, 0, 0, buffer, 4, &data, NULL);
1832
1833		/*
1834		 * Third attempt: ask for 255 bytes, as we did earlier.
1835		 */
1836		if (!scsi_status_is_good(res))
1837			res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 255,
1838					       &data, NULL);
1839	}
1840
1841	if (!scsi_status_is_good(res)) {
1842		sd_printk(KERN_WARNING, sdkp,
1843			  "Test WP failed, assume Write Enabled\n");
1844	} else {
1845		sdkp->write_prot = ((data.device_specific & 0x80) != 0);
1846		set_disk_ro(sdkp->disk, sdkp->write_prot);
1847		if (sdkp->first_scan || old_wp != sdkp->write_prot) {
1848			sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
1849				  sdkp->write_prot ? "on" : "off");
1850			sd_printk(KERN_DEBUG, sdkp,
1851				  "Mode Sense: %02x %02x %02x %02x\n",
1852				  buffer[0], buffer[1], buffer[2], buffer[3]);
1853		}
1854	}
1855}
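/*
 * Illustrative example (not from the original source): the four bytes
 * logged above are the MODE SENSE(6) header - mode data length, medium
 * type, device-specific parameter and block descriptor length.  A reply
 * such as "Mode Sense: 23 00 80 00" has bit 7 of the device-specific
 * parameter set, so the code above would report "Write Protect is on"
 * and mark the gendisk read-only via set_disk_ro().
 */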
1856
1857/*
1858 * sd_read_cache_type - called only from sd_revalidate_disk();
1859 * called with a buffer of length SD_BUF_SIZE.
1860 */
1861static void
1862sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
1863{
1864	int len = 0, res;
1865	struct scsi_device *sdp = sdkp->device;
1866
1867	int dbd;
1868	int modepage;
1869	struct scsi_mode_data data;
1870	struct scsi_sense_hdr sshdr;
1871	int old_wce = sdkp->WCE;
1872	int old_rcd = sdkp->RCD;
1873	int old_dpofua = sdkp->DPOFUA;
1874
1875	if (sdp->skip_ms_page_8)
1876		goto defaults;
1877
1878	if (sdp->type == TYPE_RBC) {
1879		modepage = 6;
1880		dbd = 8;
1881	} else {
1882		modepage = 8;
1883		dbd = 0;
1884	}
1885
1886	/* cautiously ask */
1887	res = sd_do_mode_sense(sdp, dbd, modepage, buffer, 4, &data, &sshdr);
1888
1889	if (!scsi_status_is_good(res))
1890		goto bad_sense;
1891
1892	if (!data.header_length) {
1893		modepage = 6;
1894		sd_printk(KERN_ERR, sdkp, "Missing header in MODE_SENSE response\n");
1895	}
1896
1897	/* that went OK, now ask for the proper length */
1898	len = data.length;
1899
1900	/*
1901	 * We're only interested in the first three bytes, actually.
1902	 * But the data cache page is defined for the first 20.
1903	 */
1904	if (len < 3)
1905		goto bad_sense;
1906	if (len > 20)
1907		len = 20;
1908
1909	/* Take headers and block descriptors into account */
1910	len += data.header_length + data.block_descriptor_length;
1911	if (len > SD_BUF_SIZE)
1912		goto bad_sense;
1913
1914	/* Get the data */
1915	res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr);
1916
1917	if (scsi_status_is_good(res)) {
1918		int offset = data.header_length + data.block_descriptor_length;
1919
1920		if (offset >= SD_BUF_SIZE - 2) {
1921			sd_printk(KERN_ERR, sdkp, "Malformed MODE SENSE response\n");
1922			goto defaults;
1923		}
1924
1925		if ((buffer[offset] & 0x3f) != modepage) {
1926			sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
1927			goto defaults;
1928		}
1929
1930		if (modepage == 8) {
1931			sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
1932			sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
1933		} else {
1934			sdkp->WCE = ((buffer[offset + 2] & 0x01) == 0);
1935			sdkp->RCD = 0;
1936		}
1937
1938		sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
1939		if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
1940			sd_printk(KERN_NOTICE, sdkp,
1941				  "Uses READ/WRITE(6), disabling FUA\n");
1942			sdkp->DPOFUA = 0;
1943		}
1944
1945		if (sdkp->first_scan || old_wce != sdkp->WCE ||
1946		    old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA)
1947			sd_printk(KERN_NOTICE, sdkp,
1948				  "Write cache: %s, read cache: %s, %s\n",
1949				  sdkp->WCE ? "enabled" : "disabled",
1950				  sdkp->RCD ? "disabled" : "enabled",
1951				  sdkp->DPOFUA ? "supports DPO and FUA"
1952				  : "doesn't support DPO or FUA");
1953
1954		return;
1955	}
1956
1957bad_sense:
1958	if (scsi_sense_valid(&sshdr) &&
1959	    sshdr.sense_key == ILLEGAL_REQUEST &&
1960	    sshdr.asc == 0x24 && sshdr.ascq == 0x0)
1961		/* Invalid field in CDB */
1962		sd_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
1963	else
1964		sd_printk(KERN_ERR, sdkp, "Asking for cache data failed\n");
1965
1966defaults:
1967	sd_printk(KERN_ERR, sdkp, "Assuming drive cache: write through\n");
1968	sdkp->WCE = 0;
1969	sdkp->RCD = 0;
1970	sdkp->DPOFUA = 0;
1971}
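/*
 * Illustrative example (added for clarity, not in the original source):
 * with modepage == 8 and a Caching page whose byte at offset + 2 reads
 * 0x04, the code above sets WCE = 1 and RCD = 0, i.e. "Write cache:
 * enabled, read cache: enabled".  If the mode header's device-specific
 * parameter also has 0x10 set and the device accepts 10-byte
 * READ/WRITE, DPOFUA is reported as "supports DPO and FUA".
 */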
1972
1973/*
1974 * The ATO bit indicates whether the DIF application tag is available
1975 * for use by the operating system.
1976 */
1977static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
1978{
1979	int res, offset;
1980	struct scsi_device *sdp = sdkp->device;
1981	struct scsi_mode_data data;
1982	struct scsi_sense_hdr sshdr;
1983
1984	if (sdp->type != TYPE_DISK)
1985		return;
1986
1987	if (sdkp->protection_type == 0)
1988		return;
1989
1990	res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT,
1991			      SD_MAX_RETRIES, &data, &sshdr);
1992
1993	if (!scsi_status_is_good(res) || !data.header_length ||
1994	    data.length < 6) {
1995		sd_printk(KERN_WARNING, sdkp,
1996			  "getting Control mode page failed, assume no ATO\n");
1997
1998		if (scsi_sense_valid(&sshdr))
1999			sd_print_sense_hdr(sdkp, &sshdr);
2000
2001		return;
2002	}
2003
2004	offset = data.header_length + data.block_descriptor_length;
2005
2006	if ((buffer[offset] & 0x3f) != 0x0a) {
2007		sd_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
2008		return;
2009	}
2010
2011	if ((buffer[offset + 5] & 0x80) == 0)
2012		return;
2013
2014	sdkp->ATO = 1;
2015
2016	return;
2017}
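/*
 * Note (added for clarity): buffer[offset + 5] & 0x80 above is the ATO
 * bit of the Control mode page (0x0a), so sdkp->ATO ends up set only
 * for devices that explicitly let the OS own the DIF application tag.
 */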
2018
2019/**
2020 * sd_read_block_limits - Query disk device for preferred I/O sizes.
2021 * @sdkp: disk to query
2022 */
2023static void sd_read_block_limits(struct scsi_disk *sdkp)
2024{
2025	struct request_queue *q = sdkp->disk->queue;
2026	unsigned int sector_sz = sdkp->device->sector_size;
2027	const int vpd_len = 64;
2028	unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
2029
2030	if (!buffer ||
2031	    /* Block Limits VPD */
2032	    scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
2033		goto out;
2034
2035	blk_queue_io_min(sdkp->disk->queue,
2036			 get_unaligned_be16(&buffer[6]) * sector_sz);
2037	blk_queue_io_opt(sdkp->disk->queue,
2038			 get_unaligned_be32(&buffer[12]) * sector_sz);
2039
2040	/* Thin provisioning enabled and page length indicates TP support */
2041	if (sdkp->thin_provisioning && buffer[3] == 0x3c) {
2042		unsigned int lba_count, desc_count, granularity;
2043
2044		lba_count = get_unaligned_be32(&buffer[20]);
2045		desc_count = get_unaligned_be32(&buffer[24]);
2046
2047		if (lba_count) {
2048			q->limits.max_discard_sectors =
2049				lba_count * sector_sz >> 9;
2050
2051			if (desc_count)
2052				sdkp->unmap = 1;
2053		}
2054
2055		granularity = get_unaligned_be32(&buffer[28]);
2056
2057		if (granularity)
2058			q->limits.discard_granularity = granularity * sector_sz;
2059
2060		if (buffer[32] & 0x80)
2061			q->limits.discard_alignment =
2062				get_unaligned_be32(&buffer[32]) & ~(1 << 31);
2063	}
2064
2065 out:
2066	kfree(buffer);
2067}
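/*
 * Field layout assumed by the parsing above (offsets into the Block
 * Limits VPD page 0xb0, listed for reference only):
 *
 *	bytes  6- 7: optimal transfer length granularity (blocks)
 *	bytes 12-15: optimal transfer length (blocks)
 *	bytes 20-23: maximum unmap LBA count
 *	bytes 24-27: maximum unmap block descriptor count
 *	bytes 28-31: optimal unmap granularity (blocks)
 *	bytes 32-35: unmap granularity alignment (bit 31 = valid)
 */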
2068
2069/**
2070 * sd_read_block_characteristics - Query block dev. characteristics
2071 * @sdkp: disk to query
2072 */
2073static void sd_read_block_characteristics(struct scsi_disk *sdkp)
2074{
2075	unsigned char *buffer;
2076	u16 rot;
2077	const int vpd_len = 64;
2078
2079	buffer = kmalloc(vpd_len, GFP_KERNEL);
2080
2081	if (!buffer ||
2082	    /* Block Device Characteristics VPD */
2083	    scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len))
2084		goto out;
2085
2086	rot = get_unaligned_be16(&buffer[4]);
2087
2088	if (rot == 1)
2089		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue);
2090
2091 out:
2092	kfree(buffer);
2093}
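/*
 * Note (added for clarity): the MEDIUM ROTATION RATE field read above
 * is 0 when unreported, 1 for non-rotating media such as SSDs, and
 * otherwise the nominal rate in RPM; only the value 1 matters here and
 * marks the queue non-rotational.
 */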
2094
2095static int sd_try_extended_inquiry(struct scsi_device *sdp)
2096{
2097	/*
2098	 * Although VPD inquiries can go to SCSI-2 type devices,
2099	 * some USB ones crash on receiving them, and the pages
2100	 * we currently ask for are for SPC-3 and beyond
2101	 */
2102	if (sdp->scsi_level > SCSI_SPC_2)
2103		return 1;
2104	return 0;
2105}
2106
2107/**
2108 *	sd_revalidate_disk - called the first time a new disk is seen,
2109 *	performs disk spin up, read_capacity, etc.
2110 *	@disk: struct gendisk we care about
2111 **/
2112static int sd_revalidate_disk(struct gendisk *disk)
2113{
2114	struct scsi_disk *sdkp = scsi_disk(disk);
2115	struct scsi_device *sdp = sdkp->device;
2116	unsigned char *buffer;
2117	unsigned ordered;
2118
2119	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
2120				      "sd_revalidate_disk\n"));
2121
2122	/*
2123	 * If the device is offline, don't try and read capacity or any
2124	 * of the other niceties.
2125	 */
2126	if (!scsi_device_online(sdp))
2127		goto out;
2128
2129	buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL);
2130	if (!buffer) {
2131		sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory "
2132			  "allocation failure.\n");
2133		goto out;
2134	}
2135
2136	sd_spinup_disk(sdkp);
2137
2138	/*
2139	 * Without media there is no reason to ask; moreover, some devices
2140	 * react badly if we do.
2141	 */
2142	if (sdkp->media_present) {
2143		sd_read_capacity(sdkp, buffer);
2144
2145		if (sd_try_extended_inquiry(sdp)) {
2146			sd_read_block_limits(sdkp);
2147			sd_read_block_characteristics(sdkp);
2148		}
2149
2150		sd_read_write_protect_flag(sdkp, buffer);
2151		sd_read_cache_type(sdkp, buffer);
2152		sd_read_app_tag_own(sdkp, buffer);
2153	}
2154
2155	sdkp->first_scan = 0;
2156
2157	/*
2158	 * We now have all cache related info, determine how we deal
2159	 * with ordered requests.  Note that as the current SCSI
2160	 * dispatch function can alter request order, we cannot use
2161	 * QUEUE_ORDERED_TAG_* even when ordered tag is supported.
2162	 */
2163	if (sdkp->WCE)
2164		ordered = sdkp->DPOFUA
2165			? QUEUE_ORDERED_DRAIN_FUA : QUEUE_ORDERED_DRAIN_FLUSH;
2166	else
2167		ordered = QUEUE_ORDERED_DRAIN;
2168
2169	blk_queue_ordered(sdkp->disk->queue, ordered);
2170
2171	set_capacity(disk, sdkp->capacity);
2172	kfree(buffer);
2173
2174 out:
2175	return 0;
2176}
2177
2178/**
2179 *	sd_unlock_native_capacity - unlock native capacity
2180 *	@disk: struct gendisk to set capacity for
2181 *
2182 *	Block layer calls this function if it detects that partitions
2183 *	on @disk reach beyond the end of the device.  If the SCSI host
2184 *	implements ->unlock_native_capacity() method, it's invoked to
2185 *	give it a chance to adjust the device capacity.
2186 *
2187 *	CONTEXT:
2188 *	Defined by block layer.  Might sleep.
2189 */
2190static void sd_unlock_native_capacity(struct gendisk *disk)
2191{
2192	struct scsi_device *sdev = scsi_disk(disk)->device;
2193
2194	if (sdev->host->hostt->unlock_native_capacity)
2195		sdev->host->hostt->unlock_native_capacity(sdev);
2196}
2197
2198/**
2199 *	sd_format_disk_name - format disk name
2200 *	@prefix: name prefix - e.g. "sd" for SCSI disks
2201 *	@index: index of the disk to format name for
2202 *	@buf: output buffer
2203 *	@buflen: length of the output buffer
2204 *
2205 *	SCSI disk names start at sda.  The 26th device is sdz and the
2206 *	27th is sdaa.  The last name with a two-letter suffix is sdzz,
2207 *	which is followed by sdaaa.
2208 *
2209 *	This is basically base-26 counting with one extra 'nil' entry
2210 *	for the second and later digits; the name can be derived much as
2211 *	in a base-26 conversion, with the index decremented by one after
2212 *	each digit is computed.  Worked examples follow the function body.
2213 *
2214 *	CONTEXT:
2215 *	Don't care.
2216 *
2217 *	RETURNS:
2218 *	0 on success, -errno on failure.
2219 */
2220static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
2221{
2222	const int base = 'z' - 'a' + 1;
2223	char *begin = buf + strlen(prefix);
2224	char *end = buf + buflen;
2225	char *p;
2226	int unit;
2227
2228	p = end - 1;
2229	*p = '\0';
2230	unit = base;
2231	do {
2232		if (p == begin)
2233			return -EINVAL;
2234		*--p = 'a' + (index % unit);
2235		index = (index / unit) - 1;
2236	} while (index >= 0);
2237
2238	memmove(begin, p, end - p);
2239	memcpy(buf, prefix, strlen(prefix));
2240
2241	return 0;
2242}
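/*
 * Worked examples for the conversion above (illustrative only):
 *
 *	sd_format_disk_name("sd",   0, buf, buflen)  ->  "sda"
 *	sd_format_disk_name("sd",  25, buf, buflen)  ->  "sdz"
 *	sd_format_disk_name("sd",  26, buf, buflen)  ->  "sdaa"
 *	sd_format_disk_name("sd", 701, buf, buflen)  ->  "sdzz"
 *	sd_format_disk_name("sd", 702, buf, buflen)  ->  "sdaaa"
 *
 * buflen must leave room for the prefix, the generated letters and the
 * terminating NUL; otherwise -EINVAL is returned.
 */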
2243
2244/*
2245 * The asynchronous part of sd_probe
2246 */
2247static void sd_probe_async(void *data, async_cookie_t cookie)
2248{
2249	struct scsi_disk *sdkp = data;
2250	struct scsi_device *sdp;
2251	struct gendisk *gd;
2252	u32 index;
2253	struct device *dev;
2254
2255	sdp = sdkp->device;
2256	gd = sdkp->disk;
2257	index = sdkp->index;
2258	dev = &sdp->sdev_gendev;
2259
2260	gd->major = sd_major((index & 0xf0) >> 4);
2261	gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
2262	gd->minors = SD_MINORS;
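	/*
	 * For illustration (not part of the original source, and assuming
	 * SD_MINORS == 16): index 0 maps to the first sd major with
	 * first_minor 0 (sda), index 1 to the same major with first_minor
	 * 16 (sdb), and index 16 wraps to the second sd major with
	 * first_minor 0; indexes beyond the classic major/minor ranges
	 * rely on the extended devt space requested below through
	 * GENHD_FL_EXT_DEVT.
	 */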
2263
2264	gd->fops = &sd_fops;
2265	gd->private_data = &sdkp->driver;
2266	gd->queue = sdkp->device->request_queue;
2267
2268	/* defaults, until the device tells us otherwise */
2269	sdp->sector_size = 512;
2270	sdkp->capacity = 0;
2271	sdkp->media_present = 1;
2272	sdkp->write_prot = 0;
2273	sdkp->WCE = 0;
2274	sdkp->RCD = 0;
2275	sdkp->ATO = 0;
2276	sdkp->first_scan = 1;
2277
2278	sd_revalidate_disk(gd);
2279
2280	blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
2281	blk_queue_unprep_rq(sdp->request_queue, sd_unprep_fn);
2282
2283	gd->driverfs_dev = &sdp->sdev_gendev;
2284	gd->flags = GENHD_FL_EXT_DEVT;
2285	if (sdp->removable)
2286		gd->flags |= GENHD_FL_REMOVABLE;
2287
2288	add_disk(gd);
2289	sd_dif_config_host(sdkp);
2290
2291	sd_revalidate_disk(gd);
2292
2293	sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
2294		  sdp->removable ? "removable " : "");
2295	scsi_autopm_put_device(sdp);
2296	put_device(&sdkp->dev);
2297}
2298
2299/**
2300 *	sd_probe - called during driver initialization and whenever a
2301 *	new scsi device is attached to the system. It is called once
2302 *	for each scsi device (not just disks) present.
2303 *	@dev: pointer to device object
2304 *
2305 *	Returns 0 if successful (or if not interested in this scsi device,
2306 *	e.g. a scanner); a negative errno when there is an error.
2307 *
2308 *	Note: this function is invoked from the scsi mid-level.
2309 *	This function sets up the mapping between a given
2310 *	<host,channel,id,lun> (found in sdp) and new device name
2311 *	(e.g. /dev/sda). More precisely it is the block device major
2312 *	and minor number that is chosen here.
2313 *
2314 *	Assume sd_attach is not re-entrant (for the time being).
2315 *	Also think about sd_attach() and sd_remove() running concurrently.
2316 **/
2317static int sd_probe(struct device *dev)
2318{
2319	struct scsi_device *sdp = to_scsi_device(dev);
2320	struct scsi_disk *sdkp;
2321	struct gendisk *gd;
2322	int index;
2323	int error;
2324
2325	error = -ENODEV;
2326	if (sdp->type != TYPE_DISK && sdp->type != TYPE_MOD && sdp->type != TYPE_RBC)
2327		goto out;
2328
2329	SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
2330					"sd_attach\n"));
2331
2332	error = -ENOMEM;
2333	sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL);
2334	if (!sdkp)
2335		goto out;
2336
2337	gd = alloc_disk(SD_MINORS);
2338	if (!gd)
2339		goto out_free;
2340
2341	do {
2342		if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
2343			goto out_put;
2344
2345		spin_lock(&sd_index_lock);
2346		error = ida_get_new(&sd_index_ida, &index);
2347		spin_unlock(&sd_index_lock);
2348	} while (error == -EAGAIN);
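	/*
	 * Note (added for clarity): ida_pre_get() only preallocates
	 * memory for the ida; ida_get_new() can still return -EAGAIN if
	 * that preallocation has been consumed, e.g. by a racing caller,
	 * hence the retry loop above.
	 */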
2349
2350	if (error)
2351		goto out_put;
2352
2353	if (index >= SD_MAX_DISKS) {
2354		error = -ENODEV;
2355		sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name space exhausted.\n");
2356		goto out_free_index;
2357	}
2358
2359	error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
2360	if (error)
2361		goto out_free_index;
2362
2363	sdkp->device = sdp;
2364	sdkp->driver = &sd_template;
2365	sdkp->disk = gd;
2366	sdkp->index = index;
2367	atomic_set(&sdkp->openers, 0);
2368	sdkp->previous_state = 1;
2369
2370	if (!sdp->request_queue->rq_timeout) {
2371		if (sdp->type != TYPE_MOD)
2372			blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
2373		else
2374			blk_queue_rq_timeout(sdp->request_queue,
2375					     SD_MOD_TIMEOUT);
2376	}
2377
2378	device_initialize(&sdkp->dev);
2379	sdkp->dev.parent = dev;
2380	sdkp->dev.class = &sd_disk_class;
2381	dev_set_name(&sdkp->dev, "%s", dev_name(dev));
2382
2383	if ((error = device_add(&sdkp->dev)))
2384		goto out_free_index;
2385
2386	get_device(dev);
2387	dev_set_drvdata(dev, sdkp);
2388
2389	get_device(&sdkp->dev);	/* prevent release before async_schedule */
2390	async_schedule(sd_probe_async, sdkp);
2391
2392	return 0;
2393
2394 out_free_index:
2395	spin_lock(&sd_index_lock);
2396	ida_remove(&sd_index_ida, index);
2397	spin_unlock(&sd_index_lock);
2398 out_put:
2399	put_disk(gd);
2400 out_free:
2401	kfree(sdkp);
2402 out:
2403	return error;
2404}
2405
2406/**
2407 *	sd_remove - called whenever a scsi disk (previously recognized by
2408 *	sd_probe) is detached from the system. It is called (potentially
2409 *	multiple times) during sd module unload.
2410 *	@sdp: pointer to mid level scsi device object
2411 *
2412 *	Note: this function is invoked from the scsi mid-level.
2413 *	This function potentially frees up a device name (e.g. /dev/sdc)
2414 *	that could be re-used by a subsequent sd_probe().
2415 *	This function is not called when the built-in sd driver is "exit-ed".
2416 **/
2417static int sd_remove(struct device *dev)
2418{
2419	struct scsi_disk *sdkp;
2420
2421	sdkp = dev_get_drvdata(dev);
2422	scsi_autopm_get_device(sdkp->device);
2423
2424	async_synchronize_full();
2425	blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn);
2426	blk_queue_unprep_rq(sdkp->device->request_queue, NULL);
2427	device_del(&sdkp->dev);
2428	del_gendisk(sdkp->disk);
2429	sd_shutdown(dev);
2430
2431	mutex_lock(&sd_ref_mutex);
2432	dev_set_drvdata(dev, NULL);
2433	put_device(&sdkp->dev);
2434	mutex_unlock(&sd_ref_mutex);
2435
2436	return 0;
2437}
2438
2439/**
2440 *	scsi_disk_release - Called to free the scsi_disk structure
2441 *	@dev: pointer to embedded class device
2442 *
2443 *	sd_ref_mutex must be held when entering this routine.  Because it is
2444 *	called on the last put, you should always use the scsi_disk_get() and
2445 *	scsi_disk_put() helpers, which manipulate the mutex directly,
2446 *	and never do a direct put_device.
2447 **/
2448static void scsi_disk_release(struct device *dev)
2449{
2450	struct scsi_disk *sdkp = to_scsi_disk(dev);
2451	struct gendisk *disk = sdkp->disk;
2452
2453	spin_lock(&sd_index_lock);
2454	ida_remove(&sd_index_ida, sdkp->index);
2455	spin_unlock(&sd_index_lock);
2456
2457	disk->private_data = NULL;
2458	put_disk(disk);
2459	put_device(&sdkp->device->sdev_gendev);
2460
2461	kfree(sdkp);
2462}
2463
2464static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
2465{
2466	unsigned char cmd[6] = { START_STOP };	/* START_VALID */
2467	struct scsi_sense_hdr sshdr;
2468	struct scsi_device *sdp = sdkp->device;
2469	int res;
2470
2471	if (start)
2472		cmd[4] |= 1;	/* START */
2473
2474	if (sdp->start_stop_pwr_cond)
2475		cmd[4] |= start ? 1 << 4 : 3 << 4;	/* Active or Standby */
2476
2477	if (!scsi_device_online(sdp))
2478		return -ENODEV;
2479
2480	res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
2481			       SD_TIMEOUT, SD_MAX_RETRIES, NULL);
2482	if (res) {
2483		sd_printk(KERN_WARNING, sdkp, "START_STOP FAILED\n");
2484		sd_print_result(sdkp, res);
2485		if (driver_byte(res) & DRIVER_SENSE)
2486			sd_print_sense_hdr(sdkp, &sshdr);
2487	}
2488
2489	return res;
2490}
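/*
 * Illustrative CDB values for the command built above (byte 4 of
 * START STOP UNIT): 0x01 starts and 0x00 stops the device; when
 * start_stop_pwr_cond is set, 0x11 requests the Active power condition
 * and 0x30 requests Standby instead.
 */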
2491
2492/*
2493 * Send a SYNCHRONIZE CACHE instruction down to the device through
2494 * the normal SCSI command structure.  Wait for the command to
2495 * complete.
2496 */
2497static void sd_shutdown(struct device *dev)
2498{
2499	struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
2500
2501	if (!sdkp)
2502		return;         /* this can happen */
2503
2504	if (sdkp->WCE) {
2505		sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
2506		sd_sync_cache(sdkp);
2507	}
2508
2509	if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
2510		sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
2511		sd_start_stop_device(sdkp, 0);
2512	}
2513
2514	scsi_disk_put(sdkp);
2515}
2516
2517static int sd_suspend(struct device *dev, pm_message_t mesg)
2518{
2519	struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
2520	int ret = 0;
2521
2522	if (!sdkp)
2523		return 0;	/* this can happen */
2524
2525	if (sdkp->WCE) {
2526		sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
2527		ret = sd_sync_cache(sdkp);
2528		if (ret)
2529			goto done;
2530	}
2531
2532	if ((mesg.event & PM_EVENT_SLEEP) && sdkp->device->manage_start_stop) {
2533		sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
2534		ret = sd_start_stop_device(sdkp, 0);
2535	}
2536
2537done:
2538	scsi_disk_put(sdkp);
2539	return ret;
2540}
2541
2542static int sd_resume(struct device *dev)
2543{
2544	struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
2545	int ret = 0;
2546
2547	if (!sdkp->device->manage_start_stop)
2548		goto done;
2549
2550	sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
2551	ret = sd_start_stop_device(sdkp, 1);
2552
2553done:
2554	scsi_disk_put(sdkp);
2555	return ret;
2556}
2557
2558/**
2559 *	init_sd - entry point for this driver (both when built in and when
2560 *	loaded as a module).
2561 *
2562 *	Note: this function registers this driver with the scsi mid-level.
2563 **/
2564static int __init init_sd(void)
2565{
2566	int majors = 0, i, err;
2567
2568	SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n"));
2569
2570	for (i = 0; i < SD_MAJORS; i++)
2571		if (register_blkdev(sd_major(i), "sd") == 0)
2572			majors++;
2573
2574	if (!majors)
2575		return -ENODEV;
2576
2577	err = class_register(&sd_disk_class);
2578	if (err)
2579		goto err_out;
2580
2581	err = scsi_register_driver(&sd_template.gendrv);
2582	if (err)
2583		goto err_out_class;
2584
2585	sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE,
2586					 0, 0, NULL);
2587	if (!sd_cdb_cache) {
2588		printk(KERN_ERR "sd: can't init extended cdb cache\n");
2589		goto err_out_class;
2590	}
2591
2592	sd_cdb_pool = mempool_create_slab_pool(SD_MEMPOOL_SIZE, sd_cdb_cache);
2593	if (!sd_cdb_pool) {
2594		printk(KERN_ERR "sd: can't init extended cdb pool\n");
2595		goto err_out_cache;
2596	}
2597
2598	return 0;
2599
2600err_out_cache:
2601	kmem_cache_destroy(sd_cdb_cache);
2602
2603err_out_class:
2604	class_unregister(&sd_disk_class);
2605err_out:
2606	for (i = 0; i < SD_MAJORS; i++)
2607		unregister_blkdev(sd_major(i), "sd");
2608	return err;
2609}
2610
2611/**
2612 *	exit_sd - exit point for this driver (when it is a module).
2613 *
2614 *	Note: this function unregisters this driver from the scsi mid-level.
2615 **/
2616static void __exit exit_sd(void)
2617{
2618	int i;
2619
2620	SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));
2621
2622	mempool_destroy(sd_cdb_pool);
2623	kmem_cache_destroy(sd_cdb_cache);
2624
2625	scsi_unregister_driver(&sd_template.gendrv);
2626	class_unregister(&sd_disk_class);
2627
2628	for (i = 0; i < SD_MAJORS; i++)
2629		unregister_blkdev(sd_major(i), "sd");
2630}
2631
2632module_init(init_sd);
2633module_exit(exit_sd);
2634
2635static void sd_print_sense_hdr(struct scsi_disk *sdkp,
2636			       struct scsi_sense_hdr *sshdr)
2637{
2638	sd_printk(KERN_INFO, sdkp, " ");
2639	scsi_show_sense_hdr(sshdr);
2640	sd_printk(KERN_INFO, sdkp, " ");
2641	scsi_show_extd_sense(sshdr->asc, sshdr->ascq);
2642}
2643
2644static void sd_print_result(struct scsi_disk *sdkp, int result)
2645{
2646	sd_printk(KERN_INFO, sdkp, " ");
2647	scsi_show_result(result);
2648}
2649