1/*
2 *  scsi.c Copyright (C) 1992 Drew Eckhardt
3 *         Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
4 *         Copyright (C) 2002, 2003 Christoph Hellwig
5 *
6 *  generic mid-level SCSI driver
7 *      Initial versions: Drew Eckhardt
8 *      Subsequent revisions: Eric Youngdale
9 *
10 *  <drew@colorado.edu>
11 *
12 *  Bug correction thanks go to :
13 *      Rik Faith <faith@cs.unc.edu>
14 *      Tommy Thorn <tthorn>
15 *      Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
16 *
17 *  Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
18 *  add scatter-gather, multiple outstanding request, and other
19 *  enhancements.
20 *
21 *  Native multichannel, wide scsi, /proc/scsi and hot plugging
22 *  support added by Michael Neuffer <mike@i-connect.net>
23 *
24 *  Added request_module("scsi_hostadapter") for kerneld:
25 *  (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf)
26 *  Bjorn Ekwall  <bj0rn@blox.se>
27 *  (changed to kmod)
28 *
29 *  Major improvements to the timeout, abort, and reset processing,
30 *  as well as performance modifications for large queue depths by
31 *  Leonard N. Zubkoff <lnz@dandelion.com>
32 *
33 *  Converted cli() code to spinlocks, Ingo Molnar
34 *
35 *  Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
36 *
37 *  out_of_space hacks, D. Gilbert (dpg) 990608
38 */
39
40#include <linux/module.h>
41#include <linux/moduleparam.h>
42#include <linux/kernel.h>
43#include <linux/timer.h>
44#include <linux/string.h>
45#include <linux/slab.h>
46#include <linux/blkdev.h>
47#include <linux/delay.h>
48#include <linux/init.h>
49#include <linux/completion.h>
50#include <linux/unistd.h>
51#include <linux/spinlock.h>
52#include <linux/kmod.h>
53#include <linux/interrupt.h>
54#include <linux/notifier.h>
55#include <linux/cpu.h>
56#include <linux/mutex.h>
57
58#include <scsi/scsi.h>
59#include <scsi/scsi_cmnd.h>
60#include <scsi/scsi_dbg.h>
61#include <scsi/scsi_device.h>
62#include <scsi/scsi_eh.h>
63#include <scsi/scsi_host.h>
64#include <scsi/scsi_tcq.h>
65
66#include "scsi_priv.h"
67#include "scsi_logging.h"
68
/* completion callback handed to the LLDD's queuecommand; defined below */
static void scsi_done(struct scsi_cmnd *cmd);
70
71/*
72 * Definitions and constants.
73 */
74
75#define MIN_RESET_DELAY (2*HZ)
76
77/* Do not call reset on error if we just did a reset within 15 sec. */
78#define MIN_RESET_PERIOD (15*HZ)
79
80/*
81 * Macro to determine the size of SCSI command. This macro takes vendor
82 * unique commands into account. SCSI commands in groups 6 and 7 are
83 * vendor unique and we will depend upon the command length being
84 * supplied correctly in cmd_len.
85 */
86#define CDB_SIZE(cmd)	(((((cmd)->cmnd[0] >> 5) & 7) < 6) ? \
87				COMMAND_SIZE((cmd)->cmnd[0]) : (cmd)->cmd_len)
88
89/*
90 * Note - the initial logging level can be set here to log events at boot time.
91 * After the system is up, you may enable logging via the /proc interface.
92 */
93unsigned int scsi_logging_level;
94#if defined(CONFIG_SCSI_LOGGING)
95EXPORT_SYMBOL(scsi_logging_level);
96#endif
97
/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
 * You may not alter any existing entry (although adding new ones is
 * encouraged once assigned by ANSI/INCITS T10).
 *
 * Indexed by the SCSI peripheral device type code; every entry is padded
 * to the same fixed width.
 */
static const char *const scsi_device_types[] = {
	"Direct-Access    ",
	"Sequential-Access",
	"Printer          ",
	"Processor        ",
	"WORM             ",
	"CD-ROM           ",
	"Scanner          ",
	"Optical Device   ",
	"Medium Changer   ",
	"Communications   ",
	"ASC IT8          ",
	"ASC IT8          ",
	"RAID             ",
	"Enclosure        ",
	"Direct-Access-RBC",
	"Optical card     ",
	"Bridge controller",
	"Object storage   ",
	"Automation/Drive ",
};
123
124const char * scsi_device_type(unsigned type)
125{
126	if (type == 0x1e)
127		return "Well-known LUN   ";
128	if (type == 0x1f)
129		return "No Device        ";
130	if (type >= ARRAY_SIZE(scsi_device_types))
131		return "Unknown          ";
132	return scsi_device_types[type];
133}
134
135EXPORT_SYMBOL(scsi_device_type);
136
/*
 * Bookkeeping for a command slab shared by all hosts with the same DMA
 * requirements.  Two static instances exist below; access is serialized
 * by host_cmd_pool_mutex.
 */
struct scsi_host_cmd_pool {
	struct kmem_cache	*slab;	/* cache commands are allocated from */
	unsigned int	users;		/* number of hosts using this pool */
	char		*name;		/* slab cache name */
	unsigned int	slab_flags;	/* flags for kmem_cache_create() */
	gfp_t		gfp_mask;	/* extra allocation flags, e.g. __GFP_DMA */
};
144
/* Pool for hosts that can use normally allocated memory. */
static struct scsi_host_cmd_pool scsi_cmd_pool = {
	.name		= "scsi_cmd_cache",
	.slab_flags	= SLAB_HWCACHE_ALIGN,
};

/* Pool for hosts with unchecked_isa_dma set: commands must be DMA-able. */
static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
	.name		= "scsi_cmd_cache(DMA)",
	.slab_flags	= SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
	.gfp_mask	= __GFP_DMA,
};

/* Serializes creation/teardown of the shared pools across hosts. */
static DEFINE_MUTEX(host_cmd_pool_mutex);
157
158struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
159{
160	struct scsi_cmnd *cmd;
161
162	cmd = kmem_cache_alloc(shost->cmd_pool->slab,
163			gfp_mask | shost->cmd_pool->gfp_mask);
164
165	if (unlikely(!cmd)) {
166		unsigned long flags;
167
168		spin_lock_irqsave(&shost->free_list_lock, flags);
169		if (likely(!list_empty(&shost->free_list))) {
170			cmd = list_entry(shost->free_list.next,
171					 struct scsi_cmnd, list);
172			list_del_init(&cmd->list);
173		}
174		spin_unlock_irqrestore(&shost->free_list_lock, flags);
175	}
176
177	return cmd;
178}
179EXPORT_SYMBOL_GPL(__scsi_get_command);
180
181/*
182 * Function:	scsi_get_command()
183 *
184 * Purpose:	Allocate and setup a scsi command block
185 *
186 * Arguments:	dev	- parent scsi device
187 *		gfp_mask- allocator flags
188 *
189 * Returns:	The allocated scsi command structure.
190 */
191struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
192{
193	struct scsi_cmnd *cmd;
194
195	/* Bail if we can't get a reference to the device */
196	if (!get_device(&dev->sdev_gendev))
197		return NULL;
198
199	cmd = __scsi_get_command(dev->host, gfp_mask);
200
201	if (likely(cmd != NULL)) {
202		unsigned long flags;
203
204		memset(cmd, 0, sizeof(*cmd));
205		cmd->device = dev;
206		init_timer(&cmd->eh_timeout);
207		INIT_LIST_HEAD(&cmd->list);
208		spin_lock_irqsave(&dev->list_lock, flags);
209		list_add_tail(&cmd->list, &dev->cmd_list);
210		spin_unlock_irqrestore(&dev->list_lock, flags);
211		cmd->jiffies_at_alloc = jiffies;
212	} else
213		put_device(&dev->sdev_gendev);
214
215	return cmd;
216}
217EXPORT_SYMBOL(scsi_get_command);
218
/*
 * __scsi_put_command - release a command back to the host.
 * @shost:	host the command was allocated for
 * @cmd:	command to release
 * @dev:	embedded struct device whose reference (taken in
 *		scsi_get_command()) is dropped here
 *
 * If the host's emergency free list is empty the command is parked
 * there instead of being freed, so __scsi_get_command() always has one
 * command to fall back on.
 */
void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
			struct device *dev)
{
	unsigned long flags;

	/* refill the emergency free list if it has been consumed */
	spin_lock_irqsave(&shost->free_list_lock, flags);
	if (unlikely(list_empty(&shost->free_list))) {
		list_add(&cmd->list, &shost->free_list);
		cmd = NULL;	/* ownership moved to the free list */
	}
	spin_unlock_irqrestore(&shost->free_list_lock, flags);

	if (likely(cmd != NULL))
		kmem_cache_free(shost->cmd_pool->slab, cmd);

	put_device(dev);
}
EXPORT_SYMBOL(__scsi_put_command);
238
239/*
240 * Function:	scsi_put_command()
241 *
242 * Purpose:	Free a scsi command block
243 *
244 * Arguments:	cmd	- command block to free
245 *
246 * Returns:	Nothing.
247 *
248 * Notes:	The command must not belong to any lists.
249 */
250void scsi_put_command(struct scsi_cmnd *cmd)
251{
252	struct scsi_device *sdev = cmd->device;
253	unsigned long flags;
254
255	/* serious error if the command hasn't come from a device list */
256	spin_lock_irqsave(&cmd->device->list_lock, flags);
257	BUG_ON(list_empty(&cmd->list));
258	list_del_init(&cmd->list);
259	spin_unlock_irqrestore(&cmd->device->list_lock, flags);
260
261	__scsi_put_command(cmd->device->host, cmd, &sdev->sdev_gendev);
262}
263EXPORT_SYMBOL(scsi_put_command);
264
265/*
266 * Function:	scsi_setup_command_freelist()
267 *
268 * Purpose:	Setup the command freelist for a scsi host.
269 *
270 * Arguments:	shost	- host to allocate the freelist for.
271 *
272 * Returns:	Nothing.
273 */
274int scsi_setup_command_freelist(struct Scsi_Host *shost)
275{
276	struct scsi_host_cmd_pool *pool;
277	struct scsi_cmnd *cmd;
278
279	spin_lock_init(&shost->free_list_lock);
280	INIT_LIST_HEAD(&shost->free_list);
281
282	/*
283	 * Select a command slab for this host and create it if not
284	 * yet existant.
285	 */
286	mutex_lock(&host_cmd_pool_mutex);
287	pool = (shost->unchecked_isa_dma ? &scsi_cmd_dma_pool : &scsi_cmd_pool);
288	if (!pool->users) {
289		pool->slab = kmem_cache_create(pool->name,
290				sizeof(struct scsi_cmnd), 0,
291				pool->slab_flags, NULL, NULL);
292		if (!pool->slab)
293			goto fail;
294	}
295
296	pool->users++;
297	shost->cmd_pool = pool;
298	mutex_unlock(&host_cmd_pool_mutex);
299
300	/*
301	 * Get one backup command for this host.
302	 */
303	cmd = kmem_cache_alloc(shost->cmd_pool->slab,
304			GFP_KERNEL | shost->cmd_pool->gfp_mask);
305	if (!cmd)
306		goto fail2;
307	list_add(&cmd->list, &shost->free_list);
308	return 0;
309
310 fail2:
311	if (!--pool->users)
312		kmem_cache_destroy(pool->slab);
313	return -ENOMEM;
314 fail:
315	mutex_unlock(&host_cmd_pool_mutex);
316	return -ENOMEM;
317
318}
319
320/*
321 * Function:	scsi_destroy_command_freelist()
322 *
323 * Purpose:	Release the command freelist for a scsi host.
324 *
325 * Arguments:	shost	- host that's freelist is going to be destroyed
326 */
327void scsi_destroy_command_freelist(struct Scsi_Host *shost)
328{
329	while (!list_empty(&shost->free_list)) {
330		struct scsi_cmnd *cmd;
331
332		cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
333		list_del_init(&cmd->list);
334		kmem_cache_free(shost->cmd_pool->slab, cmd);
335	}
336
337	mutex_lock(&host_cmd_pool_mutex);
338	if (!--shost->cmd_pool->users)
339		kmem_cache_destroy(shost->cmd_pool->slab);
340	mutex_unlock(&host_cmd_pool_mutex);
341}
342
#ifdef CONFIG_SCSI_LOGGING
/*
 * scsi_log_send - log a command as it is handed to the LLDD.
 * @cmd: the command about to be queued
 *
 * Verbosity is controlled by the ML QUEUE bits of scsi_logging_level;
 * see the table in the comment below.
 */
void scsi_log_send(struct scsi_cmnd *cmd)
{
	unsigned int level;

	/*
	 * If ML QUEUE log level is greater than or equal to:
	 *
	 * 1: nothing (match completion)
	 *
	 * 2: log opcode + command of all commands
	 *
	 * 3: same as 2 plus dump cmd address
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
				       SCSI_LOG_MLQUEUE_BITS);
		if (level > 1) {
			scmd_printk(KERN_INFO, cmd, "Send: ");
			if (level > 2)
				/* continuation of the line started above */
				printk("0x%p ", cmd);
			printk("\n");
			scsi_print_command(cmd);
			if (level > 3) {
				printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
				       " done = 0x%p, queuecommand 0x%p\n",
					cmd->request_buffer, cmd->request_bufflen,
					cmd->done,
					cmd->device->host->hostt->queuecommand);

			}
		}
	}
}
379
/*
 * scsi_log_completion - log a command completion and its disposition.
 * @cmd:         the completed command
 * @disposition: error-handling verdict (SUCCESS, NEEDS_RETRY, ...)
 *
 * Verbosity is controlled by the ML COMPLETE bits of scsi_logging_level;
 * see the table in the comment below.
 */
void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{
	unsigned int level;

	/*
	 * If ML COMPLETE log level is greater than or equal to:
	 *
	 * 1: log disposition, result, opcode + command, and conditionally
	 * sense data for failures or non SUCCESS dispositions.
	 *
	 * 2: same as 1 but for all command completions.
	 *
	 * 3: same as 2 plus dump cmd address
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
				       SCSI_LOG_MLCOMPLETE_BITS);
		/* at level 1 only failures are logged; level 2+ logs all */
		if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
		    (level > 1)) {
			scmd_printk(KERN_INFO, cmd, "Done: ");
			if (level > 2)
				printk("0x%p ", cmd);
			/*
			 * Dump truncated values, so we usually fit within
			 * 80 chars.
			 */
			switch (disposition) {
			case SUCCESS:
				printk("SUCCESS\n");
				break;
			case NEEDS_RETRY:
				printk("RETRY\n");
				break;
			case ADD_TO_MLQUEUE:
				printk("MLQUEUE\n");
				break;
			case FAILED:
				printk("FAILED\n");
				break;
			case TIMEOUT_ERROR:
				/*
				 * If called via scsi_times_out.
				 */
				printk("TIMEOUT\n");
				break;
			default:
				printk("UNKNOWN\n");
			}
			scsi_print_result(cmd);
			scsi_print_command(cmd);
			if (status_byte(cmd->result) & CHECK_CONDITION)
				scsi_print_sense("", cmd);
			if (level > 3)
				scmd_printk(KERN_INFO, cmd,
					    "scsi host busy %d failed %d\n",
					    cmd->device->host->host_busy,
					    cmd->device->host->host_failed);
		}
	}
}
#endif
443
444/*
445 * Assign a serial number and pid to the request for error recovery
446 * and debugging purposes.  Protected by the Host_Lock of host.
447 */
448static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
449{
450	cmd->serial_number = host->cmd_serial_number++;
451	if (cmd->serial_number == 0)
452		cmd->serial_number = host->cmd_serial_number++;
453
454	cmd->pid = host->cmd_pid++;
455	if (cmd->pid == 0)
456		cmd->pid = host->cmd_pid++;
457}
458
459/*
460 * Function:    scsi_dispatch_command
461 *
462 * Purpose:     Dispatch a command to the low-level driver.
463 *
464 * Arguments:   cmd - command block we are dispatching.
465 *
466 * Notes:
467 */
468int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
469{
470	struct Scsi_Host *host = cmd->device->host;
471	unsigned long flags = 0;
472	unsigned long timeout;
473	int rtn = 0;
474
475	/* check if the device is still usable */
476	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
477		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
478		 * returns an immediate error upwards, and signals
479		 * that the device is no longer present */
480		cmd->result = DID_NO_CONNECT << 16;
481		atomic_inc(&cmd->device->iorequest_cnt);
482		__scsi_done(cmd);
483		/* return 0 (because the command has been processed) */
484		goto out;
485	}
486
487	/* Check to see if the scsi lld put this device into state SDEV_BLOCK. */
488	if (unlikely(cmd->device->sdev_state == SDEV_BLOCK)) {
489		/*
490		 * in SDEV_BLOCK, the command is just put back on the device
491		 * queue.  The suspend state has already blocked the queue so
492		 * future requests should not occur until the device
493		 * transitions out of the suspend state.
494		 */
495		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
496
497		SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n"));
498
499		/*
500		 * NOTE: rtn is still zero here because we don't need the
501		 * queue to be plugged on return (it's already stopped)
502		 */
503		goto out;
504	}
505
506	/*
507	 * If SCSI-2 or lower, store the LUN value in cmnd.
508	 */
509	if (cmd->device->scsi_level <= SCSI_2 &&
510	    cmd->device->scsi_level != SCSI_UNKNOWN) {
511		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
512			       (cmd->device->lun << 5 & 0xe0);
513	}
514
515	/*
516	 * We will wait MIN_RESET_DELAY clock ticks after the last reset so
517	 * we can avoid the drive not being ready.
518	 */
519	timeout = host->last_reset + MIN_RESET_DELAY;
520
521	if (host->resetting && time_before(jiffies, timeout)) {
522		int ticks_remaining = timeout - jiffies;
523		/*
524		 * NOTE: This may be executed from within an interrupt
525		 * handler!  This is bad, but for now, it'll do.  The irq
526		 * level of the interrupt handler has been masked out by the
527		 * platform dependent interrupt handling code already, so the
528		 * sti() here will not cause another call to the SCSI host's
529		 * interrupt handler (assuming there is one irq-level per
530		 * host).
531		 */
532		while (--ticks_remaining >= 0)
533			mdelay(1 + 999 / HZ);
534		host->resetting = 0;
535	}
536
537	/*
538	 * AK: unlikely race here: for some reason the timer could
539	 * expire before the serial number is set up below.
540	 */
541	scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);
542
543	scsi_log_send(cmd);
544
545	/*
546	 * We will use a queued command if possible, otherwise we will
547	 * emulate the queuing and calling of completion function ourselves.
548	 */
549	atomic_inc(&cmd->device->iorequest_cnt);
550
551	/*
552	 * Before we queue this command, check if the command
553	 * length exceeds what the host adapter can handle.
554	 */
555	if (CDB_SIZE(cmd) > cmd->device->host->max_cmd_len) {
556		SCSI_LOG_MLQUEUE(3,
557				printk("queuecommand : command too long.\n"));
558		cmd->result = (DID_ABORT << 16);
559
560		scsi_done(cmd);
561		goto out;
562	}
563
564	spin_lock_irqsave(host->host_lock, flags);
565	scsi_cmd_get_serial(host, cmd);
566
567	if (unlikely(host->shost_state == SHOST_DEL)) {
568		cmd->result = (DID_NO_CONNECT << 16);
569		scsi_done(cmd);
570	} else {
571		rtn = host->hostt->queuecommand(cmd, scsi_done);
572	}
573	spin_unlock_irqrestore(host->host_lock, flags);
574	if (rtn) {
575		if (scsi_delete_timer(cmd)) {
576			atomic_inc(&cmd->device->iodone_cnt);
577			scsi_queue_insert(cmd,
578					  (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
579					  rtn : SCSI_MLQUEUE_HOST_BUSY);
580		}
581		SCSI_LOG_MLQUEUE(3,
582		    printk("queuecommand : request rejected\n"));
583	}
584
585 out:
586	SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
587	return rtn;
588}
589
590/**
591 * scsi_req_abort_cmd -- Request command recovery for the specified command
592 * cmd: pointer to the SCSI command of interest
593 *
594 * This function requests that SCSI Core start recovery for the
595 * command by deleting the timer and adding the command to the eh
596 * queue.  It can be called by either LLDDs or SCSI Core.  LLDDs who
597 * implement their own error recovery MAY ignore the timeout event if
598 * they generated scsi_req_abort_cmd.
599 */
600void scsi_req_abort_cmd(struct scsi_cmnd *cmd)
601{
602	if (!scsi_delete_timer(cmd))
603		return;
604	scsi_times_out(cmd);
605}
606EXPORT_SYMBOL(scsi_req_abort_cmd);
607
608/**
609 * scsi_done - Enqueue the finished SCSI command into the done queue.
610 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
611 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
612 *
613 * This function is the mid-level's (SCSI Core) interrupt routine, which
614 * regains ownership of the SCSI command (de facto) from a LLDD, and enqueues
615 * the command to the done queue for further processing.
616 *
617 * This is the producer of the done queue who enqueues at the tail.
618 *
619 * This function is interrupt context safe.
620 */
621static void scsi_done(struct scsi_cmnd *cmd)
622{
623	/*
624	 * We don't have to worry about this one timing out any more.
625	 * If we are unable to remove the timer, then the command
626	 * has already timed out.  In which case, we have no choice but to
627	 * let the timeout function run, as we have no idea where in fact
628	 * that function could really be.  It might be on another processor,
629	 * etc, etc.
630	 */
631	if (!scsi_delete_timer(cmd))
632		return;
633	__scsi_done(cmd);
634}
635
/* Private entry to scsi_done() to complete a command when the timer
 * isn't running --- used by scsi_times_out */
void __scsi_done(struct scsi_cmnd *cmd)
{
	struct request *rq = cmd->request;

	/*
	 * Set the serial numbers back to zero
	 */
	cmd->serial_number = 0;

	/* per-device completion/error accounting */
	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	BUG_ON(!rq);

	/*
	 * The uptodate/nbytes values don't matter, as we allow partial
	 * completes and thus will check this in the softirq callback
	 */
	/* hand the request to the block layer's softirq completion path */
	rq->completion_data = cmd;
	blk_complete_request(rq);
}
660
661/*
662 * Function:    scsi_finish_command
663 *
664 * Purpose:     Pass command off to upper layer for finishing of I/O
665 *              request, waking processes that are waiting on results,
666 *              etc.
667 */
668void scsi_finish_command(struct scsi_cmnd *cmd)
669{
670	struct scsi_device *sdev = cmd->device;
671	struct Scsi_Host *shost = sdev->host;
672
673	scsi_device_unbusy(sdev);
674
675        shost->host_blocked = 0;
676        sdev->device_blocked = 0;
677
678	/*
679	 * If we have valid sense information, then some kind of recovery
680	 * must have taken place.  Make a note of this.
681	 */
682	if (SCSI_SENSE_VALID(cmd))
683		cmd->result |= (DRIVER_SENSE << 24);
684
685	SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
686				"Notifying upper driver of completion "
687				"(result %x)\n", cmd->result));
688
689	cmd->done(cmd);
690}
691EXPORT_SYMBOL(scsi_finish_command);
692
693/*
694 * Function:	scsi_adjust_queue_depth()
695 *
696 * Purpose:	Allow low level drivers to tell us to change the queue depth
697 * 		on a specific SCSI device
698 *
699 * Arguments:	sdev	- SCSI Device in question
700 * 		tagged	- Do we use tagged queueing (non-0) or do we treat
701 * 			  this device as an untagged device (0)
702 * 		tags	- Number of tags allowed if tagged queueing enabled,
703 * 			  or number of commands the low level driver can
704 * 			  queue up in non-tagged mode (as per cmd_per_lun).
705 *
706 * Returns:	Nothing
707 *
708 * Lock Status:	None held on entry
709 *
710 * Notes:	Low level drivers may call this at any time and we will do
711 * 		the right thing depending on whether or not the device is
712 * 		currently active and whether or not it even has the
713 * 		command blocks built yet.
714 */
715void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
716{
717	unsigned long flags;
718
719	/*
720	 * refuse to set tagged depth to an unworkable size
721	 */
722	if (tags <= 0)
723		return;
724
725	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
726
727	/* Check to see if the queue is managed by the block layer
728	 * if it is, and we fail to adjust the depth, exit */
729	if (blk_queue_tagged(sdev->request_queue) &&
730	    blk_queue_resize_tags(sdev->request_queue, tags) != 0)
731		goto out;
732
733	sdev->queue_depth = tags;
734	switch (tagged) {
735		case MSG_ORDERED_TAG:
736			sdev->ordered_tags = 1;
737			sdev->simple_tags = 1;
738			break;
739		case MSG_SIMPLE_TAG:
740			sdev->ordered_tags = 0;
741			sdev->simple_tags = 1;
742			break;
743		default:
744			sdev_printk(KERN_WARNING, sdev,
745				    "scsi_adjust_queue_depth, bad queue type, "
746				    "disabled\n");
747		case 0:
748			sdev->ordered_tags = sdev->simple_tags = 0;
749			sdev->queue_depth = tags;
750			break;
751	}
752 out:
753	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
754}
755EXPORT_SYMBOL(scsi_adjust_queue_depth);
756
757/*
758 * Function:	scsi_track_queue_full()
759 *
760 * Purpose:	This function will track successive QUEUE_FULL events on a
761 * 		specific SCSI device to determine if and when there is a
762 * 		need to adjust the queue depth on the device.
763 *
764 * Arguments:	sdev	- SCSI Device in question
765 * 		depth	- Current number of outstanding SCSI commands on
766 * 			  this device, not counting the one returned as
767 * 			  QUEUE_FULL.
768 *
769 * Returns:	0 - No change needed
770 * 		>0 - Adjust queue depth to this new depth
771 * 		-1 - Drop back to untagged operation using host->cmd_per_lun
772 * 			as the untagged command depth
773 *
774 * Lock Status:	None held on entry
775 *
776 * Notes:	Low level drivers may call this at any time and we will do
777 * 		"The Right Thing."  We are interrupt context safe.
778 */
779int scsi_track_queue_full(struct scsi_device *sdev, int depth)
780{
781	if ((jiffies >> 4) == sdev->last_queue_full_time)
782		return 0;
783
784	sdev->last_queue_full_time = (jiffies >> 4);
785	if (sdev->last_queue_full_depth != depth) {
786		sdev->last_queue_full_count = 1;
787		sdev->last_queue_full_depth = depth;
788	} else {
789		sdev->last_queue_full_count++;
790	}
791
792	if (sdev->last_queue_full_count <= 10)
793		return 0;
794	if (sdev->last_queue_full_depth < 8) {
795		/* Drop back to untagged */
796		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
797		return -1;
798	}
799
800	if (sdev->ordered_tags)
801		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
802	else
803		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
804	return depth;
805}
806EXPORT_SYMBOL(scsi_track_queue_full);
807
808/**
809 * scsi_device_get  -  get an addition reference to a scsi_device
810 * @sdev:	device to get a reference to
811 *
812 * Gets a reference to the scsi_device and increments the use count
813 * of the underlying LLDD module.  You must hold host_lock of the
814 * parent Scsi_Host or already have a reference when calling this.
815 */
816int scsi_device_get(struct scsi_device *sdev)
817{
818	if (sdev->sdev_state == SDEV_DEL)
819		return -ENXIO;
820	if (!get_device(&sdev->sdev_gendev))
821		return -ENXIO;
822	/* We can fail this if we're doing SCSI operations
823	 * from module exit (like cache flush) */
824	try_module_get(sdev->host->hostt->module);
825
826	return 0;
827}
828EXPORT_SYMBOL(scsi_device_get);
829
830/**
831 * scsi_device_put  -  release a reference to a scsi_device
832 * @sdev:	device to release a reference on.
833 *
834 * Release a reference to the scsi_device and decrements the use count
835 * of the underlying LLDD module.  The device is freed once the last
836 * user vanishes.
837 */
838void scsi_device_put(struct scsi_device *sdev)
839{
840#ifdef CONFIG_MODULE_UNLOAD
841	struct module *module = sdev->host->hostt->module;
842
843	/* The module refcount will be zero if scsi_device_get()
844	 * was called from a module removal routine */
845	if (module && module_refcount(module) != 0)
846		module_put(module);
847#endif
848	put_device(&sdev->sdev_gendev);
849}
850EXPORT_SYMBOL(scsi_device_put);
851
/* helper for shost_for_each_device, thus not documented */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
					   struct scsi_device *prev)
{
	struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
	struct scsi_device *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	while (list->next != &shost->__devices) {
		next = list_entry(list->next, struct scsi_device, siblings);
		/* skip devices that we can't get a reference to */
		/* NB: scsi_device_get() returns 0 on success, so a zero
		 * return here means we DID get the reference - stop. */
		if (!scsi_device_get(next))
			break;
		next = NULL;
		list = list->next;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	/* drop the reference __scsi_iterate_devices took on the previous one */
	if (prev)
		scsi_device_put(prev);
	return next;
}
EXPORT_SYMBOL(__scsi_iterate_devices);
876
877/**
878 * starget_for_each_device  -  helper to walk all devices of a target
879 * @starget:	target whose devices we want to iterate over.
880 *
881 * This traverses over each devices of @shost.  The devices have
882 * a reference that must be released by scsi_host_put when breaking
883 * out of the loop.
884 */
885void starget_for_each_device(struct scsi_target *starget, void * data,
886		     void (*fn)(struct scsi_device *, void *))
887{
888	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
889	struct scsi_device *sdev;
890
891	shost_for_each_device(sdev, shost) {
892		if ((sdev->channel == starget->channel) &&
893		    (sdev->id == starget->id))
894			fn(sdev, data);
895	}
896}
897EXPORT_SYMBOL(starget_for_each_device);
898
899/**
900 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
901 * @starget:	SCSI target pointer
902 * @lun:	SCSI Logical Unit Number
903 *
904 * Looks up the scsi_device with the specified @lun for a give
905 * @starget. The returned scsi_device does not have an additional
906 * reference.  You must hold the host's host_lock over this call and
907 * any access to the returned scsi_device.
908 *
909 * Note:  The only reason why drivers would want to use this is because
910 * they're need to access the device list in irq context.  Otherwise you
911 * really want to use scsi_device_lookup_by_target instead.
912 **/
913struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
914						   uint lun)
915{
916	struct scsi_device *sdev;
917
918	list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
919		if (sdev->lun ==lun)
920			return sdev;
921	}
922
923	return NULL;
924}
925EXPORT_SYMBOL(__scsi_device_lookup_by_target);
926
927/**
928 * scsi_device_lookup_by_target - find a device given the target
929 * @starget:	SCSI target pointer
930 * @lun:	SCSI Logical Unit Number
931 *
932 * Looks up the scsi_device with the specified @channel, @id, @lun for a
933 * give host.  The returned scsi_device has an additional reference that
934 * needs to be release with scsi_host_put once you're done with it.
935 **/
936struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
937						 uint lun)
938{
939	struct scsi_device *sdev;
940	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
941	unsigned long flags;
942
943	spin_lock_irqsave(shost->host_lock, flags);
944	sdev = __scsi_device_lookup_by_target(starget, lun);
945	if (sdev && scsi_device_get(sdev))
946		sdev = NULL;
947	spin_unlock_irqrestore(shost->host_lock, flags);
948
949	return sdev;
950}
951EXPORT_SYMBOL(scsi_device_lookup_by_target);
952
953/**
954 * scsi_device_lookup - find a device given the host (UNLOCKED)
955 * @shost:	SCSI host pointer
956 * @channel:	SCSI channel (zero if only one channel)
957 * @pun:	SCSI target number (physical unit number)
958 * @lun:	SCSI Logical Unit Number
959 *
960 * Looks up the scsi_device with the specified @channel, @id, @lun for a
961 * give host. The returned scsi_device does not have an additional reference.
962 * You must hold the host's host_lock over this call and any access to the
963 * returned scsi_device.
964 *
965 * Note:  The only reason why drivers would want to use this is because
966 * they're need to access the device list in irq context.  Otherwise you
967 * really want to use scsi_device_lookup instead.
968 **/
969struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
970		uint channel, uint id, uint lun)
971{
972	struct scsi_device *sdev;
973
974	list_for_each_entry(sdev, &shost->__devices, siblings) {
975		if (sdev->channel == channel && sdev->id == id &&
976				sdev->lun ==lun)
977			return sdev;
978	}
979
980	return NULL;
981}
982EXPORT_SYMBOL(__scsi_device_lookup);
983
984/**
985 * scsi_device_lookup - find a device given the host
986 * @shost:	SCSI host pointer
987 * @channel:	SCSI channel (zero if only one channel)
988 * @id:		SCSI target number (physical unit number)
989 * @lun:	SCSI Logical Unit Number
990 *
991 * Looks up the scsi_device with the specified @channel, @id, @lun for a
992 * give host.  The returned scsi_device has an additional reference that
993 * needs to be release with scsi_host_put once you're done with it.
994 **/
995struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
996		uint channel, uint id, uint lun)
997{
998	struct scsi_device *sdev;
999	unsigned long flags;
1000
1001	spin_lock_irqsave(shost->host_lock, flags);
1002	sdev = __scsi_device_lookup(shost, channel, id, lun);
1003	if (sdev && scsi_device_get(sdev))
1004		sdev = NULL;
1005	spin_unlock_irqrestore(shost->host_lock, flags);
1006
1007	return sdev;
1008}
1009EXPORT_SYMBOL(scsi_device_lookup);
1010
1011/**
1012 * scsi_device_cancel - cancel outstanding IO to this device
1013 * @sdev:	Pointer to struct scsi_device
1014 * @recovery:	Boolean instructing function to recover device or not.
1015 *
1016 **/
1017int scsi_device_cancel(struct scsi_device *sdev, int recovery)
1018{
1019	struct scsi_cmnd *scmd;
1020	LIST_HEAD(active_list);
1021	struct list_head *lh, *lh_sf;
1022	unsigned long flags;
1023
1024	scsi_device_set_state(sdev, SDEV_CANCEL);
1025
1026	spin_lock_irqsave(&sdev->list_lock, flags);
1027	list_for_each_entry(scmd, &sdev->cmd_list, list) {
1028		if (scmd->request) {
1029			/*
1030			 * If we are unable to remove the timer, it means
1031			 * that the command has already timed out or
1032			 * finished.
1033			 */
1034			if (!scsi_delete_timer(scmd))
1035				continue;
1036			list_add_tail(&scmd->eh_entry, &active_list);
1037		}
1038	}
1039	spin_unlock_irqrestore(&sdev->list_lock, flags);
1040
1041	if (!list_empty(&active_list)) {
1042		list_for_each_safe(lh, lh_sf, &active_list) {
1043			scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
1044			list_del_init(lh);
1045			if (recovery &&
1046			    !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) {
1047				scmd->result = (DID_ABORT << 16);
1048				scsi_finish_command(scmd);
1049			}
1050		}
1051	}
1052
1053	return 0;
1054}
1055EXPORT_SYMBOL(scsi_device_cancel);
1056
1057MODULE_DESCRIPTION("SCSI core");
1058MODULE_LICENSE("GPL");
1059
1060module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
1061MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
1062
/*
 * Bring up the SCSI core.  Each initialization step is unwound in
 * reverse order via the goto chain below if a later step fails.
 */
static int __init init_scsi(void)
{
	int error;

	error = scsi_init_queue();
	if (error)
		return error;
	error = scsi_init_procfs();
	if (error)
		goto cleanup_queue;
	error = scsi_init_devinfo();
	if (error)
		goto cleanup_procfs;
	error = scsi_init_hosts();
	if (error)
		goto cleanup_devlist;
	error = scsi_init_sysctl();
	if (error)
		goto cleanup_hosts;
	error = scsi_sysfs_register();
	if (error)
		goto cleanup_sysctl;

	scsi_netlink_init();

	printk(KERN_NOTICE "SCSI subsystem initialized\n");
	return 0;

cleanup_sysctl:
	scsi_exit_sysctl();
cleanup_hosts:
	scsi_exit_hosts();
cleanup_devlist:
	scsi_exit_devinfo();
cleanup_procfs:
	scsi_exit_procfs();
cleanup_queue:
	scsi_exit_queue();
	printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
	       -error);
	return error;
}
1105
/* Tear down the SCSI core in the reverse order of init_scsi(). */
static void __exit exit_scsi(void)
{
	scsi_netlink_exit();
	scsi_sysfs_unregister();
	scsi_exit_sysctl();
	scsi_exit_hosts();
	scsi_exit_devinfo();
	scsi_exit_procfs();
	scsi_exit_queue();
}
1116
1117subsys_initcall(init_scsi);
1118module_exit(exit_scsi);
1119