1// SPDX-License-Identifier: GPL-2.0
2/*
3 * scsi_scan.c
4 *
5 * Copyright (C) 2000 Eric Youngdale,
6 * Copyright (C) 2002 Patrick Mansfield
7 *
8 * The general scanning/probing algorithm is as follows, exceptions are
9 * made to it depending on device specific flags, compilation options, and
10 * global variable (boot or module load time) settings.
11 *
12 * A specific LUN is scanned via an INQUIRY command; if the LUN has a
13 * device attached, a scsi_device is allocated and setup for it.
14 *
15 * For every id of every channel on the given host:
16 *
17 * 	Scan LUN 0; if the target responds to LUN 0 (even if there is no
18 * 	device or storage attached to LUN 0):
19 *
20 * 		If LUN 0 has a device attached, allocate and setup a
21 * 		scsi_device for it.
22 *
23 * 		If target is SCSI-3 or up, issue a REPORT LUN, and scan
24 * 		all of the LUNs returned by the REPORT LUN; else,
25 * 		sequentially scan LUNs up until some maximum is reached,
26 * 		or a LUN is seen that cannot have a device attached to it.
27 */
28
29#include <linux/module.h>
30#include <linux/moduleparam.h>
31#include <linux/init.h>
32#include <linux/blkdev.h>
33#include <linux/delay.h>
34#include <linux/kthread.h>
35#include <linux/spinlock.h>
36#include <linux/async.h>
37#include <linux/slab.h>
38#include <asm/unaligned.h>
39
40#include <scsi/scsi.h>
41#include <scsi/scsi_cmnd.h>
42#include <scsi/scsi_device.h>
43#include <scsi/scsi_driver.h>
44#include <scsi/scsi_devinfo.h>
45#include <scsi/scsi_host.h>
46#include <scsi/scsi_transport.h>
47#include <scsi/scsi_dh.h>
48#include <scsi/scsi_eh.h>
49
50#include "scsi_priv.h"
51#include "scsi_logging.h"
52
53#define ALLOC_FAILURE_MSG	KERN_ERR "%s: Allocation failure during" \
54	" SCSI scanning, some SCSI devices might not be configured\n"
55
56/*
57 * Default timeout
58 */
59#define SCSI_TIMEOUT (2*HZ)
60#define SCSI_REPORT_LUNS_TIMEOUT (30*HZ)
61
62/*
63 * Prefix values for the SCSI id's (stored in sysfs name field)
64 */
65#define SCSI_UID_SER_NUM 'S'
66#define SCSI_UID_UNKNOWN 'Z'
67
68/*
69 * Return values of some of the scanning functions.
70 *
71 * SCSI_SCAN_NO_RESPONSE: no valid response received from the target, this
72 * includes allocation or general failures preventing IO from being sent.
73 *
74 * SCSI_SCAN_TARGET_PRESENT: target responded, but no device is available
75 * on the given LUN.
76 *
77 * SCSI_SCAN_LUN_PRESENT: target responded, and a device is available on a
78 * given LUN.
79 */
80#define SCSI_SCAN_NO_RESPONSE		0
81#define SCSI_SCAN_TARGET_PRESENT	1
82#define SCSI_SCAN_LUN_PRESENT		2
83
84static const char *scsi_null_device_strs = "nullnullnullnull";
85
86#define MAX_SCSI_LUNS	512
87
88static u64 max_scsi_luns = MAX_SCSI_LUNS;
89
90module_param_named(max_luns, max_scsi_luns, ullong, S_IRUGO|S_IWUSR);
91MODULE_PARM_DESC(max_luns,
92		 "last scsi LUN (should be between 1 and 2^64-1)");
93
94#ifdef CONFIG_SCSI_SCAN_ASYNC
95#define SCSI_SCAN_TYPE_DEFAULT "async"
96#else
97#define SCSI_SCAN_TYPE_DEFAULT "sync"
98#endif
99
100static char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;
101
102module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type),
103		    S_IRUGO|S_IWUSR);
104MODULE_PARM_DESC(scan, "sync, async, manual, or none. "
105		 "Setting to 'manual' disables automatic scanning, but allows "
106		 "for manual device scan via the 'scan' sysfs attribute.");
107
108static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;
109
110module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
111MODULE_PARM_DESC(inq_timeout,
112		 "Timeout (in seconds) waiting for devices to answer INQUIRY."
113		 " Default is 20. Some devices may need more; most need less.");
114
115/* This lock protects only this list */
116static DEFINE_SPINLOCK(async_scan_lock);
117static LIST_HEAD(scanning_hosts);
118
119struct async_scan_data {
120	struct list_head list;
121	struct Scsi_Host *shost;
122	struct completion prev_finished;
123};
124
125/*
126 * scsi_enable_async_suspend - Enable async suspend and resume
127 */
128void scsi_enable_async_suspend(struct device *dev)
129{
130	/*
131	 * If a user has disabled async probing a likely reason is due to a
132	 * storage enclosure that does not inject staggered spin-ups. For
133	 * safety, make resume synchronous as well in that case.
134	 */
135	if (strncmp(scsi_scan_type, "async", 5) != 0)
136		return;
137	/* Enable asynchronous suspend and resume. */
138	device_enable_async_suspend(dev);
139}
140
141/**
142 * scsi_complete_async_scans - Wait for asynchronous scans to complete
143 *
144 * When this function returns, any host which started scanning before
145 * this function was called will have finished its scan.  Hosts which
146 * started scanning after this function was called may or may not have
147 * finished.
148 */
149int scsi_complete_async_scans(void)
150{
151	struct async_scan_data *data;
152
153	do {
154		if (list_empty(&scanning_hosts))
155			return 0;
156		/* If we can't get memory immediately, that's OK.  Just
157		 * sleep a little.  Even if we never get memory, the async
158		 * scans will finish eventually.
159		 */
160		data = kmalloc(sizeof(*data), GFP_KERNEL);
161		if (!data)
162			msleep(1);
163	} while (!data);
164
165	data->shost = NULL;
166	init_completion(&data->prev_finished);
167
168	spin_lock(&async_scan_lock);
169	/* Check that there's still somebody else on the list */
170	if (list_empty(&scanning_hosts))
171		goto done;
172	list_add_tail(&data->list, &scanning_hosts);
173	spin_unlock(&async_scan_lock);
174
175	printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n");
176	wait_for_completion(&data->prev_finished);
177
178	spin_lock(&async_scan_lock);
179	list_del(&data->list);
180	if (!list_empty(&scanning_hosts)) {
181		struct async_scan_data *next = list_entry(scanning_hosts.next,
182				struct async_scan_data, list);
183		complete(&next->prev_finished);
184	}
185 done:
186	spin_unlock(&async_scan_lock);
187
188	kfree(data);
189	return 0;
190}
191
192/**
193 * scsi_unlock_floptical - unlock device via a special MODE SENSE command
194 * @sdev:	scsi device to send command to
195 * @result:	area to store the result of the MODE SENSE
196 *
197 * Description:
198 *     Send a vendor specific MODE SENSE (not a MODE SELECT) command.
199 *     Called for BLIST_KEY devices.
200 **/
201static void scsi_unlock_floptical(struct scsi_device *sdev,
202				  unsigned char *result)
203{
204	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
205
206	sdev_printk(KERN_NOTICE, sdev, "unlocking floptical drive\n");
207	scsi_cmd[0] = MODE_SENSE;
208	scsi_cmd[1] = 0;
209	scsi_cmd[2] = 0x2e;
210	scsi_cmd[3] = 0;
211	scsi_cmd[4] = 0x2a;     /* size */
212	scsi_cmd[5] = 0;
213	scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, result, 0x2a,
214			 SCSI_TIMEOUT, 3, NULL);
215}
216
217static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
218					unsigned int depth)
219{
220	int new_shift = sbitmap_calculate_shift(depth);
221	bool need_alloc = !sdev->budget_map.map;
222	bool need_free = false;
223	int ret;
224	struct sbitmap sb_backup;
225
226	depth = min_t(unsigned int, depth, scsi_device_max_queue_depth(sdev));
227
228	/*
229	 * realloc if new shift is calculated, which is caused by setting
230	 * up one new default queue depth after calling ->slave_configure
231	 */
232	if (!need_alloc && new_shift != sdev->budget_map.shift)
233		need_alloc = need_free = true;
234
235	if (!need_alloc)
236		return 0;
237
238	/*
239	 * Request queue has to be frozen for reallocating budget map,
240	 * and here disk isn't added yet, so freezing is pretty fast
241	 */
242	if (need_free) {
243		blk_mq_freeze_queue(sdev->request_queue);
244		sb_backup = sdev->budget_map;
245	}
246	ret = sbitmap_init_node(&sdev->budget_map,
247				scsi_device_max_queue_depth(sdev),
248				new_shift, GFP_KERNEL,
249				sdev->request_queue->node, false, true);
250	if (!ret)
251		sbitmap_resize(&sdev->budget_map, depth);
252
253	if (need_free) {
254		if (ret)
255			sdev->budget_map = sb_backup;
256		else
257			sbitmap_free(&sb_backup);
258		ret = 0;
259		blk_mq_unfreeze_queue(sdev->request_queue);
260	}
261	return ret;
262}
263
264/**
265 * scsi_alloc_sdev - allocate and setup a scsi_Device
266 * @starget: which target to allocate a &scsi_device for
267 * @lun: which lun
268 * @hostdata: usually NULL and set by ->slave_alloc instead
269 *
270 * Description:
271 *     Allocate, initialize for io, and return a pointer to a scsi_Device.
272 *     Stores the @shost, @channel, @id, and @lun in the scsi_Device, and
273 *     adds scsi_Device to the appropriate list.
274 *
275 * Return value:
276 *     scsi_Device pointer, or NULL on failure.
277 **/
278static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
279					   u64 lun, void *hostdata)
280{
281	unsigned int depth;
282	struct scsi_device *sdev;
283	struct request_queue *q;
284	int display_failure_msg = 1, ret;
285	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
286
287	sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
288		       GFP_KERNEL);
289	if (!sdev)
290		goto out;
291
292	sdev->vendor = scsi_null_device_strs;
293	sdev->model = scsi_null_device_strs;
294	sdev->rev = scsi_null_device_strs;
295	sdev->host = shost;
296	sdev->queue_ramp_up_period = SCSI_DEFAULT_RAMP_UP_PERIOD;
297	sdev->id = starget->id;
298	sdev->lun = lun;
299	sdev->channel = starget->channel;
300	mutex_init(&sdev->state_mutex);
301	sdev->sdev_state = SDEV_CREATED;
302	INIT_LIST_HEAD(&sdev->siblings);
303	INIT_LIST_HEAD(&sdev->same_target_siblings);
304	INIT_LIST_HEAD(&sdev->starved_entry);
305	INIT_LIST_HEAD(&sdev->event_list);
306	spin_lock_init(&sdev->list_lock);
307	mutex_init(&sdev->inquiry_mutex);
308	INIT_WORK(&sdev->event_work, scsi_evt_thread);
309	INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue);
310
311	sdev->sdev_gendev.parent = get_device(&starget->dev);
312	sdev->sdev_target = starget;
313
314	/* usually NULL and set by ->slave_alloc instead */
315	sdev->hostdata = hostdata;
316
317	/* if the device needs this changing, it may do so in the
318	 * slave_configure function */
319	sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED;
320
321	/*
322	 * Some low level driver could use device->type
323	 */
324	sdev->type = -1;
325
326	/*
327	 * Assume that the device will have handshaking problems,
328	 * and then fix this field later if it turns out it
329	 * doesn't
330	 */
331	sdev->borken = 1;
332
333	sdev->sg_reserved_size = INT_MAX;
334
335	q = blk_mq_alloc_queue(&sdev->host->tag_set, NULL, NULL);
336	if (IS_ERR(q)) {
337		/* release fn is set up in scsi_sysfs_device_initialise, so
338		 * have to free and put manually here */
339		put_device(&starget->dev);
340		kfree(sdev);
341		goto out;
342	}
343	kref_get(&sdev->host->tagset_refcnt);
344	sdev->request_queue = q;
345	q->queuedata = sdev;
346	__scsi_init_queue(sdev->host, q);
347
348	depth = sdev->host->cmd_per_lun ?: 1;
349
350	/*
351	 * Use .can_queue as budget map's depth because we have to
352	 * support adjusting queue depth from sysfs. Meantime use
353	 * default device queue depth to figure out sbitmap shift
354	 * since we use this queue depth most of times.
355	 */
356	if (scsi_realloc_sdev_budget_map(sdev, depth)) {
357		put_device(&starget->dev);
358		kfree(sdev);
359		goto out;
360	}
361
362	scsi_change_queue_depth(sdev, depth);
363
364	scsi_sysfs_device_initialize(sdev);
365
366	if (shost->hostt->slave_alloc) {
367		ret = shost->hostt->slave_alloc(sdev);
368		if (ret) {
369			/*
370			 * if LLDD reports slave not present, don't clutter
371			 * console with alloc failure messages
372			 */
373			if (ret == -ENXIO)
374				display_failure_msg = 0;
375			goto out_device_destroy;
376		}
377	}
378
379	return sdev;
380
381out_device_destroy:
382	__scsi_remove_device(sdev);
383out:
384	if (display_failure_msg)
385		printk(ALLOC_FAILURE_MSG, __func__);
386	return NULL;
387}
388
389static void scsi_target_destroy(struct scsi_target *starget)
390{
391	struct device *dev = &starget->dev;
392	struct Scsi_Host *shost = dev_to_shost(dev->parent);
393	unsigned long flags;
394
395	BUG_ON(starget->state == STARGET_DEL);
396	starget->state = STARGET_DEL;
397	transport_destroy_device(dev);
398	spin_lock_irqsave(shost->host_lock, flags);
399	if (shost->hostt->target_destroy)
400		shost->hostt->target_destroy(starget);
401	list_del_init(&starget->siblings);
402	spin_unlock_irqrestore(shost->host_lock, flags);
403	put_device(dev);
404}
405
406static void scsi_target_dev_release(struct device *dev)
407{
408	struct device *parent = dev->parent;
409	struct scsi_target *starget = to_scsi_target(dev);
410
411	kfree(starget);
412	put_device(parent);
413}
414
415static const struct device_type scsi_target_type = {
416	.name =		"scsi_target",
417	.release =	scsi_target_dev_release,
418};
419
420int scsi_is_target_device(const struct device *dev)
421{
422	return dev->type == &scsi_target_type;
423}
424EXPORT_SYMBOL(scsi_is_target_device);
425
426static struct scsi_target *__scsi_find_target(struct device *parent,
427					      int channel, uint id)
428{
429	struct scsi_target *starget, *found_starget = NULL;
430	struct Scsi_Host *shost = dev_to_shost(parent);
431	/*
432	 * Search for an existing target for this sdev.
433	 */
434	list_for_each_entry(starget, &shost->__targets, siblings) {
435		if (starget->id == id &&
436		    starget->channel == channel) {
437			found_starget = starget;
438			break;
439		}
440	}
441	if (found_starget)
442		get_device(&found_starget->dev);
443
444	return found_starget;
445}
446
447/**
448 * scsi_target_reap_ref_release - remove target from visibility
449 * @kref: the reap_ref in the target being released
450 *
451 * Called on last put of reap_ref, which is the indication that no device
452 * under this target is visible anymore, so render the target invisible in
453 * sysfs.  Note: we have to be in user context here because the target reaps
454 * should be done in places where the scsi device visibility is being removed.
455 */
456static void scsi_target_reap_ref_release(struct kref *kref)
457{
458	struct scsi_target *starget
459		= container_of(kref, struct scsi_target, reap_ref);
460
461	/*
462	 * if we get here and the target is still in a CREATED state that
463	 * means it was allocated but never made visible (because a scan
464	 * turned up no LUNs), so don't call device_del() on it.
465	 */
466	if ((starget->state != STARGET_CREATED) &&
467	    (starget->state != STARGET_CREATED_REMOVE)) {
468		transport_remove_device(&starget->dev);
469		device_del(&starget->dev);
470	}
471	scsi_target_destroy(starget);
472}
473
474static void scsi_target_reap_ref_put(struct scsi_target *starget)
475{
476	kref_put(&starget->reap_ref, scsi_target_reap_ref_release);
477}
478
479/**
480 * scsi_alloc_target - allocate a new or find an existing target
481 * @parent:	parent of the target (need not be a scsi host)
482 * @channel:	target channel number (zero if no channels)
483 * @id:		target id number
484 *
485 * Return an existing target if one exists, provided it hasn't already
486 * gone into STARGET_DEL state, otherwise allocate a new target.
487 *
488 * The target is returned with an incremented reference, so the caller
489 * is responsible for both reaping and doing a last put
490 */
491static struct scsi_target *scsi_alloc_target(struct device *parent,
492					     int channel, uint id)
493{
494	struct Scsi_Host *shost = dev_to_shost(parent);
495	struct device *dev = NULL;
496	unsigned long flags;
497	const int size = sizeof(struct scsi_target)
498		+ shost->transportt->target_size;
499	struct scsi_target *starget;
500	struct scsi_target *found_target;
501	int error, ref_got;
502
503	starget = kzalloc(size, GFP_KERNEL);
504	if (!starget) {
505		printk(KERN_ERR "%s: allocation failure\n", __func__);
506		return NULL;
507	}
508	dev = &starget->dev;
509	device_initialize(dev);
510	kref_init(&starget->reap_ref);
511	dev->parent = get_device(parent);
512	dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
513	dev->bus = &scsi_bus_type;
514	dev->type = &scsi_target_type;
515	scsi_enable_async_suspend(dev);
516	starget->id = id;
517	starget->channel = channel;
518	starget->can_queue = 0;
519	INIT_LIST_HEAD(&starget->siblings);
520	INIT_LIST_HEAD(&starget->devices);
521	starget->state = STARGET_CREATED;
522	starget->scsi_level = SCSI_2;
523	starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
524 retry:
525	spin_lock_irqsave(shost->host_lock, flags);
526
527	found_target = __scsi_find_target(parent, channel, id);
528	if (found_target)
529		goto found;
530
531	list_add_tail(&starget->siblings, &shost->__targets);
532	spin_unlock_irqrestore(shost->host_lock, flags);
533	/* allocate and add */
534	transport_setup_device(dev);
535	if (shost->hostt->target_alloc) {
536		error = shost->hostt->target_alloc(starget);
537
538		if(error) {
539			if (error != -ENXIO)
540				dev_err(dev, "target allocation failed, error %d\n", error);
541			/* don't want scsi_target_reap to do the final
542			 * put because it will be under the host lock */
543			scsi_target_destroy(starget);
544			return NULL;
545		}
546	}
547	get_device(dev);
548
549	return starget;
550
551 found:
552	/*
553	 * release routine already fired if kref is zero, so if we can still
554	 * take the reference, the target must be alive.  If we can't, it must
555	 * be dying and we need to wait for a new target
556	 */
557	ref_got = kref_get_unless_zero(&found_target->reap_ref);
558
559	spin_unlock_irqrestore(shost->host_lock, flags);
560	if (ref_got) {
561		put_device(dev);
562		return found_target;
563	}
564	/*
565	 * Unfortunately, we found a dying target; need to wait until it's
566	 * dead before we can get a new one.  There is an anomaly here.  We
567	 * *should* call scsi_target_reap() to balance the kref_get() of the
568	 * reap_ref above.  However, since the target being released, it's
569	 * already invisible and the reap_ref is irrelevant.  If we call
570	 * scsi_target_reap() we might spuriously do another device_del() on
571	 * an already invisible target.
572	 */
573	put_device(&found_target->dev);
574	/*
575	 * length of time is irrelevant here, we just want to yield the CPU
576	 * for a tick to avoid busy waiting for the target to die.
577	 */
578	msleep(1);
579	goto retry;
580}
581
582/**
583 * scsi_target_reap - check to see if target is in use and destroy if not
584 * @starget: target to be checked
585 *
586 * This is used after removing a LUN or doing a last put of the target
587 * it checks atomically that nothing is using the target and removes
588 * it if so.
589 */
590void scsi_target_reap(struct scsi_target *starget)
591{
592	/*
593	 * serious problem if this triggers: STARGET_DEL is only set in the if
594	 * the reap_ref drops to zero, so we're trying to do another final put
595	 * on an already released kref
596	 */
597	BUG_ON(starget->state == STARGET_DEL);
598	scsi_target_reap_ref_put(starget);
599}
600
601/**
602 * scsi_sanitize_inquiry_string - remove non-graphical chars from an
603 *                                INQUIRY result string
604 * @s: INQUIRY result string to sanitize
605 * @len: length of the string
606 *
607 * Description:
608 *	The SCSI spec says that INQUIRY vendor, product, and revision
609 *	strings must consist entirely of graphic ASCII characters,
610 *	padded on the right with spaces.  Since not all devices obey
611 *	this rule, we will replace non-graphic or non-ASCII characters
612 *	with spaces.  Exception: a NUL character is interpreted as a
613 *	string terminator, so all the following characters are set to
614 *	spaces.
615 **/
616void scsi_sanitize_inquiry_string(unsigned char *s, int len)
617{
618	int terminated = 0;
619
620	for (; len > 0; (--len, ++s)) {
621		if (*s == 0)
622			terminated = 1;
623		if (terminated || *s < 0x20 || *s > 0x7e)
624			*s = ' ';
625	}
626}
627EXPORT_SYMBOL(scsi_sanitize_inquiry_string);
628
629
630/**
631 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
632 * @sdev:	scsi_device to probe
633 * @inq_result:	area to store the INQUIRY result
634 * @result_len: len of inq_result
635 * @bflags:	store any bflags found here
636 *
637 * Description:
638 *     Probe the lun associated with @req using a standard SCSI INQUIRY;
639 *
640 *     If the INQUIRY is successful, zero is returned and the
641 *     INQUIRY data is in @inq_result; the scsi_level and INQUIRY length
642 *     are copied to the scsi_device any flags value is stored in *@bflags.
643 **/
644static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
645			  int result_len, blist_flags_t *bflags)
646{
647	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
648	int first_inquiry_len, try_inquiry_len, next_inquiry_len;
649	int response_len = 0;
650	int pass, count, result, resid;
651	struct scsi_failure failure_defs[] = {
652		/*
653		 * not-ready to ready transition [asc/ascq=0x28/0x0] or
654		 * power-on, reset [asc/ascq=0x29/0x0], continue. INQUIRY
655		 * should not yield UNIT_ATTENTION but many buggy devices do
656		 * so anyway.
657		 */
658		{
659			.sense = UNIT_ATTENTION,
660			.asc = 0x28,
661			.result = SAM_STAT_CHECK_CONDITION,
662		},
663		{
664			.sense = UNIT_ATTENTION,
665			.asc = 0x29,
666			.result = SAM_STAT_CHECK_CONDITION,
667		},
668		{
669			.allowed = 1,
670			.result = DID_TIME_OUT << 16,
671		},
672		{}
673	};
674	struct scsi_failures failures = {
675		.total_allowed = 3,
676		.failure_definitions = failure_defs,
677	};
678	const struct scsi_exec_args exec_args = {
679		.resid = &resid,
680		.failures = &failures,
681	};
682
683	*bflags = 0;
684
685	/* Perform up to 3 passes.  The first pass uses a conservative
686	 * transfer length of 36 unless sdev->inquiry_len specifies a
687	 * different value. */
688	first_inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
689	try_inquiry_len = first_inquiry_len;
690	pass = 1;
691
692 next_pass:
693	SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
694				"scsi scan: INQUIRY pass %d length %d\n",
695				pass, try_inquiry_len));
696
697	/* Each pass gets up to three chances to ignore Unit Attention */
698	scsi_failures_reset_retries(&failures);
699
700	for (count = 0; count < 3; ++count) {
701		memset(scsi_cmd, 0, 6);
702		scsi_cmd[0] = INQUIRY;
703		scsi_cmd[4] = (unsigned char) try_inquiry_len;
704
705		memset(inq_result, 0, try_inquiry_len);
706
707		result = scsi_execute_cmd(sdev,  scsi_cmd, REQ_OP_DRV_IN,
708					  inq_result, try_inquiry_len,
709					  HZ / 2 + HZ * scsi_inq_timeout, 3,
710					  &exec_args);
711
712		SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
713				"scsi scan: INQUIRY %s with code 0x%x\n",
714				result ? "failed" : "successful", result));
715
716		if (result == 0) {
717			/*
718			 * if nothing was transferred, we try
719			 * again. It's a workaround for some USB
720			 * devices.
721			 */
722			if (resid == try_inquiry_len)
723				continue;
724		}
725		break;
726	}
727
728	if (result == 0) {
729		scsi_sanitize_inquiry_string(&inq_result[8], 8);
730		scsi_sanitize_inquiry_string(&inq_result[16], 16);
731		scsi_sanitize_inquiry_string(&inq_result[32], 4);
732
733		response_len = inq_result[4] + 5;
734		if (response_len > 255)
735			response_len = first_inquiry_len;	/* sanity */
736
737		/*
738		 * Get any flags for this device.
739		 *
740		 * XXX add a bflags to scsi_device, and replace the
741		 * corresponding bit fields in scsi_device, so bflags
742		 * need not be passed as an argument.
743		 */
744		*bflags = scsi_get_device_flags(sdev, &inq_result[8],
745				&inq_result[16]);
746
747		/* When the first pass succeeds we gain information about
748		 * what larger transfer lengths might work. */
749		if (pass == 1) {
750			if (BLIST_INQUIRY_36 & *bflags)
751				next_inquiry_len = 36;
752			/*
753			 * LLD specified a maximum sdev->inquiry_len
754			 * but device claims it has more data. Capping
755			 * the length only makes sense for legacy
756			 * devices. If a device supports SPC-4 (2014)
757			 * or newer, assume that it is safe to ask for
758			 * as much as the device says it supports.
759			 */
760			else if (sdev->inquiry_len &&
761				 response_len > sdev->inquiry_len &&
762				 (inq_result[2] & 0x7) < 6) /* SPC-4 */
763				next_inquiry_len = sdev->inquiry_len;
764			else
765				next_inquiry_len = response_len;
766
767			/* If more data is available perform the second pass */
768			if (next_inquiry_len > try_inquiry_len) {
769				try_inquiry_len = next_inquiry_len;
770				pass = 2;
771				goto next_pass;
772			}
773		}
774
775	} else if (pass == 2) {
776		sdev_printk(KERN_INFO, sdev,
777			    "scsi scan: %d byte inquiry failed.  "
778			    "Consider BLIST_INQUIRY_36 for this device\n",
779			    try_inquiry_len);
780
781		/* If this pass failed, the third pass goes back and transfers
782		 * the same amount as we successfully got in the first pass. */
783		try_inquiry_len = first_inquiry_len;
784		pass = 3;
785		goto next_pass;
786	}
787
788	/* If the last transfer attempt got an error, assume the
789	 * peripheral doesn't exist or is dead. */
790	if (result)
791		return -EIO;
792
793	/* Don't report any more data than the device says is valid */
794	sdev->inquiry_len = min(try_inquiry_len, response_len);
795
796	/*
797	 * XXX Abort if the response length is less than 36? If less than
798	 * 32, the lookup of the device flags (above) could be invalid,
799	 * and it would be possible to take an incorrect action - we do
800	 * not want to hang because of a short INQUIRY. On the flip side,
801	 * if the device is spun down or becoming ready (and so it gives a
802	 * short INQUIRY), an abort here prevents any further use of the
803	 * device, including spin up.
804	 *
805	 * On the whole, the best approach seems to be to assume the first
806	 * 36 bytes are valid no matter what the device says.  That's
807	 * better than copying < 36 bytes to the inquiry-result buffer
808	 * and displaying garbage for the Vendor, Product, or Revision
809	 * strings.
810	 */
811	if (sdev->inquiry_len < 36) {
812		if (!sdev->host->short_inquiry) {
813			shost_printk(KERN_INFO, sdev->host,
814				    "scsi scan: INQUIRY result too short (%d),"
815				    " using 36\n", sdev->inquiry_len);
816			sdev->host->short_inquiry = 1;
817		}
818		sdev->inquiry_len = 36;
819	}
820
821	/*
822	 * Related to the above issue:
823	 *
824	 * XXX Devices (disk or all?) should be sent a TEST UNIT READY,
825	 * and if not ready, sent a START_STOP to start (maybe spin up) and
826	 * then send the INQUIRY again, since the INQUIRY can change after
827	 * a device is initialized.
828	 *
829	 * Ideally, start a device if explicitly asked to do so.  This
830	 * assumes that a device is spun up on power on, spun down on
831	 * request, and then spun up on request.
832	 */
833
834	/*
835	 * The scanning code needs to know the scsi_level, even if no
836	 * device is attached at LUN 0 (SCSI_SCAN_TARGET_PRESENT) so
837	 * non-zero LUNs can be scanned.
838	 */
839	sdev->scsi_level = inq_result[2] & 0x0f;
840	if (sdev->scsi_level >= 2 ||
841	    (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
842		sdev->scsi_level++;
843	sdev->sdev_target->scsi_level = sdev->scsi_level;
844
845	/*
846	 * If SCSI-2 or lower, and if the transport requires it,
847	 * store the LUN value in CDB[1].
848	 */
849	sdev->lun_in_cdb = 0;
850	if (sdev->scsi_level <= SCSI_2 &&
851	    sdev->scsi_level != SCSI_UNKNOWN &&
852	    !sdev->host->no_scsi2_lun_in_cdb)
853		sdev->lun_in_cdb = 1;
854
855	return 0;
856}
857
858/**
859 * scsi_add_lun - allocate and fully initialze a scsi_device
860 * @sdev:	holds information to be stored in the new scsi_device
861 * @inq_result:	holds the result of a previous INQUIRY to the LUN
862 * @bflags:	black/white list flag
863 * @async:	1 if this device is being scanned asynchronously
864 *
865 * Description:
866 *     Initialize the scsi_device @sdev.  Optionally set fields based
867 *     on values in *@bflags.
868 *
869 * Return:
870 *     SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
871 *     SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
872 **/
873static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
874		blist_flags_t *bflags, int async)
875{
876	int ret;
877
878	/*
879	 * XXX do not save the inquiry, since it can change underneath us,
880	 * save just vendor/model/rev.
881	 *
882	 * Rather than save it and have an ioctl that retrieves the saved
883	 * value, have an ioctl that executes the same INQUIRY code used
884	 * in scsi_probe_lun, let user level programs doing INQUIRY
885	 * scanning run at their own risk, or supply a user level program
886	 * that can correctly scan.
887	 */
888
889	/*
890	 * Copy at least 36 bytes of INQUIRY data, so that we don't
891	 * dereference unallocated memory when accessing the Vendor,
892	 * Product, and Revision strings.  Badly behaved devices may set
893	 * the INQUIRY Additional Length byte to a small value, indicating
894	 * these strings are invalid, but often they contain plausible data
895	 * nonetheless.  It doesn't matter if the device sent < 36 bytes
896	 * total, since scsi_probe_lun() initializes inq_result with 0s.
897	 */
898	sdev->inquiry = kmemdup(inq_result,
899				max_t(size_t, sdev->inquiry_len, 36),
900				GFP_KERNEL);
901	if (sdev->inquiry == NULL)
902		return SCSI_SCAN_NO_RESPONSE;
903
904	sdev->vendor = (char *) (sdev->inquiry + 8);
905	sdev->model = (char *) (sdev->inquiry + 16);
906	sdev->rev = (char *) (sdev->inquiry + 32);
907
908	if (strncmp(sdev->vendor, "ATA     ", 8) == 0) {
909		/*
910		 * sata emulation layer device.  This is a hack to work around
911		 * the SATL power management specifications which state that
912		 * when the SATL detects the device has gone into standby
913		 * mode, it shall respond with NOT READY.
914		 */
915		sdev->allow_restart = 1;
916	}
917
918	if (*bflags & BLIST_ISROM) {
919		sdev->type = TYPE_ROM;
920		sdev->removable = 1;
921	} else {
922		sdev->type = (inq_result[0] & 0x1f);
923		sdev->removable = (inq_result[1] & 0x80) >> 7;
924
925		/*
926		 * some devices may respond with wrong type for
927		 * well-known logical units. Force well-known type
928		 * to enumerate them correctly.
929		 */
930		if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) {
931			sdev_printk(KERN_WARNING, sdev,
932				"%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n",
933				__func__, sdev->type, (unsigned int)sdev->lun);
934			sdev->type = TYPE_WLUN;
935		}
936
937	}
938
939	if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
940		/* RBC and MMC devices can return SCSI-3 compliance and yet
941		 * still not support REPORT LUNS, so make them act as
942		 * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is
943		 * specifically set */
944		if ((*bflags & BLIST_REPORTLUN2) == 0)
945			*bflags |= BLIST_NOREPORTLUN;
946	}
947
948	/*
949	 * For a peripheral qualifier (PQ) value of 1 (001b), the SCSI
950	 * spec says: The device server is capable of supporting the
951	 * specified peripheral device type on this logical unit. However,
952	 * the physical device is not currently connected to this logical
953	 * unit.
954	 *
955	 * The above is vague, as it implies that we could treat 001 and
956	 * 011 the same. Stay compatible with previous code, and create a
957	 * scsi_device for a PQ of 1
958	 *
959	 * Don't set the device offline here; rather let the upper
960	 * level drivers eval the PQ to decide whether they should
961	 * attach. So remove ((inq_result[0] >> 5) & 7) == 1 check.
962	 */
963
964	sdev->inq_periph_qual = (inq_result[0] >> 5) & 7;
965	sdev->lockable = sdev->removable;
966	sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2);
967
968	if (sdev->scsi_level >= SCSI_3 ||
969			(sdev->inquiry_len > 56 && inq_result[56] & 0x04))
970		sdev->ppr = 1;
971	if (inq_result[7] & 0x60)
972		sdev->wdtr = 1;
973	if (inq_result[7] & 0x10)
974		sdev->sdtr = 1;
975
976	sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d "
977			"ANSI: %d%s\n", scsi_device_type(sdev->type),
978			sdev->vendor, sdev->model, sdev->rev,
979			sdev->inq_periph_qual, inq_result[2] & 0x07,
980			(inq_result[3] & 0x0f) == 1 ? " CCS" : "");
981
982	if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) &&
983	    !(*bflags & BLIST_NOTQ)) {
984		sdev->tagged_supported = 1;
985		sdev->simple_tags = 1;
986	}
987
988	/*
989	 * Some devices (Texel CD ROM drives) have handshaking problems
990	 * when used with the Seagate controllers. borken is initialized
991	 * to 1, and then set it to 0 here.
992	 */
993	if ((*bflags & BLIST_BORKEN) == 0)
994		sdev->borken = 0;
995
996	if (*bflags & BLIST_NO_ULD_ATTACH)
997		sdev->no_uld_attach = 1;
998
999	/*
1000	 * Apparently some really broken devices (contrary to the SCSI
1001	 * standards) need to be selected without asserting ATN
1002	 */
1003	if (*bflags & BLIST_SELECT_NO_ATN)
1004		sdev->select_no_atn = 1;
1005
1006	/*
1007	 * Maximum 512 sector transfer length
1008	 * broken RA4x00 Compaq Disk Array
1009	 */
1010	if (*bflags & BLIST_MAX_512)
1011		blk_queue_max_hw_sectors(sdev->request_queue, 512);
1012	/*
1013	 * Max 1024 sector transfer length for targets that report incorrect
1014	 * max/optimal lengths and relied on the old block layer safe default
1015	 */
1016	else if (*bflags & BLIST_MAX_1024)
1017		blk_queue_max_hw_sectors(sdev->request_queue, 1024);
1018
1019	/*
1020	 * Some devices may not want to have a start command automatically
1021	 * issued when a device is added.
1022	 */
1023	if (*bflags & BLIST_NOSTARTONADD)
1024		sdev->no_start_on_add = 1;
1025
1026	if (*bflags & BLIST_SINGLELUN)
1027		scsi_target(sdev)->single_lun = 1;
1028
1029	sdev->use_10_for_rw = 1;
1030
1031	/* some devices don't like REPORT SUPPORTED OPERATION CODES
1032	 * and will simply timeout causing sd_mod init to take a very
1033	 * very long time */
1034	if (*bflags & BLIST_NO_RSOC)
1035		sdev->no_report_opcodes = 1;
1036
1037	/* set the device running here so that slave configure
1038	 * may do I/O */
1039	mutex_lock(&sdev->state_mutex);
1040	ret = scsi_device_set_state(sdev, SDEV_RUNNING);
1041	if (ret)
1042		ret = scsi_device_set_state(sdev, SDEV_BLOCK);
1043	mutex_unlock(&sdev->state_mutex);
1044
1045	if (ret) {
1046		sdev_printk(KERN_ERR, sdev,
1047			    "in wrong state %s to complete scan\n",
1048			    scsi_device_state_name(sdev->sdev_state));
1049		return SCSI_SCAN_NO_RESPONSE;
1050	}
1051
1052	if (*bflags & BLIST_NOT_LOCKABLE)
1053		sdev->lockable = 0;
1054
1055	if (*bflags & BLIST_RETRY_HWERROR)
1056		sdev->retry_hwerror = 1;
1057
1058	if (*bflags & BLIST_NO_DIF)
1059		sdev->no_dif = 1;
1060
1061	if (*bflags & BLIST_UNMAP_LIMIT_WS)
1062		sdev->unmap_limit_for_ws = 1;
1063
1064	if (*bflags & BLIST_IGN_MEDIA_CHANGE)
1065		sdev->ignore_media_change = 1;
1066
1067	sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
1068
1069	if (*bflags & BLIST_TRY_VPD_PAGES)
1070		sdev->try_vpd_pages = 1;
1071	else if (*bflags & BLIST_SKIP_VPD_PAGES)
1072		sdev->skip_vpd_pages = 1;
1073
1074	if (*bflags & BLIST_NO_VPD_SIZE)
1075		sdev->no_vpd_size = 1;
1076
1077	transport_configure_device(&sdev->sdev_gendev);
1078
1079	if (sdev->host->hostt->slave_configure) {
1080		ret = sdev->host->hostt->slave_configure(sdev);
1081		if (ret) {
1082			/*
1083			 * if LLDD reports slave not present, don't clutter
1084			 * console with alloc failure messages
1085			 */
1086			if (ret != -ENXIO) {
1087				sdev_printk(KERN_ERR, sdev,
1088					"failed to configure device\n");
1089			}
1090			return SCSI_SCAN_NO_RESPONSE;
1091		}
1092
1093		/*
1094		 * The queue_depth is often changed in ->slave_configure.
1095		 * Set up budget map again since memory consumption of
1096		 * the map depends on actual queue depth.
1097		 */
1098		scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);
1099	}
1100
1101	if (sdev->scsi_level >= SCSI_3)
1102		scsi_attach_vpd(sdev);
1103
1104	scsi_cdl_check(sdev);
1105
1106	sdev->max_queue_depth = sdev->queue_depth;
1107	WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth);
1108	sdev->sdev_bflags = *bflags;
1109
1110	/*
1111	 * Ok, the device is now all set up, we can
1112	 * register it and tell the rest of the kernel
1113	 * about it.
1114	 */
1115	if (!async && scsi_sysfs_add_sdev(sdev) != 0)
1116		return SCSI_SCAN_NO_RESPONSE;
1117
1118	return SCSI_SCAN_LUN_PRESENT;
1119}
1120
1121#ifdef CONFIG_SCSI_LOGGING
1122/**
1123 * scsi_inq_str - print INQUIRY data from min to max index, strip trailing whitespace
1124 * @buf:   Output buffer with at least end-first+1 bytes of space
1125 * @inq:   Inquiry buffer (input)
1126 * @first: Offset of string into inq
1127 * @end:   Index after last character in inq
1128 */
1129static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
1130				   unsigned first, unsigned end)
1131{
1132	unsigned term = 0, idx;
1133
1134	for (idx = 0; idx + first < end && idx + first < inq[4] + 5; idx++) {
1135		if (inq[idx+first] > ' ') {
1136			buf[idx] = inq[idx+first];
1137			term = idx+1;
1138		} else {
1139			buf[idx] = ' ';
1140		}
1141	}
1142	buf[term] = 0;
1143	return buf;
1144}
1145#endif
1146
1147/**
1148 * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
1149 * @starget:	pointer to target device structure
1150 * @lun:	LUN of target device
1151 * @bflagsp:	store bflags here if not NULL
1152 * @sdevp:	probe the LUN corresponding to this scsi_device
1153 * @rescan:     if not equal to SCSI_SCAN_INITIAL skip some code only
1154 *              needed on first scan
1155 * @hostdata:	passed to scsi_alloc_sdev()
1156 *
1157 * Description:
1158 *     Call scsi_probe_lun, if a LUN with an attached device is found,
1159 *     allocate and set it up by calling scsi_add_lun.
1160 *
1161 * Return:
1162 *
1163 *   - SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
1164 *   - SCSI_SCAN_TARGET_PRESENT: target responded, but no device is
1165 *         attached at the LUN
1166 *   - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
1167 **/
1168static int scsi_probe_and_add_lun(struct scsi_target *starget,
1169				  u64 lun, blist_flags_t *bflagsp,
1170				  struct scsi_device **sdevp,
1171				  enum scsi_scan_mode rescan,
1172				  void *hostdata)
1173{
1174	struct scsi_device *sdev;
1175	unsigned char *result;
1176	blist_flags_t bflags;
1177	int res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
1178	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1179
1180	/*
1181	 * The rescan flag is used as an optimization, the first scan of a
1182	 * host adapter calls into here with rescan == 0.
1183	 */
1184	sdev = scsi_device_lookup_by_target(starget, lun);
1185	if (sdev) {
1186		if (rescan != SCSI_SCAN_INITIAL || !scsi_device_created(sdev)) {
1187			SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
1188				"scsi scan: device exists on %s\n",
1189				dev_name(&sdev->sdev_gendev)));
1190			if (sdevp)
1191				*sdevp = sdev;
1192			else
1193				scsi_device_put(sdev);
1194
1195			if (bflagsp)
1196				*bflagsp = scsi_get_device_flags(sdev,
1197								 sdev->vendor,
1198								 sdev->model);
1199			return SCSI_SCAN_LUN_PRESENT;
1200		}
1201		scsi_device_put(sdev);
1202	} else
1203		sdev = scsi_alloc_sdev(starget, lun, hostdata);
1204	if (!sdev)
1205		goto out;
1206
1207	result = kmalloc(result_len, GFP_KERNEL);
1208	if (!result)
1209		goto out_free_sdev;
1210
1211	if (scsi_probe_lun(sdev, result, result_len, &bflags))
1212		goto out_free_result;
1213
1214	if (bflagsp)
1215		*bflagsp = bflags;
1216	/*
1217	 * result contains valid SCSI INQUIRY data.
1218	 */
1219	if ((result[0] >> 5) == 3) {
1220		/*
1221		 * For a Peripheral qualifier 3 (011b), the SCSI
1222		 * spec says: The device server is not capable of
1223		 * supporting a physical device on this logical
1224		 * unit.
1225		 *
1226		 * For disks, this implies that there is no
1227		 * logical disk configured at sdev->lun, but there
1228		 * is a target id responding.
1229		 */
1230		SCSI_LOG_SCAN_BUS(2, sdev_printk(KERN_INFO, sdev, "scsi scan:"
1231				   " peripheral qualifier of 3, device not"
1232				   " added\n"))
1233		if (lun == 0) {
1234			SCSI_LOG_SCAN_BUS(1, {
1235				unsigned char vend[9];
1236				unsigned char mod[17];
1237
1238				sdev_printk(KERN_INFO, sdev,
1239					"scsi scan: consider passing scsi_mod."
1240					"dev_flags=%s:%s:0x240 or 0x1000240\n",
1241					scsi_inq_str(vend, result, 8, 16),
1242					scsi_inq_str(mod, result, 16, 32));
1243			});
1244
1245		}
1246
1247		res = SCSI_SCAN_TARGET_PRESENT;
1248		goto out_free_result;
1249	}
1250
1251	/*
1252	 * Some targets may set slight variations of PQ and PDT to signal
1253	 * that no LUN is present, so don't add sdev in these cases.
1254	 * Two specific examples are:
1255	 * 1) NetApp targets: return PQ=1, PDT=0x1f
1256	 * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
1257	 *    in the UFI 1.0 spec (we cannot rely on reserved bits).
1258	 *
1259	 * References:
1260	 * 1) SCSI SPC-3, pp. 145-146
1261	 * PQ=1: "A peripheral device having the specified peripheral
1262	 * device type is not connected to this logical unit. However, the
1263	 * device server is capable of supporting the specified peripheral
1264	 * device type on this logical unit."
1265	 * PDT=0x1f: "Unknown or no device type"
1266	 * 2) USB UFI 1.0, p. 20
1267	 * PDT=00h Direct-access device (floppy)
1268	 * PDT=1Fh none (no FDD connected to the requested logical unit)
1269	 */
1270	if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
1271	    (result[0] & 0x1f) == 0x1f &&
1272	    !scsi_is_wlun(lun)) {
1273		SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
1274					"scsi scan: peripheral device type"
1275					" of 31, no device added\n"));
1276		res = SCSI_SCAN_TARGET_PRESENT;
1277		goto out_free_result;
1278	}
1279
1280	res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
1281	if (res == SCSI_SCAN_LUN_PRESENT) {
1282		if (bflags & BLIST_KEY) {
1283			sdev->lockable = 0;
1284			scsi_unlock_floptical(sdev, result);
1285		}
1286	}
1287
1288 out_free_result:
1289	kfree(result);
1290 out_free_sdev:
1291	if (res == SCSI_SCAN_LUN_PRESENT) {
1292		if (sdevp) {
1293			if (scsi_device_get(sdev) == 0) {
1294				*sdevp = sdev;
1295			} else {
1296				__scsi_remove_device(sdev);
1297				res = SCSI_SCAN_NO_RESPONSE;
1298			}
1299		}
1300	} else
1301		__scsi_remove_device(sdev);
1302 out:
1303	return res;
1304}
1305
1306/**
1307 * scsi_sequential_lun_scan - sequentially scan a SCSI target
1308 * @starget:	pointer to target structure to scan
1309 * @bflags:	black/white list flag for LUN 0
1310 * @scsi_level: Which version of the standard does this device adhere to
1311 * @rescan:     passed to scsi_probe_add_lun()
1312 *
1313 * Description:
1314 *     Generally, scan from LUN 1 (LUN 0 is assumed to already have been
1315 *     scanned) to some maximum lun until a LUN is found with no device
1316 *     attached. Use the bflags to figure out any oddities.
1317 *
1318 *     Modifies sdevscan->lun.
1319 **/
1320static void scsi_sequential_lun_scan(struct scsi_target *starget,
1321				     blist_flags_t bflags, int scsi_level,
1322				     enum scsi_scan_mode rescan)
1323{
1324	uint max_dev_lun;
1325	u64 sparse_lun, lun;
1326	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1327
1328	SCSI_LOG_SCAN_BUS(3, starget_printk(KERN_INFO, starget,
1329		"scsi scan: Sequential scan\n"));
1330
1331	max_dev_lun = min(max_scsi_luns, shost->max_lun);
1332	/*
1333	 * If this device is known to support sparse multiple units,
1334	 * override the other settings, and scan all of them. Normally,
1335	 * SCSI-3 devices should be scanned via the REPORT LUNS.
1336	 */
1337	if (bflags & BLIST_SPARSELUN) {
1338		max_dev_lun = shost->max_lun;
1339		sparse_lun = 1;
1340	} else
1341		sparse_lun = 0;
1342
1343	/*
1344	 * If less than SCSI_1_CCS, and no special lun scanning, stop
1345	 * scanning; this matches 2.4 behaviour, but could just be a bug
1346	 * (to continue scanning a SCSI_1_CCS device).
1347	 *
1348	 * This test is broken.  We might not have any device on lun0 for
1349	 * a sparselun device, and if that's the case then how would we
1350	 * know the real scsi_level, eh?  It might make sense to just not
1351	 * scan any SCSI_1 device for non-0 luns, but that check would best
1352	 * go into scsi_alloc_sdev() and just have it return null when asked
1353	 * to alloc an sdev for lun > 0 on an already found SCSI_1 device.
1354	 *
1355	if ((sdevscan->scsi_level < SCSI_1_CCS) &&
1356	    ((bflags & (BLIST_FORCELUN | BLIST_SPARSELUN | BLIST_MAX5LUN))
1357	     == 0))
1358		return;
1359	 */
1360	/*
1361	 * If this device is known to support multiple units, override
1362	 * the other settings, and scan all of them.
1363	 */
1364	if (bflags & BLIST_FORCELUN)
1365		max_dev_lun = shost->max_lun;
1366	/*
1367	 * REGAL CDC-4X: avoid hang after LUN 4
1368	 */
1369	if (bflags & BLIST_MAX5LUN)
1370		max_dev_lun = min(5U, max_dev_lun);
1371	/*
1372	 * Do not scan SCSI-2 or lower device past LUN 7, unless
1373	 * BLIST_LARGELUN.
1374	 */
1375	if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN))
1376		max_dev_lun = min(8U, max_dev_lun);
1377	else
1378		max_dev_lun = min(256U, max_dev_lun);
1379
1380	/*
1381	 * We have already scanned LUN 0, so start at LUN 1. Keep scanning
1382	 * until we reach the max, or no LUN is found and we are not
1383	 * sparse_lun.
1384	 */
1385	for (lun = 1; lun < max_dev_lun; ++lun)
1386		if ((scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan,
1387					    NULL) != SCSI_SCAN_LUN_PRESENT) &&
1388		    !sparse_lun)
1389			return;
1390}
1391
1392/**
1393 * scsi_report_lun_scan - Scan using SCSI REPORT LUN results
1394 * @starget: which target
1395 * @bflags: Zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or BLIST_NOREPORTLUN
1396 * @rescan: nonzero if we can skip code only needed on first scan
1397 *
1398 * Description:
1399 *   Fast scanning for modern (SCSI-3) devices by sending a REPORT LUN command.
1400 *   Scan the resulting list of LUNs by calling scsi_probe_and_add_lun.
1401 *
1402 *   If BLINK_REPORTLUN2 is set, scan a target that supports more than 8
1403 *   LUNs even if it's older than SCSI-3.
1404 *   If BLIST_NOREPORTLUN is set, return 1 always.
1405 *   If BLIST_NOLUN is set, return 0 always.
1406 *   If starget->no_report_luns is set, return 1 always.
1407 *
1408 * Return:
1409 *     0: scan completed (or no memory, so further scanning is futile)
1410 *     1: could not scan with REPORT LUN
1411 **/
1412static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflags,
1413				enum scsi_scan_mode rescan)
1414{
1415	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
1416	unsigned int length;
1417	u64 lun;
1418	unsigned int num_luns;
1419	int result;
1420	struct scsi_lun *lunp, *lun_data;
1421	struct scsi_device *sdev;
1422	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1423	struct scsi_failure failure_defs[] = {
1424		{
1425			.sense = UNIT_ATTENTION,
1426			.asc = SCMD_FAILURE_ASC_ANY,
1427			.ascq = SCMD_FAILURE_ASCQ_ANY,
1428			.result = SAM_STAT_CHECK_CONDITION,
1429		},
1430		/* Fail all CCs except the UA above */
1431		{
1432			.sense = SCMD_FAILURE_SENSE_ANY,
1433			.result = SAM_STAT_CHECK_CONDITION,
1434		},
1435		/* Retry any other errors not listed above */
1436		{
1437			.result = SCMD_FAILURE_RESULT_ANY,
1438		},
1439		{}
1440	};
1441	struct scsi_failures failures = {
1442		.total_allowed = 3,
1443		.failure_definitions = failure_defs,
1444	};
1445	const struct scsi_exec_args exec_args = {
1446		.failures = &failures,
1447	};
1448	int ret = 0;
1449
1450	/*
1451	 * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set.
1452	 * Also allow SCSI-2 if BLIST_REPORTLUN2 is set and host adapter does
1453	 * support more than 8 LUNs.
1454	 * Don't attempt if the target doesn't support REPORT LUNS.
1455	 */
1456	if (bflags & BLIST_NOREPORTLUN)
1457		return 1;
1458	if (starget->scsi_level < SCSI_2 &&
1459	    starget->scsi_level != SCSI_UNKNOWN)
1460		return 1;
1461	if (starget->scsi_level < SCSI_3 &&
1462	    (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8))
1463		return 1;
1464	if (bflags & BLIST_NOLUN)
1465		return 0;
1466	if (starget->no_report_luns)
1467		return 1;
1468
1469	if (!(sdev = scsi_device_lookup_by_target(starget, 0))) {
1470		sdev = scsi_alloc_sdev(starget, 0, NULL);
1471		if (!sdev)
1472			return 0;
1473		if (scsi_device_get(sdev)) {
1474			__scsi_remove_device(sdev);
1475			return 0;
1476		}
1477	}
1478
1479	/*
1480	 * Allocate enough to hold the header (the same size as one scsi_lun)
1481	 * plus the number of luns we are requesting.  511 was the default
1482	 * value of the now removed max_report_luns parameter.
1483	 */
1484	length = (511 + 1) * sizeof(struct scsi_lun);
1485retry:
1486	lun_data = kmalloc(length, GFP_KERNEL);
1487	if (!lun_data) {
1488		printk(ALLOC_FAILURE_MSG, __func__);
1489		goto out;
1490	}
1491
1492	scsi_cmd[0] = REPORT_LUNS;
1493
1494	/*
1495	 * bytes 1 - 5: reserved, set to zero.
1496	 */
1497	memset(&scsi_cmd[1], 0, 5);
1498
1499	/*
1500	 * bytes 6 - 9: length of the command.
1501	 */
1502	put_unaligned_be32(length, &scsi_cmd[6]);
1503
1504	scsi_cmd[10] = 0;	/* reserved */
1505	scsi_cmd[11] = 0;	/* control */
1506
1507	/*
1508	 * We can get a UNIT ATTENTION, for example a power on/reset, so
1509	 * retry a few times (like sd.c does for TEST UNIT READY).
1510	 * Experience shows some combinations of adapter/devices get at
1511	 * least two power on/resets.
1512	 *
1513	 * Illegal requests (for devices that do not support REPORT LUNS)
1514	 * should come through as a check condition, and will not generate
1515	 * a retry.
1516	 */
1517	scsi_failures_reset_retries(&failures);
1518
1519	SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
1520			  "scsi scan: Sending REPORT LUNS\n"));
1521
1522	result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, lun_data,
1523				  length, SCSI_REPORT_LUNS_TIMEOUT, 3,
1524				  &exec_args);
1525
1526	SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
1527			  "scsi scan: REPORT LUNS  %s result 0x%x\n",
1528			  result ?  "failed" : "successful", result));
1529	if (result) {
1530		/*
1531		 * The device probably does not support a REPORT LUN command
1532		 */
1533		ret = 1;
1534		goto out_err;
1535	}
1536
1537	/*
1538	 * Get the length from the first four bytes of lun_data.
1539	 */
1540	if (get_unaligned_be32(lun_data->scsi_lun) +
1541	    sizeof(struct scsi_lun) > length) {
1542		length = get_unaligned_be32(lun_data->scsi_lun) +
1543			 sizeof(struct scsi_lun);
1544		kfree(lun_data);
1545		goto retry;
1546	}
1547	length = get_unaligned_be32(lun_data->scsi_lun);
1548
1549	num_luns = (length / sizeof(struct scsi_lun));
1550
1551	SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
1552		"scsi scan: REPORT LUN scan\n"));
1553
1554	/*
1555	 * Scan the luns in lun_data. The entry at offset 0 is really
1556	 * the header, so start at 1 and go up to and including num_luns.
1557	 */
1558	for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) {
1559		lun = scsilun_to_int(lunp);
1560
1561		if (lun > sdev->host->max_lun) {
1562			sdev_printk(KERN_WARNING, sdev,
1563				    "lun%llu has a LUN larger than"
1564				    " allowed by the host adapter\n", lun);
1565		} else {
1566			int res;
1567
1568			res = scsi_probe_and_add_lun(starget,
1569				lun, NULL, NULL, rescan, NULL);
1570			if (res == SCSI_SCAN_NO_RESPONSE) {
1571				/*
1572				 * Got some results, but now none, abort.
1573				 */
1574				sdev_printk(KERN_ERR, sdev,
1575					"Unexpected response"
1576					" from lun %llu while scanning, scan"
1577					" aborted\n", (unsigned long long)lun);
1578				break;
1579			}
1580		}
1581	}
1582
1583 out_err:
1584	kfree(lun_data);
1585 out:
1586	if (scsi_device_created(sdev))
1587		/*
1588		 * the sdev we used didn't appear in the report luns scan
1589		 */
1590		__scsi_remove_device(sdev);
1591	scsi_device_put(sdev);
1592	return ret;
1593}
1594
1595struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
1596				      uint id, u64 lun, void *hostdata)
1597{
1598	struct scsi_device *sdev = ERR_PTR(-ENODEV);
1599	struct device *parent = &shost->shost_gendev;
1600	struct scsi_target *starget;
1601
1602	if (strncmp(scsi_scan_type, "none", 4) == 0)
1603		return ERR_PTR(-ENODEV);
1604
1605	starget = scsi_alloc_target(parent, channel, id);
1606	if (!starget)
1607		return ERR_PTR(-ENOMEM);
1608	scsi_autopm_get_target(starget);
1609
1610	mutex_lock(&shost->scan_mutex);
1611	if (!shost->async_scan)
1612		scsi_complete_async_scans();
1613
1614	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1615		scsi_probe_and_add_lun(starget, lun, NULL, &sdev,
1616				       SCSI_SCAN_RESCAN, hostdata);
1617		scsi_autopm_put_host(shost);
1618	}
1619	mutex_unlock(&shost->scan_mutex);
1620	scsi_autopm_put_target(starget);
1621	/*
1622	 * paired with scsi_alloc_target().  Target will be destroyed unless
1623	 * scsi_probe_and_add_lun made an underlying device visible
1624	 */
1625	scsi_target_reap(starget);
1626	put_device(&starget->dev);
1627
1628	return sdev;
1629}
1630EXPORT_SYMBOL(__scsi_add_device);
1631
1632int scsi_add_device(struct Scsi_Host *host, uint channel,
1633		    uint target, u64 lun)
1634{
1635	struct scsi_device *sdev =
1636		__scsi_add_device(host, channel, target, lun, NULL);
1637	if (IS_ERR(sdev))
1638		return PTR_ERR(sdev);
1639
1640	scsi_device_put(sdev);
1641	return 0;
1642}
1643EXPORT_SYMBOL(scsi_add_device);
1644
1645int scsi_resume_device(struct scsi_device *sdev)
1646{
1647	struct device *dev = &sdev->sdev_gendev;
1648	int ret = 0;
1649
1650	device_lock(dev);
1651
1652	/*
1653	 * Bail out if the device or its queue are not running. Otherwise,
1654	 * the rescan may block waiting for commands to be executed, with us
1655	 * holding the device lock. This can result in a potential deadlock
1656	 * in the power management core code when system resume is on-going.
1657	 */
1658	if (sdev->sdev_state != SDEV_RUNNING ||
1659	    blk_queue_pm_only(sdev->request_queue)) {
1660		ret = -EWOULDBLOCK;
1661		goto unlock;
1662	}
1663
1664	if (dev->driver && try_module_get(dev->driver->owner)) {
1665		struct scsi_driver *drv = to_scsi_driver(dev->driver);
1666
1667		if (drv->resume)
1668			ret = drv->resume(dev);
1669		module_put(dev->driver->owner);
1670	}
1671
1672unlock:
1673	device_unlock(dev);
1674
1675	return ret;
1676}
1677EXPORT_SYMBOL(scsi_resume_device);
1678
1679int scsi_rescan_device(struct scsi_device *sdev)
1680{
1681	struct device *dev = &sdev->sdev_gendev;
1682	int ret = 0;
1683
1684	device_lock(dev);
1685
1686	/*
1687	 * Bail out if the device or its queue are not running. Otherwise,
1688	 * the rescan may block waiting for commands to be executed, with us
1689	 * holding the device lock. This can result in a potential deadlock
1690	 * in the power management core code when system resume is on-going.
1691	 */
1692	if (sdev->sdev_state != SDEV_RUNNING ||
1693	    blk_queue_pm_only(sdev->request_queue)) {
1694		ret = -EWOULDBLOCK;
1695		goto unlock;
1696	}
1697
1698	scsi_attach_vpd(sdev);
1699	scsi_cdl_check(sdev);
1700
1701	if (sdev->handler && sdev->handler->rescan)
1702		sdev->handler->rescan(sdev);
1703
1704	if (dev->driver && try_module_get(dev->driver->owner)) {
1705		struct scsi_driver *drv = to_scsi_driver(dev->driver);
1706
1707		if (drv->rescan)
1708			drv->rescan(dev);
1709		module_put(dev->driver->owner);
1710	}
1711
1712unlock:
1713	device_unlock(dev);
1714
1715	return ret;
1716}
1717EXPORT_SYMBOL(scsi_rescan_device);
1718
1719static void __scsi_scan_target(struct device *parent, unsigned int channel,
1720		unsigned int id, u64 lun, enum scsi_scan_mode rescan)
1721{
1722	struct Scsi_Host *shost = dev_to_shost(parent);
1723	blist_flags_t bflags = 0;
1724	int res;
1725	struct scsi_target *starget;
1726
1727	if (shost->this_id == id)
1728		/*
1729		 * Don't scan the host adapter
1730		 */
1731		return;
1732
1733	starget = scsi_alloc_target(parent, channel, id);
1734	if (!starget)
1735		return;
1736	scsi_autopm_get_target(starget);
1737
1738	if (lun != SCAN_WILD_CARD) {
1739		/*
1740		 * Scan for a specific host/chan/id/lun.
1741		 */
1742		scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, NULL);
1743		goto out_reap;
1744	}
1745
1746	/*
1747	 * Scan LUN 0, if there is some response, scan further. Ideally, we
1748	 * would not configure LUN 0 until all LUNs are scanned.
1749	 */
1750	res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL);
1751	if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) {
1752		if (scsi_report_lun_scan(starget, bflags, rescan) != 0)
1753			/*
1754			 * The REPORT LUN did not scan the target,
1755			 * do a sequential scan.
1756			 */
1757			scsi_sequential_lun_scan(starget, bflags,
1758						 starget->scsi_level, rescan);
1759	}
1760
1761 out_reap:
1762	scsi_autopm_put_target(starget);
1763	/*
1764	 * paired with scsi_alloc_target(): determine if the target has
1765	 * any children at all and if not, nuke it
1766	 */
1767	scsi_target_reap(starget);
1768
1769	put_device(&starget->dev);
1770}
1771
1772/**
1773 * scsi_scan_target - scan a target id, possibly including all LUNs on the target.
1774 * @parent:	host to scan
1775 * @channel:	channel to scan
1776 * @id:		target id to scan
1777 * @lun:	Specific LUN to scan or SCAN_WILD_CARD
1778 * @rescan:	passed to LUN scanning routines; SCSI_SCAN_INITIAL for
1779 *              no rescan, SCSI_SCAN_RESCAN to rescan existing LUNs,
1780 *              and SCSI_SCAN_MANUAL to force scanning even if
1781 *              'scan=manual' is set.
1782 *
1783 * Description:
1784 *     Scan the target id on @parent, @channel, and @id. Scan at least LUN 0,
1785 *     and possibly all LUNs on the target id.
1786 *
1787 *     First try a REPORT LUN scan, if that does not scan the target, do a
1788 *     sequential scan of LUNs on the target id.
1789 **/
1790void scsi_scan_target(struct device *parent, unsigned int channel,
1791		      unsigned int id, u64 lun, enum scsi_scan_mode rescan)
1792{
1793	struct Scsi_Host *shost = dev_to_shost(parent);
1794
1795	if (strncmp(scsi_scan_type, "none", 4) == 0)
1796		return;
1797
1798	if (rescan != SCSI_SCAN_MANUAL &&
1799	    strncmp(scsi_scan_type, "manual", 6) == 0)
1800		return;
1801
1802	mutex_lock(&shost->scan_mutex);
1803	if (!shost->async_scan)
1804		scsi_complete_async_scans();
1805
1806	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1807		__scsi_scan_target(parent, channel, id, lun, rescan);
1808		scsi_autopm_put_host(shost);
1809	}
1810	mutex_unlock(&shost->scan_mutex);
1811}
1812EXPORT_SYMBOL(scsi_scan_target);
1813
1814static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel,
1815			      unsigned int id, u64 lun,
1816			      enum scsi_scan_mode rescan)
1817{
1818	uint order_id;
1819
1820	if (id == SCAN_WILD_CARD)
1821		for (id = 0; id < shost->max_id; ++id) {
1822			/*
1823			 * XXX adapter drivers when possible (FCP, iSCSI)
1824			 * could modify max_id to match the current max,
1825			 * not the absolute max.
1826			 *
1827			 * XXX add a shost id iterator, so for example,
1828			 * the FC ID can be the same as a target id
1829			 * without a huge overhead of sparse id's.
1830			 */
1831			if (shost->reverse_ordering)
1832				/*
1833				 * Scan from high to low id.
1834				 */
1835				order_id = shost->max_id - id - 1;
1836			else
1837				order_id = id;
1838			__scsi_scan_target(&shost->shost_gendev, channel,
1839					order_id, lun, rescan);
1840		}
1841	else
1842		__scsi_scan_target(&shost->shost_gendev, channel,
1843				id, lun, rescan);
1844}
1845
1846int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
1847			    unsigned int id, u64 lun,
1848			    enum scsi_scan_mode rescan)
1849{
1850	SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost,
1851		"%s: <%u:%u:%llu>\n",
1852		__func__, channel, id, lun));
1853
1854	if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
1855	    ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
1856	    ((lun != SCAN_WILD_CARD) && (lun >= shost->max_lun)))
1857		return -EINVAL;
1858
1859	mutex_lock(&shost->scan_mutex);
1860	if (!shost->async_scan)
1861		scsi_complete_async_scans();
1862
1863	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1864		if (channel == SCAN_WILD_CARD)
1865			for (channel = 0; channel <= shost->max_channel;
1866			     channel++)
1867				scsi_scan_channel(shost, channel, id, lun,
1868						  rescan);
1869		else
1870			scsi_scan_channel(shost, channel, id, lun, rescan);
1871		scsi_autopm_put_host(shost);
1872	}
1873	mutex_unlock(&shost->scan_mutex);
1874
1875	return 0;
1876}
1877
1878static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
1879{
1880	struct scsi_device *sdev;
1881	shost_for_each_device(sdev, shost) {
1882		/* target removed before the device could be added */
1883		if (sdev->sdev_state == SDEV_DEL)
1884			continue;
1885		/* If device is already visible, skip adding it to sysfs */
1886		if (sdev->is_visible)
1887			continue;
1888		if (!scsi_host_scan_allowed(shost) ||
1889		    scsi_sysfs_add_sdev(sdev) != 0)
1890			__scsi_remove_device(sdev);
1891	}
1892}
1893
1894/**
1895 * scsi_prep_async_scan - prepare for an async scan
1896 * @shost: the host which will be scanned
1897 * Returns: a cookie to be passed to scsi_finish_async_scan()
1898 *
1899 * Tells the midlayer this host is going to do an asynchronous scan.
1900 * It reserves the host's position in the scanning list and ensures
1901 * that other asynchronous scans started after this one won't affect the
1902 * ordering of the discovered devices.
1903 */
1904static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
1905{
1906	struct async_scan_data *data = NULL;
1907	unsigned long flags;
1908
1909	if (strncmp(scsi_scan_type, "sync", 4) == 0)
1910		return NULL;
1911
1912	mutex_lock(&shost->scan_mutex);
1913	if (shost->async_scan) {
1914		shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
1915		goto err;
1916	}
1917
1918	data = kmalloc(sizeof(*data), GFP_KERNEL);
1919	if (!data)
1920		goto err;
1921	data->shost = scsi_host_get(shost);
1922	if (!data->shost)
1923		goto err;
1924	init_completion(&data->prev_finished);
1925
1926	spin_lock_irqsave(shost->host_lock, flags);
1927	shost->async_scan = 1;
1928	spin_unlock_irqrestore(shost->host_lock, flags);
1929	mutex_unlock(&shost->scan_mutex);
1930
1931	spin_lock(&async_scan_lock);
1932	if (list_empty(&scanning_hosts))
1933		complete(&data->prev_finished);
1934	list_add_tail(&data->list, &scanning_hosts);
1935	spin_unlock(&async_scan_lock);
1936
1937	return data;
1938
1939 err:
1940	mutex_unlock(&shost->scan_mutex);
1941	kfree(data);
1942	return NULL;
1943}
1944
1945/**
1946 * scsi_finish_async_scan - asynchronous scan has finished
1947 * @data: cookie returned from earlier call to scsi_prep_async_scan()
1948 *
1949 * All the devices currently attached to this host have been found.
1950 * This function announces all the devices it has found to the rest
1951 * of the system.
1952 */
1953static void scsi_finish_async_scan(struct async_scan_data *data)
1954{
1955	struct Scsi_Host *shost;
1956	unsigned long flags;
1957
1958	if (!data)
1959		return;
1960
1961	shost = data->shost;
1962
1963	mutex_lock(&shost->scan_mutex);
1964
1965	if (!shost->async_scan) {
1966		shost_printk(KERN_INFO, shost, "%s called twice\n", __func__);
1967		dump_stack();
1968		mutex_unlock(&shost->scan_mutex);
1969		return;
1970	}
1971
1972	wait_for_completion(&data->prev_finished);
1973
1974	scsi_sysfs_add_devices(shost);
1975
1976	spin_lock_irqsave(shost->host_lock, flags);
1977	shost->async_scan = 0;
1978	spin_unlock_irqrestore(shost->host_lock, flags);
1979
1980	mutex_unlock(&shost->scan_mutex);
1981
1982	spin_lock(&async_scan_lock);
1983	list_del(&data->list);
1984	if (!list_empty(&scanning_hosts)) {
1985		struct async_scan_data *next = list_entry(scanning_hosts.next,
1986				struct async_scan_data, list);
1987		complete(&next->prev_finished);
1988	}
1989	spin_unlock(&async_scan_lock);
1990
1991	scsi_autopm_put_host(shost);
1992	scsi_host_put(shost);
1993	kfree(data);
1994}
1995
1996static void do_scsi_scan_host(struct Scsi_Host *shost)
1997{
1998	if (shost->hostt->scan_finished) {
1999		unsigned long start = jiffies;
2000		if (shost->hostt->scan_start)
2001			shost->hostt->scan_start(shost);
2002
2003		while (!shost->hostt->scan_finished(shost, jiffies - start))
2004			msleep(10);
2005	} else {
2006		scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
2007				SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
2008	}
2009}
2010
2011static void do_scan_async(void *_data, async_cookie_t c)
2012{
2013	struct async_scan_data *data = _data;
2014	struct Scsi_Host *shost = data->shost;
2015
2016	do_scsi_scan_host(shost);
2017	scsi_finish_async_scan(data);
2018}
2019
2020/**
2021 * scsi_scan_host - scan the given adapter
2022 * @shost:	adapter to scan
2023 **/
2024void scsi_scan_host(struct Scsi_Host *shost)
2025{
2026	struct async_scan_data *data;
2027
2028	if (strncmp(scsi_scan_type, "none", 4) == 0 ||
2029	    strncmp(scsi_scan_type, "manual", 6) == 0)
2030		return;
2031	if (scsi_autopm_get_host(shost) < 0)
2032		return;
2033
2034	data = scsi_prep_async_scan(shost);
2035	if (!data) {
2036		do_scsi_scan_host(shost);
2037		scsi_autopm_put_host(shost);
2038		return;
2039	}
2040
2041	/* register with the async subsystem so wait_for_device_probe()
2042	 * will flush this work
2043	 */
2044	async_schedule(do_scan_async, data);
2045
2046	/* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */
2047}
2048EXPORT_SYMBOL(scsi_scan_host);
2049
2050void scsi_forget_host(struct Scsi_Host *shost)
2051{
2052	struct scsi_device *sdev;
2053	unsigned long flags;
2054
2055 restart:
2056	spin_lock_irqsave(shost->host_lock, flags);
2057	list_for_each_entry(sdev, &shost->__devices, siblings) {
2058		if (sdev->sdev_state == SDEV_DEL)
2059			continue;
2060		spin_unlock_irqrestore(shost->host_lock, flags);
2061		__scsi_remove_device(sdev);
2062		goto restart;
2063	}
2064	spin_unlock_irqrestore(shost->host_lock, flags);
2065}
2066
2067