/*-
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/cam/cam_xpt.c 168882 2007-04-19 23:34:51Z scottl $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
#include <sys/interrupt.h>
#include <sys/sbuf.h>
#include <sys/taskqueue.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>

#ifdef PC98
#include <pc98/pc98/pc98_machdep.h>	/* geometry translation */
#endif

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>
#include <machine/stdarg.h>	/* for xpt_print below */
#include "opt_cam.h"

/* Data structures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");

/* Object for deferring XPT actions to a taskqueue */
struct xpt_task {
	struct task	task;
	void		*data1;
	uintptr_t	data2;
};

/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
	SLIST_ENTRY(async_node)	links;
	u_int32_t	event_enable;	/* Async Event enables */
	void		(*callback)(void *arg, u_int32_t code,
				    struct cam_path *path, void *args);
	void		*callback_arg;
};
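/*
 * Illustrative sketch (not part of this file): how a peripheral driver
 * typically ends up on these callback lists, by handing the XPT an
 * XPT_SASYNC_CB CCB.  This mirrors the pattern used by drivers such as
 * "da"; the names exampleasync/example_register_async are hypothetical.
 */
#if 0
static void
exampleasync(void *callback_arg, u_int32_t code, struct cam_path *path,
	     void *arg)
{
	switch (code) {
	case AC_LOST_DEVICE:
		/* Tear down any per-device state we were keeping. */
		break;
	default:
		break;
	}
}

static void
example_register_async(struct cam_periph *periph)
{
	struct ccb_setasync csa;

	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = exampleasync;
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);
}
#endif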

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif
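/*
 * The #ifndef guard above means the default of 4 is only a fallback;
 * with "opt_cam.h" included, a kernel configuration line such as the
 * following should override it at build time:
 *
 *	options	CAM_MAX_HIGHPOWER=8
 */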

/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
	cam_pinfo pinfo;
	struct	  cam_ed *device;
};

/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on each bus.
 */
struct cam_ed {
	TAILQ_ENTRY(cam_ed) links;
	struct	cam_ed_qinfo alloc_ccb_entry;
	struct	cam_ed_qinfo send_ccb_entry;
	struct	cam_et	 *target;
	struct	cam_sim  *sim;
	lun_id_t	 lun_id;
	struct	camq drvq;		/*
					 * Queue of type drivers wanting to do
					 * work on this device.
					 */
	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
	struct	async_list asyncs;	/* Async callback info for this B/T/L */
	struct	periph_list periphs;	/* All attached devices */
	u_int	generation;		/* Generation number */
	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
	struct	xpt_quirk_entry *quirk;	/* Oddities about this device */
					/* Storage for the inquiry data */
	cam_proto	 protocol;
	u_int		 protocol_version;
	cam_xport	 transport;
	u_int		 transport_version;
	struct		 scsi_inquiry_data inq_data;
	u_int8_t	 inq_flags;	/*
					 * Current settings for inquiry flags.
					 * This allows us to override settings
					 * like disconnection and tagged
					 * queuing for a device.
					 */
	u_int8_t	 queue_flags;	/* Queue flags from the control page */
	u_int8_t	 serial_num_len;
	u_int8_t	*serial_num;
	u_int32_t	 qfrozen_cnt;
	u_int32_t	 flags;
#define CAM_DEV_UNCONFIGURED	 	0x01
#define CAM_DEV_REL_TIMEOUT_PENDING	0x02
#define CAM_DEV_REL_ON_COMPLETE		0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
#define CAM_DEV_TAG_AFTER_COUNT		0x20
#define CAM_DEV_INQUIRY_DATA_VALID	0x40
#define	CAM_DEV_IN_DV			0x80
#define	CAM_DEV_DV_HIT_BOTTOM		0x100
	u_int32_t	 tag_delay_count;
#define	CAM_TAG_DELAY_COUNT		5
	u_int32_t	 tag_saved_openings;
	u_int32_t	 refcount;
	struct callout	 callout;
};

/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
	TAILQ_HEAD(, cam_ed) ed_entries;
	TAILQ_ENTRY(cam_et) links;
	struct	cam_eb	*bus;
	target_id_t	target_id;
	u_int32_t	refcount;
	u_int		generation;
	struct		timeval last_reset;
};

/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
	TAILQ_HEAD(, cam_et) et_entries;
	TAILQ_ENTRY(cam_eb)  links;
	path_id_t	     path_id;
	struct cam_sim	     *sim;
	struct timeval	     last_reset;
	u_int32_t	     flags;
#define	CAM_EB_RUNQ_SCHEDULED	0x01
	u_int32_t	     refcount;
	u_int		     generation;
};

struct cam_path {
	struct cam_periph *periph;
	struct cam_eb	  *bus;
	struct cam_et	  *target;
	struct cam_ed	  *device;
};

struct xpt_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define	CAM_QUIRK_NOLUNS	0x01
#define	CAM_QUIRK_NOSERIAL	0x02
#define	CAM_QUIRK_HILUNS	0x04
#define	CAM_QUIRK_NOHILUNS	0x08
	u_int mintags;
	u_int maxtags;
};

static int cam_srch_hi = 0;
TUNABLE_INT("kern.cam.cam_srch_hi", &cam_srch_hi);
static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
    sysctl_cam_search_luns, "I",
    "allow search above LUN 7 for SCSI3 and greater devices");

#define	CAM_SCSI2_MAXLUN	8
/*
 * We can look for LUNs above LUN 7 if the device is not quirked to
 * search only the first 8 LUNs, and one of the following holds: the
 * device is quirked to search high LUNs; the device is newer than
 * SCSI-2 and high-LUN searching has been enabled; or the device is
 * newer than SCSI-2 and the last LUN probed was a success.
 */
#define	CAN_SRCH_HI_SPARSE(dv)				\
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0) 	\
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)		\
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi)))

#define	CAN_SRCH_HI_DENSE(dv)				\
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0) 	\
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)		\
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2)))
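/*
 * Worked example: a disk whose inquiry data reports SCSI-3 and whose
 * quirk entry sets neither CAM_QUIRK_HILUNS nor CAM_QUIRK_NOHILUNS
 * satisfies CAN_SRCH_HI_DENSE() (the probe code applies it after the
 * previous LUN responds, per the comment above), but satisfies
 * CAN_SRCH_HI_SPARSE() only when kern.cam.cam_srch_hi is nonzero.
 */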

typedef enum {
	XPT_FLAG_OPEN		= 0x01
} xpt_flags;

struct xpt_softc {
	xpt_flags		flags;
	u_int32_t		xpt_generation;

	/* number of high powered commands that can go through right now */
	STAILQ_HEAD(highpowerlist, ccb_hdr)	highpowerq;
	int			num_highpower;

	/* queue for handling async rescan requests. */
	TAILQ_HEAD(, ccb_hdr) ccb_scanq;

	/* Registered busses */
	TAILQ_HEAD(,cam_eb)	xpt_busses;
	u_int			bus_generation;

	struct intr_config_hook	*xpt_config_hook;

	struct mtx		xpt_topo_lock;
	struct mtx		xpt_lock;
};

static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";
static const char microp[] = "MICROP";

static struct xpt_quirk_entry xpt_quirk_table[] =
{
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Unfortunately, the Quantum Atlas III has the same
		 * problem as the Atlas II drives above.
		 * Reported by: "Johan Granlund" <johan@granlund.nu>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM39100TD-SW N1B0
		 *
		 * It's possible that Quantum will fix the problem in later
		 * firmware revisions.  If that happens, the quirk entry
		 * will need to be made specific to the firmware revisions
		 * with the problem.
		 *
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * 18 Gig Atlas III, same problem as the 9G version.
		 * Reported by: Andre Albsmeier
		 *		<andre.albsmeier@mchp.siemens.de>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM318000TD-S N491
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * Broken tagged queuing drive
		 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
		 *         and: Martin Renters <martin@tdc.on.ca>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
		/*
		 * The Seagate Medalist Pro drives have very poor write
		 * performance with anything more than 2 tags.
		 *
		 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
		 * Drive:  <SEAGATE ST36530N 1444>
		 *
		 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
		 * Drive:  <SEAGATE ST34520W 1281>
		 *
		 * No one has actually reported that the 9G version
		 * (ST39140*) of the Medalist Pro has the same problem, but
		 * we're assuming that it does because the 4G and 6.5G
		 * versions of the drive are broken.
		 */
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  Write performance
		 * steadily drops off with more and more concurrent
		 * transactions.  Best sequential write performance with
		 * tagged queueing turned off and write caching turned on.
		 *
		 * PR:  kern/10398
		 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
		 * Drive:  DCAS-34330 w/ "S65A" firmware.
		 *
		 * The drive with the problem had the "S65A" firmware
		 * revision, and has also been reported (by Stephen J.
		 * Roznowski <sjr@home.net>) for a drive with the "S61A"
		 * firmware revision.
		 *
		 * Although no one has reported problems with the 2 gig
		 * version of the DCAS drive, the assumption is that it
		 * has the same problems as the 4 gig version.  Therefore
		 * this quirk entry disables tagged queueing for all
		 * DCAS drives.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* This device does not support any LUN other than LUN 0 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		/*
		 * Broken tagged queuing drive.
		 * Submitted by:
		 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
		 * in PR kern/9535
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Doesn't handle queue full condition correctly,
		 * so we need to limit maxtags to what the device
		 * can handle instead of determining this automatically.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
		/*quirks*/0, /*mintags*/2, /*maxtags*/32
	},
	{
		/* Really only one LUN */
		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* I can't believe we need a quirk for DPT volumes. */
		{ T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
		/*mintags*/0, /*maxtags*/255
	},
	{
		/*
		 * Many Sony CDROM drives don't like multi-LUN probing.
		 */
		{ T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * This drive doesn't like multiple LUN probing.
		 * Submitted by:  Parag Patel <parag@cgt.com>
		 */
		{ T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R   CDU9*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		{ T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * The 8200 doesn't like multi-lun probing, and probably
		 * doesn't like serial number requests either.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"EXB-8200*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Let's try the same as above, but for a drive that says
		 * it's an IPL-6860 but is actually an EXB 8200.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"IPL-6860*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * These Hitachi drives don't like multi-lun probing.
		 * The PR submitter has a DK319H, but says that the Linux
		 * kernel has a similar work-around for the DK312 and DK314,
		 * so all DK31* drives are quirked here.
		 * PR:            misc/18793
		 * Submitted by:  Paul Haddad <paul@pth.com>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		/*
		 * The Hitachi CJ series with J8A8 firmware apparently has
		 * problems with tagged commands.
		 * PR: 23536
		 * Reported by: amagai@nue.org
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * These are the large storage arrays.
		 * Submitted by:  William Carrel <william.carrel@infospace.com>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
		CAM_QUIRK_HILUNS, 2, 1024
	},
	{
		/*
		 * This old revision of the TDC3600 is also SCSI-1, and
		 * hangs upon serial number probing.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
			" TDC 3600", "U07:"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Maxtor Personal Storage 3000XT (Firewire)
		 * hangs upon serial number probing.
		 */
		{
			T_DIRECT, SIP_MEDIA_FIXED, "Maxtor",
			"1394 storage", "*"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
			"CP150", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
			"96X2*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* TeraSolutions special settings for TRC-22 RAID */
		{ T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
		  /*quirks*/0, /*mintags*/55, /*maxtags*/255
	},
	{
		/* Veritas Storage Appliance */
		{ T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
		  CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
	},
	{
		/*
		 * Would respond to all LUNs.  Device type and removable
		 * flag are jumper-selectable.
		 */
		{ T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
		  "Tahiti 1", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* EasyRAID E5A aka the Areca ARC-6010 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" },
		  CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "DP", "BACKPLANE", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Default tagged queuing parameters for all devices */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0, /*mintags*/2, /*maxtags*/255
	},
};
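/*
 * New quirks follow the same shape as the entries above and go in before
 * the wildcard default entry, which has to stay last because matching
 * stops at the first hit.  A sketch with a hypothetical vendor/product:
 */
#if 0
	{
		/* Hypothetical drive that only answers on LUN 0 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "ACME", "ROADRUNNER*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
#endif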

static const int xpt_quirk_table_size =
	sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);

typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t;
static cam_simq_t cam_simq;
static struct mtx cam_simq_lock;

/* Pointers to software interrupt handlers */
static void *cambio_ih;

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
	probe_periph_init, "probe",
	TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);
PERIPHDRIVER_DECLARE(probe, probe_driver);


static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct cdevsw xpt_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	xptopen,
	.d_close =	xptclose,
	.d_ioctl =	xptioctl,
	.d_name =	"xpt",
};


static void dead_sim_action(struct cam_sim *sim, union ccb *ccb);
static void dead_sim_poll(struct cam_sim *sim);

/* Dummy SIM that is used when the real one has gone. */
static struct cam_sim cam_dead_sim = {
	.sim_action =	dead_sim_action,
	.sim_poll =	dead_sim_poll,
	.sim_name =	"dead_sim",
};

#define SIM_DEAD(sim)	((sim) == &cam_dead_sim)


/* Storage for debugging data structures */
#ifdef	CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif

#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
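/*
 * A kernel configuration that satisfies the constraints above might look
 * like this (the B/T/L values and flag combination are illustrative):
 *
 *	options	CAMDEBUG
 *	options	CAM_DEBUG_BUS=0
 *	options	CAM_DEBUG_TARGET=1
 *	options	CAM_DEBUG_LUN=0
 *	options	CAM_DEBUG_FLAGS=(CAM_DEBUG_INFO|CAM_DEBUG_CDB)
 */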

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
	"cam",
	cam_module_event_handler,
	NULL
};

static int	xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);


static cam_status	xpt_compile_path(struct cam_path *new_path,
					 struct cam_periph *perph,
					 path_id_t path_id,
					 target_id_t target_id,
					 lun_id_t lun_id);

static void		xpt_release_path(struct cam_path *path);

static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static void		xpt_dev_async(u_int32_t async_code,
				      struct cam_eb *bus,
				      struct cam_et *target,
				      struct cam_ed *device,
				      void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				  u_int32_t new_priority);
static void	 xpt_run_dev_allocq(struct cam_eb *bus);
static void	 xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static void	 xpt_release_simq_timeout(void *arg) __unused;
static void	 xpt_release_bus(struct cam_eb *bus);
static void	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
					 int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
		 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
				  lun_id_t lun_id);
static void	 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
				    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void	 xpt_scan_lun(struct cam_periph *periph,
			      struct cam_path *path, cam_flags flags,
			      union ccb *ccb);
static void	 xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t	xptconfigbuscountfunc;
static xpt_busfunc_t	xptconfigfunc;
static void	 xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static void	 camisr(void *);
static void	 camisr_runqueue(void *);
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static int		xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
static int		xpt_for_all_devices(xpt_devicefunc_t *tr_func,
					    void *arg);
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static cam_status	proberegister(struct cam_periph *periph,
				      void *arg);
static void	 probeschedule(struct cam_periph *probe_periph);
static void	 probestart(struct cam_periph *periph, union ccb *start_ccb);
static void	 proberequestdefaultnegotiation(struct cam_periph *periph);
static int       proberequestbackoff(struct cam_periph *periph,
				     struct cam_ed *device);
static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
static void	 probecleanup(struct cam_periph *periph);
static void	 xpt_find_quirk(struct cam_ed *device);
static void	 xpt_devise_transport(struct cam_path *path);
static void	 xpt_set_transfer_settings(struct ccb_trans_settings *cts,
					   struct cam_ed *device,
					   int async_update);
static void	 xpt_toggle_tags(struct cam_path *path);
static void	 xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
					    struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
					   struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);

static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	if (dev->ccbq.devq_openings > 0) {
		if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
			cam_ccbq_resize(&dev->ccbq,
					dev->ccbq.dev_openings
					+ dev->ccbq.dev_active);
			dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
		}
		/*
		 * The priority of a device waiting for CCB resources
		 * is that of the highest priority peripheral driver
		 * enqueued.
		 */
		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
					  &dev->alloc_ccb_entry.pinfo,
					  CAMQ_GET_HEAD(&dev->drvq)->priority);
	} else {
		retval = 0;
	}

	return (retval);
}

static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
	int	retval;

	if (dev->ccbq.dev_openings > 0) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&bus->sim->devq->send_queue,
				     &dev->send_ccb_entry.pinfo,
				     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
	} else {
		retval = 0;
	}
	return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
	/*
	 * Have work to do.
	 * Have space to do more work.
	 * Allowed to do work.
	 */
	return ((devq->alloc_queue.qfrozen_cnt == 0)
	     && (devq->alloc_queue.entries > 0)
	     && (devq->alloc_openings > 0));
}

static void
xpt_periph_init()
{
	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
probe_periph_init()
{
}


static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* Caller will release the CCB */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	/*
	 * Only allow read-write access.
	 */
	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((flags & O_NONBLOCK) != 0) {
		printf("%s: can't do nonblocking access\n", devtoname(dev));
		return(ENODEV);
	}

	/* Mark ourselves open */
	mtx_lock(&xsoftc.xpt_lock);
	xsoftc.flags |= XPT_FLAG_OPEN;
	mtx_unlock(&xsoftc.xpt_lock);

	return(0);
}

static int
xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{

	/* Mark ourselves closed */
	mtx_lock(&xsoftc.xpt_lock);
	xsoftc.flags &= ~XPT_FLAG_OPEN;
	mtx_unlock(&xsoftc.xpt_lock);

	return(0);
}

/*
 * Don't automatically grab the xpt softc lock here even though this is going
 * through the xpt device.  The xpt device is really just a back door for
 * accessing other devices and SIMs, so the right thing to do is to grab
 * the appropriate SIM lock once the bus/SIM is located.
 */
static int
xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int error;

	error = 0;

	switch(cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
	 * in the CAM spec.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;
		struct cam_eb *bus;

		inccb = (union ccb *)addr;

		bus = xpt_find_bus(inccb->ccb_h.path_id);
		if (bus == NULL) {
			error = EINVAL;
			break;
		}

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
			 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
				error = EINVAL;
				break;
			}
			/* FALLTHROUGH */
		case XPT_PATH_INQ:
		case XPT_ENG_INQ:
		case XPT_SCAN_LUN:

			ccb = xpt_alloc_ccb();

			CAM_SIM_LOCK(bus->sim);

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				CAM_SIM_UNLOCK(bus->sim);
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			ccb->ccb_h.cbfcnp = xptdone;
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			CAM_SIM_UNLOCK(bus->sim);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			CAM_SIM_LOCK(bus->sim);

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			ccb.ccb_h.cbfcnp = xptdone;
			xpt_action(&ccb);
			CAM_SIM_UNLOCK(bus->sim);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;

		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, so we can send it
			 * on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = ENOTSUP;
			break;
		}
		break;
	}
	/*
	 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as
	 * input, with the peripheral driver name and unit number filled in.
	 * The other fields don't really matter as input.  The passthrough
	 * driver name ("pass"), and unit number are passed back in the ccb.
	 * The current device generation number, the index into the device
	 * peripheral driver list, and the status are also passed back.  Note
	 * that since we do everything in one pass, unlike the XPT_GDEVLIST
	 * ccb, we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
	 * (or rather should be) impossible for the device peripheral driver
	 * list to change since we look at the whole thing in one pass, and
	 * we do it with lock protection.
	 *
	 */
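	/*
	 * Userland usage sketch (this mirrors what libcam does; the "da1"
	 * lookup below is hypothetical):
	 *
	 *	union ccb ccb;
	 *	int fd = open("/dev/xpt0", O_RDWR);
	 *
	 *	bzero(&ccb, sizeof(ccb));
	 *	ccb.ccb_h.func_code = XPT_GDEVLIST;
	 *	strncpy(ccb.cgdl.periph_name, "da", DEV_IDLEN);
	 *	ccb.cgdl.unit_number = 1;
	 *	if (ioctl(fd, CAMGETPASSTHRU, &ccb) != -1
	 *	 && ccb.ccb_h.status == CAM_REQ_CMP)
	 *		printf("%s%d\n", ccb.cgdl.periph_name,
	 *		       ccb.cgdl.unit_number);
	 */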
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char   *name;
		u_int unit;
		u_int cur_generation;
		int base_periph_found;
		int splbreaknum;

		ccb = (union ccb *)addr;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		/*
		 * Every 100 devices, we want to drop our lock protection to
		 * give the software interrupt handler a chance to run.
		 * Most systems won't run into this check, but this should
		 * avoid starvation in the software interrupt handler in
		 * large systems.
		 */
		splbreaknum = 100;

		ccb = (union ccb *)addr;

		base_periph_found = 0;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		mtx_lock(&xsoftc.xpt_topo_lock);
ptstartover:
		cur_generation = xsoftc.xpt_generation;

		/* first find our driver in the list of drivers */
		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;

		if (*p_drv == NULL) {
			mtx_unlock(&xsoftc.xpt_topo_lock);
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
		     periph = TAILQ_NEXT(periph, unit_links)) {

			if (periph->unit_number == unit) {
				break;
			} else if (--splbreaknum == 0) {
				mtx_unlock(&xsoftc.xpt_topo_lock);
				mtx_lock(&xsoftc.xpt_topo_lock);
				splbreaknum = 100;
				if (cur_generation != xsoftc.xpt_generation)
				       goto ptstartover;
			}
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = 1;
			device = periph->path->device;
			for (i = 0, periph = SLIST_FIRST(&device->periphs);
			     periph != NULL;
			     periph = SLIST_NEXT(periph, periph_links), i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (SLIST_NEXT(periph, periph_links))
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found == 1) {
				printf("xptioctl: pass driver is not in the "
				       "kernel\n");
				printf("xptioctl: put \"device pass0\" in "
				       "your kernel config file\n");
			}
		}
		mtx_unlock(&xsoftc.xpt_topo_lock);
		break;
		}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}

static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		if ((error = xpt_init(NULL)) != 0)
			return (error);
		break;
	case MOD_UNLOAD:
		return EBUSY;
	default:
		return EOPNOTSUPP;
	}

	return 0;
}

/* thread to handle bus rescans */
static void
xpt_scanner_thread(void *dummy)
{
	cam_isrq_t	queue;
	union ccb	*ccb;
	struct cam_sim	*sim;

	for (;;) {
		/*
		 * Wait for a rescan request to come in.  When it does, splice
		 * it onto a queue from local storage so that the xpt lock
		 * doesn't need to be held while the requests are being
		 * processed.
		 */
		xpt_lock_buses();
		msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
		    "ccb_scanq", 0);
		TAILQ_INIT(&queue);
		TAILQ_CONCAT(&queue, &xsoftc.ccb_scanq, sim_links.tqe);
		xpt_unlock_buses();

		while ((ccb = (union ccb *)TAILQ_FIRST(&queue)) != NULL) {
			TAILQ_REMOVE(&queue, &ccb->ccb_h, sim_links.tqe);

			sim = ccb->ccb_h.path->bus->sim;
			CAM_SIM_LOCK(sim);

			ccb->ccb_h.func_code = XPT_SCAN_BUS;
			ccb->ccb_h.cbfcnp = xptdone;
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5);
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			CAM_SIM_UNLOCK(sim);
		}
	}
}

void
xpt_rescan(union ccb *ccb)
{
	struct ccb_hdr *hdr;

	/*
	 * Don't make duplicate entries for the same paths.
	 */
	xpt_lock_buses();
	TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
		if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
			xpt_unlock_buses();
			xpt_print(ccb->ccb_h.path, "rescan already queued\n");
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			return;
		}
	}
	TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
	wakeup(&xsoftc.ccb_scanq);
	xpt_unlock_buses();
}
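/*
 * Caller-side sketch (an assumption about usage, modeled on how SIM
 * drivers queue an asynchronous full-bus rescan; not code from this
 * file).  The scanner thread above fills in the func_code and priority,
 * so the caller only needs a CCB with a valid wildcard path:
 */
#if 0
static void
example_request_rescan(struct cam_sim *sim)
{
	union ccb *ccb;

	ccb = xpt_alloc_ccb();
	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
	    cam_sim_path(sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(ccb);
		return;
	}
	/* xpt_rescan() takes ownership of the CCB and its path. */
	xpt_rescan(ccb);
}
#endif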

/* Functions accessed by the peripheral drivers */
static int
xpt_init(void *dummy)
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq *devq;
	cam_status status;

	TAILQ_INIT(&xsoftc.xpt_busses);
	TAILQ_INIT(&cam_simq);
	TAILQ_INIT(&xsoftc.ccb_scanq);
	STAILQ_INIT(&xsoftc.highpowerq);
	xsoftc.num_highpower = CAM_MAX_HIGHPOWER;

	mtx_init(&cam_simq_lock, "CAM SIMQ lock", NULL, MTX_DEF);
	mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
	mtx_init(&xsoftc.xpt_topo_lock, "XPT topology lock", NULL, MTX_DEF);

	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe busses and
	 * perform other XPT functions.
	 */
	devq = cam_simq_alloc(16);
	xpt_sim = cam_sim_alloc(xptaction,
				xptpoll,
				"xpt",
				/*softc*/NULL,
				/*unit*/0,
				/*mtx*/&xsoftc.xpt_lock,
				/*max_dev_transactions*/0,
				/*max_tagged_dev_transactions*/0,
				devq);
	if (xpt_sim == NULL)
		return (ENOMEM);

	xpt_sim->max_ccbs = 16;

	mtx_lock(&xsoftc.xpt_lock);
	if ((status = xpt_bus_register(xpt_sim, /*bus #*/0)) != CAM_SUCCESS) {
		printf("xpt_init: xpt_bus_register failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		printf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}

	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, xpt_sim);
	xpt_free_path(path);
	mtx_unlock(&xsoftc.xpt_lock);

	/*
	 * Register a callback for when interrupts are enabled.
	 */
	xsoftc.xpt_config_hook =
	    (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
					      M_TEMP, M_NOWAIT | M_ZERO);
	if (xsoftc.xpt_config_hook == NULL) {
		printf("xpt_init: Cannot malloc config hook "
		       "- failing attach\n");
		return (ENOMEM);
	}

	xsoftc.xpt_config_hook->ich_func = xpt_config;
	if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
		free (xsoftc.xpt_config_hook, M_TEMP);
		printf("xpt_init: config_intrhook_establish failed "
		       "- failing attach\n");
	}

	/* fire up rescan thread */
	if (kthread_create(xpt_scanner_thread, NULL, NULL, 0, 0, "xpt_thrd")) {
		printf("xpt_init: failed to create rescan thread\n");
	}
	/* Install our software interrupt handlers */
	swi_add(NULL, "cambio", camisr, NULL, SWI_CAMBIO, INTR_MPSAFE, &cambio_ih);

	return (0);
}

static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	struct cam_sim *xpt_sim;

	if (periph == NULL) {
		printf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	xpt_sim = (struct cam_sim *)arg;
	xpt_sim->softc = periph;
	xpt_periph = periph;
	periph->softc = NULL;

	return(CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t	 status;
	struct periph_list *periph_head;

	mtx_assert(periph->sim->mtx, MA_OWNED);

	device = periph->path->device;

	periph_head = &device->periphs;

	status = CAM_REQ_CMP;

	if (device != NULL) {
		/*
		 * Make room for this peripheral
		 * so it will fit in the queue
		 * when it's scheduled to run
		 */
		status = camq_resize(&device->drvq,
				     device->drvq.array_size + 1);

		device->generation++;

		SLIST_INSERT_HEAD(periph_head, periph, periph_links);
	}

	mtx_lock(&xsoftc.xpt_topo_lock);
	xsoftc.xpt_generation++;
	mtx_unlock(&xsoftc.xpt_topo_lock);

	return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
	struct cam_ed *device;

	mtx_assert(periph->sim->mtx, MA_OWNED);

	device = periph->path->device;

	if (device != NULL) {
		struct periph_list *periph_head;

		periph_head = &device->periphs;

		/* Release the slot for this peripheral */
		camq_resize(&device->drvq, device->drvq.array_size - 1);

		device->generation++;

		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
	}

	mtx_lock(&xsoftc.xpt_topo_lock);
	xsoftc.xpt_generation++;
	mtx_unlock(&xsoftc.xpt_topo_lock);
}


void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	struct	ccb_pathinq cpi;
	struct	ccb_trans_settings cts;
	struct	cam_path *path;
	u_int	speed;
	u_int	freq;
	u_int	mb;

	mtx_assert(periph->sim->mtx, MA_OWNED);

	path = periph->path;
	/*
	 * To ensure that this is printed in one piece,
	 * mask out CAM interrupts.
	 */
	printf("%s%d at %s%d bus %d target %d lun %d\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->target->target_id,
	       path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	scsi_print_inquiry(&path->device->inq_data);
	if (bootverbose && path->device->serial_num_len > 0) {
		/* Don't wrap the screen - print only the first 60 chars */
		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);
	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		return;
	}

	/* Ask the SIM for its base transfer speed */
	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	speed = cpi.base_transfer_speed;
	freq = 0;
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
		struct	ccb_trans_settings_spi *spi;

		spi = &cts.xport_specific.spi;
		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
		  && spi->sync_offset != 0) {
			freq = scsi_calc_syncsrate(spi->sync_period);
			speed = freq;
		}

		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
			speed *= (0x01 << spi->bus_width);
	}

	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
		struct	ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_SPEED) {
			speed = fc->bitrate;
		}
	}

	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SAS) {
		struct	ccb_trans_settings_sas *sas = &cts.xport_specific.sas;
		if (sas->valid & CTS_SAS_VALID_SPEED) {
			speed = sas->bitrate;
		}
	}

	mb = speed / 1000;
	if (mb > 0)
		printf("%s%d: %d.%03dMB/s transfers",
		       periph->periph_name, periph->unit_number,
		       mb, speed % 1000);
	else
		printf("%s%d: %dKB/s transfers", periph->periph_name,
		       periph->unit_number, speed);
	/* Report additional information about SPI connections */
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
		struct	ccb_trans_settings_spi *spi;

		spi = &cts.xport_specific.spi;
		if (freq != 0) {
			printf(" (%d.%03dMHz%s, offset %d", freq / 1000,
			       freq % 1000,
			       (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
			     ? " DT" : "",
			       spi->sync_offset);
		}
		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
		 && spi->bus_width > 0) {
			if (freq != 0) {
				printf(", ");
			} else {
				printf(" (");
			}
			printf("%dbit)", 8 * (0x01 << spi->bus_width));
		} else if (freq != 0) {
			printf(")");
		}
	}
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
		struct	ccb_trans_settings_fc *fc;

		fc = &cts.xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_WWNN)
			printf(" WWNN 0x%llx", (long long) fc->wwnn);
		if (fc->valid & CTS_FC_VALID_WWPN)
			printf(" WWPN 0x%llx", (long long) fc->wwpn);
		if (fc->valid & CTS_FC_VALID_PORT)
			printf(" PortID 0x%x", fc->port);
	}

	if (path->device->inq_flags & SID_CmdQue
	 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		printf("\n%s%d: Command Queueing Enabled",
		       periph->periph_name, periph->unit_number);
	}
	printf("\n");

	/*
	 * We only want to print the caller's announce string if they've
	 * passed one in.
	 */
	if (announce_string != NULL)
		printf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
}
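/*
 * For reference, the announcement printed above produces the familiar
 * boot-time lines, e.g. for a tagged SPI disk (illustrative values):
 *
 *	da0 at ahc0 bus 0 target 0 lun 0
 *	da0: <QUANTUM XP39100S LXY4> Fixed Direct Access SCSI-2 device
 *	da0: 40.000MB/s transfers (20.000MHz, offset 15, 16bit)
 *	da0: Command Queueing Enabled
 */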
1775
1776static dev_match_ret
1777xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1778	    struct cam_eb *bus)
1779{
1780	dev_match_ret retval;
1781	int i;
1782
1783	retval = DM_RET_NONE;
1784
1785	/*
1786	 * If we aren't given something to match against, that's an error.
1787	 */
1788	if (bus == NULL)
1789		return(DM_RET_ERROR);
1790
1791	/*
1792	 * If there are no match entries, then this bus matches no
1793	 * matter what.
1794	 */
1795	if ((patterns == NULL) || (num_patterns == 0))
1796		return(DM_RET_DESCEND | DM_RET_COPY);
1797
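	/*
	 * A dev_match_ret is an action (DM_RET_ACTION_MASK: none, stop,
	 * descend, or error) OR'ed with flags such as DM_RET_COPY; the
	 * loop below accumulates both as it walks the pattern list.
	 */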
1798	for (i = 0; i < num_patterns; i++) {
1799		struct bus_match_pattern *cur_pattern;
1800
1801		/*
1802		 * If the pattern in question isn't for a bus node, we
1803		 * aren't interested.  However, we do indicate to the
1804		 * calling routine that we should continue descending the
1805		 * tree, since the user wants to match against lower-level
1806		 * EDT elements.
1807		 */
1808		if (patterns[i].type != DEV_MATCH_BUS) {
1809			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1810				retval |= DM_RET_DESCEND;
1811			continue;
1812		}
1813
1814		cur_pattern = &patterns[i].pattern.bus_pattern;
1815
1816		/*
1817		 * If they want to match any bus node, we give them any
1818		 * bus node.
1819		 */
1820		if (cur_pattern->flags == BUS_MATCH_ANY) {
1821			/* set the copy flag */
1822			retval |= DM_RET_COPY;
1823
1824			/*
1825			 * If we've already decided on an action, go ahead
1826			 * and return.
1827			 */
1828			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1829				return(retval);
1830		}
1831
1832		/*
1833		 * Not sure why someone would do this...
1834		 */
1835		if (cur_pattern->flags == BUS_MATCH_NONE)
1836			continue;
1837
1838		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1839		 && (cur_pattern->path_id != bus->path_id))
1840			continue;
1841
1842		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1843		 && (cur_pattern->bus_id != bus->sim->bus_id))
1844			continue;
1845
1846		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1847		 && (cur_pattern->unit_number != bus->sim->unit_number))
1848			continue;
1849
1850		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1851		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1852			     DEV_IDLEN) != 0))
1853			continue;
1854
1855		/*
1856		 * If we get to this point, the user definitely wants
1857		 * information on this bus.  So tell the caller to copy the
1858		 * data out.
1859		 */
1860		retval |= DM_RET_COPY;
1861
1862		/*
1863		 * If the return action has been set to descend, then we
1864		 * know that we've already seen a non-bus matching
1865		 * expression, therefore we need to further descend the tree.
1866		 * This won't change by continuing around the loop, so we
1867		 * go ahead and return.  If we haven't seen a non-bus
1868		 * matching expression, we keep going around the loop until
1869		 * we exhaust the matching expressions.  We'll set the stop
1870		 * flag once we fall out of the loop.
1871		 */
1872		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1873			return(retval);
1874	}
1875
1876	/*
1877	 * If the return action hasn't been set to descend yet, that means
1878	 * we haven't seen anything other than bus matching patterns.  So
1879	 * tell the caller to stop descending the tree -- the user doesn't
1880	 * want to match against lower level tree elements.
1881	 */
1882	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1883		retval |= DM_RET_STOP;
1884
1885	return(retval);
1886}
1887
1888static dev_match_ret
1889xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1890	       struct cam_ed *device)
1891{
1892	dev_match_ret retval;
1893	int i;
1894
1895	retval = DM_RET_NONE;
1896
1897	/*
1898	 * If we aren't given something to match against, that's an error.
1899	 */
1900	if (device == NULL)
1901		return(DM_RET_ERROR);
1902
1903	/*
1904	 * If there are no match entries, then this device matches no
1905	 * matter what.
1906	 */
1907	if ((patterns == NULL) || (num_patterns == 0))
1908		return(DM_RET_DESCEND | DM_RET_COPY);
1909
1910	for (i = 0; i < num_patterns; i++) {
1911		struct device_match_pattern *cur_pattern;
1912
1913		/*
1914		 * If the pattern in question isn't for a device node, we
1915		 * aren't interested.
1916		 */
1917		if (patterns[i].type != DEV_MATCH_DEVICE) {
1918			if ((patterns[i].type == DEV_MATCH_PERIPH)
1919			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1920				retval |= DM_RET_DESCEND;
1921			continue;
1922		}
1923
1924		cur_pattern = &patterns[i].pattern.device_pattern;
1925
1926		/*
1927		 * If they want to match any device node, we give them any
1928		 * device node.
1929		 */
1930		if (cur_pattern->flags == DEV_MATCH_ANY) {
1931			/* set the copy flag */
1932			retval |= DM_RET_COPY;
1933
1934
1935			/*
1936			 * If we've already decided on an action, go ahead
1937			 * and return.
1938			 */
1939			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1940				return(retval);
1941		}
1942
1943		/*
1944		 * Not sure why someone would do this...
1945		 */
1946		if (cur_pattern->flags == DEV_MATCH_NONE)
1947			continue;
1948
1949		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1950		 && (cur_pattern->path_id != device->target->bus->path_id))
1951			continue;
1952
1953		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1954		 && (cur_pattern->target_id != device->target->target_id))
1955			continue;
1956
1957		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1958		 && (cur_pattern->target_lun != device->lun_id))
1959			continue;
1960
1961		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1962		 && (cam_quirkmatch((caddr_t)&device->inq_data,
1963				    (caddr_t)&cur_pattern->inq_pat,
1964				    1, sizeof(cur_pattern->inq_pat),
1965				    scsi_static_inquiry_match) == NULL))
1966			continue;
1967
1968		/*
1969		 * If we get to this point, the user definitely wants
1970		 * information on this device.  So tell the caller to copy
1971		 * the data out.
1972		 */
1973		retval |= DM_RET_COPY;
1974
1975		/*
1976		 * If the return action has been set to descend, then we
1977		 * know that we've already seen a peripheral matching
1978		 * expression, therefore we need to further descend the tree.
1979		 * This won't change by continuing around the loop, so we
1980		 * go ahead and return.  If we haven't seen a peripheral
1981		 * matching expression, we keep going around the loop until
1982		 * we exhaust the matching expressions.  We'll set the stop
1983		 * flag once we fall out of the loop.
1984		 */
1985		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1986			return(retval);
1987	}
1988
1989	/*
1990	 * If the return action hasn't been set to descend yet, that means
1991	 * we haven't seen any peripheral matching patterns.  So tell the
1992	 * caller to stop descending the tree -- the user doesn't want to
1993	 * match against lower level tree elements.
1994	 */
1995	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1996		retval |= DM_RET_STOP;
1997
1998	return(retval);
1999}
2000
2001/*
2002 * Match a single peripheral against any number of match patterns.
2003 */
2004static dev_match_ret
2005xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
2006	       struct cam_periph *periph)
2007{
2008	dev_match_ret retval;
2009	int i;
2010
2011	/*
2012	 * If we aren't given something to match against, that's an error.
2013	 */
2014	if (periph == NULL)
2015		return(DM_RET_ERROR);
2016
2017	/*
2018	 * If there are no match entries, then this peripheral matches no
2019	 * matter what.
2020	 */
2021	if ((patterns == NULL) || (num_patterns == 0))
2022		return(DM_RET_STOP | DM_RET_COPY);
2023
2024	/*
2025	 * There aren't any nodes below a peripheral node, so there's no
2026	 * reason to descend the tree any further.
2027	 */
2028	retval = DM_RET_STOP;
2029
2030	for (i = 0; i < num_patterns; i++) {
2031		struct periph_match_pattern *cur_pattern;
2032
2033		/*
2034		 * If the pattern in question isn't for a peripheral, we
2035		 * aren't interested.
2036		 */
2037		if (patterns[i].type != DEV_MATCH_PERIPH)
2038			continue;
2039
2040		cur_pattern = &patterns[i].pattern.periph_pattern;
2041
2042		/*
2043		 * If they want to match on anything, then we will do so.
2044		 */
2045		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
2046			/* set the copy flag */
2047			retval |= DM_RET_COPY;
2048
2049			/*
2050			 * We've already set the return action to stop,
2051			 * since there are no nodes below peripherals in
2052			 * the tree.
2053			 */
2054			return(retval);
2055		}
2056
2057		/*
2058		 * Not sure why someone would do this...
2059		 */
2060		if (cur_pattern->flags == PERIPH_MATCH_NONE)
2061			continue;
2062
2063		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
2064		 && (cur_pattern->path_id != periph->path->bus->path_id))
2065			continue;
2066
2067		/*
2068		 * For the target and lun IDs, we have to make sure the
2069		 * target and lun pointers aren't NULL.  The xpt peripheral
2070		 * has a wildcard target and device.
2071		 */
2072		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
2073		 && ((periph->path->target == NULL)
2074		 ||(cur_pattern->target_id != periph->path->target->target_id)))
2075			continue;
2076
2077		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
2078		 && ((periph->path->device == NULL)
2079		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
2080			continue;
2081
2082		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
2083		 && (cur_pattern->unit_number != periph->unit_number))
2084			continue;
2085
2086		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
2087		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
2088			     DEV_IDLEN) != 0))
2089			continue;
2090
2091		/*
2092		 * If we get to this point, the user definitely wants
2093		 * information on this peripheral.  So tell the caller to
2094		 * copy the data out.
2095		 */
2096		retval |= DM_RET_COPY;
2097
2098		/*
2099		 * The return action has already been set to stop, since
2100		 * peripherals don't have any nodes below them in the EDT.
2101		 */
2102		return(retval);
2103	}
2104
2105	/*
2106	 * If we get to this point, the peripheral that was passed in
2107	 * doesn't match any of the patterns.
2108	 */
2109	return(retval);
2110}
2111
2112static int
2113xptedtbusfunc(struct cam_eb *bus, void *arg)
2114{
2115	struct ccb_dev_match *cdm;
2116	dev_match_ret retval;
2117
2118	cdm = (struct ccb_dev_match *)arg;
2119
2120	/*
2121	 * If our position is for something deeper in the tree, that means
2122	 * that we've already seen this node.  So, we keep going down.
2123	 */
2124	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2125	 && (cdm->pos.cookie.bus == bus)
2126	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2127	 && (cdm->pos.cookie.target != NULL))
2128		retval = DM_RET_DESCEND;
2129	else
2130		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
2131
2132	/*
2133	 * If we got an error, bail out of the search.
2134	 */
2135	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2136		cdm->status = CAM_DEV_MATCH_ERROR;
2137		return(0);
2138	}
2139
2140	/*
2141	 * If the copy flag is set, copy this bus out.
2142	 */
2143	if (retval & DM_RET_COPY) {
2144		int spaceleft, j;
2145
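		/*
		 * Bytes still free in the user's result buffer; the
		 * same calculation is repeated for devices and
		 * peripherals below.
		 */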
2146		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2147			sizeof(struct dev_match_result));
2148
2149		/*
2150		 * If we don't have enough space to put in another
2151		 * match result, save our position and tell the
2152		 * user there are more devices to check.
2153		 */
2154		if (spaceleft < sizeof(struct dev_match_result)) {
2155			bzero(&cdm->pos, sizeof(cdm->pos));
2156			cdm->pos.position_type =
2157				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
2158
2159			cdm->pos.cookie.bus = bus;
2160			cdm->pos.generations[CAM_BUS_GENERATION] =
2161				xsoftc.bus_generation;
2162			cdm->status = CAM_DEV_MATCH_MORE;
2163			return(0);
2164		}
2165		j = cdm->num_matches;
2166		cdm->num_matches++;
2167		cdm->matches[j].type = DEV_MATCH_BUS;
2168		cdm->matches[j].result.bus_result.path_id = bus->path_id;
2169		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
2170		cdm->matches[j].result.bus_result.unit_number =
2171			bus->sim->unit_number;
2172		strncpy(cdm->matches[j].result.bus_result.dev_name,
2173			bus->sim->sim_name, DEV_IDLEN);
2174	}
2175
2176	/*
2177	 * If the user is only interested in busses, there's no
2178	 * reason to descend to the next level in the tree.
2179	 */
2180	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2181		return(1);
2182
2183	/*
2184	 * If there is a target generation recorded, check it to
2185	 * make sure the target list hasn't changed.
2186	 */
2187	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2188	 && (bus == cdm->pos.cookie.bus)
2189	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2190	 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
2191	 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
2192	     bus->generation)) {
2193		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2194		return(0);
2195	}
2196
2197	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2198	 && (cdm->pos.cookie.bus == bus)
2199	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2200	 && (cdm->pos.cookie.target != NULL))
2201		return(xpttargettraverse(bus,
2202					(struct cam_et *)cdm->pos.cookie.target,
2203					 xptedttargetfunc, arg));
2204	else
2205		return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
2206}
2207
2208static int
2209xptedttargetfunc(struct cam_et *target, void *arg)
2210{
2211	struct ccb_dev_match *cdm;
2212
2213	cdm = (struct ccb_dev_match *)arg;
2214
2215	/*
2216	 * If there is a device list generation recorded, check it to
2217	 * make sure the device list hasn't changed.
2218	 */
2219	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2220	 && (cdm->pos.cookie.bus == target->bus)
2221	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2222	 && (cdm->pos.cookie.target == target)
2223	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2224	 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
2225	 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
2226	     target->generation)) {
2227		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2228		return(0);
2229	}
2230
2231	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2232	 && (cdm->pos.cookie.bus == target->bus)
2233	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2234	 && (cdm->pos.cookie.target == target)
2235	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2236	 && (cdm->pos.cookie.device != NULL))
2237		return(xptdevicetraverse(target,
2238					(struct cam_ed *)cdm->pos.cookie.device,
2239					 xptedtdevicefunc, arg));
2240	else
2241		return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
2242}
2243
2244static int
2245xptedtdevicefunc(struct cam_ed *device, void *arg)
2246{
2247
2248	struct ccb_dev_match *cdm;
2249	dev_match_ret retval;
2250
2251	cdm = (struct ccb_dev_match *)arg;
2252
2253	/*
2254	 * If our position is for something deeper in the tree, that means
2255	 * that we've already seen this node.  So, we keep going down.
2256	 */
2257	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2258	 && (cdm->pos.cookie.device == device)
2259	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2260	 && (cdm->pos.cookie.periph != NULL))
2261		retval = DM_RET_DESCEND;
2262	else
2263		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2264					device);
2265
2266	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2267		cdm->status = CAM_DEV_MATCH_ERROR;
2268		return(0);
2269	}
2270
2271	/*
2272	 * If the copy flag is set, copy this device out.
2273	 */
2274	if (retval & DM_RET_COPY) {
2275		int spaceleft, j;
2276
2277		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2278			sizeof(struct dev_match_result));
2279
2280		/*
2281		 * If we don't have enough space to put in another
2282		 * match result, save our position and tell the
2283		 * user there are more devices to check.
2284		 */
2285		if (spaceleft < sizeof(struct dev_match_result)) {
2286			bzero(&cdm->pos, sizeof(cdm->pos));
2287			cdm->pos.position_type =
2288				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2289				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2290
2291			cdm->pos.cookie.bus = device->target->bus;
2292			cdm->pos.generations[CAM_BUS_GENERATION] =
2293				xsoftc.bus_generation;
2294			cdm->pos.cookie.target = device->target;
2295			cdm->pos.generations[CAM_TARGET_GENERATION] =
2296				device->target->bus->generation;
2297			cdm->pos.cookie.device = device;
2298			cdm->pos.generations[CAM_DEV_GENERATION] =
2299				device->target->generation;
2300			cdm->status = CAM_DEV_MATCH_MORE;
2301			return(0);
2302		}
2303		j = cdm->num_matches;
2304		cdm->num_matches++;
2305		cdm->matches[j].type = DEV_MATCH_DEVICE;
2306		cdm->matches[j].result.device_result.path_id =
2307			device->target->bus->path_id;
2308		cdm->matches[j].result.device_result.target_id =
2309			device->target->target_id;
2310		cdm->matches[j].result.device_result.target_lun =
2311			device->lun_id;
2312		bcopy(&device->inq_data,
2313		      &cdm->matches[j].result.device_result.inq_data,
2314		      sizeof(struct scsi_inquiry_data));
2315
2316		/* Let the user know whether this device is unconfigured */
2317		if (device->flags & CAM_DEV_UNCONFIGURED)
2318			cdm->matches[j].result.device_result.flags =
2319				DEV_RESULT_UNCONFIGURED;
2320		else
2321			cdm->matches[j].result.device_result.flags =
2322				DEV_RESULT_NOFLAG;
2323	}
2324
2325	/*
2326	 * If the user isn't interested in peripherals, don't descend
2327	 * the tree any further.
2328	 */
2329	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2330		return(1);
2331
2332	/*
2333	 * If there is a peripheral list generation recorded, make sure
2334	 * it hasn't changed.
2335	 */
2336	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2337	 && (device->target->bus == cdm->pos.cookie.bus)
2338	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2339	 && (device->target == cdm->pos.cookie.target)
2340	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2341	 && (device == cdm->pos.cookie.device)
2342	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2343	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2344	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2345	     device->generation)) {
2346		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2347		return(0);
2348	}
2349
2350	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2351	 && (cdm->pos.cookie.bus == device->target->bus)
2352	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2353	 && (cdm->pos.cookie.target == device->target)
2354	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2355	 && (cdm->pos.cookie.device == device)
2356	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2357	 && (cdm->pos.cookie.periph != NULL))
2358		return(xptperiphtraverse(device,
2359				(struct cam_periph *)cdm->pos.cookie.periph,
2360				xptedtperiphfunc, arg));
2361	else
2362		return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2363}
2364
2365static int
2366xptedtperiphfunc(struct cam_periph *periph, void *arg)
2367{
2368	struct ccb_dev_match *cdm;
2369	dev_match_ret retval;
2370
2371	cdm = (struct ccb_dev_match *)arg;
2372
2373	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2374
2375	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2376		cdm->status = CAM_DEV_MATCH_ERROR;
2377		return(0);
2378	}
2379
2380	/*
2381	 * If the copy flag is set, copy this peripheral out.
2382	 */
2383	if (retval & DM_RET_COPY) {
2384		int spaceleft, j;
2385
2386		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2387			sizeof(struct dev_match_result));
2388
2389		/*
2390		 * If we don't have enough space to put in another
2391		 * match result, save our position and tell the
2392		 * user there are more devices to check.
2393		 */
2394		if (spaceleft < sizeof(struct dev_match_result)) {
2395			bzero(&cdm->pos, sizeof(cdm->pos));
2396			cdm->pos.position_type =
2397				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2398				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2399				CAM_DEV_POS_PERIPH;
2400
2401			cdm->pos.cookie.bus = periph->path->bus;
2402			cdm->pos.generations[CAM_BUS_GENERATION] =
2403				xsoftc.bus_generation;
2404			cdm->pos.cookie.target = periph->path->target;
2405			cdm->pos.generations[CAM_TARGET_GENERATION] =
2406				periph->path->bus->generation;
2407			cdm->pos.cookie.device = periph->path->device;
2408			cdm->pos.generations[CAM_DEV_GENERATION] =
2409				periph->path->target->generation;
2410			cdm->pos.cookie.periph = periph;
2411			cdm->pos.generations[CAM_PERIPH_GENERATION] =
2412				periph->path->device->generation;
2413			cdm->status = CAM_DEV_MATCH_MORE;
2414			return(0);
2415		}
2416
2417		j = cdm->num_matches;
2418		cdm->num_matches++;
2419		cdm->matches[j].type = DEV_MATCH_PERIPH;
2420		cdm->matches[j].result.periph_result.path_id =
2421			periph->path->bus->path_id;
2422		cdm->matches[j].result.periph_result.target_id =
2423			periph->path->target->target_id;
2424		cdm->matches[j].result.periph_result.target_lun =
2425			periph->path->device->lun_id;
2426		cdm->matches[j].result.periph_result.unit_number =
2427			periph->unit_number;
2428		strncpy(cdm->matches[j].result.periph_result.periph_name,
2429			periph->periph_name, DEV_IDLEN);
2430	}
2431
2432	return(1);
2433}
2434
2435static int
2436xptedtmatch(struct ccb_dev_match *cdm)
2437{
2438	int ret;
2439
2440	cdm->num_matches = 0;
2441
2442	/*
2443	 * Check the bus list generation.  If it has changed, the user
2444	 * needs to reset everything and start over.
2445	 */
2446	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2447	 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2448	 && (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation)) {
2449		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2450		return(0);
2451	}
2452
2453	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2454	 && (cdm->pos.cookie.bus != NULL))
2455		ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2456				     xptedtbusfunc, cdm);
2457	else
2458		ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2459
2460	/*
2461	 * If we get back 0, that means that we had to stop before fully
2462	 * traversing the EDT.  It also means that one of the subroutines
2463	 * has set the status field to the proper value.  If we get back 1,
2464	 * we've fully traversed the EDT and copied out any matching entries.
2465	 */
2466	if (ret == 1)
2467		cdm->status = CAM_DEV_MATCH_LAST;
2468
2469	return(ret);
2470}
2471
2472static int
2473xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2474{
2475	struct ccb_dev_match *cdm;
2476
2477	cdm = (struct ccb_dev_match *)arg;
2478
2479	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2480	 && (cdm->pos.cookie.pdrv == pdrv)
2481	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2482	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2483	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2484	     (*pdrv)->generation)) {
2485		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2486		return(0);
2487	}
2488
2489	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2490	 && (cdm->pos.cookie.pdrv == pdrv)
2491	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2492	 && (cdm->pos.cookie.periph != NULL))
2493		return(xptpdperiphtraverse(pdrv,
2494				(struct cam_periph *)cdm->pos.cookie.periph,
2495				xptplistperiphfunc, arg));
2496	else
2497		return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
2498}
2499
2500static int
2501xptplistperiphfunc(struct cam_periph *periph, void *arg)
2502{
2503	struct ccb_dev_match *cdm;
2504	dev_match_ret retval;
2505
2506	cdm = (struct ccb_dev_match *)arg;
2507
2508	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2509
2510	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2511		cdm->status = CAM_DEV_MATCH_ERROR;
2512		return(0);
2513	}
2514
2515	/*
2516	 * If the copy flag is set, copy this peripheral out.
2517	 */
2518	if (retval & DM_RET_COPY) {
2519		int spaceleft, j;
2520
2521		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2522			sizeof(struct dev_match_result));
2523
2524		/*
2525		 * If we don't have enough space to put in another
2526		 * match result, save our position and tell the
2527		 * user there are more devices to check.
2528		 */
2529		if (spaceleft < sizeof(struct dev_match_result)) {
2530			struct periph_driver **pdrv;
2531
2532			pdrv = NULL;
2533			bzero(&cdm->pos, sizeof(cdm->pos));
2534			cdm->pos.position_type =
2535				CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2536				CAM_DEV_POS_PERIPH;
2537
2538			/*
2539			 * This may look a bit nonsensical, but it is
2540			 * actually quite logical.  There are very few
2541			 * peripheral drivers, and bloating every peripheral
2542			 * structure with a pointer back to its parent
2543			 * peripheral driver linker set entry would cost
2544			 * more in the long run than doing this quick lookup.
2545			 */
2546			for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2547				if (strcmp((*pdrv)->driver_name,
2548				    periph->periph_name) == 0)
2549					break;
2550			}
2551
2552			if (*pdrv == NULL) {
2553				cdm->status = CAM_DEV_MATCH_ERROR;
2554				return(0);
2555			}
2556
2557			cdm->pos.cookie.pdrv = pdrv;
2558			/*
2559			 * The periph generation slot does double duty, as
2560			 * does the periph pointer slot.  They are used for
2561			 * both edt and pdrv lookups and positioning.
2562			 */
2563			cdm->pos.cookie.periph = periph;
2564			cdm->pos.generations[CAM_PERIPH_GENERATION] =
2565				(*pdrv)->generation;
2566			cdm->status = CAM_DEV_MATCH_MORE;
2567			return(0);
2568		}
2569
2570		j = cdm->num_matches;
2571		cdm->num_matches++;
2572		cdm->matches[j].type = DEV_MATCH_PERIPH;
2573		cdm->matches[j].result.periph_result.path_id =
2574			periph->path->bus->path_id;
2575
2576		/*
2577		 * The transport layer peripheral doesn't have a target or
2578		 * lun.
2579		 */
2580		if (periph->path->target)
2581			cdm->matches[j].result.periph_result.target_id =
2582				periph->path->target->target_id;
2583		else
2584			cdm->matches[j].result.periph_result.target_id = -1;
2585
2586		if (periph->path->device)
2587			cdm->matches[j].result.periph_result.target_lun =
2588				periph->path->device->lun_id;
2589		else
2590			cdm->matches[j].result.periph_result.target_lun = -1;
2591
2592		cdm->matches[j].result.periph_result.unit_number =
2593			periph->unit_number;
2594		strncpy(cdm->matches[j].result.periph_result.periph_name,
2595			periph->periph_name, DEV_IDLEN);
2596	}
2597
2598	return(1);
2599}
2600
2601static int
2602xptperiphlistmatch(struct ccb_dev_match *cdm)
2603{
2604	int ret;
2605
2606	cdm->num_matches = 0;
2607
2608	/*
2609	 * At this point in the edt traversal function, we check the bus
2610	 * list generation to make sure that no busses have been added or
2611	 * removed since the user last sent a XPT_DEV_MATCH ccb through.
2612	 * For the peripheral driver list traversal function, however, we
2613	 * don't have to worry about new peripheral driver types coming or
2614	 * going; they're in a linker set, and therefore can't change
2615	 * without a recompile.
2616	 */
2617
2618	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2619	 && (cdm->pos.cookie.pdrv != NULL))
2620		ret = xptpdrvtraverse(
2621				(struct periph_driver **)cdm->pos.cookie.pdrv,
2622				xptplistpdrvfunc, cdm);
2623	else
2624		ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2625
2626	/*
2627	 * If we get back 0, that means that we had to stop before fully
2628	 * traversing the peripheral driver tree.  It also means that one of
2629	 * the subroutines has set the status field to the proper value.  If
2630	 * we get back 1, we've fully traversed the peripheral driver lists
2631	 * and copied out any matching entries.
2632	 */
2633	if (ret == 1)
2634		cdm->status = CAM_DEV_MATCH_LAST;
2635
2636	return(ret);
2637}
2638
2639static int
2640xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2641{
2642	struct cam_eb *bus, *next_bus;
2643	int retval;
2644
2645	retval = 1;
2646
2647	mtx_lock(&xsoftc.xpt_topo_lock);
2648	for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses));
2649	     bus != NULL;
2650	     bus = next_bus) {
2651		next_bus = TAILQ_NEXT(bus, links);
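		/*
		 * Capture the next bus pointer while the topology lock
		 * is still held; tr_func runs with it dropped and the
		 * bus's SIM lock held instead.
		 */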
2652
2653		mtx_unlock(&xsoftc.xpt_topo_lock);
2654		CAM_SIM_LOCK(bus->sim);
2655		retval = tr_func(bus, arg);
2656		CAM_SIM_UNLOCK(bus->sim);
2657		if (retval == 0)
2658			return(retval);
2659		mtx_lock(&xsoftc.xpt_topo_lock);
2660	}
2661	mtx_unlock(&xsoftc.xpt_topo_lock);
2662
2663	return(retval);
2664}
2665
2666static int
2667xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2668		  xpt_targetfunc_t *tr_func, void *arg)
2669{
2670	struct cam_et *target, *next_target;
2671	int retval;
2672
2673	retval = 1;
2674	for (target = (start_target ? start_target :
2675		       TAILQ_FIRST(&bus->et_entries));
2676	     target != NULL; target = next_target) {
2677
2678		next_target = TAILQ_NEXT(target, links);
2679
2680		retval = tr_func(target, arg);
2681
2682		if (retval == 0)
2683			return(retval);
2684	}
2685
2686	return(retval);
2687}
2688
2689static int
2690xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2691		  xpt_devicefunc_t *tr_func, void *arg)
2692{
2693	struct cam_ed *device, *next_device;
2694	int retval;
2695
2696	retval = 1;
2697	for (device = (start_device ? start_device :
2698		       TAILQ_FIRST(&target->ed_entries));
2699	     device != NULL;
2700	     device = next_device) {
2701
2702		next_device = TAILQ_NEXT(device, links);
2703
2704		retval = tr_func(device, arg);
2705
2706		if (retval == 0)
2707			return(retval);
2708	}
2709
2710	return(retval);
2711}
2712
2713static int
2714xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2715		  xpt_periphfunc_t *tr_func, void *arg)
2716{
2717	struct cam_periph *periph, *next_periph;
2718	int retval;
2719
2720	retval = 1;
2721
2722	for (periph = (start_periph ? start_periph :
2723		       SLIST_FIRST(&device->periphs));
2724	     periph != NULL;
2725	     periph = next_periph) {
2726
2727		next_periph = SLIST_NEXT(periph, periph_links);
2728
2729		retval = tr_func(periph, arg);
2730		if (retval == 0)
2731			return(retval);
2732	}
2733
2734	return(retval);
2735}
2736
2737static int
2738xptpdrvtraverse(struct periph_driver **start_pdrv,
2739		xpt_pdrvfunc_t *tr_func, void *arg)
2740{
2741	struct periph_driver **pdrv;
2742	int retval;
2743
2744	retval = 1;
2745
2746	/*
2747	 * We don't traverse the peripheral driver list like we do the
2748	 * other lists, because it is a linker set, and therefore cannot be
2749	 * changed during runtime.  If the peripheral driver list is ever
2750	 * re-done to be something other than a linker set (i.e. it can
2751	 * change while the system is running), the list traversal should
2752	 * be modified to work like the other traversal functions.
2753	 */
2754	for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2755	     *pdrv != NULL; pdrv++) {
2756		retval = tr_func(pdrv, arg);
2757
2758		if (retval == 0)
2759			return(retval);
2760	}
2761
2762	return(retval);
2763}
2764
2765static int
2766xptpdperiphtraverse(struct periph_driver **pdrv,
2767		    struct cam_periph *start_periph,
2768		    xpt_periphfunc_t *tr_func, void *arg)
2769{
2770	struct cam_periph *periph, *next_periph;
2771	int retval;
2772
2773	retval = 1;
2774
2775	for (periph = (start_periph ? start_periph :
2776	     TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2777	     periph = next_periph) {
2778
2779		next_periph = TAILQ_NEXT(periph, unit_links);
2780
2781		retval = tr_func(periph, arg);
2782		if (retval == 0)
2783			return(retval);
2784	}
2785	return(retval);
2786}
2787
2788static int
2789xptdefbusfunc(struct cam_eb *bus, void *arg)
2790{
2791	struct xpt_traverse_config *tr_config;
2792
2793	tr_config = (struct xpt_traverse_config *)arg;
2794
2795	if (tr_config->depth == XPT_DEPTH_BUS) {
2796		xpt_busfunc_t *tr_func;
2797
2798		tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2799
2800		return(tr_func(bus, tr_config->tr_arg));
2801	} else
2802		return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2803}
2804
2805static int
2806xptdeftargetfunc(struct cam_et *target, void *arg)
2807{
2808	struct xpt_traverse_config *tr_config;
2809
2810	tr_config = (struct xpt_traverse_config *)arg;
2811
2812	if (tr_config->depth == XPT_DEPTH_TARGET) {
2813		xpt_targetfunc_t *tr_func;
2814
2815		tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2816
2817		return(tr_func(target, tr_config->tr_arg));
2818	} else
2819		return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2820}
2821
2822static int
2823xptdefdevicefunc(struct cam_ed *device, void *arg)
2824{
2825	struct xpt_traverse_config *tr_config;
2826
2827	tr_config = (struct xpt_traverse_config *)arg;
2828
2829	if (tr_config->depth == XPT_DEPTH_DEVICE) {
2830		xpt_devicefunc_t *tr_func;
2831
2832		tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2833
2834		return(tr_func(device, tr_config->tr_arg));
2835	} else
2836		return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2837}
2838
2839static int
2840xptdefperiphfunc(struct cam_periph *periph, void *arg)
2841{
2842	struct xpt_traverse_config *tr_config;
2843	xpt_periphfunc_t *tr_func;
2844
2845	tr_config = (struct xpt_traverse_config *)arg;
2846
2847	tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2848
2849	/*
2850	 * Unlike the other default functions, we don't check for depth
2851	 * here.  The peripheral driver level is the last level in the EDT,
2852	 * so if we're here, we should execute the function in question.
2853	 */
2854	return(tr_func(periph, tr_config->tr_arg));
2855}
2856
2857/*
2858 * Execute the given function for every bus in the EDT.
2859 */
2860static int
2861xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2862{
2863	struct xpt_traverse_config tr_config;
2864
2865	tr_config.depth = XPT_DEPTH_BUS;
2866	tr_config.tr_func = tr_func;
2867	tr_config.tr_arg = arg;
2868
2869	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2870}
2871
2872/*
2873 * Execute the given function for every device in the EDT.
2874 */
2875static int
2876xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2877{
2878	struct xpt_traverse_config tr_config;
2879
2880	tr_config.depth = XPT_DEPTH_DEVICE;
2881	tr_config.tr_func = tr_func;
2882	tr_config.tr_arg = arg;
2883
2884	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2885}
2886
2887static int
2888xptsetasyncfunc(struct cam_ed *device, void *arg)
2889{
2890	struct cam_path path;
2891	struct ccb_getdev cgd;
2892	struct async_node *cur_entry;
2893
2894	cur_entry = (struct async_node *)arg;
2895
2896	/*
2897	 * Don't report unconfigured devices (Wildcard devs,
2898	 * devices only for target mode, device instances
2899	 * that have been invalidated but are waiting for
2900	 * their last reference to be released).
2901	 */
2902	if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2903		return (1);
2904
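	/*
	 * Build a temporary path on the stack.  xpt_compile_path()
	 * fills in a caller-supplied path structure, so it is paired
	 * with xpt_release_path() rather than xpt_free_path().
	 */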
2905	xpt_compile_path(&path,
2906			 NULL,
2907			 device->target->bus->path_id,
2908			 device->target->target_id,
2909			 device->lun_id);
2910	xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2911	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2912	xpt_action((union ccb *)&cgd);
2913	cur_entry->callback(cur_entry->callback_arg,
2914			    AC_FOUND_DEVICE,
2915			    &path, &cgd);
2916	xpt_release_path(&path);
2917
2918	return(1);
2919}
2920
2921static int
2922xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2923{
2924	struct cam_path path;
2925	struct ccb_pathinq cpi;
2926	struct async_node *cur_entry;
2927
2928	cur_entry = (struct async_node *)arg;
2929
2930	xpt_compile_path(&path, /*periph*/NULL,
2931			 bus->sim->path_id,
2932			 CAM_TARGET_WILDCARD,
2933			 CAM_LUN_WILDCARD);
2934	xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2935	cpi.ccb_h.func_code = XPT_PATH_INQ;
2936	xpt_action((union ccb *)&cpi);
2937	cur_entry->callback(cur_entry->callback_arg,
2938			    AC_PATH_REGISTERED,
2939			    &path, &cpi);
2940	xpt_release_path(&path);
2941
2942	return(1);
2943}
2944
2945static void
2946xpt_action_sasync_cb(void *context, int pending)
2947{
2948	struct async_node *cur_entry;
2949	struct xpt_task *task;
2950	uint32_t added;
2951
2952	task = (struct xpt_task *)context;
2953	cur_entry = (struct async_node *)task->data1;
2954	added = task->data2;
2955
2956	if ((added & AC_FOUND_DEVICE) != 0) {
2957		/*
2958		 * Get this peripheral up to date with all
2959		 * the currently existing devices.
2960		 */
2961		xpt_for_all_devices(xptsetasyncfunc, cur_entry);
2962	}
2963	if ((added & AC_PATH_REGISTERED) != 0) {
2964		/*
2965		 * Get this peripheral up to date with all
2966		 * the currently existing busses.
2967		 */
2968		xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
2969	}
2970
2971	free(task, M_CAMXPT);
2972}
2973
2974void
2975xpt_action(union ccb *start_ccb)
2976{
2977
2978	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2979
2980	start_ccb->ccb_h.status = CAM_REQ_INPROG;
2981
2982	switch (start_ccb->ccb_h.func_code) {
2983	case XPT_SCSI_IO:
2984	{
2985		struct cam_ed *device;
2986#ifdef CAMDEBUG
2987		char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2988		struct cam_path *path;
2989
2990		path = start_ccb->ccb_h.path;
2991#endif
2992
2993		/*
2994		 * For the sake of compatibility with SCSI-1
2995		 * devices that may not understand the identify
2996		 * message, we include lun information in the
2997		 * second byte of all commands.  SCSI-1 specifies
2998		 * that luns are a 3 bit value and reserves only 3
2999		 * bits for lun information in the CDB.  Later
3000		 * revisions of the SCSI spec allow for more than 8
3001		 * luns, but have deprecated lun information in the
3002		 * CDB.  So, if the lun won't fit, we must omit it.
3003		 *
3004		 * Also be aware that during initial probing for devices,
3005		 * the inquiry information is unknown but initialized to 0.
3006		 * This means that this code will be exercised while probing
3007		 * devices with an ANSI revision greater than 2.
3008		 */
3009		device = start_ccb->ccb_h.path->device;
3010		if (device->protocol_version <= SCSI_REV_2
3011		 && start_ccb->ccb_h.target_lun < 8
3012		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
3013
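			/* e.g. LUN 2 ORs 0x40 into CDB byte 1 (2 << 5). */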
3014			start_ccb->csio.cdb_io.cdb_bytes[1] |=
3015			    start_ccb->ccb_h.target_lun << 5;
3016		}
3017		start_ccb->csio.scsi_status = SCSI_STATUS_OK;
3018		CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
3019			  scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
3020			  	       &path->device->inq_data),
3021			  scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
3022					  cdb_str, sizeof(cdb_str))));
3023	}
3024	/* FALLTHROUGH */
3025	case XPT_TARGET_IO:
3026	case XPT_CONT_TARGET_IO:
3027		start_ccb->csio.sense_resid = 0;
3028		start_ccb->csio.resid = 0;
3029		/* FALLTHROUGH */
3030	case XPT_RESET_DEV:
3031	case XPT_ENG_EXEC:
3032	{
3033		struct cam_path *path;
3034		struct cam_sim *sim;
3035		int runq;
3036
3037		path = start_ccb->ccb_h.path;
3038
3039		sim = path->bus->sim;
3040		if (SIM_DEAD(sim)) {
3041			/* The SIM has gone; just execute the CCB directly. */
3042			cam_ccbq_send_ccb(&path->device->ccbq, start_ccb);
3043			(*(sim->sim_action))(sim, start_ccb);
3044			break;
3045		}
3046
3047		cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
3048		if (path->device->qfrozen_cnt == 0)
3049			runq = xpt_schedule_dev_sendq(path->bus, path->device);
3050		else
3051			runq = 0;
3052		if (runq != 0)
3053			xpt_run_dev_sendq(path->bus);
3054		break;
3055	}
3056	case XPT_SET_TRAN_SETTINGS:
3057	{
3058		xpt_set_transfer_settings(&start_ccb->cts,
3059					  start_ccb->ccb_h.path->device,
3060					  /*async_update*/FALSE);
3061		break;
3062	}
3063	case XPT_CALC_GEOMETRY:
3064	{
3065		struct cam_sim *sim;
3066
3067		/* Filter out garbage */
3068		if (start_ccb->ccg.block_size == 0
3069		 || start_ccb->ccg.volume_size == 0) {
3070			start_ccb->ccg.cylinders = 0;
3071			start_ccb->ccg.heads = 0;
3072			start_ccb->ccg.secs_per_track = 0;
3073			start_ccb->ccb_h.status = CAM_REQ_CMP;
3074			break;
3075		}
3076#ifdef PC98
3077		/*
3078		 * In a PC-98 system, geometry translation depends on
3079		 * the "real" device geometry obtained from mode page 4.
3080		 * SCSI geometry translation is performed in the
3081		 * initialization routine of the SCSI BIOS and the result
3082		 * stored in host memory.  If the translation is available
3083		 * in host memory, use it.  If not, rely on the default
3084		 * translation the device driver performs.
3085		 */
3086		if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
3087			start_ccb->ccb_h.status = CAM_REQ_CMP;
3088			break;
3089		}
3090#endif
3091		sim = start_ccb->ccb_h.path->bus->sim;
3092		(*(sim->sim_action))(sim, start_ccb);
3093		break;
3094	}
3095	case XPT_ABORT:
3096	{
3097		union ccb* abort_ccb;
3098
3099		abort_ccb = start_ccb->cab.abort_ccb;
3100		if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
3101
3102			if (abort_ccb->ccb_h.pinfo.index >= 0) {
3103				struct cam_ccbq *ccbq;
3104
3105				ccbq = &abort_ccb->ccb_h.path->device->ccbq;
3106				cam_ccbq_remove_ccb(ccbq, abort_ccb);
3107				abort_ccb->ccb_h.status =
3108				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3109				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3110				xpt_done(abort_ccb);
3111				start_ccb->ccb_h.status = CAM_REQ_CMP;
3112				break;
3113			}
3114			if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
3115			 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
3116				/*
3117				 * We've caught this ccb en route to
3118				 * the SIM.  Flag it for abort and the
3119				 * SIM will do so just before starting
3120				 * real work on the CCB.
3121				 */
3122				abort_ccb->ccb_h.status =
3123				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3124				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3125				start_ccb->ccb_h.status = CAM_REQ_CMP;
3126				break;
3127			}
3128		}
3129		if (XPT_FC_IS_QUEUED(abort_ccb)
3130		 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
3131			/*
3132			 * It's already completed but waiting
3133			 * for our SWI to get to it.
3134			 */
3135			start_ccb->ccb_h.status = CAM_UA_ABORT;
3136			break;
3137		}
3138		/*
3139		 * If we weren't able to take care of the abort request
3140		 * in the XPT, pass the request down to the SIM for processing.
3141		 */
3142	}
3143	/* FALLTHROUGH */
3144	case XPT_ACCEPT_TARGET_IO:
3145	case XPT_EN_LUN:
3146	case XPT_IMMED_NOTIFY:
3147	case XPT_NOTIFY_ACK:
3148	case XPT_GET_TRAN_SETTINGS:
3149	case XPT_RESET_BUS:
3150	{
3151		struct cam_sim *sim;
3152
3153		sim = start_ccb->ccb_h.path->bus->sim;
3154		(*(sim->sim_action))(sim, start_ccb);
3155		break;
3156	}
3157	case XPT_PATH_INQ:
3158	{
3159		struct cam_sim *sim;
3160
3161		sim = start_ccb->ccb_h.path->bus->sim;
3162		(*(sim->sim_action))(sim, start_ccb);
3163		break;
3164	}
3165	case XPT_PATH_STATS:
3166		start_ccb->cpis.last_reset =
3167			start_ccb->ccb_h.path->bus->last_reset;
3168		start_ccb->ccb_h.status = CAM_REQ_CMP;
3169		break;
3170	case XPT_GDEV_TYPE:
3171	{
3172		struct cam_ed *dev;
3173
3174		dev = start_ccb->ccb_h.path->device;
3175		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3176			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3177		} else {
3178			struct ccb_getdev *cgd;
3179			struct cam_eb *bus;
3180			struct cam_et *tar;
3181
3182			cgd = &start_ccb->cgd;
3183			bus = cgd->ccb_h.path->bus;
3184			tar = cgd->ccb_h.path->target;
3185			cgd->inq_data = dev->inq_data;
3186			cgd->ccb_h.status = CAM_REQ_CMP;
3187			cgd->serial_num_len = dev->serial_num_len;
3188			if ((dev->serial_num_len > 0)
3189			 && (dev->serial_num != NULL))
3190				bcopy(dev->serial_num, cgd->serial_num,
3191				      dev->serial_num_len);
3192		}
3193		break;
3194	}
3195	case XPT_GDEV_STATS:
3196	{
3197		struct cam_ed *dev;
3198
3199		dev = start_ccb->ccb_h.path->device;
3200		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3201			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3202		} else {
3203			struct ccb_getdevstats *cgds;
3204			struct cam_eb *bus;
3205			struct cam_et *tar;
3206
3207			cgds = &start_ccb->cgds;
3208			bus = cgds->ccb_h.path->bus;
3209			tar = cgds->ccb_h.path->target;
3210			cgds->dev_openings = dev->ccbq.dev_openings;
3211			cgds->dev_active = dev->ccbq.dev_active;
3212			cgds->devq_openings = dev->ccbq.devq_openings;
3213			cgds->devq_queued = dev->ccbq.queue.entries;
3214			cgds->held = dev->ccbq.held;
3215			cgds->last_reset = tar->last_reset;
3216			cgds->maxtags = dev->quirk->maxtags;
3217			cgds->mintags = dev->quirk->mintags;
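			/* Report whichever reset, target or bus, was more recent. */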
3218			if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
3219				cgds->last_reset = bus->last_reset;
3220			cgds->ccb_h.status = CAM_REQ_CMP;
3221		}
3222		break;
3223	}
3224	case XPT_GDEVLIST:
3225	{
3226		struct cam_periph	*nperiph;
3227		struct periph_list	*periph_head;
3228		struct ccb_getdevlist	*cgdl;
3229		u_int			i;
3230		struct cam_ed		*device;
3231		int			found;
3232
3233
3234		found = 0;
3235
3236		/*
3237		 * Don't want anyone mucking with our data.
3238		 */
3239		device = start_ccb->ccb_h.path->device;
3240		periph_head = &device->periphs;
3241		cgdl = &start_ccb->cgdl;
3242
3243		/*
3244		 * Check and see if the list has changed since the user
3245		 * last requested a list member.  If so, tell them that the
3246		 * list has changed, and therefore they need to start over
3247		 * from the beginning.
3248		 */
3249		if ((cgdl->index != 0) &&
3250		    (cgdl->generation != device->generation)) {
3251			cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3252			break;
3253		}
3254
3255		/*
3256		 * Traverse the list of peripherals and attempt to find
3257		 * the requested peripheral.
3258		 */
3259		for (nperiph = SLIST_FIRST(periph_head), i = 0;
3260		     (nperiph != NULL) && (i <= cgdl->index);
3261		     nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
3262			if (i == cgdl->index) {
3263				strncpy(cgdl->periph_name,
3264					nperiph->periph_name,
3265					DEV_IDLEN);
3266				cgdl->unit_number = nperiph->unit_number;
3267				found = 1;
3268			}
3269		}
3270		if (found == 0) {
3271			cgdl->status = CAM_GDEVLIST_ERROR;
3272			break;
3273		}
3274
3275		if (nperiph == NULL)
3276			cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3277		else
3278			cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3279
3280		cgdl->index++;
3281		cgdl->generation = device->generation;
3282
3283		cgdl->ccb_h.status = CAM_REQ_CMP;
3284		break;
3285	}
3286	case XPT_DEV_MATCH:
3287	{
3288		dev_pos_type position_type;
3289		struct ccb_dev_match *cdm;
3290
3291		cdm = &start_ccb->cdm;
3292
3293		/*
3294		 * There are two ways of getting at information in the EDT.
3295		 * The first way is via the primary EDT tree.  It starts
3296		 * with a list of busses, then a list of targets on a bus,
3297		 * then devices/luns on a target, and then peripherals on a
3298		 * device/lun.  The "other" way is by the peripheral driver
3299		 * lists.  The peripheral driver lists are organized by
3300		 * peripheral driver.  (obviously)  So it makes sense to
3301		 * use the peripheral driver list if the user is looking
3302		 * for something like "da1", or all "da" devices.  If the
3303		 * user is looking for something on a particular bus/target
3304		 * or lun, it's generally better to go through the EDT tree.
3305		 */
3306
3307		if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3308			position_type = cdm->pos.position_type;
3309		else {
3310			u_int i;
3311
3312			position_type = CAM_DEV_POS_NONE;
3313
3314			for (i = 0; i < cdm->num_patterns; i++) {
3315				if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3316				 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3317					position_type = CAM_DEV_POS_EDT;
3318					break;
3319				}
3320			}
3321
3322			if (cdm->num_patterns == 0)
3323				position_type = CAM_DEV_POS_EDT;
3324			else if (position_type == CAM_DEV_POS_NONE)
3325				position_type = CAM_DEV_POS_PDRV;
3326		}
3327
3328		switch (position_type & CAM_DEV_POS_TYPEMASK) {
3329		case CAM_DEV_POS_EDT:
3330			xptedtmatch(cdm);
3331			break;
3332		case CAM_DEV_POS_PDRV:
3333			xptperiphlistmatch(cdm);
3334			break;
3335		default:
3336			cdm->status = CAM_DEV_MATCH_ERROR;
3337			break;
3338		}
3339
3340		if (cdm->status == CAM_DEV_MATCH_ERROR)
3341			start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3342		else
3343			start_ccb->ccb_h.status = CAM_REQ_CMP;
3344
3345		break;
3346	}
3347	case XPT_SASYNC_CB:
3348	{
3349		struct ccb_setasync *csa;
3350		struct async_node *cur_entry;
3351		struct async_list *async_head;
3352		u_int32_t added;
3353
3354		csa = &start_ccb->csa;
3355		added = csa->event_enable;
3356		async_head = &csa->ccb_h.path->device->asyncs;
3357
3358		/*
3359		 * If there is already an entry for us, simply
3360		 * update it.
3361		 */
3362		cur_entry = SLIST_FIRST(async_head);
3363		while (cur_entry != NULL) {
3364			if ((cur_entry->callback_arg == csa->callback_arg)
3365			 && (cur_entry->callback == csa->callback))
3366				break;
3367			cur_entry = SLIST_NEXT(cur_entry, links);
3368		}
3369
3370		if (cur_entry != NULL) {
3371			/*
3372			 * If the request has no flags set,
3373			 * remove the entry.
3374			 */
3375			added &= ~cur_entry->event_enable;
3376			if (csa->event_enable == 0) {
3377				SLIST_REMOVE(async_head, cur_entry,
3378					     async_node, links);
3379				csa->ccb_h.path->device->refcount--;
3380				free(cur_entry, M_CAMXPT);
3381			} else {
3382				cur_entry->event_enable = csa->event_enable;
3383			}
3384		} else {
3385			cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
3386					   M_NOWAIT);
3387			if (cur_entry == NULL) {
3388				csa->ccb_h.status = CAM_RESRC_UNAVAIL;
3389				break;
3390			}
3391			cur_entry->event_enable = csa->event_enable;
3392			cur_entry->callback_arg = csa->callback_arg;
3393			cur_entry->callback = csa->callback;
3394			SLIST_INSERT_HEAD(async_head, cur_entry, links);
3395			csa->ccb_h.path->device->refcount++;
3396		}
3397
3398		/*
3399		 * Need to decouple this operation via a taskqueue so that
3400		 * the locking doesn't become a mess.
3401		 */
3402		if ((added & (AC_FOUND_DEVICE | AC_PATH_REGISTERED)) != 0) {
3403			struct xpt_task *task;
3404
3405			task = malloc(sizeof(struct xpt_task), M_CAMXPT,
3406				      M_NOWAIT);
3407			if (task == NULL) {
3408				csa->ccb_h.status = CAM_RESRC_UNAVAIL;
3409				break;
3410			}
3411
3412			TASK_INIT(&task->task, 0, xpt_action_sasync_cb, task);
3413			task->data1 = cur_entry;
3414			task->data2 = added;
3415			taskqueue_enqueue(taskqueue_thread, &task->task);
3416		}
3417
3418		start_ccb->ccb_h.status = CAM_REQ_CMP;
3419		break;
3420	}
3421	case XPT_REL_SIMQ:
3422	{
3423		struct ccb_relsim *crs;
3424		struct cam_ed *dev;
3425
3426		crs = &start_ccb->crs;
3427		dev = crs->ccb_h.path->device;
3428		if (dev == NULL) {
3429
3430			crs->ccb_h.status = CAM_DEV_NOT_THERE;
3431			break;
3432		}
3433
3434		if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3435
3436 			if (INQ_DATA_TQ_ENABLED(&dev->inq_data)) {
3437				/* Don't ever go below one opening */
3438				if (crs->openings > 0) {
3439					xpt_dev_ccbq_resize(crs->ccb_h.path,
3440							    crs->openings);
3441
3442					if (bootverbose) {
3443						xpt_print(crs->ccb_h.path,
3444						    "tagged openings now %d\n",
3445						    crs->openings);
3446					}
3447				}
3448			}
3449		}
3450
3451		if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3452
3453			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3454
3455				/*
3456				 * Just extend the old timeout and decrement
3457				 * the freeze count so that a single timeout
3458				 * is sufficient for releasing the queue.
3459				 */
3460				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3461				callout_stop(&dev->callout);
3462			} else {
3463
3464				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3465			}
3466
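			/* release_timeout is in ms; scale by hz to get ticks. */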
3467			callout_reset(&dev->callout,
3468			    (crs->release_timeout * hz) / 1000,
3469			    xpt_release_devq_timeout, dev);
3470
3471			dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3472
3473		}
3474
3475		if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3476
3477			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3478				/*
3479				 * Decrement the freeze count so that a single
3480				 * completion is still sufficient to unfreeze
3481				 * the queue.
3482				 */
3483				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3484			} else {
3485
3486				dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3487				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3488			}
3489		}
3490
3491		if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3492
3493			if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3494			 || (dev->ccbq.dev_active == 0)) {
3495
3496				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3497			} else {
3498
3499				dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3500				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3501			}
3502		}
3503
3504		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3505
3506			xpt_release_devq(crs->ccb_h.path, /*count*/1,
3507					 /*run_queue*/TRUE);
3508		}
3509		start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3510		start_ccb->ccb_h.status = CAM_REQ_CMP;
3511		break;
3512	}
3513	case XPT_SCAN_BUS:
3514		xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3515		break;
3516	case XPT_SCAN_LUN:
3517		xpt_scan_lun(start_ccb->ccb_h.path->periph,
3518			     start_ccb->ccb_h.path, start_ccb->crcn.flags,
3519			     start_ccb);
3520		break;
3521	case XPT_DEBUG: {
3522#ifdef CAMDEBUG
3523#ifdef CAM_DEBUG_DELAY
3524		cam_debug_delay = CAM_DEBUG_DELAY;
3525#endif
3526		cam_dflags = start_ccb->cdbg.flags;
3527		if (cam_dpath != NULL) {
3528			xpt_free_path(cam_dpath);
3529			cam_dpath = NULL;
3530		}
3531
3532		if (cam_dflags != CAM_DEBUG_NONE) {
3533			if (xpt_create_path(&cam_dpath, xpt_periph,
3534					    start_ccb->ccb_h.path_id,
3535					    start_ccb->ccb_h.target_id,
3536					    start_ccb->ccb_h.target_lun) !=
3537					    CAM_REQ_CMP) {
3538				start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3539				cam_dflags = CAM_DEBUG_NONE;
3540			} else {
3541				start_ccb->ccb_h.status = CAM_REQ_CMP;
3542				xpt_print(cam_dpath, "debugging flags now %x\n",
3543				    cam_dflags);
3544			}
3545		} else {
3546			cam_dpath = NULL;
3547			start_ccb->ccb_h.status = CAM_REQ_CMP;
3548		}
3549#else /* !CAMDEBUG */
3550		start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3551#endif /* CAMDEBUG */
3552		break;
3553	}
3554	case XPT_NOOP:
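		/*
		 * A no-op still honors CAM_DEV_QFREEZE, so callers can
		 * use it purely to freeze a device queue.
		 */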
3555		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3556			xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3557		start_ccb->ccb_h.status = CAM_REQ_CMP;
3558		break;
3559	default:
3560	case XPT_SDEV_TYPE:
3561	case XPT_TERM_IO:
3562	case XPT_ENG_INQ:
3563		/* XXX Implement */
3564		start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3565		break;
3566	}
3567}
3568
3569void
3570xpt_polled_action(union ccb *start_ccb)
3571{
3572	u_int32_t timeout;
3573	struct	  cam_sim *sim;
3574	struct	  cam_devq *devq;
3575	struct	  cam_ed *dev;
3576
3577
3578	timeout = start_ccb->ccb_h.timeout;
3579	sim = start_ccb->ccb_h.path->bus->sim;
3580	devq = sim->devq;
3581	dev = start_ccb->ccb_h.path->device;
3582
3583	mtx_assert(sim->mtx, MA_OWNED);
3584
3585	/*
3586	 * Steal an opening so that no other queued requests
3587	 * can get it before us while we simulate interrupts.
3588	 */
3589	dev->ccbq.devq_openings--;
3590	dev->ccbq.dev_openings--;
3591
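	/*
	 * ccb_h.timeout is in milliseconds, and each DELAY(1000) below
	 * burns roughly one millisecond, so --timeout approximates a
	 * millisecond countdown.
	 */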
3592	while (((devq != NULL && devq->send_openings <= 0) ||
3593	   dev->ccbq.dev_openings < 0) && (--timeout > 0)) {
3594		DELAY(1000);
3595		(*(sim->sim_poll))(sim);
3596		camisr_runqueue(&sim->sim_doneq);
3597	}
3598
3599	dev->ccbq.devq_openings++;
3600	dev->ccbq.dev_openings++;
3601
3602	if (timeout != 0) {
3603		xpt_action(start_ccb);
3604		while (--timeout > 0) {
3605			(*(sim->sim_poll))(sim);
3606			camisr_runqueue(&sim->sim_doneq);
3607			if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
3608			    != CAM_REQ_INPROG)
3609				break;
3610			DELAY(1000);
3611		}
3612		if (timeout == 0) {
3613			/*
3614			 * XXX Is it worth adding a sim_timeout entry
3615			 * point so we can attempt recovery?  If
3616			 * this is only used for dumps, I don't think
3617			 * it is.
3618			 */
3619			start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3620		}
3621	} else {
3622		start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3623	}
3624}
3625
3626/*
3627 * Schedule a peripheral driver to receive a ccb when its
3628 * target device has space for more transactions.
3629 */
3630void
3631xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3632{
3633	struct cam_ed *device;
3634	union ccb *work_ccb;
3635	int runq;
3636
3637	mtx_assert(perph->sim->mtx, MA_OWNED);
3638
3639	CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3640	device = perph->path->device;
3641	if (periph_is_queued(perph)) {
3642		/* Simply reorder based on new priority */
3643		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3644			  ("   change priority to %d\n", new_priority));
3645		if (new_priority < perph->pinfo.priority) {
3646			camq_change_priority(&device->drvq,
3647					     perph->pinfo.index,
3648					     new_priority);
3649		}
3650		runq = 0;
3651	} else if (SIM_DEAD(perph->path->bus->sim)) {
3652		/* The SIM is gone so just call periph_start directly. */
3653		work_ccb = xpt_get_ccb(perph->path->device);
3654		if (work_ccb == NULL)
3655			return; /* XXX */
3656		xpt_setup_ccb(&work_ccb->ccb_h, perph->path, new_priority);
3657		perph->pinfo.priority = new_priority;
3658		perph->periph_start(perph, work_ccb);
3659		return;
3660	} else {
3661		/* New entry on the queue */
3662		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3663			  ("   added periph to queue\n"));
3664		perph->pinfo.priority = new_priority;
3665		perph->pinfo.generation = ++device->drvq.generation;
3666		camq_insert(&device->drvq, &perph->pinfo);
3667		runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3668	}
3669	if (runq != 0) {
3670		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3671			  ("   calling xpt_run_dev_allocq\n"));
3672		xpt_run_dev_allocq(perph->path->bus);
3673	}
3674}
3675
3677/*
3678 * Schedule a device to run on a given queue.
3679 * If the device was inserted as a new entry on the queue,
3680 * return 1 meaning the device queue should be run. If we
3681 * were already queued, implying someone else has already
3682 * started the queue, return 0 so the caller doesn't attempt
3683 * to run the queue.
3684 */
3685static int
3686xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3687		 u_int32_t new_priority)
3688{
3689	int retval;
3690	u_int32_t old_priority;
3691
3692	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3693
3694	old_priority = pinfo->priority;
3695
3696	/*
3697	 * Are we already queued?
3698	 */
3699	if (pinfo->index != CAM_UNQUEUED_INDEX) {
3700		/* Simply reorder based on new priority */
3701		if (new_priority < old_priority) {
3702			camq_change_priority(queue, pinfo->index,
3703					     new_priority);
3704			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3705					("changed priority to %d\n",
3706					 new_priority));
3707		}
3708		retval = 0;
3709	} else {
3710		/* New entry on the queue */
3711		if (new_priority < old_priority)
3712			pinfo->priority = new_priority;
3713
3714		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3715				("Inserting onto queue\n"));
3716		pinfo->generation = ++queue->generation;
3717		camq_insert(queue, pinfo);
3718		retval = 1;
3719	}
3720	return (retval);
3721}
3722
3723static void
3724xpt_run_dev_allocq(struct cam_eb *bus)
3725{
3726	struct	cam_devq *devq;
3727
3728	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3729	devq = bus->sim->devq;
3730
3731	CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3732			("   qfrozen_cnt == 0x%x, entries == %d, "
3733			 "openings == %d, active == %d\n",
3734			 devq->alloc_queue.qfrozen_cnt,
3735			 devq->alloc_queue.entries,
3736			 devq->alloc_openings,
3737			 devq->alloc_active));
3738
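	/*
	 * The temporary freeze below, combined with the "<= 1" test in
	 * the loop condition, acts as a reentrancy guard: a nested call
	 * or an external freeze stops the drain immediately.
	 */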
3739	devq->alloc_queue.qfrozen_cnt++;
3740	while ((devq->alloc_queue.entries > 0)
3741	    && (devq->alloc_openings > 0)
3742	    && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3743		struct	cam_ed_qinfo *qinfo;
3744		struct	cam_ed *device;
3745		union	ccb *work_ccb;
3746		struct	cam_periph *drv;
3747		struct	camq *drvq;
3748
3749		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3750							   CAMQ_HEAD);
3751		device = qinfo->device;
3752
3753		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3754				("running device %p\n", device));
3755
3756		drvq = &device->drvq;
3757
3758#ifdef CAMDEBUG
3759		if (drvq->entries <= 0) {
3760			panic("xpt_run_dev_allocq: "
3761			      "Device on queue without any work to do");
3762		}
3763#endif
3764		if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3765			devq->alloc_openings--;
3766			devq->alloc_active++;
3767			drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3768			xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3769				      drv->pinfo.priority);
3770			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3771					("calling periph start\n"));
3772			drv->periph_start(drv, work_ccb);
3773		} else {
3774			/*
3775			 * Malloc failure in alloc_ccb
3776			 */
3777			/*
3778			 * XXX add us to a list to be run from free_ccb
3779			 * if we don't have any ccbs active on this
3780			 * device queue otherwise we may never get run
3781			 * again.
3782			 */
3783			break;
3784		}
3785
3786		if (drvq->entries > 0) {
3787			/* We have more work.  Attempt to reschedule */
3788			xpt_schedule_dev_allocq(bus, device);
3789		}
3790	}
3791	devq->alloc_queue.qfrozen_cnt--;
3792}
3793
3794static void
3795xpt_run_dev_sendq(struct cam_eb *bus)
3796{
3797	struct	cam_devq *devq;
3798
3799	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3800
3801	devq = bus->sim->devq;
3802
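	/*
	 * As in xpt_run_dev_allocq(), the temporary freeze below keeps
	 * this loop from being re-entered while the queue drains.
	 */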
3803	devq->send_queue.qfrozen_cnt++;
3804	while ((devq->send_queue.entries > 0)
3805	    && (devq->send_openings > 0)) {
3806		struct	cam_ed_qinfo *qinfo;
3807		struct	cam_ed *device;
3808		union ccb *work_ccb;
3809		struct	cam_sim *sim;
3810
3811		if (devq->send_queue.qfrozen_cnt > 1) {
3812			break;
3813		}
3814
3815		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3816							   CAMQ_HEAD);
3817		device = qinfo->device;
3818
3819		/*
3820		 * If the device has been "frozen", don't attempt
3821		 * to run it.
3822		 */
3823		if (device->qfrozen_cnt > 0) {
3824			continue;
3825		}
3826
3827		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3828				("running device %p\n", device));
3829
3830		work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3831		if (work_ccb == NULL) {
3832			printf("device on run queue with no ccbs???\n");
3833			continue;
3834		}
3835
3836		if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3837
3838			mtx_lock(&xsoftc.xpt_lock);
3839		if (xsoftc.num_highpower <= 0) {
3840				/*
3841				 * We got a high power command, but we
3842				 * don't have any available slots.  Freeze
3843				 * the device queue until we have a slot
3844				 * available.
3845				 */
3846				device->qfrozen_cnt++;
3847				STAILQ_INSERT_TAIL(&xsoftc.highpowerq,
3848						   &work_ccb->ccb_h,
3849						   xpt_links.stqe);
3850
3851				continue;
3852			} else {
3853				/*
3854				 * Consume a high power slot while
3855				 * this ccb runs.
3856				 */
3857				xsoftc.num_highpower--;
3858			}
3859			mtx_unlock(&xsoftc.xpt_lock);
3860		}
3861		devq->active_dev = device;
3862		cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3863
3864		cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3865
3866		devq->send_openings--;
3867		devq->send_active++;
3868
3869		if (device->ccbq.queue.entries > 0)
3870			xpt_schedule_dev_sendq(bus, device);
3871
3872		if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
3873			/*
3874			 * The client wants to freeze the queue
3875			 * after this CCB is sent.
3876			 */
3877			device->qfrozen_cnt++;
3878		}
3879
3880		/* In Target mode, the peripheral driver knows best... */
3881		if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3882			if ((device->inq_flags & SID_CmdQue) != 0
3883			 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3884				work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3885			else
3886				/*
3887				 * Clear this in case of a retried CCB that
3888				 * failed due to a rejected tag.
3889				 */
3890				work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3891		}
3892
3893		/*
3894		 * Device queues can be shared among multiple sim instances
3895		 * that reside on different busses.  Use the SIM in the queue
3896		 * CCB's path, rather than the one in the bus that was passed
3897		 * into this function.
3898		 */
3899		sim = work_ccb->ccb_h.path->bus->sim;
3900		(*(sim->sim_action))(sim, work_ccb);
3901
3902		devq->active_dev = NULL;
3903	}
3904	devq->send_queue.qfrozen_cnt--;
3905}
3906
3907/*
3908 * This function merges fields from the slave ccb into the master ccb, while
3909 * keeping important fields in the master ccb constant.
3910 */
3911void
3912xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3913{
3914
3915	/*
3916	 * Pull fields that are valid for peripheral drivers to set
3917	 * into the master CCB along with the CCB "payload".
3918	 */
3919	master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3920	master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3921	master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3922	master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
3923	bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3924	      sizeof(union ccb) - sizeof(struct ccb_hdr));
3925}
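
/*
 * A hedged sketch (not part of this file) of the intended use: a
 * peripheral obtains a transport-owned "master" CCB and merges in a
 * fully built "slave" CCB, e.g. one handed in from userland by a
 * pass-through consumer.  "my_done" and "user_ccb" are placeholders.
 */
#if 0
	union ccb *ccb;

	ccb = cam_periph_getccb(periph, /*priority*/1);
	xpt_merge_ccb(ccb, user_ccb);	/* keep header bookkeeping, take payload */
	ccb->ccb_h.cbfcnp = my_done;
	xpt_action(ccb);
#endif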
3926
3927void
3928xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3929{
3930
3931	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3932	ccb_h->pinfo.priority = priority;
3933	ccb_h->path = path;
3934	ccb_h->path_id = path->bus->path_id;
3935	if (path->target)
3936		ccb_h->target_id = path->target->target_id;
3937	else
3938		ccb_h->target_id = CAM_TARGET_WILDCARD;
3939	if (path->device) {
3940		ccb_h->target_lun = path->device->lun_id;
3941		ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3942	} else {
3943		ccb_h->target_lun = CAM_TARGET_WILDCARD;
3944	}
3945	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3946	ccb_h->flags = 0;
3947}
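
/*
 * Example, mirroring xpt_bus_register() below: an immediate CCB is
 * built on the stack with xpt_setup_ccb() and dispatched in-line.
 */
#if 0
	struct ccb_pathinq cpi;

	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
#endif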
3948
3949/* Path manipulation functions */
3950cam_status
3951xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3952		path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3953{
3954	struct	   cam_path *path;
3955	cam_status status;
3956
3957	path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_NOWAIT);
3958
3959	if (path == NULL) {
3960		status = CAM_RESRC_UNAVAIL;
3961		return(status);
3962	}
3963	status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3964	if (status != CAM_REQ_CMP) {
3965		free(path, M_CAMXPT);
3966		path = NULL;
3967	}
3968	*new_path_ptr = path;
3969	return (status);
3970}
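
/*
 * Example, mirroring usage elsewhere in this file: create a fully
 * wildcarded path on a bus and release it when done.
 */
#if 0
	struct cam_path *path;

	if (xpt_create_path(&path, /*periph*/NULL, path_id,
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	    == CAM_REQ_CMP)
		xpt_free_path(path);
#endif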
3971
3972cam_status
3973xpt_create_path_unlocked(struct cam_path **new_path_ptr,
3974			 struct cam_periph *periph, path_id_t path_id,
3975			 target_id_t target_id, lun_id_t lun_id)
3976{
3977	struct	   cam_path *path;
3978	struct	   cam_eb *bus = NULL;
3979	cam_status status;
3980	int	   need_unlock = 0;
3981
3982	path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_WAITOK);
3983
3984	if (path_id != CAM_BUS_WILDCARD) {
3985		bus = xpt_find_bus(path_id);
3986		if (bus != NULL) {
3987			need_unlock = 1;
3988			CAM_SIM_LOCK(bus->sim);
3989		}
3990	}
3991	status = xpt_compile_path(path, periph, path_id, target_id, lun_id);
3992	if (need_unlock)
3993		CAM_SIM_UNLOCK(bus->sim);
3994	if (status != CAM_REQ_CMP) {
3995		free(path, M_CAMXPT);
3996		path = NULL;
3997	}
3998	*new_path_ptr = path;
3999	return (status);
4000}
4001
4002static cam_status
4003xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
4004		 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
4005{
4006	struct	     cam_eb *bus;
4007	struct	     cam_et *target;
4008	struct	     cam_ed *device;
4009	cam_status   status;
4010
4011	status = CAM_REQ_CMP;	/* Completed without error */
4012	target = NULL;		/* Wildcarded */
4013	device = NULL;		/* Wildcarded */
4014
4015	/*
4016	 * We will potentially modify the EDT, so block interrupts
4017	 * that may attempt to create cam paths.
4018	 */
4019	bus = xpt_find_bus(path_id);
4020	if (bus == NULL) {
4021		status = CAM_PATH_INVALID;
4022	} else {
4023		target = xpt_find_target(bus, target_id);
4024		if (target == NULL) {
4025			/* Create one */
4026			struct cam_et *new_target;
4027
4028			new_target = xpt_alloc_target(bus, target_id);
4029			if (new_target == NULL) {
4030				status = CAM_RESRC_UNAVAIL;
4031			} else {
4032				target = new_target;
4033			}
4034		}
4035		if (target != NULL) {
4036			device = xpt_find_device(target, lun_id);
4037			if (device == NULL) {
4038				/* Create one */
4039				struct cam_ed *new_device;
4040
4041				new_device = xpt_alloc_device(bus,
4042							      target,
4043							      lun_id);
4044				if (new_device == NULL) {
4045					status = CAM_RESRC_UNAVAIL;
4046				} else {
4047					device = new_device;
4048				}
4049			}
4050		}
4051	}
4052
4053	/*
4054	 * Only touch the user's data if we are successful.
4055	 */
4056	if (status == CAM_REQ_CMP) {
4057		new_path->periph = perph;
4058		new_path->bus = bus;
4059		new_path->target = target;
4060		new_path->device = device;
4061		CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
4062	} else {
4063		if (device != NULL)
4064			xpt_release_device(bus, target, device);
4065		if (target != NULL)
4066			xpt_release_target(bus, target);
4067		if (bus != NULL)
4068			xpt_release_bus(bus);
4069	}
4070	return (status);
4071}
4072
4073static void
4074xpt_release_path(struct cam_path *path)
4075{
4076	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
4077	if (path->device != NULL) {
4078		xpt_release_device(path->bus, path->target, path->device);
4079		path->device = NULL;
4080	}
4081	if (path->target != NULL) {
4082		xpt_release_target(path->bus, path->target);
4083		path->target = NULL;
4084	}
4085	if (path->bus != NULL) {
4086		xpt_release_bus(path->bus);
4087		path->bus = NULL;
4088	}
4089}
4090
4091void
4092xpt_free_path(struct cam_path *path)
4093{
4094
4095	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
4096	xpt_release_path(path);
4097	free(path, M_CAMXPT);
4098}
4099
4101/*
4102 * Return -1 for failure, 0 for exact match, 1 for match with wildcards
4103 * in path1, 2 for match with wildcards in path2.
4104 */
4105int
4106xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
4107{
4108	int retval = 0;
4109
4110	if (path1->bus != path2->bus) {
4111		if (path1->bus->path_id == CAM_BUS_WILDCARD)
4112			retval = 1;
4113		else if (path2->bus->path_id == CAM_BUS_WILDCARD)
4114			retval = 2;
4115		else
4116			return (-1);
4117	}
4118	if (path1->target != path2->target) {
4119		if (path1->target->target_id == CAM_TARGET_WILDCARD) {
4120			if (retval == 0)
4121				retval = 1;
4122		} else if (path2->target->target_id == CAM_TARGET_WILDCARD)
4123			retval = 2;
4124		else
4125			return (-1);
4126	}
4127	if (path1->device != path2->device) {
4128		if (path1->device->lun_id == CAM_LUN_WILDCARD) {
4129			if (retval == 0)
4130				retval = 1;
4131		} else if (path2->device->lun_id == CAM_LUN_WILDCARD)
4132			retval = 2;
4133		else
4134			return (-1);
4135	}
4136	return (retval);
4137}
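
/*
 * For example, comparing a path whose target and lun are wildcards
 * against a fully specified path on the same bus returns 1 (wildcards
 * in path1); swapping the two arguments returns 2.
 */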
4138
4139void
4140xpt_print_path(struct cam_path *path)
4141{
4142	if (path == NULL)
4143		printf("(nopath): ");
4144	else {
4145		mtx_assert(path->bus->sim->mtx, MA_OWNED);
4146
4147		if (path->periph != NULL)
4148			printf("(%s%d:", path->periph->periph_name,
4149			       path->periph->unit_number);
4150		else
4151			printf("(noperiph:");
4152
4153		if (path->bus != NULL)
4154			printf("%s%d:%d:", path->bus->sim->sim_name,
4155			       path->bus->sim->unit_number,
4156			       path->bus->sim->bus_id);
4157		else
4158			printf("nobus:");
4159
4160		if (path->target != NULL)
4161			printf("%d:", path->target->target_id);
4162		else
4163			printf("X:");
4164
4165		if (path->device != NULL)
4166			printf("%d): ", path->device->lun_id);
4167		else
4168			printf("X): ");
4169	}
4170}
4171
4172void
4173xpt_print(struct cam_path *path, const char *fmt, ...)
4174{
4175	va_list ap;
4176	xpt_print_path(path);
4177	va_start(ap, fmt);
4178	vprintf(fmt, ap);
4179	va_end(ap);
4180}
4181
4182int
4183xpt_path_string(struct cam_path *path, char *str, size_t str_len)
4184{
4185	struct sbuf sb;
4186
4187	sbuf_new(&sb, str, str_len, 0);
4188
4189	if (path == NULL)
4190		sbuf_printf(&sb, "(nopath): ");
4191	else {
4192		mtx_assert(path->bus->sim->mtx, MA_OWNED);
4193
4194		if (path->periph != NULL)
4195			sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
4196				    path->periph->unit_number);
4197		else
4198			sbuf_printf(&sb, "(noperiph:");
4199
4200		if (path->bus != NULL)
4201			sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
4202				    path->bus->sim->unit_number,
4203				    path->bus->sim->bus_id);
4204		else
4205			sbuf_printf(&sb, "nobus:");
4206
4207		if (path->target != NULL)
4208			sbuf_printf(&sb, "%d:", path->target->target_id);
4209		else
4210			sbuf_printf(&sb, "X:");
4211
4212		if (path->device != NULL)
4213			sbuf_printf(&sb, "%d): ", path->device->lun_id);
4214		else
4215			sbuf_printf(&sb, "X): ");
4216	}
4217	sbuf_finish(&sb);
4218
4219	return(sbuf_len(&sb));
4220}
4221
4222path_id_t
4223xpt_path_path_id(struct cam_path *path)
4224{
4225	mtx_assert(path->bus->sim->mtx, MA_OWNED);
4226
4227	return(path->bus->path_id);
4228}
4229
4230target_id_t
4231xpt_path_target_id(struct cam_path *path)
4232{
4233	mtx_assert(path->bus->sim->mtx, MA_OWNED);
4234
4235	if (path->target != NULL)
4236		return (path->target->target_id);
4237	else
4238		return (CAM_TARGET_WILDCARD);
4239}
4240
4241lun_id_t
4242xpt_path_lun_id(struct cam_path *path)
4243{
4244	mtx_assert(path->bus->sim->mtx, MA_OWNED);
4245
4246	if (path->device != NULL)
4247		return (path->device->lun_id);
4248	else
4249		return (CAM_LUN_WILDCARD);
4250}
4251
4252struct cam_sim *
4253xpt_path_sim(struct cam_path *path)
4254{
4255
4256	return (path->bus->sim);
4257}
4258
4259struct cam_periph*
4260xpt_path_periph(struct cam_path *path)
4261{
4262	mtx_assert(path->bus->sim->mtx, MA_OWNED);
4263
4264	return (path->periph);
4265}
4266
4267/*
4268 * Release a CAM control block for the caller.  Remit the cost of the structure
4269 * to the device referenced by the path.  If this device had no 'credits'
4270 * and peripheral drivers have registered async callbacks for this notification,
4271 * call them now.
4272 */
4273void
4274xpt_release_ccb(union ccb *free_ccb)
4275{
4276	struct	 cam_path *path;
4277	struct	 cam_ed *device;
4278	struct	 cam_eb *bus;
4279	struct   cam_sim *sim;
4280
4281	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
4282	path = free_ccb->ccb_h.path;
4283	device = path->device;
4284	bus = path->bus;
4285	sim = bus->sim;
4286
4287	mtx_assert(sim->mtx, MA_OWNED);
4288
4289	cam_ccbq_release_opening(&device->ccbq);
4290	if (sim->ccb_count > sim->max_ccbs) {
4291		xpt_free_ccb(free_ccb);
4292		sim->ccb_count--;
4293	} else {
4294		SLIST_INSERT_HEAD(&sim->ccb_freeq, &free_ccb->ccb_h,
4295		    xpt_links.sle);
4296	}
4297	if (sim->devq == NULL) {
4298		return;
4299	}
4300	sim->devq->alloc_openings++;
4301	sim->devq->alloc_active--;
4302	/* XXX Turn this into an inline function - xpt_run_device?? */
4303	if ((device_is_alloc_queued(device) == 0)
4304	 && (device->drvq.entries > 0)) {
4305		xpt_schedule_dev_allocq(bus, device);
4306	}
4307	if (dev_allocq_is_runnable(sim->devq))
4308		xpt_run_dev_allocq(bus);
4309}
4310
4311/* Functions accessed by SIM drivers */
4312
4313/*
4314 * A sim structure, listing the SIM entry points and instance
4315 * identification info is passed to xpt_bus_register to hook the SIM
4316 * into the CAM framework.  xpt_bus_register creates a cam_eb entry
4317 * for this new bus and places it in the list of busses and assigns
4318 * it a path_id.  The path_id may be influenced by "hard wiring"
4319 * information specified by the user.  Once interrupt services are
4320 * available, the bus will be probed.
4321 */
4322int32_t
4323xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
4324{
4325	struct cam_eb *new_bus;
4326	struct cam_eb *old_bus;
4327	struct ccb_pathinq cpi;
4328
4329	mtx_assert(sim->mtx, MA_OWNED);
4330
4331	sim->bus_id = bus;
4332	new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
4333					  M_CAMXPT, M_NOWAIT);
4334	if (new_bus == NULL) {
4335		/* Couldn't satisfy request */
4336		return (CAM_RESRC_UNAVAIL);
4337	}
4338
4339	if (strcmp(sim->sim_name, "xpt") != 0) {
4340
4341		sim->path_id =
4342		    xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4343	}
4344
4345	TAILQ_INIT(&new_bus->et_entries);
4346	new_bus->path_id = sim->path_id;
4347	new_bus->sim = sim;
4348	timevalclear(&new_bus->last_reset);
4349	new_bus->flags = 0;
4350	new_bus->refcount = 1;	/* Held until a bus_deregister event */
4351	new_bus->generation = 0;
4352	mtx_lock(&xsoftc.xpt_topo_lock);
4353	old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4354	while (old_bus != NULL
4355	    && old_bus->path_id < new_bus->path_id)
4356		old_bus = TAILQ_NEXT(old_bus, links);
4357	if (old_bus != NULL)
4358		TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
4359	else
4360		TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
4361	xsoftc.bus_generation++;
4362	mtx_unlock(&xsoftc.xpt_topo_lock);
4363
4364	/* Notify interested parties */
4365	if (sim->path_id != CAM_XPT_PATH_ID) {
4366		struct cam_path path;
4367
4368		xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
4369			         CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4370		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
4371		cpi.ccb_h.func_code = XPT_PATH_INQ;
4372		xpt_action((union ccb *)&cpi);
4373		xpt_async(AC_PATH_REGISTERED, &path, &cpi);
4374		xpt_release_path(&path);
4375	}
4376	return (CAM_SUCCESS);
4377}
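
/*
 * A hedged sketch (not part of this file) of the attach sequence a
 * SIM driver typically performs before calling xpt_bus_register(),
 * assuming the cam_simq_alloc()/cam_sim_alloc() interfaces from
 * cam_sim.h; my_action, my_poll and my_softc are placeholders.
 */
#if 0
	struct cam_devq *devq;
	struct cam_sim *sim;

	if ((devq = cam_simq_alloc(/*max transactions*/128)) == NULL)
		return (ENOMEM);
	sim = cam_sim_alloc(my_action, my_poll, "mydrv", my_softc,
			    /*unit*/0, &my_softc->mtx,
			    /*max_dev_transactions*/1,
			    /*max_tagged_dev_transactions*/128, devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		return (ENOMEM);
	}
	if (xpt_bus_register(sim, /*bus*/0) != CAM_SUCCESS) {
		cam_sim_free(sim, /*free_devq*/TRUE);
		return (ENXIO);
	}
#endif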
4378
4379int32_t
4380xpt_bus_deregister(path_id_t pathid)
4381{
4382	struct cam_path bus_path;
4383	struct cam_ed *device;
4384	struct cam_ed_qinfo *qinfo;
4385	struct cam_devq *devq;
4386	struct cam_periph *periph;
4387	struct cam_sim *ccbsim;
4388	union ccb *work_ccb;
4389	cam_status status;
4390
4392	status = xpt_compile_path(&bus_path, NULL, pathid,
4393				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4394	if (status != CAM_REQ_CMP)
4395		return (status);
4396
4397	xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4398	xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4399
4400	/* The SIM may be gone, so use a dummy SIM for any stray operations. */
4401	devq = bus_path.bus->sim->devq;
4402	ccbsim = bus_path.bus->sim;
4403	bus_path.bus->sim = &cam_dead_sim;
4404
4405	/* Execute any pending operations now. */
4406	while ((qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
4407	    CAMQ_HEAD)) != NULL ||
4408	    (qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
4409	    CAMQ_HEAD)) != NULL) {
4410		do {
4411			device = qinfo->device;
4412			work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
4413			if (work_ccb != NULL) {
4414				devq->active_dev = device;
4415				cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
4416				cam_ccbq_send_ccb(&device->ccbq, work_ccb);
4417				(*(ccbsim->sim_action))(ccbsim, work_ccb);
4418			}
4419
4420			periph = (struct cam_periph *)camq_remove(&device->drvq,
4421			    CAMQ_HEAD);
4422			if (periph != NULL)
4423				xpt_schedule(periph, periph->pinfo.priority);
4424		} while (work_ccb != NULL || periph != NULL);
4425	}
4426
4427	/* Make sure all completed CCBs are processed. */
4428	while (!TAILQ_EMPTY(&ccbsim->sim_doneq)) {
4429		camisr_runqueue(&ccbsim->sim_doneq);
4430
4431		/* Repeat the asyncs for the benefit of any new devices. */
4432		xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4433		xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4434	}
4435
4436	/* Release the reference count held while registered. */
4437	xpt_release_bus(bus_path.bus);
4438	xpt_release_path(&bus_path);
4439
4440	return (CAM_REQ_CMP);
4441}
4442
4443static path_id_t
4444xptnextfreepathid(void)
4445{
4446	struct cam_eb *bus;
4447	path_id_t pathid;
4448	const char *strval;
4449
4450	pathid = 0;
4451	mtx_lock(&xsoftc.xpt_topo_lock);
4452	bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4453retry:
4454	/* Find an unoccupied pathid */
4455	while (bus != NULL && bus->path_id <= pathid) {
4456		if (bus->path_id == pathid)
4457			pathid++;
4458		bus = TAILQ_NEXT(bus, links);
4459	}
4460	mtx_unlock(&xsoftc.xpt_topo_lock);
4461
4462	/*
4463	 * Ensure that this pathid is not reserved for
4464	 * a bus that may be registered in the future.
4465	 */
4466	if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4467		++pathid;
4468		/* Start the search over */
4469		mtx_lock(&xsoftc.xpt_topo_lock);
4470		goto retry;
4471	}
4472	return (pathid);
4473}
4474
4475static path_id_t
4476xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4477{
4478	path_id_t pathid;
4479	int i, dunit, val;
4480	char buf[32];
4481	const char *dname;
4482
4483	pathid = CAM_XPT_PATH_ID;
4484	snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4485	i = 0;
4486	while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
4487		if (strcmp(dname, "scbus")) {
4488			/* Avoid a bit of foot shooting. */
4489			continue;
4490		}
4491		if (dunit < 0)		/* unwired?! */
4492			continue;
4493		if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4494			if (sim_bus == val) {
4495				pathid = dunit;
4496				break;
4497			}
4498		} else if (sim_bus == 0) {
4499			/* Unspecified matches bus 0 */
4500			pathid = dunit;
4501			break;
4502		} else {
4503			printf("Ambiguous scbus configuration for %s%d "
4504			       "bus %d, cannot wire down.  The kernel "
4505			       "config entry for scbus%d should "
4506			       "specify a controller bus.\n"
4507			       "Scbus will be assigned dynamically.\n",
4508			       sim_name, sim_unit, sim_bus, dunit);
4509			break;
4510		}
4511	}
4512
4513	if (pathid == CAM_XPT_PATH_ID)
4514		pathid = xptnextfreepathid();
4515	return (pathid);
4516}
4517
4518void
4519xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4520{
4521	struct cam_eb *bus;
4522	struct cam_et *target, *next_target;
4523	struct cam_ed *device, *next_device;
4524
4525	mtx_assert(path->bus->sim->mtx, MA_OWNED);
4526
4527	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
4528
4529	/*
4530	 * Most async events come from a CAM interrupt context.  In
4531	 * a few cases, the error recovery code at the peripheral layer,
4532	 * which may run from our SWI or a process context, may signal
4533	 * deferred events with a call to xpt_async.
4534	 */
4535
4536	bus = path->bus;
4537
4538	if (async_code == AC_BUS_RESET) {
4539		/* Update our notion of when the last reset occurred */
4540		microtime(&bus->last_reset);
4541	}
4542
4543	for (target = TAILQ_FIRST(&bus->et_entries);
4544	     target != NULL;
4545	     target = next_target) {
4546
4547		next_target = TAILQ_NEXT(target, links);
4548
4549		if (path->target != target
4550		 && path->target->target_id != CAM_TARGET_WILDCARD
4551		 && target->target_id != CAM_TARGET_WILDCARD)
4552			continue;
4553
4554		if (async_code == AC_SENT_BDR) {
4555			/* Update our notion of when the last reset occurred */
4556			microtime(&path->target->last_reset);
4557		}
4558
4559		for (device = TAILQ_FIRST(&target->ed_entries);
4560		     device != NULL;
4561		     device = next_device) {
4562
4563			next_device = TAILQ_NEXT(device, links);
4564
4565			if (path->device != device
4566			 && path->device->lun_id != CAM_LUN_WILDCARD
4567			 && device->lun_id != CAM_LUN_WILDCARD)
4568				continue;
4569
4570			xpt_dev_async(async_code, bus, target,
4571				      device, async_arg);
4572
4573			xpt_async_bcast(&device->asyncs, async_code,
4574					path, async_arg);
4575		}
4576	}
4577
4578	/*
4579	 * If this wasn't a fully wildcarded async, tell all
4580	 * clients that want all async events.
4581	 */
4582	if (bus != xpt_periph->path->bus)
4583		xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
4584				path, async_arg);
4585}
4586
4587static void
4588xpt_async_bcast(struct async_list *async_head,
4589		u_int32_t async_code,
4590		struct cam_path *path, void *async_arg)
4591{
4592	struct async_node *cur_entry;
4593
4594	cur_entry = SLIST_FIRST(async_head);
4595	while (cur_entry != NULL) {
4596		struct async_node *next_entry;
4597		/*
4598		 * Grab the next list entry before we call the current
4599		 * entry's callback.  This is because the callback function
4600		 * can delete its async callback entry.
4601		 */
4602		next_entry = SLIST_NEXT(cur_entry, links);
4603		if ((cur_entry->event_enable & async_code) != 0)
4604			cur_entry->callback(cur_entry->callback_arg,
4605					    async_code, path,
4606					    async_arg);
4607		cur_entry = next_entry;
4608	}
4609}
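
/*
 * A hedged sketch (not part of this file) of how peripherals populate
 * these async lists: an immediate XPT_SASYNC_CB CCB names the events
 * of interest and the callback to invoke.  "my_async_callback" and
 * "my_softc" are placeholders.
 */
#if 0
	struct ccb_setasync csa;

	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
	csa.callback = my_async_callback;
	csa.callback_arg = my_softc;
	xpt_action((union ccb *)&csa);
#endif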
4610
4611/*
4612 * Handle any per-device event notifications that require action by the XPT.
4613 */
4614static void
4615xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
4616	      struct cam_ed *device, void *async_arg)
4617{
4618	cam_status status;
4619	struct cam_path newpath;
4620
4621	/*
4622	 * We only need to handle events for real devices.
4623	 */
4624	if (target->target_id == CAM_TARGET_WILDCARD
4625	 || device->lun_id == CAM_LUN_WILDCARD)
4626		return;
4627
4628	/*
4629	 * We need our own path with wildcards expanded to
4630	 * handle certain types of events.
4631	 */
4632	if ((async_code == AC_SENT_BDR)
4633	 || (async_code == AC_BUS_RESET)
4634	 || (async_code == AC_INQ_CHANGED))
4635		status = xpt_compile_path(&newpath, NULL,
4636					  bus->path_id,
4637					  target->target_id,
4638					  device->lun_id);
4639	else
4640		status = CAM_REQ_CMP_ERR;
4641
4642	if (status == CAM_REQ_CMP) {
4643
4644		/*
4645		 * Allow transfer negotiation to occur in a
4646		 * tag free environment.
4647		 */
4648		if (async_code == AC_SENT_BDR
4649		 || async_code == AC_BUS_RESET)
4650			xpt_toggle_tags(&newpath);
4651
4652		if (async_code == AC_INQ_CHANGED) {
4653			/*
4654			 * We've sent a start unit command, or
4655			 * something similar to a device that
4656			 * may have caused its inquiry data to
4657			 * change. So we re-scan the device to
4658			 * refresh the inquiry data for it.
4659			 */
4660			xpt_scan_lun(newpath.periph, &newpath,
4661				     CAM_EXPECT_INQ_CHANGE, NULL);
4662		}
4663		xpt_release_path(&newpath);
4664	} else if (async_code == AC_LOST_DEVICE) {
4665		device->flags |= CAM_DEV_UNCONFIGURED;
4666	} else if (async_code == AC_TRANSFER_NEG) {
4667		struct ccb_trans_settings *settings;
4668
4669		settings = (struct ccb_trans_settings *)async_arg;
4670		xpt_set_transfer_settings(settings, device,
4671					  /*async_update*/TRUE);
4672	}
4673}
4674
4675u_int32_t
4676xpt_freeze_devq(struct cam_path *path, u_int count)
4677{
4678	struct ccb_hdr *ccbh;
4679
4680	mtx_assert(path->bus->sim->mtx, MA_OWNED);
4681
4682	path->device->qfrozen_cnt += count;
4683
4684	/*
4685	 * Mark the last CCB in the queue as needing
4686	 * to be requeued if the driver hasn't
4687 * changed its state yet.  This fixes a race
4688 * where a ccb is just about to be queued to
4689 * a controller driver when its interrupt routine
4690 * freezes the queue.  To completely close the
4691 * hole, controller drivers must check to see
4692	 * if a ccb's status is still CAM_REQ_INPROG
4693	 * just before they queue
4694	 * the CCB.  See ahc_action/ahc_freeze_devq for
4695	 * an example.
4696	 */
4697	ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
4698	if (ccbh && ccbh->status == CAM_REQ_INPROG)
4699		ccbh->status = CAM_REQUEUE_REQ;
4700	return (path->device->qfrozen_cnt);
4701}
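
/*
 * Every freeze recorded above must eventually be undone by a matching
 * xpt_release_devq() (or by the release timeout); until qfrozen_cnt
 * drops back to zero no further CCBs are dispatched to the device.
 */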
4702
4703u_int32_t
4704xpt_freeze_simq(struct cam_sim *sim, u_int count)
4705{
4706	mtx_assert(sim->mtx, MA_OWNED);
4707
4708	sim->devq->send_queue.qfrozen_cnt += count;
4709	if (sim->devq->active_dev != NULL) {
4710		struct ccb_hdr *ccbh;
4711
4712		ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
4713				  ccb_hdr_tailq);
4714		if (ccbh && ccbh->status == CAM_REQ_INPROG)
4715			ccbh->status = CAM_REQUEUE_REQ;
4716	}
4717	return (sim->devq->send_queue.qfrozen_cnt);
4718}
4719
4720static void
4721xpt_release_devq_timeout(void *arg)
4722{
4723	struct cam_ed *device;
4724
4725	device = (struct cam_ed *)arg;
4726
4727	xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
4728}
4729
4730void
4731xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4732{
4733	mtx_assert(path->bus->sim->mtx, MA_OWNED);
4734
4735	xpt_release_devq_device(path->device, count, run_queue);
4736}
4737
4738static void
4739xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4740{
4741	int	rundevq;
4742
4743	rundevq = 0;
4744	if (dev->qfrozen_cnt > 0) {
4745
4746		count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
4747		dev->qfrozen_cnt -= count;
4748		if (dev->qfrozen_cnt == 0) {
4749
4750			/*
4751			 * No longer need to wait for a successful
4752			 * command completion.
4753			 */
4754			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4755
4756			/*
4757			 * Remove any timeouts that might be scheduled
4758			 * to release this queue.
4759			 */
4760			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4761				callout_stop(&dev->callout);
4762				dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4763			}
4764
4765			/*
4766			 * Now that we are unfrozen schedule the
4767			 * device so any pending transactions are
4768			 * run.
4769			 */
4770			if ((dev->ccbq.queue.entries > 0)
4771			 && (xpt_schedule_dev_sendq(dev->target->bus, dev))
4772			 && (run_queue != 0)) {
4773				rundevq = 1;
4774			}
4775		}
4776	}
4777	if (rundevq != 0)
4778		xpt_run_dev_sendq(dev->target->bus);
4779}
4780
4781void
4782xpt_release_simq(struct cam_sim *sim, int run_queue)
4783{
4784	struct	camq *sendq;
4785
4786	mtx_assert(sim->mtx, MA_OWNED);
4787
4788	sendq = &(sim->devq->send_queue);
4789	if (sendq->qfrozen_cnt > 0) {
4790
4791		sendq->qfrozen_cnt--;
4792		if (sendq->qfrozen_cnt == 0) {
4793			struct cam_eb *bus;
4794
4795			/*
4796			 * If there is a timeout scheduled to release this
4797			 * sim queue, remove it.  The queue frozen count is
4798			 * already at 0.
4799			 */
4800			if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4801				callout_stop(&sim->callout);
4802				sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4803			}
4804			bus = xpt_find_bus(sim->path_id);
4805
4806			if (run_queue) {
4807				/*
4808				 * Now that we are unfrozen run the send queue.
4809				 */
4810				xpt_run_dev_sendq(bus);
4811			}
4812			xpt_release_bus(bus);
4813		}
4814	}
4815}
4816
4817/*
4818 * XXX Appears to be unused.
4819 */
4820static void
4821xpt_release_simq_timeout(void *arg)
4822{
4823	struct cam_sim *sim;
4824
4825	sim = (struct cam_sim *)arg;
4826	xpt_release_simq(sim, /* run_queue */ TRUE);
4827}
4828
4829void
4830xpt_done(union ccb *done_ccb)
4831{
4832	struct cam_sim *sim;
4833
4834	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4835	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
4836		/*
4837 * Queue up the request for handling by our SWI handler;
4838 * this applies to any of the "non-immediate" type of ccbs.
4839		 */
4840		sim = done_ccb->ccb_h.path->bus->sim;
4841		switch (done_ccb->ccb_h.path->periph->type) {
4842		case CAM_PERIPH_BIO:
4843			TAILQ_INSERT_TAIL(&sim->sim_doneq, &done_ccb->ccb_h,
4844					  sim_links.tqe);
4845			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4846			if ((sim->flags & CAM_SIM_ON_DONEQ) == 0) {
4847				mtx_lock(&cam_simq_lock);
4848				TAILQ_INSERT_TAIL(&cam_simq, sim,
4849						  links);
4850				sim->flags |= CAM_SIM_ON_DONEQ;
4851				mtx_unlock(&cam_simq_lock);
4852			}
4853			if ((done_ccb->ccb_h.path->periph->flags &
4854			    CAM_PERIPH_POLLED) == 0)
4855				swi_sched(cambio_ih, 0);
4856			break;
4857		default:
4858			panic("unknown periph type %d",
4859			    done_ccb->ccb_h.path->periph->type);
4860		}
4861	}
4862}
4863
4864union ccb *
4865xpt_alloc_ccb(void)
4866{
4867	union ccb *new_ccb;
4868
4869	new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_ZERO|M_WAITOK);
4870	return (new_ccb);
4871}
4872
4873union ccb *
4874xpt_alloc_ccb_nowait(void)
4875{
4876	union ccb *new_ccb;
4877
4878	new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_ZERO|M_NOWAIT);
4879	return (new_ccb);
4880}
4881
4882void
4883xpt_free_ccb(union ccb *free_ccb)
4884{
4885	free(free_ccb, M_CAMXPT);
4886}
4887
4890/* Private XPT functions */
4891
4892/*
4893 * Get a CAM control block for the caller. Charge the structure to the device
4894 * referenced by the path.  If this device has no 'credits' then the
4895 * device already has the maximum number of outstanding operations under way
4896 * and we return NULL. If we don't have sufficient resources to allocate more
4897 * ccbs, we also return NULL.
4898 */
4899static union ccb *
4900xpt_get_ccb(struct cam_ed *device)
4901{
4902	union ccb *new_ccb;
4903	struct cam_sim *sim;
4904
4905	sim = device->sim;
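	/*
	 * Try the SIM's free list first; grow it with a freshly
	 * allocated CCB only when the list is empty.
	 */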
4906	if ((new_ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) == NULL) {
4907		new_ccb = xpt_alloc_ccb_nowait();
4908		if (new_ccb == NULL) {
4909			return (NULL);
4910		}
4911		if ((sim->flags & CAM_SIM_MPSAFE) == 0)
4912			callout_handle_init(&new_ccb->ccb_h.timeout_ch);
4913		SLIST_INSERT_HEAD(&sim->ccb_freeq, &new_ccb->ccb_h,
4914				  xpt_links.sle);
4915		sim->ccb_count++;
4916	}
4917	cam_ccbq_take_opening(&device->ccbq);
4918	SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle);
4919	return (new_ccb);
4920}
4921
4922static void
4923xpt_release_bus(struct cam_eb *bus)
4924{
4925
4926	if ((--bus->refcount == 0)
4927	 && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
4928		mtx_lock(&xsoftc.xpt_topo_lock);
4929		TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
4930		xsoftc.bus_generation++;
4931		mtx_unlock(&xsoftc.xpt_topo_lock);
4932		free(bus, M_CAMXPT);
4933	}
4934}
4935
4936static struct cam_et *
4937xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4938{
4939	struct cam_et *target;
4940
4941	target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, M_NOWAIT);
4942	if (target != NULL) {
4943		struct cam_et *cur_target;
4944
4945		TAILQ_INIT(&target->ed_entries);
4946		target->bus = bus;
4947		target->target_id = target_id;
4948		target->refcount = 1;
4949		target->generation = 0;
4950		timevalclear(&target->last_reset);
4951		/*
4952		 * Hold a reference to our parent bus so it
4953		 * will not go away before we do.
4954		 */
4955		bus->refcount++;
4956
4957		/* Insertion sort into our bus's target list */
4958		cur_target = TAILQ_FIRST(&bus->et_entries);
4959		while (cur_target != NULL && cur_target->target_id < target_id)
4960			cur_target = TAILQ_NEXT(cur_target, links);
4961
4962		if (cur_target != NULL) {
4963			TAILQ_INSERT_BEFORE(cur_target, target, links);
4964		} else {
4965			TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4966		}
4967		bus->generation++;
4968	}
4969	return (target);
4970}
4971
4972static void
4973xpt_release_target(struct cam_eb *bus, struct cam_et *target)
4974{
4975
4976	if ((--target->refcount == 0)
4977	 && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
4978		TAILQ_REMOVE(&bus->et_entries, target, links);
4979		bus->generation++;
4980		free(target, M_CAMXPT);
4981		xpt_release_bus(bus);
4982	}
4983}
4984
4985static struct cam_ed *
4986xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4987{
4988	struct	   cam_path path;
4989	struct	   cam_ed *device;
4990	struct	   cam_devq *devq;
4991	cam_status status;
4992
4993	if (SIM_DEAD(bus->sim))
4994		return (NULL);
4995
4996	/* Make space for us in the device queue on our bus */
4997	devq = bus->sim->devq;
4998	status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
4999
5000	if (status != CAM_REQ_CMP) {
5001		device = NULL;
5002	} else {
5003		device = (struct cam_ed *)malloc(sizeof(*device),
5004						 M_CAMXPT, M_NOWAIT);
5005	}
5006
5007	if (device != NULL) {
5008		struct cam_ed *cur_device;
5009
5010		cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
5011		device->alloc_ccb_entry.device = device;
5012		cam_init_pinfo(&device->send_ccb_entry.pinfo);
5013		device->send_ccb_entry.device = device;
5014		device->target = target;
5015		device->lun_id = lun_id;
5016		device->sim = bus->sim;
5017		/* Initialize our queues */
5018		if (camq_init(&device->drvq, 0) != 0) {
5019			free(device, M_CAMXPT);
5020			return (NULL);
5021		}
5022		if (cam_ccbq_init(&device->ccbq,
5023				  bus->sim->max_dev_openings) != 0) {
5024			camq_fini(&device->drvq);
5025			free(device, M_CAMXPT);
5026			return (NULL);
5027		}
5028		SLIST_INIT(&device->asyncs);
5029		SLIST_INIT(&device->periphs);
5030		device->generation = 0;
5031		device->owner = NULL;
5032		/*
5033		 * Take the default quirk entry until we have inquiry
5034		 * data and can determine a better quirk to use.
5035		 */
5036		device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
5037		bzero(&device->inq_data, sizeof(device->inq_data));
5038		device->inq_flags = 0;
5039		device->queue_flags = 0;
5040		device->serial_num = NULL;
5041		device->serial_num_len = 0;
5042		device->qfrozen_cnt = 0;
5043		device->flags = CAM_DEV_UNCONFIGURED;
5044		device->tag_delay_count = 0;
5045		device->tag_saved_openings = 0;
5046		device->refcount = 1;
5047		if (bus->sim->flags & CAM_SIM_MPSAFE)
5048			callout_init_mtx(&device->callout, bus->sim->mtx, 0);
5049		else
5050			callout_init_mtx(&device->callout, &Giant, 0);
5051
5052		/*
5053		 * Hold a reference to our parent target so it
5054		 * will not go away before we do.
5055		 */
5056		target->refcount++;
5057
5058		/*
5059		 * XXX should be limited by number of CCBs this bus can
5060		 * do.
5061		 */
5062		bus->sim->max_ccbs += device->ccbq.devq_openings;
5063		/* Insertion sort into our target's device list */
5064		cur_device = TAILQ_FIRST(&target->ed_entries);
5065		while (cur_device != NULL && cur_device->lun_id < lun_id)
5066			cur_device = TAILQ_NEXT(cur_device, links);
5067		if (cur_device != NULL) {
5068			TAILQ_INSERT_BEFORE(cur_device, device, links);
5069		} else {
5070			TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
5071		}
5072		target->generation++;
5073		if (lun_id != CAM_LUN_WILDCARD) {
5074			xpt_compile_path(&path,
5075					 NULL,
5076					 bus->path_id,
5077					 target->target_id,
5078					 lun_id);
5079			xpt_devise_transport(&path);
5080			xpt_release_path(&path);
5081		}
5082	}
5083	return (device);
5084}
5085
5086static void
5087xpt_release_device(struct cam_eb *bus, struct cam_et *target,
5088		   struct cam_ed *device)
5089{
5090
5091	if ((--device->refcount == 0)
5092	 && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
5093		struct cam_devq *devq;
5094
5095		if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
5096		 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
5097			panic("Removing device while still queued for ccbs");
5098
5099		if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
5100				callout_stop(&device->callout);
5101
5102		TAILQ_REMOVE(&target->ed_entries, device,links);
5103		target->generation++;
5104		bus->sim->max_ccbs -= device->ccbq.devq_openings;
5105		if (!SIM_DEAD(bus->sim)) {
5106			/* Release our slot in the devq */
5107			devq = bus->sim->devq;
5108			cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
5109		}
5110		camq_fini(&device->drvq);
5111		camq_fini(&device->ccbq.queue);
5112		free(device, M_CAMXPT);
5113		xpt_release_target(bus, target);
5114	}
5115}
5116
5117static u_int32_t
5118xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
5119{
5120	int	diff;
5121	int	result;
5122	struct	cam_ed *dev;
5123
5124	dev = path->device;
5125
5126	diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
5127	result = cam_ccbq_resize(&dev->ccbq, newopenings);
5128	if (result == CAM_REQ_CMP && (diff < 0)) {
5129		dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
5130	}
5131	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5132	 || (dev->inq_flags & SID_CmdQue) != 0)
5133		dev->tag_saved_openings = newopenings;
5134	/* Adjust the global limit */
5135	dev->sim->max_ccbs += diff;
5136	return (result);
5137}
5138
5139static struct cam_eb *
5140xpt_find_bus(path_id_t path_id)
5141{
5142	struct cam_eb *bus;
5143
5144	mtx_lock(&xsoftc.xpt_topo_lock);
5145	for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
5146	     bus != NULL;
5147	     bus = TAILQ_NEXT(bus, links)) {
5148		if (bus->path_id == path_id) {
5149			bus->refcount++;
5150			break;
5151		}
5152	}
5153	mtx_unlock(&xsoftc.xpt_topo_lock);
5154	return (bus);
5155}
5156
5157static struct cam_et *
5158xpt_find_target(struct cam_eb *bus, target_id_t	target_id)
5159{
5160	struct cam_et *target;
5161
5162	for (target = TAILQ_FIRST(&bus->et_entries);
5163	     target != NULL;
5164	     target = TAILQ_NEXT(target, links)) {
5165		if (target->target_id == target_id) {
5166			target->refcount++;
5167			break;
5168		}
5169	}
5170	return (target);
5171}
5172
5173static struct cam_ed *
5174xpt_find_device(struct cam_et *target, lun_id_t lun_id)
5175{
5176	struct cam_ed *device;
5177
5178	for (device = TAILQ_FIRST(&target->ed_entries);
5179	     device != NULL;
5180	     device = TAILQ_NEXT(device, links)) {
5181		if (device->lun_id == lun_id) {
5182			device->refcount++;
5183			break;
5184		}
5185	}
5186	return (device);
5187}
5188
5189typedef struct {
5190	union	ccb *request_ccb;
5191	struct 	ccb_pathinq *cpi;
5192	int	counter;
5193} xpt_scan_bus_info;
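
/*
 * For parallel scans, "counter" holds the number of target scans still
 * outstanding; on PIM_SEQSCAN adapters it instead tracks the single
 * target id currently being scanned.
 */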
5194
5195/*
5196 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
5197 * As the scan progresses, xpt_scan_bus is used as the
5198 * completion callback function.
5199 */
5200static void
5201xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
5202{
5203	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5204		  ("xpt_scan_bus\n"));
5205	switch (request_ccb->ccb_h.func_code) {
5206	case XPT_SCAN_BUS:
5207	{
5208		xpt_scan_bus_info *scan_info;
5209		union	ccb *work_ccb;
5210		struct	cam_path *path;
5211		u_int	i;
5212		u_int	max_target;
5213		u_int	initiator_id;
5214
5215		/* Find out the characteristics of the bus */
5216		work_ccb = xpt_alloc_ccb_nowait();
5217		if (work_ccb == NULL) {
5218			request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
5219			xpt_done(request_ccb);
5220			return;
5221		}
5222		xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
5223			      request_ccb->ccb_h.pinfo.priority);
5224		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
5225		xpt_action(work_ccb);
5226		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
5227			request_ccb->ccb_h.status = work_ccb->ccb_h.status;
5228			xpt_free_ccb(work_ccb);
5229			xpt_done(request_ccb);
5230			return;
5231		}
5232
5233		if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5234			/*
5235			 * Can't scan the bus on an adapter that
5236			 * cannot perform the initiator role.
5237			 */
5238			request_ccb->ccb_h.status = CAM_REQ_CMP;
5239			xpt_free_ccb(work_ccb);
5240			xpt_done(request_ccb);
5241			return;
5242		}
5243
5244		/* Save some state for use while we probe for devices */
5245		scan_info = (xpt_scan_bus_info *)
5246		    malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_NOWAIT);
		if (scan_info == NULL) {
			/* The M_NOWAIT allocation can fail; bail out cleanly. */
			request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			xpt_free_ccb(work_ccb);
			xpt_done(request_ccb);
			return;
		}
5247		scan_info->request_ccb = request_ccb;
5248		scan_info->cpi = &work_ccb->cpi;
5249
5250		/* Cache on our stack so we can work asynchronously */
5251		max_target = scan_info->cpi->max_target;
5252		initiator_id = scan_info->cpi->initiator_id;
5253
5255		/*
5256		 * We can scan all targets in parallel, or do it sequentially.
5257		 */
5258		if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
5259			max_target = 0;
5260			scan_info->counter = 0;
5261		} else {
5262			scan_info->counter = scan_info->cpi->max_target + 1;
5263			if (scan_info->cpi->initiator_id < scan_info->counter) {
5264				scan_info->counter--;
5265			}
5266		}
5267
5268		for (i = 0; i <= max_target; i++) {
5269			cam_status status;
5270			if (i == initiator_id)
5271				continue;
5272
5273			status = xpt_create_path(&path, xpt_periph,
5274						 request_ccb->ccb_h.path_id,
5275						 i, 0);
5276			if (status != CAM_REQ_CMP) {
5277				printf("xpt_scan_bus: xpt_create_path failed"
5278				       " with status %#x, bus scan halted\n",
5279				       status);
5280				free(scan_info, M_TEMP);
5281				request_ccb->ccb_h.status = status;
5282				xpt_free_ccb(work_ccb);
5283				xpt_done(request_ccb);
5284				break;
5285			}
5286			work_ccb = xpt_alloc_ccb_nowait();
5287			if (work_ccb == NULL) {
5288				free(scan_info, M_TEMP);
5289				xpt_free_path(path);
5290				request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
5291				xpt_done(request_ccb);
5292				break;
5293			}
5294			xpt_setup_ccb(&work_ccb->ccb_h, path,
5295				      request_ccb->ccb_h.pinfo.priority);
5296			work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5297			work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5298			work_ccb->ccb_h.ppriv_ptr0 = scan_info;
5299			work_ccb->crcn.flags = request_ccb->crcn.flags;
5300			xpt_action(work_ccb);
5301		}
5302		break;
5303	}
5304	case XPT_SCAN_LUN:
5305	{
5306		cam_status status;
5307		struct cam_path *path;
5308		xpt_scan_bus_info *scan_info;
5309		path_id_t path_id;
5310		target_id_t target_id;
5311		lun_id_t lun_id;
5312
5313		/* Reuse the same CCB to query if a device was really found */
5314		scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
5315		xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
5316			      request_ccb->ccb_h.pinfo.priority);
5317		request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5318
5319		path_id = request_ccb->ccb_h.path_id;
5320		target_id = request_ccb->ccb_h.target_id;
5321		lun_id = request_ccb->ccb_h.target_lun;
5322		xpt_action(request_ccb);
5323
5324		if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
5325			struct cam_ed *device;
5326			struct cam_et *target;
5327			int phl;
5328
5329			/*
5330			 * If we already probed lun 0 successfully, or
5331			 * we have additional configured luns on this
5332			 * target that might have "gone away", go onto
5333			 * the next lun.
5334			 */
5335			target = request_ccb->ccb_h.path->target;
5336			/*
5337			 * We may touch devices that we don't
5338 * hold references to, so ensure they
5339			 * don't disappear out from under us.
5340			 * The target above is referenced by the
5341			 * path in the request ccb.
5342			 */
5343			phl = 0;
5344			device = TAILQ_FIRST(&target->ed_entries);
5345			if (device != NULL) {
5346				phl = CAN_SRCH_HI_SPARSE(device);
5347				if (device->lun_id == 0)
5348					device = TAILQ_NEXT(device, links);
5349			}
5350			if ((lun_id != 0) || (device != NULL)) {
5351				if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
5352					lun_id++;
5353			}
5354		} else {
5355			struct cam_ed *device;
5356
5357			device = request_ccb->ccb_h.path->device;
5358
5359			if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
5360				/* Try the next lun */
5361				if (lun_id < (CAM_SCSI2_MAXLUN-1)
5362				  || CAN_SRCH_HI_DENSE(device))
5363					lun_id++;
5364			}
5365		}
5366
5367		/*
5368		 * Free the current request path - we're done with it.
5369		 */
5370		xpt_free_path(request_ccb->ccb_h.path);
5371
5372		/*
5373		 * Check to see if we should scan any further luns.
5374		 */
5375		if (lun_id == request_ccb->ccb_h.target_lun
5376		 || lun_id > scan_info->cpi->max_lun) {
5377			int done;
5378
5379 hop_again:
5380			done = 0;
5381			if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
5382				scan_info->counter++;
5383				if (scan_info->counter ==
5384				    scan_info->cpi->initiator_id) {
5385					scan_info->counter++;
5386				}
5387				if (scan_info->counter >=
5388				    scan_info->cpi->max_target+1) {
5389					done = 1;
5390				}
5391			} else {
5392				scan_info->counter--;
5393				if (scan_info->counter == 0) {
5394					done = 1;
5395				}
5396			}
5397			if (done) {
5398				xpt_free_ccb(request_ccb);
5399				xpt_free_ccb((union ccb *)scan_info->cpi);
5400				request_ccb = scan_info->request_ccb;
5401				free(scan_info, M_TEMP);
5402				request_ccb->ccb_h.status = CAM_REQ_CMP;
5403				xpt_done(request_ccb);
5404				break;
5405			}
5406
5407			if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) {
5408				break;
5409			}
5410			status = xpt_create_path(&path, xpt_periph,
5411			    scan_info->request_ccb->ccb_h.path_id,
5412			    scan_info->counter, 0);
5413			if (status != CAM_REQ_CMP) {
5414				printf("xpt_scan_bus: xpt_create_path failed"
5415				    " with status %#x, bus scan halted\n",
5416			       	    status);
5417				xpt_free_ccb(request_ccb);
5418				xpt_free_ccb((union ccb *)scan_info->cpi);
5419				request_ccb = scan_info->request_ccb;
5420				free(scan_info, M_TEMP);
5421				request_ccb->ccb_h.status = status;
5422				xpt_done(request_ccb);
5423				break;
5424			}
5425			xpt_setup_ccb(&request_ccb->ccb_h, path,
5426			    request_ccb->ccb_h.pinfo.priority);
5427			request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5428			request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5429			request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5430			request_ccb->crcn.flags =
5431			    scan_info->request_ccb->crcn.flags;
5432		} else {
5433			status = xpt_create_path(&path, xpt_periph,
5434						 path_id, target_id, lun_id);
5435			if (status != CAM_REQ_CMP) {
5436				printf("xpt_scan_bus: xpt_create_path failed "
5437				       "with status %#x, halting LUN scan\n",
5438			 	       status);
5439				goto hop_again;
5440			}
5441			xpt_setup_ccb(&request_ccb->ccb_h, path,
5442				      request_ccb->ccb_h.pinfo.priority);
5443			request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5444			request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5445			request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5446			request_ccb->crcn.flags =
5447				scan_info->request_ccb->crcn.flags;
5448		}
5449		xpt_action(request_ccb);
5450		break;
5451	}
5452	default:
5453		break;
5454	}
5455}
5456
5457typedef enum {
5458	PROBE_TUR,
5459	PROBE_INQUIRY,	/* this counts as DV0 for Basic Domain Validation */
5460	PROBE_FULL_INQUIRY,
5461	PROBE_MODE_SENSE,
5462	PROBE_SERIAL_NUM,
5463	PROBE_TUR_FOR_NEGOTIATION,
5464	PROBE_INQUIRY_BASIC_DV1,
5465	PROBE_INQUIRY_BASIC_DV2,
5466	PROBE_DV_EXIT
5467} probe_action;
5468
5469typedef enum {
5470	PROBE_INQUIRY_CKSUM	= 0x01,
5471	PROBE_SERIAL_CKSUM	= 0x02,
5472	PROBE_NO_ANNOUNCE	= 0x04
5473} probe_flags;
5474
5475typedef struct {
5476	TAILQ_HEAD(, ccb_hdr) request_ccbs;
5477	probe_action	action;
5478	union ccb	saved_ccb;
5479	probe_flags	flags;
5480	MD5_CTX		context;
5481	u_int8_t	digest[16];
5482} probe_softc;
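
/*
 * The MD5 context/digest pair above lets the probe checksum a device's
 * inquiry (and, when available, serial number) data so a later pass
 * can tell whether the same device still answers at this address.
 */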
5483
5484static void
5485xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
5486	     cam_flags flags, union ccb *request_ccb)
5487{
5488	struct ccb_pathinq cpi;
5489	cam_status status;
5490	struct cam_path *new_path;
5491	struct cam_periph *old_periph;
5492
5493	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5494		  ("xpt_scan_lun\n"));
5495
5496	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
5497	cpi.ccb_h.func_code = XPT_PATH_INQ;
5498	xpt_action((union ccb *)&cpi);
5499
5500	if (cpi.ccb_h.status != CAM_REQ_CMP) {
5501		if (request_ccb != NULL) {
5502			request_ccb->ccb_h.status = cpi.ccb_h.status;
5503			xpt_done(request_ccb);
5504		}
5505		return;
5506	}
5507
5508	if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5509		/*
5510		 * Can't scan the bus on an adapter that
5511		 * cannot perform the initiator role.
5512		 */
5513		if (request_ccb != NULL) {
5514			request_ccb->ccb_h.status = CAM_REQ_CMP;
5515			xpt_done(request_ccb);
5516		}
5517		return;
5518	}
5519
5520	if (request_ccb == NULL) {
5521		request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT);
5522		if (request_ccb == NULL) {
5523			xpt_print(path, "xpt_scan_lun: can't allocate CCB, "
5524			    "can't continue\n");
5525			return;
5526		}
5527		new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT);
5528		if (new_path == NULL) {
5529			xpt_print(path, "xpt_scan_lun: can't allocate path, "
5530			    "can't continue\n");
5531			free(request_ccb, M_TEMP);
5532			return;
5533		}
5534		status = xpt_compile_path(new_path, xpt_periph,
5535					  path->bus->path_id,
5536					  path->target->target_id,
5537					  path->device->lun_id);
5538
5539		if (status != CAM_REQ_CMP) {
5540			xpt_print(path, "xpt_scan_lun: can't compile path, "
5541			    "can't continue\n");
5542			free(request_ccb, M_TEMP);
5543			free(new_path, M_TEMP);
5544			return;
5545		}
5546		xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
5547		request_ccb->ccb_h.cbfcnp = xptscandone;
5548		request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5549		request_ccb->crcn.flags = flags;
5550	}
5551
5552	if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
5553		probe_softc *softc;
5554
5555		softc = (probe_softc *)old_periph->softc;
5556		TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5557				  periph_links.tqe);
5558	} else {
5559		status = cam_periph_alloc(proberegister, NULL, probecleanup,
5560					  probestart, "probe",
5561					  CAM_PERIPH_BIO,
5562					  request_ccb->ccb_h.path, NULL, 0,
5563					  request_ccb);
5564
5565		if (status != CAM_REQ_CMP) {
5566			xpt_print(path, "xpt_scan_lun: cam_periph_alloc "
5567			    "returned an error, can't continue probe\n");
5568			request_ccb->ccb_h.status = status;
5569			xpt_done(request_ccb);
5570		}
5571	}
5572}
5573
5574static void
5575xptscandone(struct cam_periph *periph, union ccb *done_ccb)
5576{
5577	xpt_release_path(done_ccb->ccb_h.path);
5578	free(done_ccb->ccb_h.path, M_TEMP);
5579	free(done_ccb, M_TEMP);
5580}
5581
5582static cam_status
5583proberegister(struct cam_periph *periph, void *arg)
5584{
5585	union ccb *request_ccb;	/* CCB representing the probe request */
5586	cam_status status;
5587	probe_softc *softc;
5588
5589	request_ccb = (union ccb *)arg;
5590	if (periph == NULL) {
5591		printf("proberegister: periph was NULL!!\n");
5592		return(CAM_REQ_CMP_ERR);
5593	}
5594
5595	if (request_ccb == NULL) {
5596		printf("proberegister: no probe CCB, "
5597		       "can't register device\n");
5598		return(CAM_REQ_CMP_ERR);
5599	}
5600
5601	softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT);
5602
5603	if (softc == NULL) {
5604		printf("proberegister: Unable to probe new device. "
5605		       "Unable to allocate softc\n");
5606		return(CAM_REQ_CMP_ERR);
5607	}
5608	TAILQ_INIT(&softc->request_ccbs);
5609	TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5610			  periph_links.tqe);
5611	softc->flags = 0;
5612	periph->softc = softc;
5613	status = cam_periph_acquire(periph);
5614	if (status != CAM_REQ_CMP) {
5615		return (status);
5616	}
5617
5619	/*
5620	 * Ensure we've waited at least a bus settle
5621	 * delay before attempting to probe the device.
5622	 * For HBAs that don't do bus resets, this won't make a difference.
5623	 */
5624	cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
5625				      scsi_delay);
5626	probeschedule(periph);
5627	return(CAM_REQ_CMP);
5628}
5629
5630static void
5631probeschedule(struct cam_periph *periph)
5632{
5633	struct ccb_pathinq cpi;
5634	union ccb *ccb;
5635	probe_softc *softc;
5636
5637	softc = (probe_softc *)periph->softc;
5638	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5639
5640	xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
5641	cpi.ccb_h.func_code = XPT_PATH_INQ;
5642	xpt_action((union ccb *)&cpi);
5643
5644	/*
5645	 * If a device has gone away and another device, or the same one,
5646	 * is back in the same place, it should have a unit attention
5647	 * condition pending.  It will not report the unit attention in
5648	 * response to an inquiry, which may leave invalid transfer
5649	 * negotiations in effect.  The TUR will reveal the unit attention
5650	 * condition.  Only send the TUR for lun 0, since some devices
5651	 * will get confused by commands other than inquiry to non-existent
	 * luns.  If you think a device has gone away, start your scan from
	 * lun 0.  This will ensure that any bogus transfer settings are
	 * invalidated.
5655	 *
5656	 * If we haven't seen the device before and the controller supports
5657	 * some kind of transfer negotiation, negotiate with the first
5658	 * sent command if no bus reset was performed at startup.  This
5659	 * ensures that the device is not confused by transfer negotiation
5660	 * settings left over by loader or BIOS action.
5661	 */
5662	if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5663	 && (ccb->ccb_h.target_lun == 0)) {
5664		softc->action = PROBE_TUR;
5665	} else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
5666	      && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
5667		proberequestdefaultnegotiation(periph);
5668		softc->action = PROBE_INQUIRY;
5669	} else {
5670		softc->action = PROBE_INQUIRY;
5671	}
5672
5673	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
5674		softc->flags |= PROBE_NO_ANNOUNCE;
5675	else
5676		softc->flags &= ~PROBE_NO_ANNOUNCE;
5677
5678	xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
5679}
5680
5681static void
5682probestart(struct cam_periph *periph, union ccb *start_ccb)
5683{
5684	/* Probe the device that our peripheral driver points to */
5685	struct ccb_scsiio *csio;
5686	probe_softc *softc;
5687
5688	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
5689
5690	softc = (probe_softc *)periph->softc;
5691	csio = &start_ccb->csio;
5692
5693	switch (softc->action) {
5694	case PROBE_TUR:
5695	case PROBE_TUR_FOR_NEGOTIATION:
5696	case PROBE_DV_EXIT:
5697	{
5698		scsi_test_unit_ready(csio,
5699				     /*retries*/4,
5700				     probedone,
5701				     MSG_SIMPLE_Q_TAG,
5702				     SSD_FULL_SIZE,
5703				     /*timeout*/60000);
5704		break;
5705	}
5706	case PROBE_INQUIRY:
5707	case PROBE_FULL_INQUIRY:
5708	case PROBE_INQUIRY_BASIC_DV1:
5709	case PROBE_INQUIRY_BASIC_DV2:
5710	{
5711		u_int inquiry_len;
5712		struct scsi_inquiry_data *inq_buf;
5713
5714		inq_buf = &periph->path->device->inq_data;
5715
5716		/*
5717		 * If the device is currently configured, we calculate an
5718		 * MD5 checksum of the inquiry data, and if the serial number
5719		 * length is greater than 0, add the serial number data
5720		 * into the checksum as well.  Once the inquiry and the
5721		 * serial number check finish, we attempt to figure out
5722		 * whether we still have the same device.
5723		 */
5724		if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5725
5726			MD5Init(&softc->context);
5727			MD5Update(&softc->context, (unsigned char *)inq_buf,
5728				  sizeof(struct scsi_inquiry_data));
5729			softc->flags |= PROBE_INQUIRY_CKSUM;
5730			if (periph->path->device->serial_num_len > 0) {
5731				MD5Update(&softc->context,
5732					  periph->path->device->serial_num,
5733					  periph->path->device->serial_num_len);
5734				softc->flags |= PROBE_SERIAL_CKSUM;
5735			}
5736			MD5Final(softc->digest, &softc->context);
5737		}
5738
5739		if (softc->action == PROBE_INQUIRY)
5740			inquiry_len = SHORT_INQUIRY_LENGTH;
5741		else
5742			inquiry_len = SID_ADDITIONAL_LENGTH(inq_buf);
5743
5744		/*
5745		 * Some parallel SCSI devices fail to send an
5746		 * ignore wide residue message when dealing with
5747		 * odd length inquiry requests.  Round up to be
5748		 * safe.
5749		 */
5750		inquiry_len = roundup2(inquiry_len, 2);
5751
5752		if (softc->action == PROBE_INQUIRY_BASIC_DV1
5753		 || softc->action == PROBE_INQUIRY_BASIC_DV2) {
5754			inq_buf = malloc(inquiry_len, M_TEMP, M_NOWAIT);
5755		}
5756		if (inq_buf == NULL) {
			xpt_print(periph->path, "malloc failure - skipping "
			    "Basic Domain Validation\n");
5759			softc->action = PROBE_DV_EXIT;
5760			scsi_test_unit_ready(csio,
5761					     /*retries*/4,
5762					     probedone,
5763					     MSG_SIMPLE_Q_TAG,
5764					     SSD_FULL_SIZE,
5765					     /*timeout*/60000);
5766			break;
5767		}
5768		scsi_inquiry(csio,
5769			     /*retries*/4,
5770			     probedone,
5771			     MSG_SIMPLE_Q_TAG,
5772			     (u_int8_t *)inq_buf,
5773			     inquiry_len,
5774			     /*evpd*/FALSE,
5775			     /*page_code*/0,
5776			     SSD_MIN_SIZE,
5777			     /*timeout*/60 * 1000);
5778		break;
5779	}
5780	case PROBE_MODE_SENSE:
5781	{
5782		void  *mode_buf;
5783		int    mode_buf_len;
5784
5785		mode_buf_len = sizeof(struct scsi_mode_header_6)
5786			     + sizeof(struct scsi_mode_blk_desc)
5787			     + sizeof(struct scsi_control_page);
5788		mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT);
5789		if (mode_buf != NULL) {
			scsi_mode_sense(csio,
5791					/*retries*/4,
5792					probedone,
5793					MSG_SIMPLE_Q_TAG,
5794					/*dbd*/FALSE,
5795					SMS_PAGE_CTRL_CURRENT,
5796					SMS_CONTROL_MODE_PAGE,
5797					mode_buf,
5798					mode_buf_len,
5799					SSD_FULL_SIZE,
5800					/*timeout*/60000);
5801			break;
5802		}
5803		xpt_print(periph->path, "Unable to mode sense control page - "
5804		    "malloc failure\n");
5805		softc->action = PROBE_SERIAL_NUM;
5806	}
5807	/* FALLTHROUGH */
5808	case PROBE_SERIAL_NUM:
5809	{
5810		struct scsi_vpd_unit_serial_number *serial_buf;
5811		struct cam_ed* device;
5812
5813		serial_buf = NULL;
5814		device = periph->path->device;
		if (device->serial_num != NULL) {
			/* Free the serial number from a previous instance */
			free(device->serial_num, M_CAMXPT);
			device->serial_num = NULL;
			device->serial_num_len = 0;
		}
5817
5818		if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0)
5819			serial_buf = (struct scsi_vpd_unit_serial_number *)
5820				malloc(sizeof(*serial_buf), M_TEMP,
5821					M_NOWAIT | M_ZERO);
5822
5823		if (serial_buf != NULL) {
5824			scsi_inquiry(csio,
5825				     /*retries*/4,
5826				     probedone,
5827				     MSG_SIMPLE_Q_TAG,
5828				     (u_int8_t *)serial_buf,
5829				     sizeof(*serial_buf),
5830				     /*evpd*/TRUE,
5831				     SVPD_UNIT_SERIAL_NUMBER,
5832				     SSD_MIN_SIZE,
5833				     /*timeout*/60 * 1000);
5834			break;
5835		}
5836		/*
		 * We'll have to do without; let our probedone
5838		 * routine finish up for us.
5839		 */
5840		start_ccb->csio.data_ptr = NULL;
5841		probedone(periph, start_ccb);
5842		return;
5843	}
5844	}
5845	xpt_action(start_ccb);
5846}
5847
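/*
 * Fetch the stored user (default) transfer settings and install them as
 * the current settings, prompting the SIM to renegotiate from a clean
 * slate on the next command sent to the device.
 */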
5848static void
5849proberequestdefaultnegotiation(struct cam_periph *periph)
5850{
5851	struct ccb_trans_settings cts;
5852
5853	xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
5854	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5855	cts.type = CTS_TYPE_USER_SETTINGS;
5856	xpt_action((union ccb *)&cts);
5857	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5858		return;
5859	}
5860	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5861	cts.type = CTS_TYPE_CURRENT_SETTINGS;
5862	xpt_action((union ccb *)&cts);
5863}
5864
5865/*
 * Backoff Negotiation Code: only pertinent for SPI devices.
5867 */
5868static int
5869proberequestbackoff(struct cam_periph *periph, struct cam_ed *device)
5870{
5871	struct ccb_trans_settings cts;
5872	struct ccb_trans_settings_spi *spi;
5873
5874	memset(&cts, 0, sizeof (cts));
5875	xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
5876	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5877	cts.type = CTS_TYPE_CURRENT_SETTINGS;
5878	xpt_action((union ccb *)&cts);
5879	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5880		if (bootverbose) {
5881			xpt_print(periph->path,
5882			    "failed to get current device settings\n");
5883		}
5884		return (0);
5885	}
5886	if (cts.transport != XPORT_SPI) {
5887		if (bootverbose) {
5888			xpt_print(periph->path, "not SPI transport\n");
5889		}
5890		return (0);
5891	}
5892	spi = &cts.xport_specific.spi;
5893
5894	/*
5895	 * We cannot renegotiate sync rate if we don't have one.
5896	 */
5897	if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
5898		if (bootverbose) {
5899			xpt_print(periph->path, "no sync rate known\n");
5900		}
5901		return (0);
5902	}
5903
5904	/*
	 * We'll assert that we don't have to touch PPR options; the
5906	 * SIM will see what we do with period and offset and adjust
5907	 * the PPR options as appropriate.
5908	 */
5909
5910	/*
5911	 * A sync rate with unknown or zero offset is nonsensical.
5912	 * A sync period of zero means Async.
5913	 */
5914	if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0
5915	 || spi->sync_offset == 0 || spi->sync_period == 0) {
5916		if (bootverbose) {
5917			xpt_print(periph->path, "no sync rate available\n");
5918		}
5919		return (0);
5920	}
5921
5922	if (device->flags & CAM_DEV_DV_HIT_BOTTOM) {
5923		CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
5924		    ("hit async: giving up on DV\n"));
5925		return (0);
	}

5929	/*
5930	 * Jump sync_period up by one, but stop at 5MHz and fall back to Async.
5931	 * We don't try to remember 'last' settings to see if the SIM actually
5932	 * gets into the speed we want to set. We check on the SIM telling
	 * us that a requested speed is bad, but otherwise don't try to
	 * check the speed due to the asynchronous and handshake nature
5935	 * of speed setting.
5936	 */
5937	spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET;
5938	for (;;) {
5939		spi->sync_period++;
5940		if (spi->sync_period >= 0xf) {
5941			spi->sync_period = 0;
5942			spi->sync_offset = 0;
5943			CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
5944			    ("setting to async for DV\n"));
5945			/*
5946			 * Once we hit async, we don't want to try
5947			 * any more settings.
5948			 */
5949			device->flags |= CAM_DEV_DV_HIT_BOTTOM;
5950		} else if (bootverbose) {
5951			CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
5952			    ("DV: period 0x%x\n", spi->sync_period));
5953			printf("setting period to 0x%x\n", spi->sync_period);
5954		}
5955		cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5956		cts.type = CTS_TYPE_CURRENT_SETTINGS;
5957		xpt_action((union ccb *)&cts);
5958		if ((cts.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5959			break;
5960		}
5961		CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
5962		    ("DV: failed to set period 0x%x\n", spi->sync_period));
5963		if (spi->sync_period == 0) {
5964			return (0);
5965		}
5966	}
5967	return (1);
5968}
5969
5970static void
5971probedone(struct cam_periph *periph, union ccb *done_ccb)
5972{
5973	probe_softc *softc;
5974	struct cam_path *path;
5975	u_int32_t  priority;
5976
5977	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
5978
5979	softc = (probe_softc *)periph->softc;
5980	path = done_ccb->ccb_h.path;
5981	priority = done_ccb->ccb_h.pinfo.priority;
5982
5983	switch (softc->action) {
5984	case PROBE_TUR:
5985	{
5986		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5987
5988			if (cam_periph_error(done_ccb, 0,
5989					     SF_NO_PRINT, NULL) == ERESTART)
5990				return;
5991			else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5992				/* Don't wedge the queue */
5993				xpt_release_devq(done_ccb->ccb_h.path,
5994						 /*count*/1,
5995						 /*run_queue*/TRUE);
5996		}
5997		softc->action = PROBE_INQUIRY;
5998		xpt_release_ccb(done_ccb);
5999		xpt_schedule(periph, priority);
6000		return;
6001	}
6002	case PROBE_INQUIRY:
6003	case PROBE_FULL_INQUIRY:
6004	{
6005		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
6006			struct scsi_inquiry_data *inq_buf;
6007			u_int8_t periph_qual;
6008
6009			path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
6010			inq_buf = &path->device->inq_data;
6011
6012			periph_qual = SID_QUAL(inq_buf);
6013
6014			switch(periph_qual) {
6015			case SID_QUAL_LU_CONNECTED:
6016			{
6017				u_int8_t len;
6018
6019				/*
6020				 * We conservatively request only
				 * SHORT_INQUIRY_LENGTH bytes of inquiry
6022				 * information during our first try
6023				 * at sending an INQUIRY. If the device
6024				 * has more information to give,
6025				 * perform a second request specifying
6026				 * the amount of information the device
6027				 * is willing to give.
6028				 */
6029				len = inq_buf->additional_length
6030				    + offsetof(struct scsi_inquiry_data,
6031                                               additional_length) + 1;
6032				if (softc->action == PROBE_INQUIRY
6033				 && len > SHORT_INQUIRY_LENGTH) {
6034					softc->action = PROBE_FULL_INQUIRY;
6035					xpt_release_ccb(done_ccb);
6036					xpt_schedule(periph, priority);
6037					return;
6038				}
6039
6040				xpt_find_quirk(path->device);
6041
6042				xpt_devise_transport(path);
6043				if (INQ_DATA_TQ_ENABLED(inq_buf))
6044					softc->action = PROBE_MODE_SENSE;
6045				else
6046					softc->action = PROBE_SERIAL_NUM;
6047
6048				path->device->flags &= ~CAM_DEV_UNCONFIGURED;
6049
6050				xpt_release_ccb(done_ccb);
6051				xpt_schedule(periph, priority);
6052				return;
6053			}
6054			default:
6055				break;
6056			}
6057		} else if (cam_periph_error(done_ccb, 0,
6058					    done_ccb->ccb_h.target_lun > 0
6059					    ? SF_RETRY_UA|SF_QUIET_IR
6060					    : SF_RETRY_UA,
6061					    &softc->saved_ccb) == ERESTART) {
6062			return;
6063		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6064			/* Don't wedge the queue */
6065			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6066					 /*run_queue*/TRUE);
6067		}
6068		/*
6069		 * If we get to this point, we got an error status back
6070		 * from the inquiry and the error status doesn't require
6071		 * automatically retrying the command.  Therefore, the
6072		 * inquiry failed.  If we had inquiry information before
6073		 * for this device, but this latest inquiry command failed,
6074		 * the device has probably gone away.  If this device isn't
6075		 * already marked unconfigured, notify the peripheral
6076		 * drivers that this device is no more.
6077		 */
6078		if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
6079			/* Send the async notification. */
6080			xpt_async(AC_LOST_DEVICE, path, NULL);
6081
6082		xpt_release_ccb(done_ccb);
6083		break;
6084	}
6085	case PROBE_MODE_SENSE:
6086	{
6087		struct ccb_scsiio *csio;
6088		struct scsi_mode_header_6 *mode_hdr;
6089
6090		csio = &done_ccb->csio;
6091		mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
6092		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
6093			struct scsi_control_page *page;
6094			u_int8_t *offset;
6095
6096			offset = ((u_int8_t *)&mode_hdr[1])
6097			    + mode_hdr->blk_desc_len;
6098			page = (struct scsi_control_page *)offset;
6099			path->device->queue_flags = page->queue_flags;
6100		} else if (cam_periph_error(done_ccb, 0,
6101					    SF_RETRY_UA|SF_NO_PRINT,
6102					    &softc->saved_ccb) == ERESTART) {
6103			return;
6104		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6105			/* Don't wedge the queue */
6106			xpt_release_devq(done_ccb->ccb_h.path,
6107					 /*count*/1, /*run_queue*/TRUE);
6108		}
6109		xpt_release_ccb(done_ccb);
6110		free(mode_hdr, M_TEMP);
6111		softc->action = PROBE_SERIAL_NUM;
6112		xpt_schedule(periph, priority);
6113		return;
6114	}
6115	case PROBE_SERIAL_NUM:
6116	{
6117		struct ccb_scsiio *csio;
6118		struct scsi_vpd_unit_serial_number *serial_buf;
6119		u_int32_t  priority;
6120		int changed;
6121		int have_serialnum;
6122
6123		changed = 1;
6124		have_serialnum = 0;
6125		csio = &done_ccb->csio;
6126		priority = done_ccb->ccb_h.pinfo.priority;
6127		serial_buf =
6128		    (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
6129
6130		/* Clean up from previous instance of this device */
6131		if (path->device->serial_num != NULL) {
6132			free(path->device->serial_num, M_CAMXPT);
6133			path->device->serial_num = NULL;
6134			path->device->serial_num_len = 0;
6135		}
6136
6137		if (serial_buf == NULL) {
6138			/*
6139			 * Don't process the command as it was never sent
6140			 */
6141		} else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
6142			&& (serial_buf->length > 0)) {
6143
6144			have_serialnum = 1;
6145			path->device->serial_num =
6146				(u_int8_t *)malloc((serial_buf->length + 1),
6147						   M_CAMXPT, M_NOWAIT);
6148			if (path->device->serial_num != NULL) {
6149				bcopy(serial_buf->serial_num,
6150				      path->device->serial_num,
6151				      serial_buf->length);
6152				path->device->serial_num_len =
6153				    serial_buf->length;
6154				path->device->serial_num[serial_buf->length]
6155				    = '\0';
6156			}
6157		} else if (cam_periph_error(done_ccb, 0,
6158					    SF_RETRY_UA|SF_NO_PRINT,
6159					    &softc->saved_ccb) == ERESTART) {
6160			return;
6161		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6162			/* Don't wedge the queue */
6163			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6164					 /*run_queue*/TRUE);
6165		}
6166
6167		/*
6168		 * Let's see if we have seen this device before.
6169		 */
6170		if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
6171			MD5_CTX context;
6172			u_int8_t digest[16];
6173
6174			MD5Init(&context);
6175
6176			MD5Update(&context,
6177				  (unsigned char *)&path->device->inq_data,
6178				  sizeof(struct scsi_inquiry_data));
6179
6180			if (have_serialnum)
6181				MD5Update(&context, serial_buf->serial_num,
6182					  serial_buf->length);
6183
6184			MD5Final(digest, &context);
6185			if (bcmp(softc->digest, digest, 16) == 0)
6186				changed = 0;
6187
6188			/*
6189			 * XXX Do we need to do a TUR in order to ensure
6190			 *     that the device really hasn't changed???
6191			 */
6192			if ((changed != 0)
6193			 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
6194				xpt_async(AC_LOST_DEVICE, path, NULL);
6195		}
6196		if (serial_buf != NULL)
6197			free(serial_buf, M_TEMP);
6198
6199		if (changed != 0) {
6200			/*
			 * We now have all the information necessary to
			 * safely perform transfer negotiation.  Controllers
			 * don't perform any negotiation or tagged queuing
			 * until after the first XPT_SET_TRAN_SETTINGS ccb is
			 * received.  So, on a new device, just retrieve the
			 * user settings, and set them as the current
			 * settings to set the device up.
6209			 */
6210			proberequestdefaultnegotiation(periph);
6211			xpt_release_ccb(done_ccb);
6212
6213			/*
6214			 * Perform a TUR to allow the controller to
6215			 * perform any necessary transfer negotiation.
6216			 */
6217			softc->action = PROBE_TUR_FOR_NEGOTIATION;
6218			xpt_schedule(periph, priority);
6219			return;
6220		}
6221		xpt_release_ccb(done_ccb);
6222		break;
6223	}
6224	case PROBE_TUR_FOR_NEGOTIATION:
6225	case PROBE_DV_EXIT:
6226		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6227			/* Don't wedge the queue */
6228			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6229					 /*run_queue*/TRUE);
6230		}
6231		/*
6232		 * Do Domain Validation for lun 0 on devices that claim
6233		 * to support Synchronous Transfer modes.
6234		 */
		if (softc->action == PROBE_TUR_FOR_NEGOTIATION
		 && done_ccb->ccb_h.target_lun == 0
		 && (path->device->inq_data.flags & SID_Sync) != 0
		 && (path->device->flags & CAM_DEV_IN_DV) == 0) {
6239			CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6240			    ("Begin Domain Validation\n"));
6241			path->device->flags |= CAM_DEV_IN_DV;
6242			xpt_release_ccb(done_ccb);
6243			softc->action = PROBE_INQUIRY_BASIC_DV1;
6244			xpt_schedule(periph, priority);
6245			return;
6246		}
6247		if (softc->action == PROBE_DV_EXIT) {
6248			CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6249			    ("Leave Domain Validation\n"));
6250		}
6251		path->device->flags &=
6252		    ~(CAM_DEV_UNCONFIGURED|CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
6253		if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
6254			/* Inform the XPT that a new device has been found */
6255			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
6256			xpt_action(done_ccb);
6257			xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
6258				  done_ccb);
6259		}
6260		xpt_release_ccb(done_ccb);
6261		break;
6262	case PROBE_INQUIRY_BASIC_DV1:
6263	case PROBE_INQUIRY_BASIC_DV2:
6264	{
6265		struct scsi_inquiry_data *nbuf;
6266		struct ccb_scsiio *csio;
6267
6268		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6269			/* Don't wedge the queue */
6270			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6271					 /*run_queue*/TRUE);
6272		}
6273		csio = &done_ccb->csio;
6274		nbuf = (struct scsi_inquiry_data *)csio->data_ptr;
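		/*
		 * Compare the fresh inquiry data with what we recorded at
		 * probe time; any difference means data is being corrupted
		 * in transit, so back the sync rate off and try again.
		 */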
6275		if (bcmp(nbuf, &path->device->inq_data, SHORT_INQUIRY_LENGTH)) {
6276			xpt_print(path,
6277			    "inquiry data fails comparison at DV%d step\n",
6278			    softc->action == PROBE_INQUIRY_BASIC_DV1? 1 : 2);
6279			if (proberequestbackoff(periph, path->device)) {
6280				path->device->flags &= ~CAM_DEV_IN_DV;
6281				softc->action = PROBE_TUR_FOR_NEGOTIATION;
6282			} else {
6283				/* give up */
6284				softc->action = PROBE_DV_EXIT;
6285			}
6286			free(nbuf, M_TEMP);
6287			xpt_release_ccb(done_ccb);
6288			xpt_schedule(periph, priority);
6289			return;
6290		}
6291		free(nbuf, M_TEMP);
6292		if (softc->action == PROBE_INQUIRY_BASIC_DV1) {
6293			softc->action = PROBE_INQUIRY_BASIC_DV2;
6294			xpt_release_ccb(done_ccb);
6295			xpt_schedule(periph, priority);
6296			return;
6297		}
6298		if (softc->action == PROBE_DV_EXIT) {
6299			CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6300			    ("Leave Domain Validation Successfully\n"));
6301		}
6302		path->device->flags &=
6303		    ~(CAM_DEV_UNCONFIGURED|CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
6304		if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
6305			/* Inform the XPT that a new device has been found */
6306			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
6307			xpt_action(done_ccb);
6308			xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
6309				  done_ccb);
6310		}
6311		xpt_release_ccb(done_ccb);
6312		break;
6313	}
6314	}
6315	done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
6316	TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
6317	done_ccb->ccb_h.status = CAM_REQ_CMP;
6318	xpt_done(done_ccb);
6319	if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
6320		cam_periph_invalidate(periph);
6321		cam_periph_release(periph);
6322	} else {
6323		probeschedule(periph);
6324	}
6325}
6326
6327static void
6328probecleanup(struct cam_periph *periph)
6329{
6330	free(periph->softc, M_TEMP);
6331}
6332
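/*
 * Match the device's inquiry data against the quirk table.  The table
 * is terminated by a wildcard entry that matches anything, so failing
 * to match at all indicates a corrupt table and is fatal.
 */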
6333static void
6334xpt_find_quirk(struct cam_ed *device)
6335{
6336	caddr_t	match;
6337
6338	match = cam_quirkmatch((caddr_t)&device->inq_data,
6339			       (caddr_t)xpt_quirk_table,
6340			       sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
6341			       sizeof(*xpt_quirk_table), scsi_inquiry_match);
6342
6343	if (match == NULL)
6344		panic("xpt_find_quirk: device didn't match wildcard entry!!");
6345
6346	device->quirk = (struct xpt_quirk_entry *)match;
6347}
6348
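/*
 * Sysctl handler for the "search high LUNs" knob; only the values 0 and
 * 1 are accepted.  (The handler is presumably attached elsewhere in
 * this file, e.g. as kern.cam.cam_srch_hi, in which case it would be
 * driven from userland with something like
 * "sysctl kern.cam.cam_srch_hi=1".)
 */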
6349static int
6350sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS)
6351{
	int error, val;

	val = cam_srch_hi;
	error = sysctl_handle_int(oidp, &val, sizeof(val), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val == 0 || val == 1) {
		cam_srch_hi = val;
6360		return (0);
6361	} else {
6362		return (EINVAL);
6363	}
}

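/*
 * Decide what protocol/transport (and versions) to ascribe to this
 * device, using the SIM's path inquiry data and, when available, the
 * device's own inquiry data, and then push the result to the
 * controller via XPT_SET_TRAN_SETTINGS.
 */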
6367static void
6368xpt_devise_transport(struct cam_path *path)
6369{
6370	struct ccb_pathinq cpi;
6371	struct ccb_trans_settings cts;
6372	struct scsi_inquiry_data *inq_buf;
6373
6374	/* Get transport information from the SIM */
6375	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
6376	cpi.ccb_h.func_code = XPT_PATH_INQ;
6377	xpt_action((union ccb *)&cpi);
6378
6379	inq_buf = NULL;
6380	if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
6381		inq_buf = &path->device->inq_data;
6382	path->device->protocol = PROTO_SCSI;
6383	path->device->protocol_version =
6384	    inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
6385	path->device->transport = cpi.transport;
6386	path->device->transport_version = cpi.transport_version;
6387
6388	/*
6389	 * Any device not using SPI3 features should
6390	 * be considered SPI2 or lower.
6391	 */
6392	if (inq_buf != NULL) {
6393		if (path->device->transport == XPORT_SPI
6394		 && (inq_buf->spi3data & SID_SPI_MASK) == 0
6395		 && path->device->transport_version > 2)
6396			path->device->transport_version = 2;
6397	} else {
6398		struct cam_ed* otherdev;
6399
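		/*
		 * No inquiry data for this device yet; see whether any
		 * other lun on this target has already been probed.
		 */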
6400		for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
6401		     otherdev != NULL;
6402		     otherdev = TAILQ_NEXT(otherdev, links)) {
6403			if (otherdev != path->device)
6404				break;
6405		}
6406
6407		if (otherdev != NULL) {
6408			/*
6409			 * Initially assume the same versioning as
6410			 * prior luns for this target.
6411			 */
6412			path->device->protocol_version =
6413			    otherdev->protocol_version;
6414			path->device->transport_version =
6415			    otherdev->transport_version;
6416		} else {
			/* Until we know better, opt for safety */
6418			path->device->protocol_version = 2;
6419			if (path->device->transport == XPORT_SPI)
6420				path->device->transport_version = 2;
6421			else
6422				path->device->transport_version = 0;
6423		}
6424	}
6425
6426	/*
6427	 * XXX
6428	 * For a device compliant with SPC-2 we should be able
6429	 * to determine the transport version supported by
6430	 * scrutinizing the version descriptors in the
6431	 * inquiry buffer.
6432	 */
6433
6434	/* Tell the controller what we think */
6435	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
6436	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
6437	cts.type = CTS_TYPE_CURRENT_SETTINGS;
6438	cts.transport = path->device->transport;
6439	cts.transport_version = path->device->transport_version;
6440	cts.protocol = path->device->protocol;
6441	cts.protocol_version = path->device->protocol_version;
6442	cts.proto_specific.valid = 0;
6443	cts.xport_specific.valid = 0;
6444	xpt_action((union ccb *)&cts);
6445}
6446
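/*
 * Sanity check and apply a transfer settings request: fill in any
 * unspecified fields from the device's known state, clamp the request
 * to what the controller, device, and quirk table allow, and then hand
 * it to the SIM.
 */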
6447static void
6448xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
6449			  int async_update)
6450{
6451	struct	ccb_pathinq cpi;
6452	struct	ccb_trans_settings cur_cts;
6453	struct	ccb_trans_settings_scsi *scsi;
6454	struct	ccb_trans_settings_scsi *cur_scsi;
6455	struct	cam_sim *sim;
6456	struct	scsi_inquiry_data *inq_data;
6457
6458	if (device == NULL) {
6459		cts->ccb_h.status = CAM_PATH_INVALID;
6460		xpt_done((union ccb *)cts);
6461		return;
6462	}
6463
6464	if (cts->protocol == PROTO_UNKNOWN
6465	 || cts->protocol == PROTO_UNSPECIFIED) {
6466		cts->protocol = device->protocol;
6467		cts->protocol_version = device->protocol_version;
6468	}
6469
6470	if (cts->protocol_version == PROTO_VERSION_UNKNOWN
6471	 || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
6472		cts->protocol_version = device->protocol_version;
6473
6474	if (cts->protocol != device->protocol) {
6475		xpt_print(cts->ccb_h.path, "Uninitialized Protocol %x:%x?\n",
6476		       cts->protocol, device->protocol);
6477		cts->protocol = device->protocol;
6478	}
6479
6480	if (cts->protocol_version > device->protocol_version) {
6481		if (bootverbose) {
			xpt_print(cts->ccb_h.path, "Down revving Protocol "
6483			    "Version from %d to %d?\n", cts->protocol_version,
6484			    device->protocol_version);
6485		}
6486		cts->protocol_version = device->protocol_version;
6487	}
6488
6489	if (cts->transport == XPORT_UNKNOWN
6490	 || cts->transport == XPORT_UNSPECIFIED) {
6491		cts->transport = device->transport;
6492		cts->transport_version = device->transport_version;
6493	}
6494
6495	if (cts->transport_version == XPORT_VERSION_UNKNOWN
6496	 || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
6497		cts->transport_version = device->transport_version;
6498
6499	if (cts->transport != device->transport) {
6500		xpt_print(cts->ccb_h.path, "Uninitialized Transport %x:%x?\n",
6501		    cts->transport, device->transport);
6502		cts->transport = device->transport;
6503	}
6504
6505	if (cts->transport_version > device->transport_version) {
6506		if (bootverbose) {
			xpt_print(cts->ccb_h.path, "Down revving Transport "
6508			    "Version from %d to %d?\n", cts->transport_version,
6509			    device->transport_version);
6510		}
6511		cts->transport_version = device->transport_version;
6512	}
6513
6514	sim = cts->ccb_h.path->bus->sim;
6515
6516	/*
6517	 * Nothing more of interest to do unless
6518	 * this is a device connected via the
6519	 * SCSI protocol.
6520	 */
6521	if (cts->protocol != PROTO_SCSI) {
6522		if (async_update == FALSE)
6523			(*(sim->sim_action))(sim, (union ccb *)cts);
6524		return;
6525	}
6526
6527	inq_data = &device->inq_data;
6528	scsi = &cts->proto_specific.scsi;
6529	xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6530	cpi.ccb_h.func_code = XPT_PATH_INQ;
6531	xpt_action((union ccb *)&cpi);
6532
6533	/* SCSI specific sanity checking */
6534	if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6535	 || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
6536	 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6537	 || (device->quirk->mintags == 0)) {
6538		/*
6539		 * Can't tag on hardware that doesn't support tags,
6540		 * doesn't have it enabled, or has broken tag support.
6541		 */
6542		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6543	}
6544
6545	if (async_update == FALSE) {
6546		/*
6547		 * Perform sanity checking against what the
6548		 * controller and device can do.
6549		 */
6550		xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6551		cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6552		cur_cts.type = cts->type;
6553		xpt_action((union ccb *)&cur_cts);
6554		if ((cur_cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
6555			return;
6556		}
6557		cur_scsi = &cur_cts.proto_specific.scsi;
6558		if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
6559			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6560			scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
6561		}
6562		if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
6563			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6564	}
6565
6566	/* SPI specific sanity checking */
6567	if (cts->transport == XPORT_SPI && async_update == FALSE) {
6568		u_int spi3caps;
6569		struct ccb_trans_settings_spi *spi;
6570		struct ccb_trans_settings_spi *cur_spi;
6571
6572		spi = &cts->xport_specific.spi;
6573
6574		cur_spi = &cur_cts.xport_specific.spi;
6575
6576		/* Fill in any gaps in what the user gave us */
6577		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6578			spi->sync_period = cur_spi->sync_period;
6579		if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6580			spi->sync_period = 0;
6581		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6582			spi->sync_offset = cur_spi->sync_offset;
6583		if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6584			spi->sync_offset = 0;
6585		if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6586			spi->ppr_options = cur_spi->ppr_options;
6587		if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6588			spi->ppr_options = 0;
6589		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6590			spi->bus_width = cur_spi->bus_width;
6591		if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6592			spi->bus_width = 0;
6593		if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
6594			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6595			spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
6596		}
6597		if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
6598			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6599		if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6600		  && (inq_data->flags & SID_Sync) == 0
6601		  && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6602		 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6603		 || (spi->sync_offset == 0)
6604		 || (spi->sync_period == 0)) {
6605			/* Force async */
6606			spi->sync_period = 0;
6607			spi->sync_offset = 0;
6608		}
6609
6610		switch (spi->bus_width) {
6611		case MSG_EXT_WDTR_BUS_32_BIT:
6612			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6613			  || (inq_data->flags & SID_WBus32) != 0
6614			  || cts->type == CTS_TYPE_USER_SETTINGS)
6615			 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6616				break;
6617			/* Fall Through to 16-bit */
6618		case MSG_EXT_WDTR_BUS_16_BIT:
6619			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6620			  || (inq_data->flags & SID_WBus16) != 0
6621			  || cts->type == CTS_TYPE_USER_SETTINGS)
6622			 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6623				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6624				break;
6625			}
6626			/* Fall Through to 8-bit */
6627		default: /* New bus width?? */
6628		case MSG_EXT_WDTR_BUS_8_BIT:
6629			/* All targets can do this */
6630			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6631			break;
6632		}
6633
6634		spi3caps = cpi.xport_specific.spi.ppr_options;
6635		if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6636		 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6637			spi3caps &= inq_data->spi3data;
6638
6639		if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
6640			spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
6641
6642		if ((spi3caps & SID_SPI_IUS) == 0)
6643			spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
6644
6645		if ((spi3caps & SID_SPI_QAS) == 0)
6646			spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
6647
		/* PPR options are not allowed unless we are wide */
6649		if (spi->bus_width == 0)
6650			spi->ppr_options = 0;
6651
6652		if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0) {
6653			/*
6654			 * Can't tag queue without disconnection.
6655			 */
6656			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6657			scsi->valid |= CTS_SCSI_VALID_TQ;
6658		}
6659
6660		/*
6661		 * If we are currently performing tagged transactions to
6662		 * this device and want to change its negotiation parameters,
6663		 * go non-tagged for a bit to give the controller a chance to
6664		 * negotiate unhampered by tag messages.
6665		 */
6666		if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6667		 && (device->inq_flags & SID_CmdQue) != 0
6668		 && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
		 && (spi->valid & (CTS_SPI_VALID_SYNC_RATE|
6670				   CTS_SPI_VALID_SYNC_OFFSET|
6671				   CTS_SPI_VALID_BUS_WIDTH)) != 0)
6672			xpt_toggle_tags(cts->ccb_h.path);
6673	}
6674
6675	if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6676	 && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
6677		int device_tagenb;
6678
6679		/*
6680		 * If we are transitioning from tags to no-tags or
6681		 * vice-versa, we need to carefully freeze and restart
6682		 * the queue so that we don't overlap tagged and non-tagged
6683		 * commands.  We also temporarily stop tags if there is
6684		 * a change in transfer negotiation settings to allow
6685		 * "tag-less" negotiation.
6686		 */
6687		if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6688		 || (device->inq_flags & SID_CmdQue) != 0)
6689			device_tagenb = TRUE;
6690		else
6691			device_tagenb = FALSE;
6692
6693		if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6694		  && device_tagenb == FALSE)
6695		 || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
6696		  && device_tagenb == TRUE)) {
6697
6698			if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
6699				/*
6700				 * Delay change to use tags until after a
6701				 * few commands have gone to this device so
6702				 * the controller has time to perform transfer
6703				 * negotiations without tagged messages getting
6704				 * in the way.
6705				 */
6706				device->tag_delay_count = CAM_TAG_DELAY_COUNT;
6707				device->flags |= CAM_DEV_TAG_AFTER_COUNT;
6708			} else {
6709				struct ccb_relsim crs;
6710
6711				xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
				device->inq_flags &= ~SID_CmdQue;
6713				xpt_dev_ccbq_resize(cts->ccb_h.path,
6714						    sim->max_dev_openings);
6715				device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6716				device->tag_delay_count = 0;
6717
6718				xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
6719					      /*priority*/1);
6720				crs.ccb_h.func_code = XPT_REL_SIMQ;
6721				crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6722				crs.openings
6723				    = crs.release_timeout
6724				    = crs.qfrozen_cnt
6725				    = 0;
6726				xpt_action((union ccb *)&crs);
6727			}
6728		}
6729	}
6730	if (async_update == FALSE)
6731		(*(sim->sim_action))(sim, (union ccb *)cts);
}

6735static void
6736xpt_toggle_tags(struct cam_path *path)
6737{
6738	struct cam_ed *dev;
6739
6740	/*
6741	 * Give controllers a chance to renegotiate
6742	 * before starting tag operations.  We
6743	 * "toggle" tagged queuing off then on
6744	 * which causes the tag enable command delay
6745	 * counter to come into effect.
6746	 */
6747	dev = path->device;
6748	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6749	 || ((dev->inq_flags & SID_CmdQue) != 0
	  && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
6751		struct ccb_trans_settings cts;
6752
6753		xpt_setup_ccb(&cts.ccb_h, path, 1);
6754		cts.protocol = PROTO_SCSI;
6755		cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
6756		cts.transport = XPORT_UNSPECIFIED;
6757		cts.transport_version = XPORT_VERSION_UNSPECIFIED;
6758		cts.proto_specific.scsi.flags = 0;
6759		cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
6760		xpt_set_transfer_settings(&cts, path->device,
6761					  /*async_update*/TRUE);
6762		cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
6763		xpt_set_transfer_settings(&cts, path->device,
6764					  /*async_update*/TRUE);
6765	}
6766}
6767
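/*
 * Enable tagged queuing on a device: freeze its queue, mark it tag
 * capable, resize its CCB queue to the tagged opening count (previously
 * saved or derived from the quirk table), and ask the SIM to release
 * the queue once it drains.
 */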
6768static void
6769xpt_start_tags(struct cam_path *path)
6770{
6771	struct ccb_relsim crs;
6772	struct cam_ed *device;
6773	struct cam_sim *sim;
6774	int    newopenings;
6775
6776	device = path->device;
6777	sim = path->bus->sim;
6778	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6779	xpt_freeze_devq(path, /*count*/1);
6780	device->inq_flags |= SID_CmdQue;
6781	if (device->tag_saved_openings != 0)
6782		newopenings = device->tag_saved_openings;
6783	else
6784		newopenings = min(device->quirk->maxtags,
6785				  sim->max_tagged_dev_openings);
6786	xpt_dev_ccbq_resize(path, newopenings);
6787	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
6788	crs.ccb_h.func_code = XPT_REL_SIMQ;
6789	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6790	crs.openings
6791	    = crs.release_timeout
6792	    = crs.qfrozen_cnt
6793	    = 0;
6794	xpt_action((union ccb *)&crs);
6795}
6796
6797static int busses_to_config;
6798static int busses_to_reset;
6799
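/*
 * Count the busses that need configuring, and how many of those are
 * candidates for an initial bus reset (resettable and capable of
 * transfer negotiation).
 */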
6800static int
6801xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
6802{
6803
6804	mtx_assert(bus->sim->mtx, MA_OWNED);
6805
6806	if (bus->path_id != CAM_XPT_PATH_ID) {
6807		struct cam_path path;
6808		struct ccb_pathinq cpi;
6809		int can_negotiate;
6810
6811		busses_to_config++;
6812		xpt_compile_path(&path, NULL, bus->path_id,
6813				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
6814		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
6815		cpi.ccb_h.func_code = XPT_PATH_INQ;
6816		xpt_action((union ccb *)&cpi);
6817		can_negotiate = cpi.hba_inquiry;
6818		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6819		if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
6820		 && can_negotiate)
6821			busses_to_reset++;
6822		xpt_release_path(&path);
6823	}
6824
6825	return(1);
6826}
6827
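/*
 * Begin configuration of a single bus: issue a path inquiry and, when
 * the bus is resettable and can negotiate, a bus reset.  The CCB is
 * then handed to xpt_finishconfig(), which turns a successful reset
 * into a bus scan.
 */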
6828static int
6829xptconfigfunc(struct cam_eb *bus, void *arg)
6830{
6831	struct	cam_path *path;
6832	union	ccb *work_ccb;
6833
6834	mtx_assert(bus->sim->mtx, MA_OWNED);
6835
6836	if (bus->path_id != CAM_XPT_PATH_ID) {
6837		cam_status status;
6838		int can_negotiate;
6839
6840		work_ccb = xpt_alloc_ccb_nowait();
6841		if (work_ccb == NULL) {
6842			busses_to_config--;
6843			xpt_finishconfig(xpt_periph, NULL);
6844			return(0);
6845		}
6846		if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
6847					      CAM_TARGET_WILDCARD,
6848					      CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){
6849			printf("xptconfigfunc: xpt_create_path failed with "
6850			       "status %#x for bus %d\n", status, bus->path_id);
6851			printf("xptconfigfunc: halting bus configuration\n");
6852			xpt_free_ccb(work_ccb);
6853			busses_to_config--;
6854			xpt_finishconfig(xpt_periph, NULL);
6855			return(0);
6856		}
6857		xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6858		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
6859		xpt_action(work_ccb);
6860		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
6861			printf("xptconfigfunc: CPI failed on bus %d "
6862			       "with status %d\n", bus->path_id,
6863			       work_ccb->ccb_h.status);
6864			xpt_finishconfig(xpt_periph, work_ccb);
6865			return(1);
6866		}
6867
6868		can_negotiate = work_ccb->cpi.hba_inquiry;
6869		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6870		if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
6871		 && (can_negotiate != 0)) {
6872			xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6873			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6874			work_ccb->ccb_h.cbfcnp = NULL;
6875			CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
6876				  ("Resetting Bus\n"));
6877			xpt_action(work_ccb);
6878			xpt_finishconfig(xpt_periph, work_ccb);
6879		} else {
6880			/* Act as though we performed a successful BUS RESET */
6881			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6882			xpt_finishconfig(xpt_periph, work_ccb);
6883		}
6884	}
6885
6886	return(1);
6887}
6888
6889static void
6890xpt_config(void *arg)
6891{
6892	/*
6893	 * Now that interrupts are enabled, go find our devices
6894	 */
6895
6896#ifdef CAMDEBUG
6897	/* Setup debugging flags and path */
6898#ifdef CAM_DEBUG_FLAGS
6899	cam_dflags = CAM_DEBUG_FLAGS;
6900#else /* !CAM_DEBUG_FLAGS */
6901	cam_dflags = CAM_DEBUG_NONE;
6902#endif /* CAM_DEBUG_FLAGS */
6903#ifdef CAM_DEBUG_BUS
6904	if (cam_dflags != CAM_DEBUG_NONE) {
6905		/*
6906		 * Locking is specifically omitted here.  No SIMs have
6907		 * registered yet, so xpt_create_path will only be searching
6908		 * empty lists of targets and devices.
6909		 */
6910		if (xpt_create_path(&cam_dpath, xpt_periph,
6911				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
6912				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
6913			printf("xpt_config: xpt_create_path() failed for debug"
6914			       " target %d:%d:%d, debugging disabled\n",
6915			       CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
6916			cam_dflags = CAM_DEBUG_NONE;
6917		}
6918	} else
6919		cam_dpath = NULL;
6920#else /* !CAM_DEBUG_BUS */
6921	cam_dpath = NULL;
6922#endif /* CAM_DEBUG_BUS */
6923#endif /* CAMDEBUG */
6924
6925	/*
6926	 * Scan all installed busses.
6927	 */
6928	xpt_for_all_busses(xptconfigbuscountfunc, NULL);
6929
6930	if (busses_to_config == 0) {
6931		/* Call manually because we don't have any busses */
6932		xpt_finishconfig(xpt_periph, NULL);
6933	} else  {
6934		if (busses_to_reset > 0 && scsi_delay >= 2000) {
6935			printf("Waiting %d seconds for SCSI "
6936			       "devices to settle\n", scsi_delay/1000);
6937		}
6938		xpt_for_all_busses(xptconfigfunc, NULL);
6939	}
6940}
6941
6942/*
6943 * If the given device only has one peripheral attached to it, and if that
 * peripheral is the passthrough driver, announce it.  This ensures that the
6945 * user sees some sort of announcement for every peripheral in their system.
6946 */
6947static int
6948xptpassannouncefunc(struct cam_ed *device, void *arg)
6949{
6950	struct cam_periph *periph;
6951	int i;
6952
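	/* Count the peripherals attached to this device. */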
6953	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
6954	     periph = SLIST_NEXT(periph, periph_links), i++);
6955
6956	periph = SLIST_FIRST(&device->periphs);
6957	if ((i == 1)
6958	 && (strncmp(periph->periph_name, "pass", 4) == 0))
6959		xpt_announce_periph(periph, NULL);
6960
6961	return(1);
6962}
6963
6964static void
6965xpt_finishconfig_task(void *context, int pending)
6966{
6967	struct	periph_driver **p_drv;
6968	int	i;
6969
6970	if (busses_to_config == 0) {
6971		/* Register all the peripheral drivers */
6972		/* XXX This will have to change when we have loadable modules */
6973		p_drv = periph_drivers;
6974		for (i = 0; p_drv[i] != NULL; i++) {
6975			(*p_drv[i]->init)();
6976		}
6977
6978		/*
6979		 * Check for devices with no "standard" peripheral driver
6980		 * attached.  For any devices like that, announce the
6981		 * passthrough driver so the user will see something.
6982		 */
6983		xpt_for_all_devices(xptpassannouncefunc, NULL);
6984
6985		/* Release our hook so that the boot can continue. */
6986		config_intrhook_disestablish(xsoftc.xpt_config_hook);
6987		free(xsoftc.xpt_config_hook, M_TEMP);
6988		xsoftc.xpt_config_hook = NULL;
6989	}
6990
6991	free(context, M_CAMXPT);
6992}
6993
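/*
 * Advance the boot-time configuration state machine: a completed bus
 * reset becomes a bus scan, a completed scan retires the bus, and the
 * final bookkeeping is deferred to a taskqueue task.
 */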
6994static void
6995xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
6996{
6997	struct	xpt_task *task;
6998
6999	if (done_ccb != NULL) {
7000		CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
7001			  ("xpt_finishconfig\n"));
7002		switch(done_ccb->ccb_h.func_code) {
7003		case XPT_RESET_BUS:
7004			if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
7005				done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
7006				done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
7007				done_ccb->crcn.flags = 0;
7008				xpt_action(done_ccb);
7009				return;
7010			}
7011			/* FALLTHROUGH */
7012		case XPT_SCAN_BUS:
7013		default:
7014			xpt_free_path(done_ccb->ccb_h.path);
7015			busses_to_config--;
7016			break;
7017		}
7018	}
7019
7020	task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
7021	if (task != NULL) {
7022		TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
7023		taskqueue_enqueue(taskqueue_thread, &task->task);
7024	}
7025
7026	if (done_ccb != NULL)
7027		xpt_free_ccb(done_ccb);
7028}
7029
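/*
 * Action routine for the xpt's own SIM.  It only needs to answer path
 * inquiries and reject everything else.
 */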
7030static void
7031xptaction(struct cam_sim *sim, union ccb *work_ccb)
7032{
7033	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
7034
7035	switch (work_ccb->ccb_h.func_code) {
7036	/* Common cases first */
7037	case XPT_PATH_INQ:		/* Path routing inquiry */
7038	{
7039		struct ccb_pathinq *cpi;
7040
7041		cpi = &work_ccb->cpi;
7042		cpi->version_num = 1; /* XXX??? */
7043		cpi->hba_inquiry = 0;
7044		cpi->target_sprt = 0;
7045		cpi->hba_misc = 0;
7046		cpi->hba_eng_cnt = 0;
7047		cpi->max_target = 0;
7048		cpi->max_lun = 0;
7049		cpi->initiator_id = 0;
7050		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
7051		strncpy(cpi->hba_vid, "", HBA_IDLEN);
7052		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
7053		cpi->unit_number = sim->unit_number;
7054		cpi->bus_id = sim->bus_id;
7055		cpi->base_transfer_speed = 0;
7056		cpi->protocol = PROTO_UNSPECIFIED;
7057		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
7058		cpi->transport = XPORT_UNSPECIFIED;
7059		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
7060		cpi->ccb_h.status = CAM_REQ_CMP;
7061		xpt_done(work_ccb);
7062		break;
7063	}
7064	default:
7065		work_ccb->ccb_h.status = CAM_REQ_INVALID;
7066		xpt_done(work_ccb);
7067		break;
7068	}
7069}
7070
7071/*
7072 * The xpt as a "controller" has no interrupt sources, so polling
7073 * is a no-op.
7074 */
7075static void
7076xptpoll(struct cam_sim *sim)
7077{
7078}
7079
7080void
7081xpt_lock_buses(void)
7082{
7083	mtx_lock(&xsoftc.xpt_topo_lock);
7084}
7085
7086void
7087xpt_unlock_buses(void)
7088{
7089	mtx_unlock(&xsoftc.xpt_topo_lock);
7090}
7091
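/*
 * Software interrupt handler: grab the global list of SIMs with
 * completed CCBs pending and run each SIM's done queue under that
 * SIM's lock.
 */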
7092static void
7093camisr(void *dummy)
7094{
7095	cam_simq_t queue;
7096	struct cam_sim *sim;
7097
7098	mtx_lock(&cam_simq_lock);
7099	TAILQ_INIT(&queue);
7100	TAILQ_CONCAT(&queue, &cam_simq, links);
7101	mtx_unlock(&cam_simq_lock);
7102
7103	while ((sim = TAILQ_FIRST(&queue)) != NULL) {
7104		TAILQ_REMOVE(&queue, sim, links);
7105		CAM_SIM_LOCK(sim);
7106		sim->flags &= ~CAM_SIM_ON_DONEQ;
7107		camisr_runqueue(&sim->sim_doneq);
7108		CAM_SIM_UNLOCK(sim);
7109	}
7110}
7111
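/*
 * Complete each CCB on the queue: update high-power and device queue
 * accounting, release frozen queues where requested, and finally call
 * the peripheral's completion routine.
 */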
7112static void
7113camisr_runqueue(void *V_queue)
7114{
7115	cam_isrq_t *queue = V_queue;
7116	struct	ccb_hdr *ccb_h;
7117
7118	while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
7119		int	runq;
7120
7121		TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
7122		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
7123
7124		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
7125			  ("camisr\n"));
7126
7127		runq = FALSE;
7128
7129		if (ccb_h->flags & CAM_HIGH_POWER) {
7130			struct highpowerlist	*hphead;
7131			union ccb		*send_ccb;
7132
7133			mtx_lock(&xsoftc.xpt_lock);
7134			hphead = &xsoftc.highpowerq;
7135
7136			send_ccb = (union ccb *)STAILQ_FIRST(hphead);
7137
7138			/*
7139			 * Increment the count since this command is done.
7140			 */
7141			xsoftc.num_highpower++;
7142
7143			/*
7144			 * Any high powered commands queued up?
7145			 */
7146			if (send_ccb != NULL) {
7147
7148				STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
7149				mtx_unlock(&xsoftc.xpt_lock);
7150
7151				xpt_release_devq(send_ccb->ccb_h.path,
7152						 /*count*/1, /*runqueue*/TRUE);
7153			} else
7154				mtx_unlock(&xsoftc.xpt_lock);
7155		}
7156
7157		if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
7158			struct cam_ed *dev;
7159
7160			dev = ccb_h->path->device;
7161
7162			cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
7163
7164			if (!SIM_DEAD(ccb_h->path->bus->sim)) {
7165				ccb_h->path->bus->sim->devq->send_active--;
7166				ccb_h->path->bus->sim->devq->send_openings++;
7167			}
7168
7169			if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
7170			  && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
7171			 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
7172			  && (dev->ccbq.dev_active == 0))) {
7173
7174				xpt_release_devq(ccb_h->path, /*count*/1,
7175						 /*run_queue*/TRUE);
7176			}
7177
7178			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
7179			 && (--dev->tag_delay_count == 0))
7180				xpt_start_tags(ccb_h->path);
7181
7182			if ((dev->ccbq.queue.entries > 0)
7183			 && (dev->qfrozen_cnt == 0)
7184			 && (device_is_send_queued(dev) == 0)) {
7185				runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
7186							      dev);
7187			}
7188		}
7189
7190		if (ccb_h->status & CAM_RELEASE_SIMQ) {
7191			xpt_release_simq(ccb_h->path->bus->sim,
7192					 /*run_queue*/TRUE);
7193			ccb_h->status &= ~CAM_RELEASE_SIMQ;
7194			runq = FALSE;
7195		}
7196
7197		if ((ccb_h->flags & CAM_DEV_QFRZDIS)
7198		 && (ccb_h->status & CAM_DEV_QFRZN)) {
7199			xpt_release_devq(ccb_h->path, /*count*/1,
7200					 /*run_queue*/TRUE);
7201			ccb_h->status &= ~CAM_DEV_QFRZN;
7202		} else if (runq) {
7203			xpt_run_dev_sendq(ccb_h->path->bus);
7204		}
7205
7206		/* Call the peripheral driver's callback */
7207		(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
7208	}
7209}
7210
7211static void
7212dead_sim_action(struct cam_sim *sim, union ccb *ccb)
7213{
7214
7215	ccb->ccb_h.status = CAM_DEV_NOT_THERE;
7216	xpt_done(ccb);
7217}
7218
7219static void
7220dead_sim_poll(struct cam_sim *sim)
7221{
7222}
7223