/*
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/cam/cam_xpt.c 65822 2000-09-13 18:33:25Z jhb $
 */
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
#include <sys/devicestat.h>
#include <sys/interrupt.h>

#ifdef PC98
#include <pc98/pc98/pc98_machdep.h>	/* geometry translation */
#endif

#include <machine/clock.h>
#include <machine/ipl.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>
#include "opt_cam.h"

/* Datastructures internal to the xpt layer */

/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
	SLIST_ENTRY(async_node)	links;
	u_int32_t	event_enable;	/* Async Event enables */
	void		(*callback)(void *arg, u_int32_t code,
				    struct cam_path *path, void *args);
	void		*callback_arg;
};
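
/*
 * Illustrative sketch (not part of this file): a consumer typically
 * causes one of these nodes to be linked onto a callback list by
 * sending an XPT_SASYNC_CB CCB.  Assuming a hypothetical handler
 * "mydriver_async" and its softc, that might look like:
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, path, 5);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = mydriver_async;
 *	csa.callback_arg = softc;
 *	xpt_action((union ccb *)&csa);
 */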

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif
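
/*
 * Because of the #ifndef guard above and the inclusion of "opt_cam.h",
 * this limit can presumably be overridden from the kernel config file,
 * e.g. (an assumption; check sys/conf/options for the exact knob):
 *
 *	options	CAM_MAX_HIGHPOWER=8
 */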

/* number of high powered commands that can go through right now */
static int num_highpower = CAM_MAX_HIGHPOWER;

/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
	cam_pinfo pinfo;
	struct	  cam_ed *device;
};

/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on each bus.
 */
struct cam_ed {
	TAILQ_ENTRY(cam_ed) links;
	struct	cam_ed_qinfo alloc_ccb_entry;
	struct	cam_ed_qinfo send_ccb_entry;
	struct	cam_et	 *target;
	lun_id_t	 lun_id;
	struct	camq drvq;		/*
					 * Queue of type drivers wanting to do
					 * work on this device.
					 */
	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
	struct	async_list asyncs;	/* Async callback info for this B/T/L */
	struct	periph_list periphs;	/* All attached devices */
	u_int	generation;		/* Generation number */
	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
	struct	xpt_quirk_entry *quirk;	/* Oddities about this device */
					/* Storage for the inquiry data */
	struct	scsi_inquiry_data inq_data;
	u_int8_t	 inq_flags;	/*
					 * Current settings for inquiry flags.
					 * This allows us to override settings
					 * like disconnection and tagged
					 * queuing for a device.
					 */
	u_int8_t	 queue_flags;	/* Queue flags from the control page */
	u_int8_t	 serial_num_len;
	u_int8_t	 *serial_num;
	u_int32_t	 qfrozen_cnt;
	u_int32_t	 flags;
#define CAM_DEV_UNCONFIGURED	 	0x01
#define CAM_DEV_REL_TIMEOUT_PENDING	0x02
#define CAM_DEV_REL_ON_COMPLETE		0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
#define CAM_DEV_TAG_AFTER_COUNT		0x20
#define CAM_DEV_INQUIRY_DATA_VALID	0x40
	u_int32_t	 tag_delay_count;
#define	CAM_TAG_DELAY_COUNT		5
	u_int32_t	 refcount;
	struct		 callout_handle c_handle;
};

/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
	TAILQ_HEAD(, cam_ed) ed_entries;
	TAILQ_ENTRY(cam_et) links;
	struct	cam_eb	*bus;
	target_id_t	target_id;
	u_int32_t	refcount;
	u_int		generation;
	struct		timeval last_reset;
};

/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
	TAILQ_HEAD(, cam_et) et_entries;
	TAILQ_ENTRY(cam_eb)  links;
	path_id_t	     path_id;
	struct cam_sim	     *sim;
	struct timeval	     last_reset;
	u_int32_t	     flags;
#define	CAM_EB_RUNQ_SCHEDULED	0x01
	u_int32_t	     refcount;
	u_int		     generation;
};

struct cam_path {
	struct cam_periph *periph;
	struct cam_eb	  *bus;
	struct cam_et	  *target;
	struct cam_ed	  *device;
};

struct xpt_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define	CAM_QUIRK_NOLUNS	0x01
#define	CAM_QUIRK_NOSERIAL	0x02
#define	CAM_QUIRK_HILUNS	0x04
	u_int mintags;
	u_int maxtags;
};
#define	CAM_SCSI2_MAXLUN	8

typedef enum {
	XPT_FLAG_OPEN		= 0x01
} xpt_flags;

struct xpt_softc {
	xpt_flags	flags;
	u_int32_t	generation;
};

static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";
static const char microp[] = "MICROP";

static struct xpt_quirk_entry xpt_quirk_table[] =
{
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Unfortunately, the Quantum Atlas III has the same
		 * problem as the Atlas II drives above.
		 * Reported by: "Johan Granlund" <johan@granlund.nu>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM39100TD-SW N1B0
		 *
		 * It's possible that Quantum will fix the problem in later
		 * firmware revisions.  If that happens, the quirk entry
		 * will need to be made specific to the firmware revisions
		 * with the problem.
		 *
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * 18 Gig Atlas III, same problem as the 9G version.
		 * Reported by: Andre Albsmeier
		 *		<andre.albsmeier@mchp.siemens.de>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM318000TD-S N491
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * Broken tagged queuing drive
		 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
		 *         and: Martin Renters <martin@tdc.on.ca>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
		/*
		 * The Seagate Medalist Pro drives have very poor write
		 * performance with anything more than 2 tags.
		 *
		 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
		 * Drive:  <SEAGATE ST36530N 1444>
		 *
		 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
		 * Drive:  <SEAGATE ST34520W 1281>
		 *
		 * No one has actually reported that the 9G version
		 * (ST39140*) of the Medalist Pro has the same problem, but
		 * we're assuming that it does because the 4G and 6.5G
		 * versions of the drive are broken.
		 */
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  Write performance
		 * steadily drops off with more and more concurrent
		 * transactions.  Best sequential write performance with
		 * tagged queueing turned off and write caching turned on.
		 *
		 * PR:  kern/10398
		 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
		 * Drive:  DCAS-34330 w/ "S65A" firmware.
		 *
		 * The drive with the problem had the "S65A" firmware
		 * revision, and has also been reported (by Stephen J.
		 * Roznowski <sjr@home.net>) for a drive with the "S61A"
		 * firmware revision.
		 *
		 * Although no one has reported problems with the 2 gig
		 * version of the DCAS drive, the assumption is that it
		 * has the same problems as the 4 gig version.  Therefore
		 * this quirk entry disables tagged queueing for all
		 * DCAS drives.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Broken tagged queuing drive.
		 * Submitted by:
		 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
		 * in PR kern/9535
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Doesn't handle queue full condition correctly,
		 * so we need to limit maxtags to what the device
		 * can handle instead of determining this automatically.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
		/*quirks*/0, /*mintags*/2, /*maxtags*/32
	},
	{
		/* Really only one LUN */
		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* I can't believe we need a quirk for DPT volumes. */
		{ T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
		/*mintags*/0, /*maxtags*/255
	},
	{
		/*
		 * Many Sony CDROM drives don't like multi-LUN probing.
		 */
		{ T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * This drive doesn't like multiple LUN probing.
		 * Submitted by:  Parag Patel <parag@cgt.com>
		 */
		{ T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R   CDU9*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		{ T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * The 8200 doesn't like multi-lun probing, and probably
		 * doesn't like serial number requests either.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"EXB-8200*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * These Hitachi drives don't like multi-lun probing.
		 * The PR submitter has a DK319H, but says that the Linux
		 * kernel has a similar work-around for the DK312 and DK314,
		 * so all DK31* drives are quirked here.
		 * PR:            misc/18793
		 * Submitted by:  Paul Haddad <paul@pth.com>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		/*
		 * This old revision of the TDC3600 is also SCSI-1, and
		 * hangs upon serial number probing.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
			" TDC 3600", "U07:"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
			"CP150", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
			"96X2*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* TeraSolutions special settings for TRC-22 RAID */
		{ T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
		  /*quirks*/0, /*mintags*/55, /*maxtags*/255
	},
	{
		/* Default tagged queuing parameters for all devices */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0, /*mintags*/2, /*maxtags*/255
	},
};

static const int xpt_quirk_table_size =
	sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
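
/*
 * For illustration only (a sketch, assuming the generic cam_quirkmatch()
 * and scsi_inquiry_match() helpers from cam.h and scsi_all.h): a device's
 * inquiry data is matched against this table, with the wildcard entry
 * above guaranteeing a hit, roughly like:
 *
 *	struct xpt_quirk_entry *quirk;
 *
 *	quirk = (struct xpt_quirk_entry *)
 *	    cam_quirkmatch((caddr_t)&device->inq_data,
 *			   (caddr_t)xpt_quirk_table, xpt_quirk_table_size,
 *			   sizeof(*xpt_quirk_table), scsi_inquiry_match);
 */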

typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;
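
/*
 * Note that a dev_match_ret value is a flag/action pair: the low nibble
 * carries flags (copy this node out) and the high nibble a single action
 * (none, stop, descend, or error).  So, for example,
 * (DM_RET_DESCEND | DM_RET_COPY) means "copy this node and keep walking
 * down the EDT", and the action is recovered with
 * (retval & DM_RET_ACTION_MASK), as the match functions below do.
 */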

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
static cam_isrq_t cam_bioq;
static cam_isrq_t cam_netq;

/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
static SLIST_HEAD(,ccb_hdr) ccb_freeq;
static u_int xpt_max_ccbs;	/*
				 * Maximum size of ccb pool.  Modified as
				 * devices are added/removed or have their
				 * opening counts changed.
				 */
static u_int xpt_ccb_count;	/* Current count of allocated ccbs */

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
	probe_periph_init, "probe",
	TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

DATA_SET(periphdriver_set, xpt_driver);
DATA_SET(periphdriver_set, probe_driver);

#define XPT_CDEV_MAJOR 104

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct cdevsw xpt_cdevsw = {
	/* open */	xptopen,
	/* close */	xptclose,
	/* read */	noread,
	/* write */	nowrite,
	/* ioctl */	xptioctl,
	/* poll */	nopoll,
	/* mmap */	nommap,
	/* strategy */	nostrategy,
	/* name */	"xpt",
	/* maj */	XPT_CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	0,
	/* bmaj */	-1
};

static struct intr_config_hook *xpt_config_hook;

/* Registered busses */
static TAILQ_HEAD(,cam_eb) xpt_busses;
static u_int bus_generation;

/* Storage for debugging datastructures */
#ifdef	CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif

#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
	"cam",
	cam_module_event_handler,
	NULL
};

static void	xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);


static cam_status	xpt_compile_path(struct cam_path *new_path,
					 struct cam_periph *perph,
					 path_id_t path_id,
					 target_id_t target_id,
					 lun_id_t lun_id);

static void		xpt_release_path(struct cam_path *path);

static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				  u_int32_t new_priority);
static void	 xpt_run_dev_allocq(struct cam_eb *bus);
static void	 xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static timeout_t xpt_release_simq_timeout;
static void	 xpt_release_bus(struct cam_eb *bus);
static void	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
					 int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
		 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
				  lun_id_t lun_id);
static void	 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
				    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void	 xpt_scan_lun(struct cam_periph *periph,
			      struct cam_path *path, cam_flags flags,
			      union ccb *ccb);
static void	 xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t	xptconfigbuscountfunc;
static xpt_busfunc_t	xptconfigfunc;
static void	 xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static swihand_t swi_camnet;
static swihand_t swi_cambio;
static void	 camisr(cam_isrq_t *queue);
#if 0
static void	 xptstart(struct cam_periph *periph, union ccb *work_ccb);
static void	 xptasync(struct cam_periph *periph,
			  u_int32_t code, cam_path *path);
#endif
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       int num_patterns, struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static int		xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
#ifdef notusedyet
static int		xpt_for_all_targets(xpt_targetfunc_t *tr_func,
					    void *arg);
#endif
static int		xpt_for_all_devices(xpt_devicefunc_t *tr_func,
					    void *arg);
#ifdef notusedyet
static int		xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
					    void *arg);
#endif
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static cam_status	proberegister(struct cam_periph *periph,
				      void *arg);
static void	 probeschedule(struct cam_periph *probe_periph);
static void	 probestart(struct cam_periph *periph, union ccb *start_ccb);
static void	 proberequestdefaultnegotiation(struct cam_periph *periph);
static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
static void	 probecleanup(struct cam_periph *periph);
static void	 xpt_find_quirk(struct cam_ed *device);
static void	 xpt_set_transfer_settings(struct ccb_trans_settings *cts,
					   struct cam_ed *device,
					   int async_update);
static void	 xpt_toggle_tags(struct cam_path *path);
static void	 xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
					    struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
					   struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);

static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	if (dev->ccbq.devq_openings > 0) {
		if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
			cam_ccbq_resize(&dev->ccbq,
					dev->ccbq.dev_openings
					+ dev->ccbq.dev_active);
			dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
		}
		/*
		 * The priority of a device waiting for CCB resources
		 * is that of the highest priority peripheral driver
		 * enqueued.
		 */
		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
					  &dev->alloc_ccb_entry.pinfo,
					  CAMQ_GET_HEAD(&dev->drvq)->priority);
	} else {
		retval = 0;
	}

	return (retval);
}

static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
	int	retval;

	if (dev->ccbq.dev_openings > 0) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&bus->sim->devq->send_queue,
				     &dev->send_ccb_entry.pinfo,
				     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
	} else {
		retval = 0;
	}
	return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
	/*
	 * Have work to do.
	 * Have space to do more work.
	 * Allowed to do work.
	 */
	return ((devq->alloc_queue.qfrozen_cnt == 0)
	     && (devq->alloc_queue.entries > 0)
	     && (devq->alloc_openings > 0));
}

static void
xpt_periph_init()
{
	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
probe_periph_init()
{
}


static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* Caller will release the CCB */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xptopen(dev_t dev, int flags, int fmt, struct proc *p)
{
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * Only allow read-write access.
	 */
	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((flags & O_NONBLOCK) != 0) {
		printf("xpt%d: can't do nonblocking access\n", unit);
		return(ENODEV);
	}

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptopen: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves open */
	xsoftc.flags |= XPT_FLAG_OPEN;

	return(0);
}

static int
xptclose(dev_t dev, int flag, int fmt, struct proc *p)
{
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptclose: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves closed */
	xsoftc.flags &= ~XPT_FLAG_OPEN;

	return(0);
}

static int
xptioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
	int unit, error;

	error = 0;
	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptioctl: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	switch(cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;

		inccb = (union ccb *)addr;

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
			 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
				error = EINVAL;
				break;
			}
			/* FALLTHROUGH */
		case XPT_SCAN_LUN:

			ccb = xpt_alloc_ccb();

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			ccb->ccb_h.cbfcnp = xptdone;
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			ccb.ccb_h.cbfcnp = xptdone;
			xpt_action(&ccb);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;

		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = EINVAL;
			break;
		}
		break;
	}
	/*
	 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input,
	 * with the peripheral driver name and unit name filled in.  The other
	 * fields don't really matter as input.  The passthrough driver name
	 * ("pass"), and unit number are passed back in the ccb.  The current
	 * device generation number, and the index into the device peripheral
	 * driver list, and the status are also passed back.  Note that
	 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
	 * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
	 * (or rather should be) impossible for the device peripheral driver
	 * list to change since we look at the whole thing in one pass, and
	 * we do it with splcam protection.
	 *
	 */
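	/*
	 * Illustrative userland usage (a sketch only, modeled on what a
	 * tool like camcontrol might do; error handling omitted):
	 *
	 *	union ccb ccb;
	 *	int fd = open("/dev/xpt0", O_RDWR);
	 *
	 *	bzero(&ccb, sizeof(ccb));
	 *	strncpy(ccb.cgdl.periph_name, "da", DEV_IDLEN);
	 *	ccb.cgdl.unit_number = 0;
	 *	if (ioctl(fd, CAMGETPASSTHRU, &ccb) == 0)
	 *		printf("%s%d\n", ccb.cgdl.periph_name,
	 *		       ccb.cgdl.unit_number);
	 */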
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char   *name;
		int unit;
		int cur_generation;
		int base_periph_found;
		int splbreaknum;
		int s;

		ccb = (union ccb *)addr;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		/*
		 * Every 100 devices, we want to drop our spl protection to
		 * give the software interrupt handler a chance to run.
		 * Most systems won't run into this check, but this should
		 * avoid starvation in the software interrupt handler in
		 * large systems.
		 */
		splbreaknum = 100;
		base_periph_found = 0;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		s = splcam();
ptstartover:
		cur_generation = xsoftc.generation;

		/* first find our driver in the list of drivers */
		for (p_drv = (struct periph_driver **)periphdriver_set.ls_items;
		     *p_drv != NULL; p_drv++)
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;

		if (*p_drv == NULL) {
			splx(s);
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
		     periph = TAILQ_NEXT(periph, unit_links)) {

			if (periph->unit_number == unit) {
				break;
			} else if (--splbreaknum == 0) {
				splx(s);
				s = splcam();
				splbreaknum = 100;
				if (cur_generation != xsoftc.generation)
				       goto ptstartover;
			}
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = 1;
			device = periph->path->device;
			for (i = 0, periph = device->periphs.slh_first;
			     periph != NULL;
			     periph = periph->periph_links.sle_next, i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (periph->periph_links.sle_next)
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found == 1) {
				printf("xptioctl: pass driver is not in the "
				       "kernel\n");
				printf("xptioctl: put \"device pass0\" in "
				       "your kernel config file\n");
			}
		}
		splx(s);
		break;
		}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}

static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
	if (what == MOD_LOAD) {
		xpt_init(NULL);
	} else if (what == MOD_UNLOAD) {
		return EBUSY;
	}

	return 0;
}

/* Functions accessed by the peripheral drivers */
static void
xpt_init(dummy)
	void *dummy;
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq *devq;
	cam_status status;

	TAILQ_INIT(&xpt_busses);
	TAILQ_INIT(&cam_bioq);
	TAILQ_INIT(&cam_netq);
	SLIST_INIT(&ccb_freeq);
	STAILQ_INIT(&highpowerq);

	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe busses and
	 * perform other XPT functions.
	 */
	devq = cam_simq_alloc(16);
	xpt_sim = cam_sim_alloc(xptaction,
				xptpoll,
				"xpt",
				/*softc*/NULL,
				/*unit*/0,
				/*max_dev_transactions*/0,
				/*max_tagged_dev_transactions*/0,
				devq);
	xpt_max_ccbs = 16;

	xpt_bus_register(xpt_sim, /*bus #*/0);

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		printf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return;
	}

	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, NULL);
	xpt_free_path(path);

	xpt_sim->softc = xpt_periph;

	/*
	 * Register a callback for when interrupts are enabled.
	 */
	xpt_config_hook =
	    (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
					      M_TEMP, M_NOWAIT);
	if (xpt_config_hook == NULL) {
		printf("xpt_init: Cannot malloc config hook "
		       "- failing attach\n");
		return;
	}
	bzero(xpt_config_hook, sizeof(*xpt_config_hook));

	xpt_config_hook->ich_func = xpt_config;
	if (config_intrhook_establish(xpt_config_hook) != 0) {
		free (xpt_config_hook, M_TEMP);
		printf("xpt_init: config_intrhook_establish failed "
		       "- failing attach\n");
	}

	/* Install our software interrupt handlers */
	register_swi(SWI_CAMNET, swi_camnet);
	register_swi(SWI_CAMBIO, swi_cambio);
}

static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	if (periph == NULL) {
		printf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	periph->softc = NULL;

	xpt_periph = periph;

	return(CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t	 status;

	device = periph->path->device;

	status = CAM_REQ_CMP;

	if (device != NULL) {
		struct periph_list *periph_head;
		int s;

		/*
		 * Only take the device's periph list once we know the
		 * device pointer is valid.
		 */
		periph_head = &device->periphs;

		/*
		 * Make room for this peripheral
		 * so it will fit in the queue
		 * when it's scheduled to run
		 */
		s = splsoftcam();
		status = camq_resize(&device->drvq,
				     device->drvq.array_size + 1);

		device->generation++;

		SLIST_INSERT_HEAD(periph_head, periph, periph_links);

		splx(s);
	}

	xsoftc.generation++;

	return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
	struct cam_ed *device;

	device = periph->path->device;

	if (device != NULL) {
		int s;
		struct periph_list *periph_head;

		periph_head = &device->periphs;

		/* Release the slot for this peripheral */
		s = splsoftcam();
		camq_resize(&device->drvq, device->drvq.array_size - 1);

		device->generation++;

		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);

		splx(s);
	}

	xsoftc.generation++;

}

void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	int s;
	u_int mb;
	struct cam_path *path;
	struct ccb_trans_settings cts;

	path = periph->path;
	/*
	 * To ensure that this is printed in one piece,
	 * mask out CAM interrupts.
	 */
	s = splsoftcam();
	printf("%s%d at %s%d bus %d target %d lun %d\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->target->target_id,
	       path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	scsi_print_inquiry(&path->device->inq_data);
	if ((bootverbose)
	 && (path->device->serial_num_len > 0)) {
		/* Don't wrap the screen  - print only the first 60 chars */
		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.flags = CCB_TRANS_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);
	if (cts.ccb_h.status == CAM_REQ_CMP) {
		u_int speed;
		u_int freq;

		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		  && cts.sync_offset != 0) {
			freq = scsi_calc_syncsrate(cts.sync_period);
			speed = freq;
		} else {
			struct ccb_pathinq cpi;

			/* Ask the SIM for its base transfer speed */
			xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
			cpi.ccb_h.func_code = XPT_PATH_INQ;
			xpt_action((union ccb *)&cpi);

			speed = cpi.base_transfer_speed;
			freq = 0;
		}
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
			speed *= (0x01 << cts.bus_width);
		mb = speed / 1000;
		if (mb > 0)
			printf("%s%d: %d.%03dMB/s transfers",
			       periph->periph_name, periph->unit_number,
			       mb, speed % 1000);
		else
			printf("%s%d: %dKB/s transfers", periph->periph_name,
			       periph->unit_number, speed);
		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		 && cts.sync_offset != 0) {
			printf(" (%d.%03dMHz, offset %d", freq / 1000,
			       freq % 1000, cts.sync_offset);
		}
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
		 && cts.bus_width > 0) {
			if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			 && cts.sync_offset != 0) {
				printf(", ");
			} else {
				printf(" (");
			}
			printf("%dbit)", 8 * (0x01 << cts.bus_width));
		} else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			&& cts.sync_offset != 0) {
			printf(")");
		}

		if (path->device->inq_flags & SID_CmdQue
		 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
			printf(", Tagged Queueing Enabled");
		}

		printf("\n");
	} else if (path->device->inq_flags & SID_CmdQue
   		|| path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		printf("%s%d: Tagged Queueing Enabled\n",
		       periph->periph_name, periph->unit_number);
	}

	/*
	 * We only want to print the caller's announce string if they've
	 * passed one in..
	 */
	if (announce_string != NULL)
		printf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
	splx(s);
}
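
/*
 * For reference (illustrative only; the exact text depends on the device
 * and SIM), the announcement above produces boot-time output roughly like:
 *
 *	da0 at ahc0 bus 0 target 0 lun 0
 *	da0: <QUANTUM XP39100 LXY4> Fixed Direct Access SCSI-2 device
 *	da0: 20.000MB/s transfers (10.000MHz, offset 8, 16bit),
 *	    Tagged Queueing Enabled
 */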


static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		/*
		 * If they want to match any bus node, we give them this
		 * bus node.
		 */
		if (cur_pattern->flags == BUS_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == BUS_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != bus->path_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
		 && (cur_pattern->bus_id != bus->sim->bus_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != bus->sim->unit_number))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this bus.  So tell the caller to copy the
		 * data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a non-bus matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a non-bus
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen anything other than bus matching patterns.  So
	 * tell the caller to stop descending the tree -- the user doesn't
	 * want to match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}

static dev_match_ret
xptdevicematch(struct dev_match_pattern *patterns, int num_patterns,
	       struct cam_ed *device)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (device == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this device matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
1687		return(DM_RET_DESCEND | DM_RET_COPY);
1688
1689	for (i = 0; i < num_patterns; i++) {
1690		struct device_match_pattern *cur_pattern;
1691
1692		/*
1693		 * If the pattern in question isn't for a device node, we
1694		 * aren't interested.
1695		 */
1696		if (patterns[i].type != DEV_MATCH_DEVICE) {
1697			if ((patterns[i].type == DEV_MATCH_PERIPH)
1698			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1699				retval |= DM_RET_DESCEND;
1700			continue;
1701		}
1702
1703		cur_pattern = &patterns[i].pattern.device_pattern;
1704
1705		/*
1706		 * If they want to match any device node, we give them any
1707		 * device node.
1708		 */
1709		if (cur_pattern->flags == DEV_MATCH_ANY) {
1710			/* set the copy flag */
1711			retval |= DM_RET_COPY;
1712
1713
1714			/*
1715			 * If we've already decided on an action, go ahead
1716			 * and return.
1717			 */
1718			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1719				return(retval);
1720		}
1721
1722		/*
1723		 * Not sure why someone would do this...
1724		 */
1725		if (cur_pattern->flags == DEV_MATCH_NONE)
1726			continue;
1727
1728		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1729		 && (cur_pattern->path_id != device->target->bus->path_id))
1730			continue;
1731
1732		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1733		 && (cur_pattern->target_id != device->target->target_id))
1734			continue;
1735
1736		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1737		 && (cur_pattern->target_lun != device->lun_id))
1738			continue;
1739
1740		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1741		 && (cam_quirkmatch((caddr_t)&device->inq_data,
1742				    (caddr_t)&cur_pattern->inq_pat,
1743				    1, sizeof(cur_pattern->inq_pat),
1744				    scsi_static_inquiry_match) == NULL))
1745			continue;
1746
1747		/*
1748		 * If we get to this point, the user definitely wants
1749		 * information on this device.  So tell the caller to copy
1750		 * the data out.
1751		 */
1752		retval |= DM_RET_COPY;
1753
1754		/*
1755		 * If the return action has been set to descend, then we
1756		 * know that we've already seen a peripheral matching
1757		 * expression, therefore we need to further descend the tree.
1758		 * This won't change by continuing around the loop, so we
1759		 * go ahead and return.  If we haven't seen a peripheral
1760		 * matching expression, we keep going around the loop until
1761		 * we exhaust the matching expressions.  We'll set the stop
1762		 * flag once we fall out of the loop.
1763		 */
1764		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1765			return(retval);
1766	}
1767
1768	/*
1769	 * If the return action hasn't been set to descend yet, that means
1770	 * we haven't seen any peripheral matching patterns.  So tell the
1771	 * caller to stop descending the tree -- the user doesn't want to
1772	 * match against lower level tree elements.
1773	 */
1774	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1775		retval |= DM_RET_STOP;
1776
1777	return(retval);
1778}
1779
1780/*
1781 * Match a single peripheral against any number of match patterns.
1782 */
1783static dev_match_ret
1784xptperiphmatch(struct dev_match_pattern *patterns, int num_patterns,
1785	       struct cam_periph *periph)
1786{
1787	dev_match_ret retval;
1788	int i;
1789
1790	/*
1791	 * If we aren't given something to match against, that's an error.
1792	 */
1793	if (periph == NULL)
1794		return(DM_RET_ERROR);
1795
1796	/*
1797	 * If there are no match entries, then this peripheral matches no
1798	 * matter what.
1799	 */
1800	if ((patterns == NULL) || (num_patterns == 0))
1801		return(DM_RET_STOP | DM_RET_COPY);
1802
1803	/*
1804	 * There aren't any nodes below a peripheral node, so there's no
1805	 * reason to descend the tree any further.
1806	 */
1807	retval = DM_RET_STOP;
1808
1809	for (i = 0; i < num_patterns; i++) {
1810		struct periph_match_pattern *cur_pattern;
1811
1812		/*
1813		 * If the pattern in question isn't for a peripheral, we
1814		 * aren't interested.
1815		 */
1816		if (patterns[i].type != DEV_MATCH_PERIPH)
1817			continue;
1818
1819		cur_pattern = &patterns[i].pattern.periph_pattern;
1820
1821		/*
1822		 * If they want to match on anything, then we will do so.
1823		 */
1824		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
1825			/* set the copy flag */
1826			retval |= DM_RET_COPY;
1827
1828			/*
1829			 * We've already set the return action to stop,
1830			 * since there are no nodes below peripherals in
1831			 * the tree.
1832			 */
1833			return(retval);
1834		}
1835
1836		/*
1837		 * A pattern with PERIPH_MATCH_NONE set matches nothing; skip it.
1838		 */
1839		if (cur_pattern->flags == PERIPH_MATCH_NONE)
1840			continue;
1841
1842		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1843		 && (cur_pattern->path_id != periph->path->bus->path_id))
1844			continue;
1845
1846		/*
1847		 * For the target and lun id's, we have to make sure the
1848		 * target and lun pointers aren't NULL.  The xpt peripheral
1849		 * has a wildcard target and device.
1850		 */
1851		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
1852		 && ((periph->path->target == NULL)
1853		 ||(cur_pattern->target_id != periph->path->target->target_id)))
1854			continue;
1855
1856		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
1857		 && ((periph->path->device == NULL)
1858		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
1859			continue;
1860
1861		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
1862		 && (cur_pattern->unit_number != periph->unit_number))
1863			continue;
1864
1865		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
1866		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
1867			     DEV_IDLEN) != 0))
1868			continue;
1869
1870		/*
1871		 * If we get to this point, the user definitely wants
1872		 * information on this peripheral.  So tell the caller to
1873		 * copy the data out.
1874		 */
1875		retval |= DM_RET_COPY;
1876
1877		/*
1878		 * The return action has already been set to stop, since
1879		 * peripherals don't have any nodes below them in the EDT.
1880		 */
1881		return(retval);
1882	}
1883
1884	/*
1885	 * If we get to this point, the peripheral that was passed in
1886	 * doesn't match any of the patterns.
1887	 */
1888	return(retval);
1889}
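
/*
 * As a hedged usage sketch, a pattern that would make xptperiphmatch()
 * set DM_RET_COPY only for the peripheral "da1" (cf. the "da1" example
 * in the XPT_DEV_MATCH comment below) could be filled in like this:
 *
 *	struct dev_match_pattern pattern;
 *
 *	pattern.type = DEV_MATCH_PERIPH;
 *	pattern.pattern.periph_pattern.flags =
 *	    PERIPH_MATCH_NAME | PERIPH_MATCH_UNIT;
 *	strncpy(pattern.pattern.periph_pattern.periph_name, "da",
 *		DEV_IDLEN);
 *	pattern.pattern.periph_pattern.unit_number = 1;
 */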
1890
1891static int
1892xptedtbusfunc(struct cam_eb *bus, void *arg)
1893{
1894	struct ccb_dev_match *cdm;
1895	dev_match_ret retval;
1896
1897	cdm = (struct ccb_dev_match *)arg;
1898
1899	/*
1900	 * If our position is for something deeper in the tree, that means
1901	 * that we've already seen this node.  So, we keep going down.
1902	 */
1903	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1904	 && (cdm->pos.cookie.bus == bus)
1905	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1906	 && (cdm->pos.cookie.target != NULL))
1907		retval = DM_RET_DESCEND;
1908	else
1909		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
1910
1911	/*
1912	 * If we got an error, bail out of the search.
1913	 */
1914	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1915		cdm->status = CAM_DEV_MATCH_ERROR;
1916		return(0);
1917	}
1918
1919	/*
1920	 * If the copy flag is set, copy this bus out.
1921	 */
1922	if (retval & DM_RET_COPY) {
1923		int spaceleft, j;
1924
1925		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1926			sizeof(struct dev_match_result));
1927
1928		/*
1929		 * If we don't have enough space to put in another
1930		 * match result, save our position and tell the
1931		 * user there are more devices to check.
1932		 */
1933		if (spaceleft < sizeof(struct dev_match_result)) {
1934			bzero(&cdm->pos, sizeof(cdm->pos));
1935			cdm->pos.position_type =
1936				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
1937
1938			cdm->pos.cookie.bus = bus;
1939			cdm->pos.generations[CAM_BUS_GENERATION]=
1940				bus_generation;
1941			cdm->status = CAM_DEV_MATCH_MORE;
1942			return(0);
1943		}
1944		j = cdm->num_matches;
1945		cdm->num_matches++;
1946		cdm->matches[j].type = DEV_MATCH_BUS;
1947		cdm->matches[j].result.bus_result.path_id = bus->path_id;
1948		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
1949		cdm->matches[j].result.bus_result.unit_number =
1950			bus->sim->unit_number;
1951		strncpy(cdm->matches[j].result.bus_result.dev_name,
1952			bus->sim->sim_name, DEV_IDLEN);
1953	}
1954
1955	/*
1956	 * If the user is only interested in busses, there's no
1957	 * reason to descend to the next level in the tree.
1958	 */
1959	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1960		return(1);
1961
1962	/*
1963	 * If there is a target generation recorded, check it to
1964	 * make sure the target list hasn't changed.
1965	 */
1966	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1967	 && (bus == cdm->pos.cookie.bus)
1968	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1969	 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
1970	 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
1971	     bus->generation)) {
1972		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1973		return(0);
1974	}
1975
1976	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1977	 && (cdm->pos.cookie.bus == bus)
1978	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1979	 && (cdm->pos.cookie.target != NULL))
1980		return(xpttargettraverse(bus,
1981					(struct cam_et *)cdm->pos.cookie.target,
1982					 xptedttargetfunc, arg));
1983	else
1984		return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
1985}
1986
1987static int
1988xptedttargetfunc(struct cam_et *target, void *arg)
1989{
1990	struct ccb_dev_match *cdm;
1991
1992	cdm = (struct ccb_dev_match *)arg;
1993
1994	/*
1995	 * If there is a device list generation recorded, check it to
1996	 * make sure the device list hasn't changed.
1997	 */
1998	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1999	 && (cdm->pos.cookie.bus == target->bus)
2000	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2001	 && (cdm->pos.cookie.target == target)
2002	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2003	 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
2004	 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
2005	     target->generation)) {
2006		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2007		return(0);
2008	}
2009
2010	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2011	 && (cdm->pos.cookie.bus == target->bus)
2012	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2013	 && (cdm->pos.cookie.target == target)
2014	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2015	 && (cdm->pos.cookie.device != NULL))
2016		return(xptdevicetraverse(target,
2017					(struct cam_ed *)cdm->pos.cookie.device,
2018					 xptedtdevicefunc, arg));
2019	else
2020		return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
2021}
2022
2023static int
2024xptedtdevicefunc(struct cam_ed *device, void *arg)
2025{
2027	struct ccb_dev_match *cdm;
2028	dev_match_ret retval;
2029
2030	cdm = (struct ccb_dev_match *)arg;
2031
2032	/*
2033	 * If our position is for something deeper in the tree, that means
2034	 * that we've already seen this node.  So, we keep going down.
2035	 */
2036	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2037	 && (cdm->pos.cookie.device == device)
2038	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2039	 && (cdm->pos.cookie.periph != NULL))
2040		retval = DM_RET_DESCEND;
2041	else
2042		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2043					device);
2044
2045	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2046		cdm->status = CAM_DEV_MATCH_ERROR;
2047		return(0);
2048	}
2049
2050	/*
2051	 * If the copy flag is set, copy this device out.
2052	 */
2053	if (retval & DM_RET_COPY) {
2054		int spaceleft, j;
2055
2056		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2057			sizeof(struct dev_match_result));
2058
2059		/*
2060		 * If we don't have enough space to put in another
2061		 * match result, save our position and tell the
2062		 * user there are more devices to check.
2063		 */
2064		if (spaceleft < sizeof(struct dev_match_result)) {
2065			bzero(&cdm->pos, sizeof(cdm->pos));
2066			cdm->pos.position_type =
2067				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2068				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2069
2070			cdm->pos.cookie.bus = device->target->bus;
2071			cdm->pos.generations[CAM_BUS_GENERATION]=
2072				bus_generation;
2073			cdm->pos.cookie.target = device->target;
2074			cdm->pos.generations[CAM_TARGET_GENERATION] =
2075				device->target->bus->generation;
2076			cdm->pos.cookie.device = device;
2077			cdm->pos.generations[CAM_DEV_GENERATION] =
2078				device->target->generation;
2079			cdm->status = CAM_DEV_MATCH_MORE;
2080			return(0);
2081		}
2082		j = cdm->num_matches;
2083		cdm->num_matches++;
2084		cdm->matches[j].type = DEV_MATCH_DEVICE;
2085		cdm->matches[j].result.device_result.path_id =
2086			device->target->bus->path_id;
2087		cdm->matches[j].result.device_result.target_id =
2088			device->target->target_id;
2089		cdm->matches[j].result.device_result.target_lun =
2090			device->lun_id;
2091		bcopy(&device->inq_data,
2092		      &cdm->matches[j].result.device_result.inq_data,
2093		      sizeof(struct scsi_inquiry_data));
2094
2095		/* Let the user know whether this device is unconfigured */
2096		if (device->flags & CAM_DEV_UNCONFIGURED)
2097			cdm->matches[j].result.device_result.flags =
2098				DEV_RESULT_UNCONFIGURED;
2099		else
2100			cdm->matches[j].result.device_result.flags =
2101				DEV_RESULT_NOFLAG;
2102	}
2103
2104	/*
2105	 * If the user isn't interested in peripherals, don't descend
2106	 * the tree any further.
2107	 */
2108	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2109		return(1);
2110
2111	/*
2112	 * If there is a peripheral list generation recorded, make sure
2113	 * it hasn't changed.
2114	 */
2115	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2116	 && (device->target->bus == cdm->pos.cookie.bus)
2117	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2118	 && (device->target == cdm->pos.cookie.target)
2119	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2120	 && (device == cdm->pos.cookie.device)
2121	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2122	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2123	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2124	     device->generation)){
2125		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2126		return(0);
2127	}
2128
2129	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2130	 && (cdm->pos.cookie.bus == device->target->bus)
2131	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2132	 && (cdm->pos.cookie.target == device->target)
2133	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2134	 && (cdm->pos.cookie.device == device)
2135	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2136	 && (cdm->pos.cookie.periph != NULL))
2137		return(xptperiphtraverse(device,
2138				(struct cam_periph *)cdm->pos.cookie.periph,
2139				xptedtperiphfunc, arg));
2140	else
2141		return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2142}
2143
2144static int
2145xptedtperiphfunc(struct cam_periph *periph, void *arg)
2146{
2147	struct ccb_dev_match *cdm;
2148	dev_match_ret retval;
2149
2150	cdm = (struct ccb_dev_match *)arg;
2151
2152	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2153
2154	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2155		cdm->status = CAM_DEV_MATCH_ERROR;
2156		return(0);
2157	}
2158
2159	/*
2160	 * If the copy flag is set, copy this peripheral out.
2161	 */
2162	if (retval & DM_RET_COPY) {
2163		int spaceleft, j;
2164
2165		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2166			sizeof(struct dev_match_result));
2167
2168		/*
2169		 * If we don't have enough space to put in another
2170		 * match result, save our position and tell the
2171		 * user there are more devices to check.
2172		 */
2173		if (spaceleft < sizeof(struct dev_match_result)) {
2174			bzero(&cdm->pos, sizeof(cdm->pos));
2175			cdm->pos.position_type =
2176				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2177				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2178				CAM_DEV_POS_PERIPH;
2179
2180			cdm->pos.cookie.bus = periph->path->bus;
2181			cdm->pos.generations[CAM_BUS_GENERATION]=
2182				bus_generation;
2183			cdm->pos.cookie.target = periph->path->target;
2184			cdm->pos.generations[CAM_TARGET_GENERATION] =
2185				periph->path->bus->generation;
2186			cdm->pos.cookie.device = periph->path->device;
2187			cdm->pos.generations[CAM_DEV_GENERATION] =
2188				periph->path->target->generation;
2189			cdm->pos.cookie.periph = periph;
2190			cdm->pos.generations[CAM_PERIPH_GENERATION] =
2191				periph->path->device->generation;
2192			cdm->status = CAM_DEV_MATCH_MORE;
2193			return(0);
2194		}
2195
2196		j = cdm->num_matches;
2197		cdm->num_matches++;
2198		cdm->matches[j].type = DEV_MATCH_PERIPH;
2199		cdm->matches[j].result.periph_result.path_id =
2200			periph->path->bus->path_id;
2201		cdm->matches[j].result.periph_result.target_id =
2202			periph->path->target->target_id;
2203		cdm->matches[j].result.periph_result.target_lun =
2204			periph->path->device->lun_id;
2205		cdm->matches[j].result.periph_result.unit_number =
2206			periph->unit_number;
2207		strncpy(cdm->matches[j].result.periph_result.periph_name,
2208			periph->periph_name, DEV_IDLEN);
2209	}
2210
2211	return(1);
2212}
2213
2214static int
2215xptedtmatch(struct ccb_dev_match *cdm)
2216{
2217	int ret;
2218
2219	cdm->num_matches = 0;
2220
2221	/*
2222	 * Check the bus list generation.  If it has changed, the user
2223	 * needs to reset everything and start over.
2224	 */
2225	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2226	 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2227	 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
2228		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2229		return(0);
2230	}
2231
2232	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2233	 && (cdm->pos.cookie.bus != NULL))
2234		ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2235				     xptedtbusfunc, cdm);
2236	else
2237		ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2238
2239	/*
2240	 * If we get back 0, that means that we had to stop before fully
2241	 * traversing the EDT.  It also means that one of the subroutines
2242	 * has set the status field to the proper value.  If we get back 1,
2243	 * we've fully traversed the EDT and copied out any matching entries.
2244	 */
2245	if (ret == 1)
2246		cdm->status = CAM_DEV_MATCH_LAST;
2247
2248	return(ret);
2249}
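
/*
 * The CAM_DEV_MATCH_MORE machinery above exists so that a caller with a
 * fixed-size result buffer can resume the search where it left off; the
 * saved cdm->pos and generation counts are what make the resume safe.  A
 * minimal userland sketch, assuming the /dev/xpt0 node and the
 * CAMIOCOMMAND ioctl from scsi_pass.h (error handling elided):
 *
 *	union ccb ccb;
 *	struct dev_match_result matches[64];
 *	int fd;
 *
 *	fd = open("/dev/xpt0", O_RDWR);
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.func_code = XPT_DEV_MATCH;
 *	ccb.ccb_h.path_id = CAM_XPT_PATH_ID;
 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
 *	ccb.cdm.match_buf_len = sizeof(matches);
 *	ccb.cdm.matches = matches;
 *	ccb.cdm.num_patterns = 0;	(empty pattern list matches all)
 *	do {
 *		ioctl(fd, CAMIOCOMMAND, &ccb);
 *		(consume ccb.cdm.num_matches entries of matches[])
 *	} while (ccb.ccb_h.status == CAM_REQ_CMP
 *	      && ccb.cdm.status == CAM_DEV_MATCH_MORE);
 *
 * A status of CAM_DEV_MATCH_LIST_CHANGED means the EDT changed under the
 * caller, who must then clear ccb.cdm.pos and start over.
 */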
2250
2251static int
2252xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2253{
2254	struct ccb_dev_match *cdm;
2255
2256	cdm = (struct ccb_dev_match *)arg;
2257
2258	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2259	 && (cdm->pos.cookie.pdrv == pdrv)
2260	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2261	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2262	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2263	     (*pdrv)->generation)) {
2264		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2265		return(0);
2266	}
2267
2268	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2269	 && (cdm->pos.cookie.pdrv == pdrv)
2270	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2271	 && (cdm->pos.cookie.periph != NULL))
2272		return(xptpdperiphtraverse(pdrv,
2273				(struct cam_periph *)cdm->pos.cookie.periph,
2274				xptplistperiphfunc, arg));
2275	else
2276		return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg));
2277}
2278
2279static int
2280xptplistperiphfunc(struct cam_periph *periph, void *arg)
2281{
2282	struct ccb_dev_match *cdm;
2283	dev_match_ret retval;
2284
2285	cdm = (struct ccb_dev_match *)arg;
2286
2287	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2288
2289	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2290		cdm->status = CAM_DEV_MATCH_ERROR;
2291		return(0);
2292	}
2293
2294	/*
2295	 * If the copy flag is set, copy this peripheral out.
2296	 */
2297	if (retval & DM_RET_COPY) {
2298		int spaceleft, j;
2299
2300		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2301			sizeof(struct dev_match_result));
2302
2303		/*
2304		 * If we don't have enough space to put in another
2305		 * match result, save our position and tell the
2306		 * user there are more devices to check.
2307		 */
2308		if (spaceleft < sizeof(struct dev_match_result)) {
2309			struct periph_driver **pdrv;
2310
2311			pdrv = NULL;
2312			bzero(&cdm->pos, sizeof(cdm->pos));
2313			cdm->pos.position_type =
2314				CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2315				CAM_DEV_POS_PERIPH;
2316
2317			/*
2318			 * This may look a bit nonsensical, but it is
2319			 * actually quite logical.  There are very few
2320			 * peripheral drivers, and bloating every peripheral
2321			 * structure with a pointer back to its parent
2322			 * peripheral driver linker set entry would cost
2323			 * more in the long run than doing this quick lookup.
2324			 */
2325			for (pdrv =
2326			     (struct periph_driver **)periphdriver_set.ls_items;
2327			     *pdrv != NULL; pdrv++) {
2328				if (strcmp((*pdrv)->driver_name,
2329				    periph->periph_name) == 0)
2330					break;
2331			}
2332
2333			if (*pdrv == NULL) {
2334				cdm->status = CAM_DEV_MATCH_ERROR;
2335				return(0);
2336			}
2337
2338			cdm->pos.cookie.pdrv = pdrv;
2339			/*
2340			 * The periph generation slot does double duty, as
2341			 * does the periph pointer slot.  They are used for
2342			 * both edt and pdrv lookups and positioning.
2343			 */
2344			cdm->pos.cookie.periph = periph;
2345			cdm->pos.generations[CAM_PERIPH_GENERATION] =
2346				(*pdrv)->generation;
2347			cdm->status = CAM_DEV_MATCH_MORE;
2348			return(0);
2349		}
2350
2351		j = cdm->num_matches;
2352		cdm->num_matches++;
2353		cdm->matches[j].type = DEV_MATCH_PERIPH;
2354		cdm->matches[j].result.periph_result.path_id =
2355			periph->path->bus->path_id;
2356
2357		/*
2358		 * The transport layer peripheral doesn't have a target or
2359		 * lun.
2360		 */
2361		if (periph->path->target)
2362			cdm->matches[j].result.periph_result.target_id =
2363				periph->path->target->target_id;
2364		else
2365			cdm->matches[j].result.periph_result.target_id = -1;
2366
2367		if (periph->path->device)
2368			cdm->matches[j].result.periph_result.target_lun =
2369				periph->path->device->lun_id;
2370		else
2371			cdm->matches[j].result.periph_result.target_lun = -1;
2372
2373		cdm->matches[j].result.periph_result.unit_number =
2374			periph->unit_number;
2375		strncpy(cdm->matches[j].result.periph_result.periph_name,
2376			periph->periph_name, DEV_IDLEN);
2377	}
2378
2379	return(1);
2380}
2381
2382static int
2383xptperiphlistmatch(struct ccb_dev_match *cdm)
2384{
2385	int ret;
2386
2387	cdm->num_matches = 0;
2388
2389	/*
2390	 * At this point in the edt traversal function, we check the bus
2391	 * list generation to make sure that no busses have been added or
2392	 * removed since the user last sent an XPT_DEV_MATCH ccb through.
2393	 * For the peripheral driver list traversal function, however, we
2394	 * don't have to worry about new peripheral driver types coming or
2395	 * going; they're in a linker set, and therefore can't change
2396	 * without a recompile.
2397	 */
2398
2399	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2400	 && (cdm->pos.cookie.pdrv != NULL))
2401		ret = xptpdrvtraverse(
2402				(struct periph_driver **)cdm->pos.cookie.pdrv,
2403				xptplistpdrvfunc, cdm);
2404	else
2405		ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2406
2407	/*
2408	 * If we get back 0, that means that we had to stop before fully
2409	 * traversing the peripheral driver list.  It also means that one of
2410	 * the subroutines has set the status field to the proper value.  If
2411	 * we get back 1, we've fully traversed the list and copied out any
2412	 * matching entries.
2413	 */
2414	if (ret == 1)
2415		cdm->status = CAM_DEV_MATCH_LAST;
2416
2417	return(ret);
2418}
2419
2420static int
2421xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2422{
2423	struct cam_eb *bus, *next_bus;
2424	int retval;
2425
2426	retval = 1;
2427
2428	for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
2429	     bus != NULL;
2430	     bus = next_bus) {
2431		next_bus = TAILQ_NEXT(bus, links);
2432
2433		retval = tr_func(bus, arg);
2434		if (retval == 0)
2435			return(retval);
2436	}
2437
2438	return(retval);
2439}
2440
2441static int
2442xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2443		  xpt_targetfunc_t *tr_func, void *arg)
2444{
2445	struct cam_et *target, *next_target;
2446	int retval;
2447
2448	retval = 1;
2449	for (target = (start_target ? start_target :
2450		       TAILQ_FIRST(&bus->et_entries));
2451	     target != NULL; target = next_target) {
2452
2453		next_target = TAILQ_NEXT(target, links);
2454
2455		retval = tr_func(target, arg);
2456
2457		if (retval == 0)
2458			return(retval);
2459	}
2460
2461	return(retval);
2462}
2463
2464static int
2465xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2466		  xpt_devicefunc_t *tr_func, void *arg)
2467{
2468	struct cam_ed *device, *next_device;
2469	int retval;
2470
2471	retval = 1;
2472	for (device = (start_device ? start_device :
2473		       TAILQ_FIRST(&target->ed_entries));
2474	     device != NULL;
2475	     device = next_device) {
2476
2477		next_device = TAILQ_NEXT(device, links);
2478
2479		retval = tr_func(device, arg);
2480
2481		if (retval == 0)
2482			return(retval);
2483	}
2484
2485	return(retval);
2486}
2487
2488static int
2489xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2490		  xpt_periphfunc_t *tr_func, void *arg)
2491{
2492	struct cam_periph *periph, *next_periph;
2493	int retval;
2494
2495	retval = 1;
2496
2497	for (periph = (start_periph ? start_periph :
2498		       SLIST_FIRST(&device->periphs));
2499	     periph != NULL;
2500	     periph = next_periph) {
2501
2502		next_periph = SLIST_NEXT(periph, periph_links);
2503
2504		retval = tr_func(periph, arg);
2505		if (retval == 0)
2506			return(retval);
2507	}
2508
2509	return(retval);
2510}
2511
2512static int
2513xptpdrvtraverse(struct periph_driver **start_pdrv,
2514		xpt_pdrvfunc_t *tr_func, void *arg)
2515{
2516	struct periph_driver **pdrv;
2517	int retval;
2518
2519	retval = 1;
2520
2521	/*
2522	 * We don't traverse the peripheral driver list like we do the
2523	 * other lists, because it is a linker set, and therefore cannot be
2524	 * changed during runtime.  If the peripheral driver list is ever
2525	 * re-done to be something other than a linker set (i.e. it can
2526	 * change while the system is running), the list traversal should
2527	 * be modified to work like the other traversal functions.
2528	 */
2529	for (pdrv = (start_pdrv ? start_pdrv :
2530	     (struct periph_driver **)periphdriver_set.ls_items);
2531	     *pdrv != NULL; pdrv++) {
2532		retval = tr_func(pdrv, arg);
2533
2534		if (retval == 0)
2535			return(retval);
2536	}
2537
2538	return(retval);
2539}
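
/*
 * For reference, peripheral drivers enter the periphdriver_set linker
 * set traversed above by declaring themselves with DATA_SET(); a sketch
 * of a driver's registration (the "xx" names are hypothetical):
 *
 *	static struct periph_driver xxdriver =
 *	{
 *		xxinit, "xx",
 *		TAILQ_HEAD_INITIALIZER(xxdriver.units), 0
 *	};
 *
 *	DATA_SET(periphdriver_set, xxdriver);
 */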
2540
2541static int
2542xptpdperiphtraverse(struct periph_driver **pdrv,
2543		    struct cam_periph *start_periph,
2544		    xpt_periphfunc_t *tr_func, void *arg)
2545{
2546	struct cam_periph *periph, *next_periph;
2547	int retval;
2548
2549	retval = 1;
2550
2551	for (periph = (start_periph ? start_periph :
2552	     TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2553	     periph = next_periph) {
2554
2555		next_periph = TAILQ_NEXT(periph, unit_links);
2556
2557		retval = tr_func(periph, arg);
2558		if (retval == 0)
2559			return(retval);
2560	}
2561	return(retval);
2562}
2563
2564static int
2565xptdefbusfunc(struct cam_eb *bus, void *arg)
2566{
2567	struct xpt_traverse_config *tr_config;
2568
2569	tr_config = (struct xpt_traverse_config *)arg;
2570
2571	if (tr_config->depth == XPT_DEPTH_BUS) {
2572		xpt_busfunc_t *tr_func;
2573
2574		tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2575
2576		return(tr_func(bus, tr_config->tr_arg));
2577	} else
2578		return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2579}
2580
2581static int
2582xptdeftargetfunc(struct cam_et *target, void *arg)
2583{
2584	struct xpt_traverse_config *tr_config;
2585
2586	tr_config = (struct xpt_traverse_config *)arg;
2587
2588	if (tr_config->depth == XPT_DEPTH_TARGET) {
2589		xpt_targetfunc_t *tr_func;
2590
2591		tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2592
2593		return(tr_func(target, tr_config->tr_arg));
2594	} else
2595		return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2596}
2597
2598static int
2599xptdefdevicefunc(struct cam_ed *device, void *arg)
2600{
2601	struct xpt_traverse_config *tr_config;
2602
2603	tr_config = (struct xpt_traverse_config *)arg;
2604
2605	if (tr_config->depth == XPT_DEPTH_DEVICE) {
2606		xpt_devicefunc_t *tr_func;
2607
2608		tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2609
2610		return(tr_func(device, tr_config->tr_arg));
2611	} else
2612		return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2613}
2614
2615static int
2616xptdefperiphfunc(struct cam_periph *periph, void *arg)
2617{
2618	struct xpt_traverse_config *tr_config;
2619	xpt_periphfunc_t *tr_func;
2620
2621	tr_config = (struct xpt_traverse_config *)arg;
2622
2623	tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2624
2625	/*
2626	 * Unlike the other default functions, we don't check for depth
2627	 * here.  The peripheral driver level is the last level in the EDT,
2628	 * so if we're here, we should execute the function in question.
2629	 */
2630	return(tr_func(periph, tr_config->tr_arg));
2631}
2632
2633/*
2634 * Execute the given function for every bus in the EDT.
2635 */
2636static int
2637xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2638{
2639	struct xpt_traverse_config tr_config;
2640
2641	tr_config.depth = XPT_DEPTH_BUS;
2642	tr_config.tr_func = tr_func;
2643	tr_config.tr_arg = arg;
2644
2645	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2646}
2647
2648#ifdef notusedyet
2649/*
2650 * Execute the given function for every target in the EDT.
2651 */
2652static int
2653xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
2654{
2655	struct xpt_traverse_config tr_config;
2656
2657	tr_config.depth = XPT_DEPTH_TARGET;
2658	tr_config.tr_func = tr_func;
2659	tr_config.tr_arg = arg;
2660
2661	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2662}
2663#endif /* notusedyet */
2664
2665/*
2666 * Execute the given function for every device in the EDT.
2667 */
2668static int
2669xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2670{
2671	struct xpt_traverse_config tr_config;
2672
2673	tr_config.depth = XPT_DEPTH_DEVICE;
2674	tr_config.tr_func = tr_func;
2675	tr_config.tr_arg = arg;
2676
2677	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2678}
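
/*
 * As a hedged usage sketch of the callback contract: returning 1 from
 * the callback continues the traversal, returning 0 aborts it.  A
 * hypothetical callback that counts configured devices:
 *
 *	static int
 *	xptcountdevsfunc(struct cam_ed *device, void *arg)
 *	{
 *		if ((device->flags & CAM_DEV_UNCONFIGURED) == 0)
 *			(*(int *)arg)++;
 *		return(1);
 *	}
 *
 *	int count = 0;
 *	xpt_for_all_devices(xptcountdevsfunc, &count);
 */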
2679
2680#ifdef notusedyet
2681/*
2682 * Execute the given function for every peripheral in the EDT.
2683 */
2684static int
2685xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
2686{
2687	struct xpt_traverse_config tr_config;
2688
2689	tr_config.depth = XPT_DEPTH_PERIPH;
2690	tr_config.tr_func = tr_func;
2691	tr_config.tr_arg = arg;
2692
2693	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2694}
2695#endif /* notusedyet */
2696
2697static int
2698xptsetasyncfunc(struct cam_ed *device, void *arg)
2699{
2700	struct cam_path path;
2701	struct ccb_getdev cgd;
2702	struct async_node *cur_entry;
2703
2704	cur_entry = (struct async_node *)arg;
2705
2706	/*
2707	 * Don't report unconfigured devices (Wildcard devs,
2708	 * devices only for target mode, device instances
2709	 * that have been invalidated but are waiting for
2710	 * their last reference count to be released).
2711	 */
2712	if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2713		return (1);
2714
2715	xpt_compile_path(&path,
2716			 NULL,
2717			 device->target->bus->path_id,
2718			 device->target->target_id,
2719			 device->lun_id);
2720	xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2721	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2722	xpt_action((union ccb *)&cgd);
2723	cur_entry->callback(cur_entry->callback_arg,
2724			    AC_FOUND_DEVICE,
2725			    &path, &cgd);
2726	xpt_release_path(&path);
2727
2728	return(1);
2729}
2730
2731static int
2732xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2733{
2734	struct cam_path path;
2735	struct ccb_pathinq cpi;
2736	struct async_node *cur_entry;
2737
2738	cur_entry = (struct async_node *)arg;
2739
2740	xpt_compile_path(&path, /*periph*/NULL,
2741			 bus->sim->path_id,
2742			 CAM_TARGET_WILDCARD,
2743			 CAM_LUN_WILDCARD);
2744	xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2745	cpi.ccb_h.func_code = XPT_PATH_INQ;
2746	xpt_action((union ccb *)&cpi);
2747	cur_entry->callback(cur_entry->callback_arg,
2748			    AC_PATH_REGISTERED,
2749			    &path, &cpi);
2750	xpt_release_path(&path);
2751
2752	return(1);
2753}
2754
2755void
2756xpt_action(union ccb *start_ccb)
2757{
2758	int iopl;
2759
2760	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2761
2762	start_ccb->ccb_h.status = CAM_REQ_INPROG;
2763
2764	iopl = splsoftcam();
2765	switch (start_ccb->ccb_h.func_code) {
2766	case XPT_SCSI_IO:
2767	{
2768#ifdef CAMDEBUG
2769		char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2770		struct cam_path *path;
2771
2772		path = start_ccb->ccb_h.path;
2773#endif
2774
2775		/*
2776		 * For the sake of compatibility with SCSI-1
2777		 * devices that may not understand the identify
2778		 * message, we include lun information in the
2779		 * second byte of all commands.  SCSI-1 specifies
2780		 * that luns are a 3 bit value and reserves only 3
2781		 * bits for lun information in the CDB.  Later
2782		 * revisions of the SCSI spec allow for more than 8
2783		 * luns, but have deprecated lun information in the
2784		 * CDB.  So, if the lun won't fit, we must omit it.
2785		 *
2786		 * Also be aware that during initial probing for devices,
2787		 * the inquiry information is unknown but initialized to 0.
2788		 * This means that this code will be exercised while probing
2789		 * devices whose ANSI revision turns out to be greater than 2.
2790		 */
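		/*
		 * Concretely: for a SCSI-1 device at lun 2, the OR below
		 * turns a second CDB byte of 0x00 into (2 << 5) == 0x40,
		 * placing the lun in bits 7-5 as SCSI-1 expects.
		 */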
2791		if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
2792		 && start_ccb->ccb_h.target_lun < 8
2793		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2794
2795			start_ccb->csio.cdb_io.cdb_bytes[1] |=
2796			    start_ccb->ccb_h.target_lun << 5;
2797		}
2798		start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2799		CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
2800			  scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
2801			  	       &path->device->inq_data),
2802			  scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
2803					  cdb_str, sizeof(cdb_str))));
2804		/* FALLTHROUGH */
2805	}
2806	case XPT_TARGET_IO:
2807	case XPT_CONT_TARGET_IO:
2808		start_ccb->csio.sense_resid = 0;
2809		start_ccb->csio.resid = 0;
2810		/* FALLTHROUGH */
2811	case XPT_RESET_DEV:
2812	case XPT_ENG_EXEC:
2813	{
2814		struct cam_path *path;
2815		int s;
2816		int runq;
2817
2818		path = start_ccb->ccb_h.path;
2819		s = splsoftcam();
2820
2821		cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2822		if (path->device->qfrozen_cnt == 0)
2823			runq = xpt_schedule_dev_sendq(path->bus, path->device);
2824		else
2825			runq = 0;
2826		splx(s);
2827		if (runq != 0)
2828			xpt_run_dev_sendq(path->bus);
2829		break;
2830	}
2831	case XPT_SET_TRAN_SETTINGS:
2832	{
2833		xpt_set_transfer_settings(&start_ccb->cts,
2834					  start_ccb->ccb_h.path->device,
2835					  /*async_update*/FALSE);
2836		break;
2837	}
2838	case XPT_CALC_GEOMETRY:
2839	{
2840		struct cam_sim *sim;
2841
2842		/* Filter out garbage */
2843		if (start_ccb->ccg.block_size == 0
2844		 || start_ccb->ccg.volume_size == 0) {
2845			start_ccb->ccg.cylinders = 0;
2846			start_ccb->ccg.heads = 0;
2847			start_ccb->ccg.secs_per_track = 0;
2848			start_ccb->ccb_h.status = CAM_REQ_CMP;
2849			break;
2850		}
2851#ifdef PC98
2852		/*
2853		 * In a PC-98 system, geometry translation depends on
2854		 * the "real" device geometry obtained from mode page 4.
2855		 * SCSI geometry translation is performed in the
2856		 * initialization routine of the SCSI BIOS and the result
2857		 * stored in host memory.  If the translation is available
2858		 * in host memory, use it.  If not, rely on the default
2859		 * translation the device driver performs.
2860		 */
2861		if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
2862			start_ccb->ccb_h.status = CAM_REQ_CMP;
2863			break;
2864		}
2865#endif
2866		sim = start_ccb->ccb_h.path->bus->sim;
2867		(*(sim->sim_action))(sim, start_ccb);
2868		break;
2869	}
2870	case XPT_ABORT:
2871	{
2872		union ccb* abort_ccb;
2873		int s;
2874
2875		abort_ccb = start_ccb->cab.abort_ccb;
2876		if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2877
2878			if (abort_ccb->ccb_h.pinfo.index >= 0) {
2879				struct cam_ccbq *ccbq;
2880
2881				ccbq = &abort_ccb->ccb_h.path->device->ccbq;
2882				cam_ccbq_remove_ccb(ccbq, abort_ccb);
2883				abort_ccb->ccb_h.status =
2884				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2885				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2886				s = splcam();
2887				xpt_done(abort_ccb);
2888				splx(s);
2889				start_ccb->ccb_h.status = CAM_REQ_CMP;
2890				break;
2891			}
2892			if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2893			 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2894				/*
2895				 * We've caught this ccb en route to
2896				 * the SIM.  Flag it for abort and the
2897				 * SIM will do so just before starting
2898				 * real work on the CCB.
2899				 */
2900				abort_ccb->ccb_h.status =
2901				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2902				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2903				start_ccb->ccb_h.status = CAM_REQ_CMP;
2904				break;
2905			}
2906		}
2907		if (XPT_FC_IS_QUEUED(abort_ccb)
2908		 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
2909			/*
2910			 * It's already completed but waiting
2911			 * for our SWI to get to it.
2912			 */
2913			start_ccb->ccb_h.status = CAM_UA_ABORT;
2914			break;
2915		}
2916		/*
2917		 * If we weren't able to take care of the abort request
2918		 * in the XPT, pass the request down to the SIM for processing.
2919		 */
2920		/* FALLTHROUGH */
2921	}
2922	case XPT_ACCEPT_TARGET_IO:
2923	case XPT_EN_LUN:
2924	case XPT_IMMED_NOTIFY:
2925	case XPT_NOTIFY_ACK:
2926	case XPT_GET_TRAN_SETTINGS:
2927	case XPT_RESET_BUS:
2928	{
2929		struct cam_sim *sim;
2930
2931		sim = start_ccb->ccb_h.path->bus->sim;
2932		(*(sim->sim_action))(sim, start_ccb);
2933		break;
2934	}
2935	case XPT_PATH_INQ:
2936	{
2937		struct cam_sim *sim;
2938
2939		sim = start_ccb->ccb_h.path->bus->sim;
2940		(*(sim->sim_action))(sim, start_ccb);
2941		break;
2942	}
2943	case XPT_PATH_STATS:
2944		start_ccb->cpis.last_reset =
2945			start_ccb->ccb_h.path->bus->last_reset;
2946		start_ccb->ccb_h.status = CAM_REQ_CMP;
2947		break;
2948	case XPT_GDEV_TYPE:
2949	{
2950		struct cam_ed *dev;
2951		int s;
2952
2953		dev = start_ccb->ccb_h.path->device;
2954		s = splcam();
2955		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2956			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2957		} else {
2958			struct ccb_getdev *cgd;
2959			struct cam_eb *bus;
2960			struct cam_et *tar;
2961
2962			cgd = &start_ccb->cgd;
2963			bus = cgd->ccb_h.path->bus;
2964			tar = cgd->ccb_h.path->target;
2965			cgd->inq_data = dev->inq_data;
2966			cgd->ccb_h.status = CAM_REQ_CMP;
2967			cgd->serial_num_len = dev->serial_num_len;
2968			if ((dev->serial_num_len > 0)
2969			 && (dev->serial_num != NULL))
2970				bcopy(dev->serial_num, cgd->serial_num,
2971				      dev->serial_num_len);
2972		}
2973		splx(s);
2974		break;
2975	}
2976	case XPT_GDEV_STATS:
2977	{
2978		struct cam_ed *dev;
2979		int s;
2980
2981		dev = start_ccb->ccb_h.path->device;
2982		s = splcam();
2983		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2984			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2985		} else {
2986			struct ccb_getdevstats *cgds;
2987			struct cam_eb *bus;
2988			struct cam_et *tar;
2989
2990			cgds = &start_ccb->cgds;
2991			bus = cgds->ccb_h.path->bus;
2992			tar = cgds->ccb_h.path->target;
2993			cgds->dev_openings = dev->ccbq.dev_openings;
2994			cgds->dev_active = dev->ccbq.dev_active;
2995			cgds->devq_openings = dev->ccbq.devq_openings;
2996			cgds->devq_queued = dev->ccbq.queue.entries;
2997			cgds->held = dev->ccbq.held;
2998			cgds->last_reset = tar->last_reset;
2999			cgds->maxtags = dev->quirk->maxtags;
3000			cgds->mintags = dev->quirk->mintags;
3001			if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
3002				cgds->last_reset = bus->last_reset;
3003			cgds->ccb_h.status = CAM_REQ_CMP;
3004		}
3005		splx(s);
3006		break;
3007	}
3008	case XPT_GDEVLIST:
3009	{
3010		struct cam_periph	*nperiph;
3011		struct periph_list	*periph_head;
3012		struct ccb_getdevlist	*cgdl;
3013		int			i;
3014		int			s;
3015		struct cam_ed		*device;
3016		int			found;
3017
3019		found = 0;
3020
3021		/*
3022		 * Don't want anyone mucking with our data.
3023		 */
3024		s = splcam();
3025		device = start_ccb->ccb_h.path->device;
3026		periph_head = &device->periphs;
3027		cgdl = &start_ccb->cgdl;
3028
3029		/*
3030		 * Check and see if the list has changed since the user
3031		 * last requested a list member.  If so, tell them that the
3032		 * list has changed, and therefore they need to start over
3033		 * from the beginning.
3034		 */
3035		if ((cgdl->index != 0) &&
3036		    (cgdl->generation != device->generation)) {
3037			cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3038			splx(s);
3039			break;
3040		}
3041
3042		/*
3043		 * Traverse the list of peripherals and attempt to find
3044		 * the requested peripheral.
3045		 */
3046		for (nperiph = SLIST_FIRST(periph_head), i = 0;
3047		     (nperiph != NULL) && (i <= cgdl->index);
3048		     nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
3049			if (i == cgdl->index) {
3050				strncpy(cgdl->periph_name,
3051					nperiph->periph_name,
3052					DEV_IDLEN);
3053				cgdl->unit_number = nperiph->unit_number;
3054				found = 1;
3055			}
3056		}
3057		if (found == 0) {
3058			cgdl->status = CAM_GDEVLIST_ERROR;
3059			splx(s);
3060			break;
3061		}
3062
3063		if (nperiph == NULL)
3064			cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3065		else
3066			cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3067
3068		cgdl->index++;
3069		cgdl->generation = device->generation;
3070
3071		splx(s);
3072		cgdl->ccb_h.status = CAM_REQ_CMP;
3073		break;
3074	}
3075	case XPT_DEV_MATCH:
3076	{
3077		int s;
3078		dev_pos_type position_type;
3079		struct ccb_dev_match *cdm;
3080		int ret;
3081
3082		cdm = &start_ccb->cdm;
3083
3084		/*
3085		 * Prevent EDT changes while we traverse it.
3086		 */
3087		s = splcam();
3088		/*
3089		 * There are two ways of getting at information in the EDT.
3090		 * The first way is via the primary EDT tree.  It starts
3091		 * with a list of busses, then a list of targets on a bus,
3092		 * then devices/luns on a target, and then peripherals on a
3093		 * device/lun.  The "other" way is by the peripheral driver
3094		 * lists.  The peripheral driver lists are organized by
3095		 * peripheral driver (obviously), so it makes sense to
3096		 * use the peripheral driver list if the user is looking
3097		 * for something like "da1", or all "da" devices.  If the
3098		 * user is looking for something on a particular bus/target
3099		 * or lun, it's generally better to go through the EDT tree.
3100		 */
3101
3102		if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3103			position_type = cdm->pos.position_type;
3104		else {
3105			int i;
3106
3107			position_type = CAM_DEV_POS_NONE;
3108
3109			for (i = 0; i < cdm->num_patterns; i++) {
3110				if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3111				 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3112					position_type = CAM_DEV_POS_EDT;
3113					break;
3114				}
3115			}
3116
3117			if (cdm->num_patterns == 0)
3118				position_type = CAM_DEV_POS_EDT;
3119			else if (position_type == CAM_DEV_POS_NONE)
3120				position_type = CAM_DEV_POS_PDRV;
3121		}
3122
3123		switch(position_type & CAM_DEV_POS_TYPEMASK) {
3124		case CAM_DEV_POS_EDT:
3125			ret = xptedtmatch(cdm);
3126			break;
3127		case CAM_DEV_POS_PDRV:
3128			ret = xptperiphlistmatch(cdm);
3129			break;
3130		default:
3131			cdm->status = CAM_DEV_MATCH_ERROR;
3132			break;
3133		}
3134
3135		splx(s);
3136
3137		if (cdm->status == CAM_DEV_MATCH_ERROR)
3138			start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3139		else
3140			start_ccb->ccb_h.status = CAM_REQ_CMP;
3141
3142		break;
3143	}
3144	case XPT_SASYNC_CB:
3145	{
3146		struct ccb_setasync *csa;
3147		struct async_node *cur_entry;
3148		struct async_list *async_head;
3149		u_int32_t added;
3150		int s;
3151
3152		csa = &start_ccb->csa;
3153		added = csa->event_enable;
3154		async_head = &csa->ccb_h.path->device->asyncs;
3155
3156		/*
3157		 * If there is already an entry for us, simply
3158		 * update it.
3159		 */
3160		s = splcam();
3161		cur_entry = SLIST_FIRST(async_head);
3162		while (cur_entry != NULL) {
3163			if ((cur_entry->callback_arg == csa->callback_arg)
3164			 && (cur_entry->callback == csa->callback))
3165				break;
3166			cur_entry = SLIST_NEXT(cur_entry, links);
3167		}
3168
3169		if (cur_entry != NULL) {
3170		 	/*
3171			 * If the request has no flags set,
3172			 * remove the entry.
3173			 */
3174			added &= ~cur_entry->event_enable;
3175			if (csa->event_enable == 0) {
3176				SLIST_REMOVE(async_head, cur_entry,
3177					     async_node, links);
3178				csa->ccb_h.path->device->refcount--;
3179				free(cur_entry, M_DEVBUF);
3180			} else {
3181				cur_entry->event_enable = csa->event_enable;
3182			}
3183		} else {
3184			cur_entry = malloc(sizeof(*cur_entry), M_DEVBUF,
3185					   M_NOWAIT);
3186			if (cur_entry == NULL) {
3187				splx(s);
3188				csa->ccb_h.status = CAM_RESRC_UNAVAIL;
3189				break;
3190			}
3191			cur_entry->event_enable = csa->event_enable;
3192			cur_entry->callback_arg = csa->callback_arg;
3193			cur_entry->callback = csa->callback;
3194			SLIST_INSERT_HEAD(async_head, cur_entry, links);
3195			csa->ccb_h.path->device->refcount++;
3196		}
3197
3198		if ((added & AC_FOUND_DEVICE) != 0) {
3199			/*
3200			 * Get this peripheral up to date with all
3201			 * the currently existing devices.
3202			 */
3203			xpt_for_all_devices(xptsetasyncfunc, cur_entry);
3204		}
3205		if ((added & AC_PATH_REGISTERED) != 0) {
3206			/*
3207			 * Get this peripheral up to date with all
3208			 * the currently existing busses.
3209			 */
3210			xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
3211		}
3212		splx(s);
3213		start_ccb->ccb_h.status = CAM_REQ_CMP;
3214		break;
3215	}
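	/*
	 * A hedged sketch of how a peripheral driver arranges to receive
	 * the async events handled above, assuming an existing path and a
	 * hypothetical xxasync() handler:
	 *
	 *	struct ccb_setasync csa;
	 *
	 *	xpt_setup_ccb(&csa.ccb_h, path, 5);
	 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
	 *	csa.event_enable = AC_FOUND_DEVICE;
	 *	csa.callback = xxasync;
	 *	csa.callback_arg = NULL;
	 *	xpt_action((union ccb *)&csa);
	 *
	 * Passing event_enable == 0 with the same callback/callback_arg
	 * pair removes the registration again, as the code above shows.
	 */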
3216	case XPT_REL_SIMQ:
3217	{
3218		struct ccb_relsim *crs;
3219		struct cam_ed *dev;
3220		int s;
3221
3222		crs = &start_ccb->crs;
3223		dev = crs->ccb_h.path->device;
3224		if (dev == NULL) {
3225
3226			crs->ccb_h.status = CAM_DEV_NOT_THERE;
3227			break;
3228		}
3229
3230		s = splcam();
3231
3232		if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3233
3234 			if ((dev->inq_data.flags & SID_CmdQue) != 0) {
3235
3236				/* Don't ever go below one opening */
3237				if (crs->openings > 0) {
3238					xpt_dev_ccbq_resize(crs->ccb_h.path,
3239							    crs->openings);
3240
3241					if (bootverbose) {
3242						xpt_print_path(crs->ccb_h.path);
3243						printf("tagged openings "
3244						       "now %d\n",
3245						       crs->openings);
3246					}
3247				}
3248			}
3249		}
3250
3251		if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3252
3253			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3254
3255				/*
3256				 * Just extend the old timeout and decrement
3257				 * the freeze count so that a single timeout
3258				 * is sufficient for releasing the queue.
3259				 */
3260				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3261				untimeout(xpt_release_devq_timeout,
3262					  dev, dev->c_handle);
3263			} else {
3264
3265				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3266			}
3267
3268			dev->c_handle =
3269				timeout(xpt_release_devq_timeout,
3270					dev,
3271					(crs->release_timeout * hz) / 1000);
3272
3273			dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3274
3275		}
3276
3277		if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3278
3279			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3280				/*
3281				 * Decrement the freeze count so that a single
3282				 * completion is still sufficient to unfreeze
3283				 * the queue.
3284				 */
3285				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3286			} else {
3287
3288				dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3289				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3290			}
3291		}
3292
3293		if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3294
3295			if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3296			 || (dev->ccbq.dev_active == 0)) {
3297
3298				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3299			} else {
3300
3301				dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3302				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3303			}
3304		}
3305		splx(s);
3306
3307		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3308
3309			xpt_release_devq(crs->ccb_h.path, /*count*/1,
3310					 /*run_queue*/TRUE);
3311		}
3312		start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3313		start_ccb->ccb_h.status = CAM_REQ_CMP;
3314		break;
3315	}
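	/*
	 * A hedged sketch of the common use of XPT_REL_SIMQ handled above:
	 * after freezing its device queue during error recovery, a
	 * peripheral driver asks for the queue to be released once a
	 * timeout expires (release_timeout is in milliseconds, per the hz
	 * scaling above):
	 *
	 *	struct ccb_relsim crs;
	 *
	 *	xpt_setup_ccb(&crs.ccb_h, periph->path, 5);
	 *	crs.ccb_h.func_code = XPT_REL_SIMQ;
	 *	crs.release_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
	 *	crs.release_timeout = 500;
	 *	crs.openings = 0;
	 *	crs.qfrozen_cnt = 0;
	 *	xpt_action((union ccb *)&crs);
	 */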
3316	case XPT_SCAN_BUS:
3317		xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3318		break;
3319	case XPT_SCAN_LUN:
3320		xpt_scan_lun(start_ccb->ccb_h.path->periph,
3321			     start_ccb->ccb_h.path, start_ccb->crcn.flags,
3322			     start_ccb);
3323		break;
3324	case XPT_DEBUG: {
3325#ifdef CAMDEBUG
3326		int s;
3327
3328		s = splcam();
3329#ifdef CAM_DEBUG_DELAY
3330		cam_debug_delay = CAM_DEBUG_DELAY;
3331#endif
3332		cam_dflags = start_ccb->cdbg.flags;
3333		if (cam_dpath != NULL) {
3334			xpt_free_path(cam_dpath);
3335			cam_dpath = NULL;
3336		}
3337
3338		if (cam_dflags != CAM_DEBUG_NONE) {
3339			if (xpt_create_path(&cam_dpath, xpt_periph,
3340					    start_ccb->ccb_h.path_id,
3341					    start_ccb->ccb_h.target_id,
3342					    start_ccb->ccb_h.target_lun) !=
3343					    CAM_REQ_CMP) {
3344				start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3345				cam_dflags = CAM_DEBUG_NONE;
3346			} else {
3347				start_ccb->ccb_h.status = CAM_REQ_CMP;
3348				xpt_print_path(cam_dpath);
3349				printf("debugging flags now %x\n", cam_dflags);
3350			}
3351		} else {
3352			cam_dpath = NULL;
3353			start_ccb->ccb_h.status = CAM_REQ_CMP;
3354		}
3355		splx(s);
3356#else /* !CAMDEBUG */
3357		start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3358#endif /* CAMDEBUG */
3359		break;
3360	}
3361	case XPT_NOOP:
3362		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3363			xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3364		start_ccb->ccb_h.status = CAM_REQ_CMP;
3365		break;
3366	default:
3367	case XPT_SDEV_TYPE:
3368	case XPT_TERM_IO:
3369	case XPT_ENG_INQ:
3370		/* XXX Implement */
3371		start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3372		break;
3373	}
3374	splx(iopl);
3375}
3376
3377void
3378xpt_polled_action(union ccb *start_ccb)
3379{
3380	int	  s;
3381	u_int32_t timeout;
3382	struct	  cam_sim *sim;
3383	struct	  cam_devq *devq;
3384	struct	  cam_ed *dev;
3385
3386	timeout = start_ccb->ccb_h.timeout;
3387	sim = start_ccb->ccb_h.path->bus->sim;
3388	devq = sim->devq;
3389	dev = start_ccb->ccb_h.path->device;
3390
3391	s = splcam();
3392
3393	/*
3394	 * Steal an opening so that no other queued requests
3395	 * can get it before us while we simulate interrupts.
3396	 */
3397	dev->ccbq.devq_openings--;
3398	dev->ccbq.dev_openings--;
3399
3400	while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0)
3401	   && (--timeout > 0)) {
3402		DELAY(1000);
3403		(*(sim->sim_poll))(sim);
3404		swi_camnet();
3405		swi_cambio();
3406	}
3407
3408	dev->ccbq.devq_openings++;
3409	dev->ccbq.dev_openings++;
3410
3411	if (timeout != 0) {
3412		xpt_action(start_ccb);
3413		while(--timeout > 0) {
3414			(*(sim->sim_poll))(sim);
3415			swi_camnet();
3416			swi_cambio();
3417			if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
3418			    != CAM_REQ_INPROG)
3419				break;
3420			DELAY(1000);
3421		}
3422		if (timeout == 0) {
3423			/*
3424			 * XXX Is it worth adding a sim_timeout entry
3425			 * point so we can attempt recovery?  If
3426			 * this is only used for dumps, I don't think
3427			 * it is.
3428			 */
3429			start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3430		}
3431	} else {
3432		start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3433	}
3434	splx(s);
3435}
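
/*
 * A hedged sketch of how xpt_polled_action() is typically driven, e.g.
 * when writing a crash dump with interrupts disabled (CDB and data setup
 * elided; the timeout is in milliseconds, matching the DELAY(1000) steps
 * above):
 *
 *	struct ccb_scsiio csio;
 *
 *	xpt_setup_ccb(&csio.ccb_h, path, 1);
 *	csio.ccb_h.func_code = XPT_SCSI_IO;
 *	csio.ccb_h.timeout = 10000;
 *	(fill in the CDB, data pointers, and flags)
 *	xpt_polled_action((union ccb *)&csio);
 *	if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
 *		(the command failed or timed out)
 */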
3436
3437/*
3438 * Schedule a peripheral driver to receive a ccb when its
3439 * target device has space for more transactions.
3440 */
3441void
3442xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3443{
3444	struct cam_ed *device;
3445	int s;
3446	int runq;
3447
3448	CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3449	device = perph->path->device;
3450	s = splsoftcam();
3451	if (periph_is_queued(perph)) {
3452		/* Simply reorder based on new priority */
3453		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3454			  ("   change priority to %d\n", new_priority));
3455		if (new_priority < perph->pinfo.priority) {
3456			camq_change_priority(&device->drvq,
3457					     perph->pinfo.index,
3458					     new_priority);
3459		}
3460		runq = 0;
3461	} else {
3462		/* New entry on the queue */
3463		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3464			  ("   added periph to queue\n"));
3465		perph->pinfo.priority = new_priority;
3466		perph->pinfo.generation = ++device->drvq.generation;
3467		camq_insert(&device->drvq, &perph->pinfo);
3468		runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3469	}
3470	splx(s);
3471	if (runq != 0) {
3472		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3473			  ("   calling xpt_run_devq\n"));
3474		xpt_run_dev_allocq(perph->path->bus);
3475	}
3476}
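
/*
 * A hedged sketch of the producer side of this scheme: a peripheral
 * driver queues work on its own private queue, schedules itself, and is
 * later handed a CCB through its periph_start entry point (xxstart is
 * hypothetical):
 *
 *	(enqueue the work on the driver's private queue, then:)
 *	xpt_schedule(periph, 1);
 *
 *	static void
 *	xxstart(struct cam_periph *periph, union ccb *start_ccb)
 *	{
 *		(fill in start_ccb and hand it to xpt_action())
 *	}
 */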
3477
3478
3479/*
3480 * Schedule a device to run on a given queue.
3481 * If the device was inserted as a new entry on the queue,
3482 * return 1 meaning the device queue should be run. If we
3483 * were already queued, implying someone else has already
3484 * started the queue, return 0 so the caller doesn't attempt
3485 * to run the queue.  Must be run at splsoftcam
3486 * (or splcam, since that encompasses splsoftcam).
3487 */
3488static int
3489xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3490		 u_int32_t new_priority)
3491{
3492	int retval;
3493	u_int32_t old_priority;
3494
3495	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3496
3497	old_priority = pinfo->priority;
3498
3499	/*
3500	 * Are we already queued?
3501	 */
3502	if (pinfo->index != CAM_UNQUEUED_INDEX) {
3503		/* Simply reorder based on new priority */
3504		if (new_priority < old_priority) {
3505			camq_change_priority(queue, pinfo->index,
3506					     new_priority);
3507			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3508					("changed priority to %d\n",
3509					 new_priority));
3510		}
3511		retval = 0;
3512	} else {
3513		/* New entry on the queue */
3514		if (new_priority < old_priority)
3515			pinfo->priority = new_priority;
3516
3517		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3518				("Inserting onto queue\n"));
3519		pinfo->generation = ++queue->generation;
3520		camq_insert(queue, pinfo);
3521		retval = 1;
3522	}
3523	return (retval);
3524}
3525
3526static void
3527xpt_run_dev_allocq(struct cam_eb *bus)
3528{
3529	struct	cam_devq *devq;
3530	int	s;
3531
3532	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3533	devq = bus->sim->devq;
3534
3535	CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3536			("   qfrozen_cnt == 0x%x, entries == %d, "
3537			 "openings == %d, active == %d\n",
3538			 devq->alloc_queue.qfrozen_cnt,
3539			 devq->alloc_queue.entries,
3540			 devq->alloc_openings,
3541			 devq->alloc_active));
3542
3543	s = splsoftcam();
3544	devq->alloc_queue.qfrozen_cnt++;
3545	while ((devq->alloc_queue.entries > 0)
3546	    && (devq->alloc_openings > 0)
3547	    && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3548		struct	cam_ed_qinfo *qinfo;
3549		struct	cam_ed *device;
3550		union	ccb *work_ccb;
3551		struct	cam_periph *drv;
3552		struct	camq *drvq;
3553
3554		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3555							   CAMQ_HEAD);
3556		device = qinfo->device;
3557
3558		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3559				("running device %p\n", device));
3560
3561		drvq = &device->drvq;
3562
3563#ifdef CAMDEBUG
3564		if (drvq->entries <= 0) {
3565			panic("xpt_run_dev_allocq: "
3566			      "Device on queue without any work to do");
3567		}
3568#endif
3569		if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3570			devq->alloc_openings--;
3571			devq->alloc_active++;
3572			drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3573			splx(s);
3574			xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3575				      drv->pinfo.priority);
3576			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3577					("calling periph start\n"));
3578			drv->periph_start(drv, work_ccb);
3579		} else {
3580			/*
3581			 * Malloc failure in alloc_ccb
3582			 */
3583			/*
3584			 * XXX add us to a list to be run from free_ccb
3585			 * if we don't have any ccbs active on this
3586			 * device queue; otherwise we may never get run
3587			 * again.
3588			 */
3589			break;
3590		}
3591
3592		/* Raise IPL for possible insertion and test at top of loop */
3593		s = splsoftcam();
3594
3595		if (drvq->entries > 0) {
3596			/* We have more work.  Attempt to reschedule */
3597			xpt_schedule_dev_allocq(bus, device);
3598		}
3599	}
3600	devq->alloc_queue.qfrozen_cnt--;
3601	splx(s);
3602}
3603
3604static void
3605xpt_run_dev_sendq(struct cam_eb *bus)
3606{
3607	struct	cam_devq *devq;
3608	int	s;
3609
3610	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3611
3612	devq = bus->sim->devq;
3613
3614	s = splcam();
3615	devq->send_queue.qfrozen_cnt++;
3616	splx(s);
3617	s = splsoftcam();
3618	while ((devq->send_queue.entries > 0)
3619	    && (devq->send_openings > 0)) {
3620		struct	cam_ed_qinfo *qinfo;
3621		struct	cam_ed *device;
3622		union ccb *work_ccb;
3623		struct	cam_sim *sim;
3624		int	ospl;
3625
3626		ospl = splcam();
3627	    	if (devq->send_queue.qfrozen_cnt > 1) {
3628			splx(ospl);
3629			break;
3630		}
3631
3632		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3633							   CAMQ_HEAD);
3634		device = qinfo->device;
3635
3636		/*
3637		 * If the device has been "frozen", don't attempt
3638		 * to run it.
3639		 */
3640		if (device->qfrozen_cnt > 0) {
3641			splx(ospl);
3642			continue;
3643		}
3644
3645		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3646				("running device %p\n", device));
3647
3648		work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3649		if (work_ccb == NULL) {
3650			printf("device on run queue with no ccbs???\n");
3651			splx(ospl);
3652			continue;
3653		}
3654
3655		if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3656
3657		 	if (num_highpower <= 0) {
3658				/*
3659				 * We got a high power command, but we
3660				 * don't have any available slots.  Freeze
3661				 * the device queue until we have a slot
3662				 * available.
3663				 */
3664				device->qfrozen_cnt++;
3665				STAILQ_INSERT_TAIL(&highpowerq,
3666						   &work_ccb->ccb_h,
3667						   xpt_links.stqe);
3668
3669				splx(ospl);
3670				continue;
3671			} else {
3672				/*
3673				 * Consume a high power slot while
3674				 * this ccb runs.
3675				 */
3676				num_highpower--;
3677			}
3678		}
3679		devq->active_dev = device;
3680		cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3681
3682		cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3683		splx(ospl);
3684
3685		devq->send_openings--;
3686		devq->send_active++;
3687
3688		if (device->ccbq.queue.entries > 0)
3689			xpt_schedule_dev_sendq(bus, device);
3690
3691		if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
3692			/*
3693			 * The client wants to freeze the queue
3694			 * after this CCB is sent.
3695			 */
3696			ospl = splcam();
3697			device->qfrozen_cnt++;
3698			splx(ospl);
3699		}
3700
3701		splx(s);
3702
3703		/* In Target mode, the peripheral driver knows best... */
3704		if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3705			if ((device->inq_flags & SID_CmdQue) != 0
3706			 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3707				work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3708			else
3709				/*
3710				 * Clear this in case of a retried CCB that
3711				 * failed due to a rejected tag.
3712				 */
3713				work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3714		}
3715
3716		/*
3717		 * Device queues can be shared among multiple sim instances
3718		 * that reside on different busses.  Use the SIM in the queued
3719		 * CCB's path, rather than the one in the bus that was passed
3720		 * into this function.
3721		 */
3722		sim = work_ccb->ccb_h.path->bus->sim;
3723		(*(sim->sim_action))(sim, work_ccb);
3724
3725		ospl = splcam();
3726		devq->active_dev = NULL;
3727		splx(ospl);
3728		/* Raise IPL for possible insertion and test at top of loop */
3729		s = splsoftcam();
3730	}
3731	splx(s);
3732	s = splcam();
3733	devq->send_queue.qfrozen_cnt--;
3734	splx(s);
3735}
3736
3737/*
3738 * This function merges stuff from the slave ccb into the master ccb, while
3739 * keeping important fields in the master ccb constant.
3740 */
3741void
3742xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3743{
3744	/*
3745	 * Pull fields that are valid for peripheral drivers to set
3746	 * into the master CCB along with the CCB "payload".
3747	 */
3748	master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3749	master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3750	master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3751	master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
3752	bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3753	      sizeof(union ccb) - sizeof(struct ccb_hdr));
3754}
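
/*
 * Example of use (a minimal sketch; "periph" and "donefunc" are
 * hypothetical): a peripheral driver can build a request in a scratch
 * CCB on its stack and fold it into a CCB obtained from the XPT,
 * leaving the master CCB's path and queueing information intact.
 */
#if 0
	union ccb *master;
	union ccb scratch;

	master = cam_periph_getccb(periph, /*priority*/1);
	xpt_setup_ccb(&scratch.ccb_h, periph->path, /*priority*/1);
	scsi_test_unit_ready(&scratch.csio, /*retries*/1, donefunc,
			     MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE,
			     /*timeout*/5000);
	xpt_merge_ccb(master, &scratch);
	xpt_action(master);
#endif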
3755
3756void
3757xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3758{
3759	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3760	ccb_h->pinfo.priority = priority;
3761	ccb_h->path = path;
3762	ccb_h->path_id = path->bus->path_id;
3763	if (path->target)
3764		ccb_h->target_id = path->target->target_id;
3765	else
3766		ccb_h->target_id = CAM_TARGET_WILDCARD;
3767	if (path->device) {
3768		ccb_h->target_lun = path->device->lun_id;
3769		ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3770	} else {
3771		ccb_h->target_lun = CAM_TARGET_WILDCARD;
3772	}
3773	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3774	ccb_h->flags = 0;
3775}
3776
3777/* Path manipulation functions */
3778cam_status
3779xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3780		path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3781{
3782	struct	   cam_path *path;
3783	cam_status status;
3784
3785	path = (struct cam_path *)malloc(sizeof(*path), M_DEVBUF, M_NOWAIT);
3786
3787	if (path == NULL) {
3788		status = CAM_RESRC_UNAVAIL;
3789		return(status);
3790	}
3791	status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3792	if (status != CAM_REQ_CMP) {
3793		free(path, M_DEVBUF);
3794		path = NULL;
3795	}
3796	*new_path_ptr = path;
3797	return (status);
3798}
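
/*
 * Usage sketch (the path_id/target/lun values are hypothetical):
 * allocate a path to one device, use it, and free it when done.
 */
#if 0
	struct cam_path *path;

	if (xpt_create_path(&path, /*periph*/NULL, /*path_id*/0,
			    /*target_id*/3, /*lun_id*/0) == CAM_REQ_CMP) {
		/* ... issue CCBs against "path" ... */
		xpt_free_path(path);
	}
#endif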
3799
3800static cam_status
3801xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3802		 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3803{
3804	struct	     cam_eb *bus;
3805	struct	     cam_et *target;
3806	struct	     cam_ed *device;
3807	cam_status   status;
3808	int	     s;
3809
3810	status = CAM_REQ_CMP;	/* Completed without error */
3811	target = NULL;		/* Wildcarded */
3812	device = NULL;		/* Wildcarded */
3813
3814	/*
3815	 * We will potentially modify the EDT, so block interrupts
3816	 * that may attempt to create cam paths.
3817	 */
3818	s = splcam();
3819	bus = xpt_find_bus(path_id);
3820	if (bus == NULL) {
3821		status = CAM_PATH_INVALID;
3822	} else {
3823		target = xpt_find_target(bus, target_id);
3824		if (target == NULL) {
3825			/* Create one */
3826			struct cam_et *new_target;
3827
3828			new_target = xpt_alloc_target(bus, target_id);
3829			if (new_target == NULL) {
3830				status = CAM_RESRC_UNAVAIL;
3831			} else {
3832				target = new_target;
3833			}
3834		}
3835		if (target != NULL) {
3836			device = xpt_find_device(target, lun_id);
3837			if (device == NULL) {
3838				/* Create one */
3839				struct cam_ed *new_device;
3840
3841				new_device = xpt_alloc_device(bus,
3842							      target,
3843							      lun_id);
3844				if (new_device == NULL) {
3845					status = CAM_RESRC_UNAVAIL;
3846				} else {
3847					device = new_device;
3848				}
3849			}
3850		}
3851	}
3852	splx(s);
3853
3854	/*
3855	 * Only touch the user's data if we are successful.
3856	 */
3857	if (status == CAM_REQ_CMP) {
3858		new_path->periph = perph;
3859		new_path->bus = bus;
3860		new_path->target = target;
3861		new_path->device = device;
3862		CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3863	} else {
3864		if (device != NULL)
3865			xpt_release_device(bus, target, device);
3866		if (target != NULL)
3867			xpt_release_target(bus, target);
3868		if (bus != NULL)
3869			xpt_release_bus(bus);
3870	}
3871	return (status);
3872}
3873
3874static void
3875xpt_release_path(struct cam_path *path)
3876{
3877	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3878	if (path->device != NULL) {
3879		xpt_release_device(path->bus, path->target, path->device);
3880		path->device = NULL;
3881	}
3882	if (path->target != NULL) {
3883		xpt_release_target(path->bus, path->target);
3884		path->target = NULL;
3885	}
3886	if (path->bus != NULL) {
3887		xpt_release_bus(path->bus);
3888		path->bus = NULL;
3889	}
3890}
3891
3892void
3893xpt_free_path(struct cam_path *path)
3894{
3895	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3896	xpt_release_path(path);
3897	free(path, M_DEVBUF);
3898}
3899
3900
3901/*
3902 * Return -1 for failure, 0 for exact match, 1 for match with wildcards
3903 * in path1, 2 for match with wildcards in path2.
3904 */
3905int
3906xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3907{
3908	int retval = 0;
3909
3910	if (path1->bus != path2->bus) {
3911		if (path1->bus->path_id == CAM_BUS_WILDCARD)
3912			retval = 1;
3913		else if (path2->bus->path_id == CAM_BUS_WILDCARD)
3914			retval = 2;
3915		else
3916			return (-1);
3917	}
3918	if (path1->target != path2->target) {
3919		if (path1->target->target_id == CAM_TARGET_WILDCARD) {
3920			if (retval == 0)
3921				retval = 1;
3922		} else if (path2->target->target_id == CAM_TARGET_WILDCARD)
3923			retval = 2;
3924		else
3925			return (-1);
3926	}
3927	if (path1->device != path2->device) {
3928		if (path1->device->lun_id == CAM_LUN_WILDCARD) {
3929			if (retval == 0)
3930				retval = 1;
3931		} else if (path2->device->lun_id == CAM_LUN_WILDCARD)
3932			retval = 2;
3933		else
3934			return (-1);
3935	}
3936	return (retval);
3937}
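
/*
 * Sketch of interpreting the return values documented above, e.g.
 * when matching an async event's path against a periph's path
 * ("event_path" is hypothetical):
 */
#if 0
	switch (xpt_path_comp(periph->path, event_path)) {
	case -1:	/* no match */
		break;
	case 0:		/* exact match */
	case 1:		/* match; wildcards in periph->path */
	case 2:		/* match; wildcards in event_path */
		/* The event applies to this periph. */
		break;
	}
#endif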
3938
3939void
3940xpt_print_path(struct cam_path *path)
3941{
3942	if (path == NULL)
3943		printf("(nopath): ");
3944	else {
3945		if (path->periph != NULL)
3946			printf("(%s%d:", path->periph->periph_name,
3947			       path->periph->unit_number);
3948		else
3949			printf("(noperiph:");
3950
3951		if (path->bus != NULL)
3952			printf("%s%d:%d:", path->bus->sim->sim_name,
3953			       path->bus->sim->unit_number,
3954			       path->bus->sim->bus_id);
3955		else
3956			printf("nobus:");
3957
3958		if (path->target != NULL)
3959			printf("%d:", path->target->target_id);
3960		else
3961			printf("X:");
3962
3963		if (path->device != NULL)
3964			printf("%d): ", path->device->lun_id);
3965		else
3966			printf("X): ");
3967	}
3968}
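
/*
 * For a fully specified path the prefix printed above looks like
 * "(da0:ahc0:0:3:0): ", i.e. (periph:sim:bus:target:lun), with "X"
 * standing in for any wildcarded component.  The unit and id values
 * in this example are purely illustrative.
 */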
3969
3970path_id_t
3971xpt_path_path_id(struct cam_path *path)
3972{
3973	return(path->bus->path_id);
3974}
3975
3976target_id_t
3977xpt_path_target_id(struct cam_path *path)
3978{
3979	if (path->target != NULL)
3980		return (path->target->target_id);
3981	else
3982		return (CAM_TARGET_WILDCARD);
3983}
3984
3985lun_id_t
3986xpt_path_lun_id(struct cam_path *path)
3987{
3988	if (path->device != NULL)
3989		return (path->device->lun_id);
3990	else
3991		return (CAM_LUN_WILDCARD);
3992}
3993
3994struct cam_sim *
3995xpt_path_sim(struct cam_path *path)
3996{
3997	return (path->bus->sim);
3998}
3999
4000struct cam_periph*
4001xpt_path_periph(struct cam_path *path)
4002{
4003	return (path->periph);
4004}
4005
4006/*
4007 * Release a CAM control block for the caller.  Remit the cost of the structure
4008 * to the device referenced by the path.  If this device had no 'credits'
4009 * and peripheral drivers have registered async callbacks for this
4010 * notification, call them now.
4011 */
4012void
4013xpt_release_ccb(union ccb *free_ccb)
4014{
4015	int	 s;
4016	struct	 cam_path *path;
4017	struct	 cam_ed *device;
4018	struct	 cam_eb *bus;
4019
4020	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
4021	path = free_ccb->ccb_h.path;
4022	device = path->device;
4023	bus = path->bus;
4024	s = splsoftcam();
4025	cam_ccbq_release_opening(&device->ccbq);
4026	if (xpt_ccb_count > xpt_max_ccbs) {
4027		xpt_free_ccb(free_ccb);
4028		xpt_ccb_count--;
4029	} else {
4030		SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
4031	}
4032	bus->sim->devq->alloc_openings++;
4033	bus->sim->devq->alloc_active--;
4034	/* XXX Turn this into an inline function - xpt_run_device?? */
4035	if ((device_is_alloc_queued(device) == 0)
4036	 && (device->drvq.entries > 0)) {
4037		xpt_schedule_dev_allocq(bus, device);
4038	}
4039	splx(s);
4040	if (dev_allocq_is_runnable(bus->sim->devq))
4041		xpt_run_dev_allocq(bus);
4042}
4043
4044/* Functions accessed by SIM drivers */
4045
4046/*
4047 * A sim structure, listing the SIM entry points and instance
4048 * identification info is passed to xpt_bus_register to hook the SIM
4049 * into the CAM framework.  xpt_bus_register creates a cam_eb entry
4050 * for this new bus and places it in the array of busses and assigns
4051 * it a path_id.  The path_id may be influenced by "hard wiring"
4052 * information specified by the user.  Once interrupt services are
4053 * available, the bus will be probed.
4054 */
4055int32_t
4056xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
4057{
4058	struct cam_eb *new_bus;
4059	struct cam_eb *old_bus;
4060	struct ccb_pathinq cpi;
4061	int s;
4062
4063	sim->bus_id = bus;
4064	new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
4065					  M_DEVBUF, M_NOWAIT);
4066	if (new_bus == NULL) {
4067		/* Couldn't satisfy request */
4068		return (CAM_RESRC_UNAVAIL);
4069	}
4070
4071	if (strcmp(sim->sim_name, "xpt") != 0) {
4072
4073		sim->path_id =
4074		    xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4075	}
4076
4077	TAILQ_INIT(&new_bus->et_entries);
4078	new_bus->path_id = sim->path_id;
4079	new_bus->sim = sim;
4080	timevalclear(&new_bus->last_reset);
4081	new_bus->flags = 0;
4082	new_bus->refcount = 1;	/* Held until a bus_deregister event */
4083	new_bus->generation = 0;
4084	s = splcam();
4085	old_bus = TAILQ_FIRST(&xpt_busses);
4086	while (old_bus != NULL
4087	    && old_bus->path_id < new_bus->path_id)
4088		old_bus = TAILQ_NEXT(old_bus, links);
4089	if (old_bus != NULL)
4090		TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
4091	else
4092		TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
4093	bus_generation++;
4094	splx(s);
4095
4096	/* Notify interested parties */
4097	if (sim->path_id != CAM_XPT_PATH_ID) {
4098		struct cam_path path;
4099
4100		xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
4101			         CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4102		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
4103		cpi.ccb_h.func_code = XPT_PATH_INQ;
4104		xpt_action((union ccb *)&cpi);
4105		xpt_async(AC_PATH_REGISTERED, xpt_periph->path, &cpi);
4106		xpt_release_path(&path);
4107	}
4108	return (CAM_SUCCESS);
4109}
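
/*
 * Attach-time sketch for a SIM driver ("mydev_action", "mydev_poll",
 * "softc", and "unit" are hypothetical): allocate a devq and a sim,
 * then register the bus so the XPT can probe it.
 */
#if 0
	struct cam_devq *devq;
	struct cam_sim *sim;

	devq = cam_simq_alloc(/*max_sim_transactions*/256);
	sim = cam_sim_alloc(mydev_action, mydev_poll, "mydev", softc,
			    unit, /*max_dev_transactions*/1,
			    /*max_tagged_dev_transactions*/255, devq);
	if (sim != NULL && xpt_bus_register(sim, /*bus*/0) == CAM_SUCCESS) {
		/* The bus is now known to the XPT and will be probed. */
	}
#endif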
4110
4111int32_t
4112xpt_bus_deregister(path_id_t pathid)
4113{
4114	struct cam_path bus_path;
4115	cam_status status;
4116
4117	status = xpt_compile_path(&bus_path, NULL, pathid,
4118				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4119	if (status != CAM_REQ_CMP)
4120		return (status);
4121
4122	xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4123	xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4124
4125	/* Release the reference count held while registered. */
4126	xpt_release_bus(bus_path.bus);
4127	xpt_release_path(&bus_path);
4128
4129	return (CAM_REQ_CMP);
4130}
4131
4132static path_id_t
4133xptnextfreepathid(void)
4134{
4135	struct cam_eb *bus;
4136	path_id_t pathid;
4137	char *strval;
4138
4139	pathid = 0;
4140	bus = TAILQ_FIRST(&xpt_busses);
4141retry:
4142	/* Find an unoccupied pathid */
4143	while (bus != NULL
4144	    && bus->path_id <= pathid) {
4145		if (bus->path_id == pathid)
4146			pathid++;
4147		bus = TAILQ_NEXT(bus, links);
4148	}
4149
4150	/*
4151	 * Ensure that this pathid is not reserved for
4152	 * a bus that may be registered in the future.
4153	 */
4154	if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4155		++pathid;
4156		/* Start the search over */
4157		goto retry;
4158	}
4159	return (pathid);
4160}
4161
4162static path_id_t
4163xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4164{
4165	path_id_t pathid;
4166	int i, dunit, val;
4167	char buf[32], *strval;
4168
4169	pathid = CAM_XPT_PATH_ID;
4170	snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4171	i = -1;
4172	while ((i = resource_locate(i, "scbus")) != -1) {
4173		dunit = resource_query_unit(i);
4174		if (dunit < 0)		/* unwired?! */
4175			continue;
4176		if (resource_string_value("scbus", dunit, "at", &strval) != 0)
4177			continue;
4178		if (strcmp(buf, strval) != 0)
4179			continue;
4180		if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4181			if (sim_bus == val) {
4182				pathid = dunit;
4183				break;
4184			}
4185		} else if (sim_bus == 0) {
4186			/* Unspecified matches bus 0 */
4187			pathid = dunit;
4188			break;
4189		} else {
4190			printf("Ambiguous scbus configuration for %s%d "
4191			       "bus %d, cannot wire down.  The kernel "
4192			       "config entry for scbus%d should "
4193			       "specify a controller bus.\n"
4194			       "Scbus will be assigned dynamically.\n",
4195			       sim_name, sim_unit, sim_bus, dunit);
4196			break;
4197		}
4198	}
4199
4200	if (pathid == CAM_XPT_PATH_ID)
4201		pathid = xptnextfreepathid();
4202	return (pathid);
4203}
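
/*
 * Wiring example (hedged; the controller names are illustrative):
 * kernel config entries of the following form reserve path ids for
 * particular controller busses, and the resource_*_value() lookups
 * above will honor them:
 *
 *	controller	scbus0 at ahc0 bus 0
 *	controller	scbus1 at ahc0 bus 1
 */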
4204
4205void
4206xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4207{
4208	struct cam_eb *bus;
4209	struct cam_et *target, *next_target;
4210	struct cam_ed *device, *next_device;
4211	int s;
4212
4213	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
4214
4215	/*
4216	 * Most async events come from a CAM interrupt context.  In
4217	 * a few cases, the error recovery code at the peripheral layer,
4218	 * which may run from our SWI or a process context, may signal
4219	 * deferred events with a call to xpt_async. Ensure async
4220	 * notifications are serialized by blocking cam interrupts.
4221	 */
4222	s = splcam();
4223
4224	bus = path->bus;
4225
4226	if (async_code == AC_BUS_RESET) {
4227		int s;
4228
4229		s = splclock();
4230		/* Update our notion of when the last reset occurred */
4231		microtime(&bus->last_reset);
4232		splx(s);
4233	}
4234
4235	for (target = TAILQ_FIRST(&bus->et_entries);
4236	     target != NULL;
4237	     target = next_target) {
4238
4239		next_target = TAILQ_NEXT(target, links);
4240
4241		if (path->target != target
4242		 && path->target->target_id != CAM_TARGET_WILDCARD)
4243			continue;
4244
4245		if (async_code == AC_SENT_BDR) {
4246			int s;
4247
4248			/* Update our notion of when the last reset occurred */
4249			s = splclock();
4250			microtime(&path->target->last_reset);
4251			splx(s);
4252		}
4253
4254		for (device = TAILQ_FIRST(&target->ed_entries);
4255		     device != NULL;
4256		     device = next_device) {
4257			cam_status status;
4258			struct cam_path newpath;
4259
4260			next_device = TAILQ_NEXT(device, links);
4261
4262			if (path->device != device
4263			 && path->device->lun_id != CAM_LUN_WILDCARD)
4264				continue;
4265
4266			/*
4267			 * We need our own path with wildcards expanded to
4268			 * handle certain types of events.
4269			 */
4270			if ((async_code == AC_SENT_BDR)
4271			 || (async_code == AC_BUS_RESET)
4272			 || (async_code == AC_INQ_CHANGED))
4273				status = xpt_compile_path(&newpath, NULL,
4274							  bus->path_id,
4275							  target->target_id,
4276							  device->lun_id);
4277			else
4278				status = CAM_REQ_CMP_ERR;
4279
4280			if (status == CAM_REQ_CMP) {
4281
4282				/*
4283				 * Allow transfer negotiation to occur in a
4284				 * tag free environment.
4285				 */
4286				if (async_code == AC_SENT_BDR
4287				  || async_code == AC_BUS_RESET)
4288					xpt_toggle_tags(&newpath);
4289
4290				if (async_code == AC_INQ_CHANGED) {
4291					/*
4292					 * We've sent a start unit command, or
4293					 * something similar, to a device that
4294					 * may have caused its inquiry data to
4295					 * change. So we re-scan the device to
4296					 * refresh the inquiry data for it.
4297					 */
4298					xpt_scan_lun(newpath.periph, &newpath,
4299						     CAM_EXPECT_INQ_CHANGE,
4300						     NULL);
4301				}
4302				xpt_release_path(&newpath);
4303			} else if (async_code == AC_LOST_DEVICE) {
4304				device->flags |= CAM_DEV_UNCONFIGURED;
4305			} else if (async_code == AC_TRANSFER_NEG) {
4306				struct ccb_trans_settings *settings;
4307
4308				settings =
4309				    (struct ccb_trans_settings *)async_arg;
4310				xpt_set_transfer_settings(settings, device,
4311							  /*async_update*/TRUE);
4312			}
4313
4314			xpt_async_bcast(&device->asyncs,
4315					async_code,
4316					path,
4317					async_arg);
4318		}
4319	}
4320
4321	/*
4322	 * If this wasn't a fully wildcarded async, tell all
4323	 * clients that want all async events.
4324	 */
4325	if (bus != xpt_periph->path->bus)
4326		xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
4327				path, async_arg);
4328	splx(s);
4329}
4330
4331static void
4332xpt_async_bcast(struct async_list *async_head,
4333		u_int32_t async_code,
4334		struct cam_path *path, void *async_arg)
4335{
4336	struct async_node *cur_entry;
4337
4338	cur_entry = SLIST_FIRST(async_head);
4339	while (cur_entry != NULL) {
4340		struct async_node *next_entry;
4341		/*
4342		 * Grab the next list entry before we call the current
4343		 * entry's callback.  This is because the callback function
4344		 * can delete its async callback entry.
4345		 */
4346		next_entry = SLIST_NEXT(cur_entry, links);
4347		if ((cur_entry->event_enable & async_code) != 0)
4348			cur_entry->callback(cur_entry->callback_arg,
4349					    async_code, path,
4350					    async_arg);
4351		cur_entry = next_entry;
4352	}
4353}
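
/*
 * Sketch of how a callback lands on one of these async lists: a
 * peripheral driver registers interest with an XPT_SASYNC_CB request
 * ("mydriver_async" is hypothetical).
 */
#if 0
	struct ccb_setasync csa;

	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE | AC_SENT_BDR | AC_BUS_RESET;
	csa.callback = mydriver_async;
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);
#endif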
4354
4355u_int32_t
4356xpt_freeze_devq(struct cam_path *path, u_int count)
4357{
4358	int s;
4359	struct ccb_hdr *ccbh;
4360
4361	s = splcam();
4362	path->device->qfrozen_cnt += count;
4363
4364	/*
4365	 * Mark the last CCB in the queue as needing
4366	 * to be requeued if the driver hasn't
4367	 * changed its state yet.  This fixes a race
4368	 * where a ccb is just about to be queued to
4369	 * a controller driver when its interrupt routine
4370	 * freezes the queue.  To completely close the
4371	 * hole, controller drivers must check to see
4372	 * if a ccb's status is still CAM_REQ_INPROG
4373	 * under spl protection just before they queue
4374	 * the CCB.  See ahc_action/ahc_freeze_devq for
4375	 * an example.
4376	 */
4377	ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
4378	if (ccbh && ccbh->status == CAM_REQ_INPROG)
4379		ccbh->status = CAM_REQUEUE_REQ;
4380	splx(s);
4381	return (path->device->qfrozen_cnt);
4382}
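
/*
 * The controller-driver side of the race described above, sketched in
 * outline: re-check the CCB's status at splcam just before handing it
 * to the hardware, and complete it so it can be requeued if a freeze
 * got there first.
 */
#if 0
	s = splcam();
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		splx(s);
		xpt_done(ccb);
		return;
	}
	/* ... queue the CCB to the controller ... */
	splx(s);
#endif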
4383
4384u_int32_t
4385xpt_freeze_simq(struct cam_sim *sim, u_int count)
4386{
4387	sim->devq->send_queue.qfrozen_cnt += count;
4388	if (sim->devq->active_dev != NULL) {
4389		struct ccb_hdr *ccbh;
4390
4391		ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
4392				  ccb_hdr_tailq);
4393		if (ccbh && ccbh->status == CAM_REQ_INPROG)
4394			ccbh->status = CAM_REQUEUE_REQ;
4395	}
4396	return (sim->devq->send_queue.qfrozen_cnt);
4397}
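
/*
 * Typical pairing, sketched: a SIM that hits a transient resource
 * shortage freezes its queue and asks for the CCB to be requeued,
 * then thaws the queue once resources return.
 */
#if 0
	/* In the SIM's action routine, on a resource shortage: */
	xpt_freeze_simq(sim, /*count*/1);
	ccb->ccb_h.status = CAM_REQUEUE_REQ;
	xpt_done(ccb);

	/* Later, when resources are available again: */
	xpt_release_simq(sim, /*run_queue*/TRUE);
#endif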
4398
4399static void
4400xpt_release_devq_timeout(void *arg)
4401{
4402	struct cam_ed *device;
4403
4404	device = (struct cam_ed *)arg;
4405
4406	xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
4407}
4408
4409void
4410xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4411{
4412	xpt_release_devq_device(path->device, count, run_queue);
4413}
4414
4415static void
4416xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4417{
4418	int	rundevq;
4419	int	s0, s1;
4420
4421	rundevq = 0;
4422	s0 = splsoftcam();
4423	s1 = splcam();
4424	if (dev->qfrozen_cnt > 0) {
4425
4426		count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
4427		dev->qfrozen_cnt -= count;
4428		if (dev->qfrozen_cnt == 0) {
4429
4430			/*
4431			 * No longer need to wait for a successful
4432			 * command completion.
4433			 */
4434			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4435
4436			/*
4437			 * Remove any timeouts that might be scheduled
4438			 * to release this queue.
4439			 */
4440			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4441				untimeout(xpt_release_devq_timeout, dev,
4442					  dev->c_handle);
4443				dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4444			}
4445
4446			/*
4447			 * Now that we are unfrozen, schedule the
4448			 * device so any pending transactions are
4449			 * run.
4450			 */
4451			if ((dev->ccbq.queue.entries > 0)
4452			 && (xpt_schedule_dev_sendq(dev->target->bus, dev))
4453			 && (run_queue != 0)) {
4454				rundevq = 1;
4455			}
4456		}
4457	}
4458	splx(s1);
4459	if (rundevq != 0)
4460		xpt_run_dev_sendq(dev->target->bus);
4461	splx(s0);
4462}
4463
4464void
4465xpt_release_simq(struct cam_sim *sim, int run_queue)
4466{
4467	int	s;
4468	struct	camq *sendq;
4469
4470	sendq = &(sim->devq->send_queue);
4471	s = splcam();
4472	if (sendq->qfrozen_cnt > 0) {
4473
4474		sendq->qfrozen_cnt--;
4475		if (sendq->qfrozen_cnt == 0) {
4476			struct cam_eb *bus;
4477
4478			/*
4479			 * If there is a timeout scheduled to release this
4480			 * sim queue, remove it.  The queue frozen count is
4481			 * already at 0.
4482			 */
4483			if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4484				untimeout(xpt_release_simq_timeout, sim,
4485					  sim->c_handle);
4486				sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4487			}
4488			bus = xpt_find_bus(sim->path_id);
4489			splx(s);
4490
4491			if (run_queue) {
4492				/*
4493				 * Now that we are unfrozen, run the send queue.
4494				 */
4495				xpt_run_dev_sendq(bus);
4496			}
4497			xpt_release_bus(bus);
4498		} else
4499			splx(s);
4500	} else
4501		splx(s);
4502}
4503
4504static void
4505xpt_release_simq_timeout(void *arg)
4506{
4507	struct cam_sim *sim;
4508
4509	sim = (struct cam_sim *)arg;
4510	xpt_release_simq(sim, /* run_queue */ TRUE);
4511}
4512
4513void
4514xpt_done(union ccb *done_ccb)
4515{
4516	int s;
4517
4518	s = splcam();
4519
4520	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4521	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
4522		/*
4523		 * Queue up the request for handling by our SWI handler;
4524		 * this path is taken for the "non-immediate" types of ccbs.
4525		 */
4526		switch (done_ccb->ccb_h.path->periph->type) {
4527		case CAM_PERIPH_BIO:
4528			TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
4529					  sim_links.tqe);
4530			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4531			setsoftcambio();
4532			break;
4533		case CAM_PERIPH_NET:
4534			TAILQ_INSERT_TAIL(&cam_netq, &done_ccb->ccb_h,
4535					  sim_links.tqe);
4536			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4537			setsoftcamnet();
4538			break;
4539		}
4540	}
4541	splx(s);
4542}
4543
4544union ccb *
4545xpt_alloc_ccb()
4546{
4547	union ccb *new_ccb;
4548
4549	new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_WAITOK);
4550	return (new_ccb);
4551}
4552
4553void
4554xpt_free_ccb(union ccb *free_ccb)
4555{
4556	free(free_ccb, M_DEVBUF);
4557}
4558
4559
4560
4561/* Private XPT functions */
4562
4563/*
4564 * Get a CAM control block for the caller. Charge the structure to the device
4565 * referenced by the path.  If this device has no 'credits' then the
4566 * device already has the maximum number of outstanding operations under way
4567 * and we return NULL. If we don't have sufficient resources to allocate more
4568 * ccbs, we also return NULL.
4569 */
4570static union ccb *
4571xpt_get_ccb(struct cam_ed *device)
4572{
4573	union ccb *new_ccb;
4574	int s;
4575
4576	s = splsoftcam();
4577	if ((new_ccb = (union ccb *)SLIST_FIRST(&ccb_freeq)) == NULL) {
4578		new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_NOWAIT);
4579		if (new_ccb == NULL) {
4580			splx(s);
4581			return (NULL);
4582		}
4583		callout_handle_init(&new_ccb->ccb_h.timeout_ch);
4584		SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
4585				  xpt_links.sle);
4586		xpt_ccb_count++;
4587	}
4588	cam_ccbq_take_opening(&device->ccbq);
4589	SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
4590	splx(s);
4591	return (new_ccb);
4592}
4593
4594static void
4595xpt_release_bus(struct cam_eb *bus)
4596{
4597	int s;
4598
4599	s = splcam();
4600	if ((--bus->refcount == 0)
4601	 && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
4602		TAILQ_REMOVE(&xpt_busses, bus, links);
4603		bus_generation++;
4604		splx(s);
4605		free(bus, M_DEVBUF);
4606	} else
4607		splx(s);
4608}
4609
4610static struct cam_et *
4611xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4612{
4613	struct cam_et *target;
4614
4615	target = (struct cam_et *)malloc(sizeof(*target), M_DEVBUF, M_NOWAIT);
4616	if (target != NULL) {
4617		struct cam_et *cur_target;
4618
4619		TAILQ_INIT(&target->ed_entries);
4620		target->bus = bus;
4621		target->target_id = target_id;
4622		target->refcount = 1;
4623		target->generation = 0;
4624		timevalclear(&target->last_reset);
4625		/*
4626		 * Hold a reference to our parent bus so it
4627		 * will not go away before we do.
4628		 */
4629		bus->refcount++;
4630
4631		/* Insertion sort into our bus's target list */
4632		cur_target = TAILQ_FIRST(&bus->et_entries);
4633		while (cur_target != NULL && cur_target->target_id < target_id)
4634			cur_target = TAILQ_NEXT(cur_target, links);
4635
4636		if (cur_target != NULL) {
4637			TAILQ_INSERT_BEFORE(cur_target, target, links);
4638		} else {
4639			TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4640		}
4641		bus->generation++;
4642	}
4643	return (target);
4644}
4645
4646static void
4647xpt_release_target(struct cam_eb *bus, struct cam_et *target)
4648{
4649	int s;
4650
4651	s = splcam();
4652	if ((--target->refcount == 0)
4653	 && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
4654		TAILQ_REMOVE(&bus->et_entries, target, links);
4655		bus->generation++;
4656		splx(s);
4657		free(target, M_DEVBUF);
4658		xpt_release_bus(bus);
4659	} else
4660		splx(s);
4661}
4662
4663static struct cam_ed *
4664xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4665{
4666	struct	   cam_ed *device;
4667	struct	   cam_devq *devq;
4668	cam_status status;
4669
4670	/* Make space for us in the device queue on our bus */
4671	devq = bus->sim->devq;
4672	status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
4673
4674	if (status != CAM_REQ_CMP) {
4675		device = NULL;
4676	} else {
4677		device = (struct cam_ed *)malloc(sizeof(*device),
4678						 M_DEVBUF, M_NOWAIT);
4679	}
4680
4681	if (device != NULL) {
4682		struct cam_ed *cur_device;
4683
4684		cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
4685		device->alloc_ccb_entry.device = device;
4686		cam_init_pinfo(&device->send_ccb_entry.pinfo);
4687		device->send_ccb_entry.device = device;
4688		device->target = target;
4689		device->lun_id = lun_id;
4690		/* Initialize our queues */
4691		if (camq_init(&device->drvq, 0) != 0) {
4692			free(device, M_DEVBUF);
4693			return (NULL);
4694		}
4695		if (cam_ccbq_init(&device->ccbq,
4696				  bus->sim->max_dev_openings) != 0) {
4697			camq_fini(&device->drvq);
4698			free(device, M_DEVBUF);
4699			return (NULL);
4700		}
4701		SLIST_INIT(&device->asyncs);
4702		SLIST_INIT(&device->periphs);
4703		device->generation = 0;
4704		device->owner = NULL;
4705		/*
4706		 * Take the default quirk entry until we have inquiry
4707		 * data and can determine a better quirk to use.
4708		 */
4709		device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
4710		bzero(&device->inq_data, sizeof(device->inq_data));
4711		device->inq_flags = 0;
4712		device->queue_flags = 0;
4713		device->serial_num = NULL;
4714		device->serial_num_len = 0;
4715		device->qfrozen_cnt = 0;
4716		device->flags = CAM_DEV_UNCONFIGURED;
4717		device->tag_delay_count = 0;
4718		device->refcount = 1;
4719		callout_handle_init(&device->c_handle);
4720
4721		/*
4722		 * Hold a reference to our parent target so it
4723		 * will not go away before we do.
4724		 */
4725		target->refcount++;
4726
4727		/*
4728		 * XXX should be limited by number of CCBs this bus can
4729		 * do.
4730		 */
4731		xpt_max_ccbs += device->ccbq.devq_openings;
4732		/* Insertion sort into our target's device list */
4733		cur_device = TAILQ_FIRST(&target->ed_entries);
4734		while (cur_device != NULL && cur_device->lun_id < lun_id)
4735			cur_device = TAILQ_NEXT(cur_device, links);
4736		if (cur_device != NULL) {
4737			TAILQ_INSERT_BEFORE(cur_device, device, links);
4738		} else {
4739			TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4740		}
4741		target->generation++;
4742	}
4743	return (device);
4744}
4745
4746static void
4747xpt_release_device(struct cam_eb *bus, struct cam_et *target,
4748		   struct cam_ed *device)
4749{
4750	int s;
4751
4752	s = splcam();
4753	if ((--device->refcount == 0)
4754	 && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
4755		struct cam_devq *devq;
4756
4757		if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
4758		 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
4759			panic("Removing device while still queued for ccbs");
4760
4761		if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
4762				untimeout(xpt_release_devq_timeout, device,
4763					  device->c_handle);
4764
4765		TAILQ_REMOVE(&target->ed_entries, device, links);
4766		target->generation++;
4767		xpt_max_ccbs -= device->ccbq.devq_openings;
4768		/* Release our slot in the devq */
4769		devq = bus->sim->devq;
4770		cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
4771		splx(s);
4772		free(device, M_DEVBUF);
4773		xpt_release_target(bus, target);
4774	} else
4775		splx(s);
4776}
4777
4778static u_int32_t
4779xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
4780{
4781	int	s;
4782	int	diff;
4783	int	result;
4784	struct	cam_ed *dev;
4785
4786	dev = path->device;
4787	s = splsoftcam();
4788
4789	diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
4790	result = cam_ccbq_resize(&dev->ccbq, newopenings);
4791	if (result == CAM_REQ_CMP && (diff < 0)) {
4792		dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
4793	}
4794	/* Adjust the global limit */
4795	xpt_max_ccbs += diff;
4796	splx(s);
4797	return (result);
4798}
4799
4800static struct cam_eb *
4801xpt_find_bus(path_id_t path_id)
4802{
4803	struct cam_eb *bus;
4804
4805	for (bus = TAILQ_FIRST(&xpt_busses);
4806	     bus != NULL;
4807	     bus = TAILQ_NEXT(bus, links)) {
4808		if (bus->path_id == path_id) {
4809			bus->refcount++;
4810			break;
4811		}
4812	}
4813	return (bus);
4814}
4815
4816static struct cam_et *
4817xpt_find_target(struct cam_eb *bus, target_id_t	target_id)
4818{
4819	struct cam_et *target;
4820
4821	for (target = TAILQ_FIRST(&bus->et_entries);
4822	     target != NULL;
4823	     target = TAILQ_NEXT(target, links)) {
4824		if (target->target_id == target_id) {
4825			target->refcount++;
4826			break;
4827		}
4828	}
4829	return (target);
4830}
4831
4832static struct cam_ed *
4833xpt_find_device(struct cam_et *target, lun_id_t lun_id)
4834{
4835	struct cam_ed *device;
4836
4837	for (device = TAILQ_FIRST(&target->ed_entries);
4838	     device != NULL;
4839	     device = TAILQ_NEXT(device, links)) {
4840		if (device->lun_id == lun_id) {
4841			device->refcount++;
4842			break;
4843		}
4844	}
4845	return (device);
4846}
4847
4848typedef struct {
4849	union	ccb *request_ccb;
4850	struct 	ccb_pathinq *cpi;
4851	int	pending_count;
4852} xpt_scan_bus_info;
4853
4854/*
4855 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
4856 * As the scan progresses, xpt_scan_bus is used as the
4857 * callback on completion function.
4858 */
4859static void
4860xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
4861{
4862	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
4863		  ("xpt_scan_bus\n"));
4864	switch (request_ccb->ccb_h.func_code) {
4865	case XPT_SCAN_BUS:
4866	{
4867		xpt_scan_bus_info *scan_info;
4868		union	ccb *work_ccb;
4869		struct	cam_path *path;
4870		u_int	i;
4871		u_int	max_target;
4872		u_int	initiator_id;
4873
4874		/* Find out the characteristics of the bus */
4875		work_ccb = xpt_alloc_ccb();
4876		xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
4877			      request_ccb->ccb_h.pinfo.priority);
4878		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
4879		xpt_action(work_ccb);
4880		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
4881			request_ccb->ccb_h.status = work_ccb->ccb_h.status;
4882			xpt_free_ccb(work_ccb);
4883			xpt_done(request_ccb);
4884			return;
4885		}
4886
4887		if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
4888			/*
4889			 * Can't scan the bus on an adapter that
4890			 * cannot perform the initiator role.
4891			 */
4892			request_ccb->ccb_h.status = CAM_REQ_CMP;
4893			xpt_free_ccb(work_ccb);
4894			xpt_done(request_ccb);
4895			return;
4896		}
4897
4898		/* Save some state for use while we probe for devices */
4899		scan_info = (xpt_scan_bus_info *)
4900		    malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_WAITOK);
4901		scan_info->request_ccb = request_ccb;
4902		scan_info->cpi = &work_ccb->cpi;
4903
4904		/* Cache on our stack so we can work asynchronously */
4905		max_target = scan_info->cpi->max_target;
4906		initiator_id = scan_info->cpi->initiator_id;
4907
4908		/*
4909		 * Don't count the initiator if the
4910		 * initiator is addressable.
4911		 */
4912		scan_info->pending_count = max_target + 1;
4913		if (initiator_id <= max_target)
4914			scan_info->pending_count--;
4915
4916		for (i = 0; i <= max_target; i++) {
4917			cam_status status;
4918			if (i == initiator_id)
4919				continue;
4920
4921			status = xpt_create_path(&path, xpt_periph,
4922						 request_ccb->ccb_h.path_id,
4923						 i, 0);
4924			if (status != CAM_REQ_CMP) {
4925				printf("xpt_scan_bus: xpt_create_path failed"
4926				       " with status %#x, bus scan halted\n",
4927				       status);
4928				break;
4929			}
4930			work_ccb = xpt_alloc_ccb();
4931			xpt_setup_ccb(&work_ccb->ccb_h, path,
4932				      request_ccb->ccb_h.pinfo.priority);
4933			work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
4934			work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
4935			work_ccb->ccb_h.ppriv_ptr0 = scan_info;
4936			work_ccb->crcn.flags = request_ccb->crcn.flags;
4937#if 0
4938			printf("xpt_scan_bus: probing %d:%d:%d\n",
4939				request_ccb->ccb_h.path_id, i, 0);
4940#endif
4941			xpt_action(work_ccb);
4942		}
4943		break;
4944	}
4945	case XPT_SCAN_LUN:
4946	{
4947		xpt_scan_bus_info *scan_info;
4948		path_id_t path_id;
4949		target_id_t target_id;
4950		lun_id_t lun_id;
4951
4952		/* Reuse the same CCB to query if a device was really found */
4953		scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
4954		xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
4955			      request_ccb->ccb_h.pinfo.priority);
4956		request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
4957
4958		path_id = request_ccb->ccb_h.path_id;
4959		target_id = request_ccb->ccb_h.target_id;
4960		lun_id = request_ccb->ccb_h.target_lun;
4961		xpt_action(request_ccb);
4962
4963#if 0
4964		printf("xpt_scan_bus: got back probe from %d:%d:%d\n",
4965			path_id, target_id, lun_id);
4966#endif
4967
4968		if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
4969			struct cam_ed *device;
4970			struct cam_et *target;
4971			int s, phl;
4972
4973			/*
4974			 * If we already probed lun 0 successfully, or
4975			 * we have additional configured luns on this
4976			 * target that might have "gone away", go on to
4977			 * the next lun.
4978			 */
4979			target = request_ccb->ccb_h.path->target;
4980			/*
4981			 * We may touch devices that we don't
4982			 * hold references to, so ensure they
4983			 * don't disappear out from under us.
4984			 * The target above is referenced by the
4985			 * path in the request ccb.
4986			 */
4987			phl = 0;
4988			s = splcam();
4989			device = TAILQ_FIRST(&target->ed_entries);
4990			if (device != NULL) {
4991				phl = device->quirk->quirks & CAM_QUIRK_HILUNS;
4992				if (device->lun_id == 0)
4993					device = TAILQ_NEXT(device, links);
4994			}
4995			splx(s);
4996			if ((lun_id != 0) || (device != NULL)) {
4997				if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
4998					lun_id++;
4999			}
5000		} else {
5001			struct cam_ed *device;
5002
5003			device = request_ccb->ccb_h.path->device;
5004
5005			if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
5006				/* Try the next lun */
5007				if (lun_id < (CAM_SCSI2_MAXLUN-1) ||
5008				    (device->quirk->quirks & CAM_QUIRK_HILUNS))
5009					lun_id++;
5010			}
5011		}
5012
5013		xpt_free_path(request_ccb->ccb_h.path);
5014
5015		/* Check Bounds */
5016		if ((lun_id == request_ccb->ccb_h.target_lun)
5017		 || lun_id > scan_info->cpi->max_lun) {
5018			/* We're done */
5019
5020			xpt_free_ccb(request_ccb);
5021			scan_info->pending_count--;
5022			if (scan_info->pending_count == 0) {
5023				xpt_free_ccb((union ccb *)scan_info->cpi);
5024				request_ccb = scan_info->request_ccb;
5025				free(scan_info, M_TEMP);
5026				request_ccb->ccb_h.status = CAM_REQ_CMP;
5027				xpt_done(request_ccb);
5028			}
5029		} else {
5030			/* Try the next device */
5031			struct cam_path *path;
5032			cam_status status;
5033
5034			path = request_ccb->ccb_h.path;
5035			status = xpt_create_path(&path, xpt_periph,
5036						 path_id, target_id, lun_id);
5037			if (status != CAM_REQ_CMP) {
5038				printf("xpt_scan_bus: xpt_create_path failed "
5039				       "with status %#x, halting LUN scan\n",
5040			 	       status);
5041				xpt_free_ccb(request_ccb);
5042				scan_info->pending_count--;
5043				if (scan_info->pending_count == 0) {
5044					xpt_free_ccb(
5045						(union ccb *)scan_info->cpi);
5046					request_ccb = scan_info->request_ccb;
5047					free(scan_info, M_TEMP);
5048					request_ccb->ccb_h.status = CAM_REQ_CMP;
5049					xpt_done(request_ccb);
5050					break;
5051				}
5052			}
5053			xpt_setup_ccb(&request_ccb->ccb_h, path,
5054				      request_ccb->ccb_h.pinfo.priority);
5055			request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5056			request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5057			request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5058			request_ccb->crcn.flags =
5059				scan_info->request_ccb->crcn.flags;
5060#if 0
5061			xpt_print_path(path);
5062			printf("xpt_scan bus probing\n");
5063#endif
5064			xpt_action(request_ccb);
5065		}
5066		break;
5067	}
5068	default:
5069		break;
5070	}
5071}
5072
5073typedef enum {
5074	PROBE_TUR,
5075	PROBE_INQUIRY,
5076	PROBE_FULL_INQUIRY,
5077	PROBE_MODE_SENSE,
5078	PROBE_SERIAL_NUM,
5079	PROBE_TUR_FOR_NEGOTIATION
5080} probe_action;
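
/*
 * State transitions, as driven by probestart()/probedone() below:
 * PROBE_TUR leads to PROBE_INQUIRY (widening to PROBE_FULL_INQUIRY if
 * the device has more inquiry data to offer), then to PROBE_MODE_SENSE
 * if the device claims command queueing (SID_CmdQue), then to
 * PROBE_SERIAL_NUM, and finally to PROBE_TUR_FOR_NEGOTIATION when the
 * device appears to be new or changed.
 */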
5081
5082typedef enum {
5083	PROBE_INQUIRY_CKSUM	= 0x01,
5084	PROBE_SERIAL_CKSUM	= 0x02,
5085	PROBE_NO_ANNOUNCE	= 0x04
5086} probe_flags;
5087
5088typedef struct {
5089	TAILQ_HEAD(, ccb_hdr) request_ccbs;
5090	probe_action	action;
5091	union ccb	saved_ccb;
5092	probe_flags	flags;
5093	MD5_CTX		context;
5094	u_int8_t	digest[16];
5095} probe_softc;
5096
5097static void
5098xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
5099	     cam_flags flags, union ccb *request_ccb)
5100{
5101	struct ccb_pathinq cpi;
5102	cam_status status;
5103	struct cam_path *new_path;
5104	struct cam_periph *old_periph;
5105	int s;
5106
5107	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5108		  ("xpt_scan_lun\n"));
5109
5110	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
5111	cpi.ccb_h.func_code = XPT_PATH_INQ;
5112	xpt_action((union ccb *)&cpi);
5113
5114	if (cpi.ccb_h.status != CAM_REQ_CMP) {
5115		if (request_ccb != NULL) {
5116			request_ccb->ccb_h.status = cpi.ccb_h.status;
5117			xpt_done(request_ccb);
5118		}
5119		return;
5120	}
5121
5122	if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5123		/*
5124		 * Can't scan the bus on an adapter that
5125		 * cannot perform the initiator role.
5126		 */
5127		if (request_ccb != NULL) {
5128			request_ccb->ccb_h.status = CAM_REQ_CMP;
5129			xpt_done(request_ccb);
5130		}
5131		return;
5132	}
5133
5134	if (request_ccb == NULL) {
5135		request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT);
5136		if (request_ccb == NULL) {
5137			xpt_print_path(path);
5138			printf("xpt_scan_lun: can't allocate CCB, can't "
5139			       "continue\n");
5140			return;
5141		}
5142		new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT);
5143		if (new_path == NULL) {
5144			xpt_print_path(path);
5145			printf("xpt_scan_lun: can't allocate path, can't "
5146			       "continue\n");
5147			free(request_ccb, M_TEMP);
5148			return;
5149		}
5150		status = xpt_compile_path(new_path, xpt_periph,
5151					  path->bus->path_id,
5152					  path->target->target_id,
5153					  path->device->lun_id);
5154
5155		if (status != CAM_REQ_CMP) {
5156			xpt_print_path(path);
5157			printf("xpt_scan_lun: can't compile path, can't "
5158			       "continue\n");
5159			free(request_ccb, M_TEMP);
5160			free(new_path, M_TEMP);
5161			return;
5162		}
5163		xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
5164		request_ccb->ccb_h.cbfcnp = xptscandone;
5165		request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5166		request_ccb->crcn.flags = flags;
5167	}
5168
5169	s = splsoftcam();
5170	if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
5171		probe_softc *softc;
5172
5173		softc = (probe_softc *)old_periph->softc;
5174		TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5175				  periph_links.tqe);
5176	} else {
5177		status = cam_periph_alloc(proberegister, NULL, probecleanup,
5178					  probestart, "probe",
5179					  CAM_PERIPH_BIO,
5180					  request_ccb->ccb_h.path, NULL, 0,
5181					  request_ccb);
5182
5183		if (status != CAM_REQ_CMP) {
5184			xpt_print_path(path);
5185			printf("xpt_scan_lun: cam_periph_alloc returned an "
5186			       "error, can't continue probe\n");
5187			request_ccb->ccb_h.status = status;
5188			xpt_done(request_ccb);
5189		}
5190	}
5191	splx(s);
5192}
5193
5194static void
5195xptscandone(struct cam_periph *periph, union ccb *done_ccb)
5196{
5197	xpt_release_path(done_ccb->ccb_h.path);
5198	free(done_ccb->ccb_h.path, M_TEMP);
5199	free(done_ccb, M_TEMP);
5200}
5201
5202static cam_status
5203proberegister(struct cam_periph *periph, void *arg)
5204{
5205	union ccb *request_ccb;	/* CCB representing the probe request */
5206	probe_softc *softc;
5207
5208	request_ccb = (union ccb *)arg;
5209	if (periph == NULL) {
5210		printf("proberegister: periph was NULL!!\n");
5211		return(CAM_REQ_CMP_ERR);
5212	}
5213
5214	if (request_ccb == NULL) {
5215		printf("proberegister: no probe CCB, "
5216		       "can't register device\n");
5217		return(CAM_REQ_CMP_ERR);
5218	}
5219
5220	softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT);
5221
5222	if (softc == NULL) {
5223		printf("proberegister: Unable to probe new device. "
5224		       "Unable to allocate softc\n");
5225		return(CAM_REQ_CMP_ERR);
5226	}
5227	TAILQ_INIT(&softc->request_ccbs);
5228	TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5229			  periph_links.tqe);
5230	softc->flags = 0;
5231	periph->softc = softc;
5232	cam_periph_acquire(periph);
5233	/*
5234	 * Ensure we've waited at least a bus settle
5235	 * delay before attempting to probe the device.
5236	 * For HBAs that don't do bus resets, this won't make a difference.
5237	 */
5238	cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
5239				      SCSI_DELAY);
5240	probeschedule(periph);
5241	return(CAM_REQ_CMP);
5242}
5243
5244static void
5245probeschedule(struct cam_periph *periph)
5246{
5247	struct ccb_pathinq cpi;
5248	union ccb *ccb;
5249	probe_softc *softc;
5250
5251	softc = (probe_softc *)periph->softc;
5252	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5253
5254	xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
5255	cpi.ccb_h.func_code = XPT_PATH_INQ;
5256	xpt_action((union ccb *)&cpi);
5257
5258	/*
5259	 * If a device has gone away and another device, or the same one,
5260	 * is back in the same place, it should have a unit attention
5261	 * condition pending.  It will not report the unit attention in
5262	 * response to an inquiry, which may leave invalid transfer
5263	 * negotiations in effect.  The TUR will reveal the unit attention
5264	 * condition.  Only send the TUR for lun 0, since some devices
5265	 * will get confused by commands other than inquiry to non-existent
5266	 * luns.  If you think a device has gone away start your scan from
5267	 * luns.  If you think a device has gone away, start your scan from
5268	 * lun 0.  This will ensure that any bogus transfer settings are
5269	 *
5270	 * If we haven't seen the device before and the controller supports
5271	 * some kind of transfer negotiation, negotiate with the first
5272	 * sent command if no bus reset was performed at startup.  This
5273	 * ensures that the device is not confused by transfer negotiation
5274	 * settings left over by loader or BIOS action.
5275	 */
5276	if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5277	 && (ccb->ccb_h.target_lun == 0)) {
5278		softc->action = PROBE_TUR;
5279	} else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
5280	      && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
5281		proberequestdefaultnegotiation(periph);
5282		softc->action = PROBE_INQUIRY;
5283	} else {
5284		softc->action = PROBE_INQUIRY;
5285	}
5286
5287	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
5288		softc->flags |= PROBE_NO_ANNOUNCE;
5289	else
5290		softc->flags &= ~PROBE_NO_ANNOUNCE;
5291
5292	xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
5293}
5294
5295static void
5296probestart(struct cam_periph *periph, union ccb *start_ccb)
5297{
5298	/* Probe the device that our peripheral driver points to */
5299	struct ccb_scsiio *csio;
5300	probe_softc *softc;
5301
5302	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
5303
5304	softc = (probe_softc *)periph->softc;
5305	csio = &start_ccb->csio;
5306
5307	switch (softc->action) {
5308	case PROBE_TUR:
5309	case PROBE_TUR_FOR_NEGOTIATION:
5310	{
5311		scsi_test_unit_ready(csio,
5312				     /*retries*/4,
5313				     probedone,
5314				     MSG_SIMPLE_Q_TAG,
5315				     SSD_FULL_SIZE,
5316				     /*timeout*/60000);
5317		break;
5318	}
5319	case PROBE_INQUIRY:
5320	case PROBE_FULL_INQUIRY:
5321	{
5322		u_int inquiry_len;
5323		struct scsi_inquiry_data *inq_buf;
5324
5325		inq_buf = &periph->path->device->inq_data;
5326		/*
5327		 * If the device is currently configured, we calculate an
5328		 * MD5 checksum of the inquiry data, and if the serial number
5329		 * length is greater than 0, add the serial number data
5330		 * into the checksum as well.  Once the inquiry and the
5331		 * serial number check finish, we attempt to figure out
5332		 * whether we still have the same device.
5333		 */
5334		if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5335
5336			MD5Init(&softc->context);
5337			MD5Update(&softc->context, (unsigned char *)inq_buf,
5338				  sizeof(struct scsi_inquiry_data));
5339			softc->flags |= PROBE_INQUIRY_CKSUM;
5340			if (periph->path->device->serial_num_len > 0) {
5341				MD5Update(&softc->context,
5342					  periph->path->device->serial_num,
5343					  periph->path->device->serial_num_len);
5344				softc->flags |= PROBE_SERIAL_CKSUM;
5345			}
5346			MD5Final(softc->digest, &softc->context);
5347		}
5348
5349		if (softc->action == PROBE_INQUIRY)
5350			inquiry_len = SHORT_INQUIRY_LENGTH;
5351		else
5352			inquiry_len = inq_buf->additional_length + 4;
5353
5354		scsi_inquiry(csio,
5355			     /*retries*/4,
5356			     probedone,
5357			     MSG_SIMPLE_Q_TAG,
5358			     (u_int8_t *)inq_buf,
5359			     inquiry_len,
5360			     /*evpd*/FALSE,
5361			     /*page_code*/0,
5362			     SSD_MIN_SIZE,
5363			     /*timeout*/60 * 1000);
5364		break;
5365	}
5366	case PROBE_MODE_SENSE:
5367	{
5368		void  *mode_buf;
5369		int    mode_buf_len;
5370
5371		mode_buf_len = sizeof(struct scsi_mode_header_6)
5372			     + sizeof(struct scsi_mode_blk_desc)
5373			     + sizeof(struct scsi_control_page);
5374		mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT);
5375		if (mode_buf != NULL) {
5376			scsi_mode_sense(csio,
5377					/*retries*/4,
5378					probedone,
5379					MSG_SIMPLE_Q_TAG,
5380					/*dbd*/FALSE,
5381					SMS_PAGE_CTRL_CURRENT,
5382					SMS_CONTROL_MODE_PAGE,
5383					mode_buf,
5384					mode_buf_len,
5385					SSD_FULL_SIZE,
5386					/*timeout*/60000);
5387			break;
5388		}
5389		xpt_print_path(periph->path);
5390		printf("Unable to mode sense control page - malloc failure\n");
5391		softc->action = PROBE_SERIAL_NUM;
5392		/* FALLTHROUGH */
5393	}
5394	case PROBE_SERIAL_NUM:
5395	{
5396		struct scsi_vpd_unit_serial_number *serial_buf;
5397		struct cam_ed* device;
5398
5399		serial_buf = NULL;
5400		device = periph->path->device;
5401		device->serial_num = NULL;
5402		device->serial_num_len = 0;
5403
5404		if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0)
5405			serial_buf = (struct scsi_vpd_unit_serial_number *)
5406				malloc(sizeof(*serial_buf), M_TEMP, M_NOWAIT);
5407
5408		if (serial_buf != NULL) {
5409			bzero(serial_buf, sizeof(*serial_buf));
5410			scsi_inquiry(csio,
5411				     /*retries*/4,
5412				     probedone,
5413				     MSG_SIMPLE_Q_TAG,
5414				     (u_int8_t *)serial_buf,
5415				     sizeof(*serial_buf),
5416				     /*evpd*/TRUE,
5417				     SVPD_UNIT_SERIAL_NUMBER,
5418				     SSD_MIN_SIZE,
5419				     /*timeout*/60 * 1000);
5420			break;
5421		}
5422		/*
5423		 * We'll have to do without; let our probedone
5424		 * routine finish up for us.
5425		 */
5426		start_ccb->csio.data_ptr = NULL;
5427		probedone(periph, start_ccb);
5428		return;
5429	}
5430	}
5431	xpt_action(start_ccb);
5432}
5433
5434static void
5435proberequestdefaultnegotiation(struct cam_periph *periph)
5436{
5437	struct ccb_trans_settings cts;
5438
5439	xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
5440	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5441	cts.flags = CCB_TRANS_USER_SETTINGS;
5442	xpt_action((union ccb *)&cts);
5443	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5444	cts.flags &= ~CCB_TRANS_USER_SETTINGS;
5445	cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
5446	xpt_action((union ccb *)&cts);
5447}
5448
5449static void
5450probedone(struct cam_periph *periph, union ccb *done_ccb)
5451{
5452	probe_softc *softc;
5453	struct cam_path *path;
5454	u_int32_t  priority;
5455
5456	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
5457
5458	softc = (probe_softc *)periph->softc;
5459	path = done_ccb->ccb_h.path;
5460	priority = done_ccb->ccb_h.pinfo.priority;
5461
5462	switch (softc->action) {
5463	case PROBE_TUR:
5464	{
5465		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5466
5467			if (cam_periph_error(done_ccb, 0,
5468					     SF_NO_PRINT, NULL) == ERESTART)
5469				return;
5470			else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5471				/* Don't wedge the queue */
5472				xpt_release_devq(done_ccb->ccb_h.path,
5473						 /*count*/1,
5474						 /*run_queue*/TRUE);
5475		}
5476		softc->action = PROBE_INQUIRY;
5477		xpt_release_ccb(done_ccb);
5478		xpt_schedule(periph, priority);
5479		return;
5480	}
5481	case PROBE_INQUIRY:
5482	case PROBE_FULL_INQUIRY:
5483	{
5484		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5485			struct scsi_inquiry_data *inq_buf;
5486			u_int8_t periph_qual;
5487			u_int8_t periph_dtype;
5488
5489			path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
5490			inq_buf = &path->device->inq_data;
5491
5492			periph_qual = SID_QUAL(inq_buf);
5493			periph_dtype = SID_TYPE(inq_buf);
5494
5495			if (periph_dtype != T_NODEVICE) {
5496				switch(periph_qual) {
5497				case SID_QUAL_LU_CONNECTED:
5498				{
5499					u_int8_t alen;
5500
5501					/*
5502					 * We conservatively request only
5503					 * SHORT_INQUIRY_LENGTH bytes of inquiry
5504					 * information during our first try
5505					 * at sending an INQUIRY. If the device
5506					 * has more information to give,
5507					 * perform a second request specifying
5508					 * the amount of information the device
5509					 * is willing to give.
5510					 */
5511					alen = inq_buf->additional_length;
5512					if (softc->action == PROBE_INQUIRY
5513					 && alen > (SHORT_INQUIRY_LENGTH - 4)) {
5514						softc->action =
5515						    PROBE_FULL_INQUIRY;
5516						xpt_release_ccb(done_ccb);
5517						xpt_schedule(periph, priority);
5518						return;
5519					}
5520
5521					xpt_find_quirk(path->device);
5522
5523					if ((inq_buf->flags & SID_CmdQue) != 0)
5524						softc->action =
5525						    PROBE_MODE_SENSE;
5526					else
5527						softc->action =
5528						    PROBE_SERIAL_NUM;
5529
5530					path->device->flags &=
5531						~CAM_DEV_UNCONFIGURED;
5532
5533					xpt_release_ccb(done_ccb);
5534					xpt_schedule(periph, priority);
5535					return;
5536				}
5537				default:
5538					break;
5539				}
5540			}
5541		} else if (cam_periph_error(done_ccb, 0,
5542					    done_ccb->ccb_h.target_lun > 0
5543					    ? SF_RETRY_UA|SF_QUIET_IR
5544					    : SF_RETRY_UA,
5545					    &softc->saved_ccb) == ERESTART) {
5546			return;
5547		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5548			/* Don't wedge the queue */
5549			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5550					 /*run_queue*/TRUE);
5551		}
5552		/*
5553		 * If we get to this point, we got an error status back
5554		 * from the inquiry and the error status doesn't require
5555		 * automatically retrying the command.  Therefore, the
5556		 * inquiry failed.  If we had inquiry information before
5557		 * for this device, but this latest inquiry command failed,
5558		 * the device has probably gone away.  If this device isn't
5559		 * already marked unconfigured, notify the peripheral
5560		 * drivers that this device is no more.
5561		 */
5562		if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5563			/* Send the async notification. */
5564			xpt_async(AC_LOST_DEVICE, path, NULL);
5565
5566		xpt_release_ccb(done_ccb);
5567		break;
5568	}
5569	case PROBE_MODE_SENSE:
5570	{
5571		struct ccb_scsiio *csio;
5572		struct scsi_mode_header_6 *mode_hdr;
5573
5574		csio = &done_ccb->csio;
5575		mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
5576		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5577			struct scsi_control_page *page;
5578			u_int8_t *offset;
5579
5580			offset = ((u_int8_t *)&mode_hdr[1])
5581			    + mode_hdr->blk_desc_len;
5582			page = (struct scsi_control_page *)offset;
5583			path->device->queue_flags = page->queue_flags;
5584		} else if (cam_periph_error(done_ccb, 0,
5585					    SF_RETRY_UA|SF_NO_PRINT,
5586					    &softc->saved_ccb) == ERESTART) {
5587			return;
5588		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5589			/* Don't wedge the queue */
5590			xpt_release_devq(done_ccb->ccb_h.path,
5591					 /*count*/1, /*run_queue*/TRUE);
5592		}
5593		xpt_release_ccb(done_ccb);
5594		free(mode_hdr, M_TEMP);
5595		softc->action = PROBE_SERIAL_NUM;
5596		xpt_schedule(periph, priority);
5597		return;
5598	}
5599	case PROBE_SERIAL_NUM:
5600	{
5601		struct ccb_scsiio *csio;
5602		struct scsi_vpd_unit_serial_number *serial_buf;
5603		u_int32_t  priority;
5604		int changed;
5605		int have_serialnum;
5606
5607		changed = 1;
5608		have_serialnum = 0;
5609		csio = &done_ccb->csio;
5610		priority = done_ccb->ccb_h.pinfo.priority;
5611		serial_buf =
5612		    (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
5613
5614		/* Clean up from previous instance of this device */
5615		if (path->device->serial_num != NULL) {
5616			free(path->device->serial_num, M_DEVBUF);
5617			path->device->serial_num = NULL;
5618			path->device->serial_num_len = 0;
5619		}
5620
5621		if (serial_buf == NULL) {
5622			/*
5623			 * Don't process the command as it was never sent
5624			 */
5625		} else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
5626			&& (serial_buf->length > 0)) {
5627
5628			have_serialnum = 1;
5629			path->device->serial_num =
5630				(u_int8_t *)malloc((serial_buf->length + 1),
5631						   M_DEVBUF, M_NOWAIT);
5632			if (path->device->serial_num != NULL) {
5633				bcopy(serial_buf->serial_num,
5634				      path->device->serial_num,
5635				      serial_buf->length);
5636				path->device->serial_num_len =
5637				    serial_buf->length;
5638				path->device->serial_num[serial_buf->length]
5639				    = '\0';
5640			}
5641		} else if (cam_periph_error(done_ccb, 0,
5642					    SF_RETRY_UA|SF_NO_PRINT,
5643					    &softc->saved_ccb) == ERESTART) {
5644			return;
5645		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5646			/* Don't wedge the queue */
5647			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5648					 /*run_queue*/TRUE);
5649		}
5650
5651		/*
5652		 * Let's see if we have seen this device before.
5653		 */
5654		if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
5655			MD5_CTX context;
5656			u_int8_t digest[16];
5657
5658			MD5Init(&context);
5659
5660			MD5Update(&context,
5661				  (unsigned char *)&path->device->inq_data,
5662				  sizeof(struct scsi_inquiry_data));
5663
5664			if (have_serialnum)
5665				MD5Update(&context, serial_buf->serial_num,
5666					  serial_buf->length);
5667
5668			MD5Final(digest, &context);
5669			if (bcmp(softc->digest, digest, 16) == 0)
5670				changed = 0;
5671
5672			/*
5673			 * XXX Do we need to do a TUR in order to ensure
5674			 *     that the device really hasn't changed???
5675			 */
5676			if ((changed != 0)
5677			 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
5678				xpt_async(AC_LOST_DEVICE, path, NULL);
5679		}
5680		if (serial_buf != NULL)
5681			free(serial_buf, M_TEMP);
5682
5683		if (changed != 0) {
5684			/*
5685			 * We now have all the information necessary to
5686			 * safely perform transfer negotiation.  Controllers
5687			 * don't perform any negotiation or tagged queuing
5688			 * until after the first XPT_SET_TRAN_SETTINGS ccb
5689			 * is received, so for a new device we simply
5690			 * retrieve the user settings and install them
5691			 * as the current settings to get the device
5692			 * set up.
5693			 */
5694			proberequestdefaultnegotiation(periph);
5695			xpt_release_ccb(done_ccb);
5696
5697			/*
5698			 * Perform a TUR to allow the controller to
5699			 * perform any necessary transfer negotiation.
5700			 */
5701			softc->action = PROBE_TUR_FOR_NEGOTIATION;
5702			xpt_schedule(periph, priority);
5703			return;
5704		}
5705		xpt_release_ccb(done_ccb);
5706		break;
5707	}
5708	case PROBE_TUR_FOR_NEGOTIATION:
5709		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5710			/* Don't wedge the queue */
5711			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5712					 /*run_queue*/TRUE);
5713		}
5714
5715		path->device->flags &= ~CAM_DEV_UNCONFIGURED;
5716
5717		if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
5718			/* Inform the XPT that a new device has been found */
5719			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5720			xpt_action(done_ccb);
5721
5722			xpt_async(AC_FOUND_DEVICE, xpt_periph->path, done_ccb);
5723		}
5724		xpt_release_ccb(done_ccb);
5725		break;
5726	}
5727	done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5728	TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
5729	done_ccb->ccb_h.status = CAM_REQ_CMP;
5730	xpt_done(done_ccb);
5731	if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
5732		cam_periph_invalidate(periph);
5733		cam_periph_release(periph);
5734	} else {
5735		probeschedule(periph);
5736	}
5737}
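
/*
 * A minimal sketch of the identity check performed in the
 * PROBE_SERIAL_NUM case above: digest the inquiry data, extend the
 * digest with the serial number when one was returned, and compare
 * against the digest saved when the device was first probed.  The
 * helper name is hypothetical; the logic restates only what is shown
 * in probedone().
 */
static int
probe_device_unchanged(struct scsi_inquiry_data *inq_data,
		       u_int8_t *serial_num, u_int8_t serial_len,
		       u_int8_t saved_digest[16])
{
	MD5_CTX context;
	u_int8_t digest[16];

	MD5Init(&context);
	MD5Update(&context, (unsigned char *)inq_data,
		  sizeof(struct scsi_inquiry_data));
	if (serial_num != NULL)
		MD5Update(&context, serial_num, serial_len);
	MD5Final(digest, &context);

	/* Matching digests mean the same device answered this probe. */
	return (bcmp(saved_digest, digest, 16) == 0);
}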
5738
5739static void
5740probecleanup(struct cam_periph *periph)
5741{
5742	free(periph->softc, M_TEMP);
5743}
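
/*
 * The PROBE_SERIAL_NUM case above notes that a new device first has
 * its user settings installed as the current settings.  A plausible
 * sketch of that helper, assuming a CCB_TRANS_USER_SETTINGS flag that
 * parallels the CCB_TRANS_CURRENT_SETTINGS flag used elsewhere in this
 * file (the helper name is hypothetical):
 */
static void
sketch_request_default_negotiation(struct cam_periph *periph)
{
	struct ccb_trans_settings cts;

	/* Fetch the user settings... */
	xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.flags = CCB_TRANS_USER_SETTINGS;
	xpt_action((union ccb *)&cts);

	/* ...and write them back as the current settings. */
	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
	cts.flags &= ~CCB_TRANS_USER_SETTINGS;
	cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
	xpt_action((union ccb *)&cts);
}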
5744
5745static void
5746xpt_find_quirk(struct cam_ed *device)
5747{
5748	caddr_t	match;
5749
5750	match = cam_quirkmatch((caddr_t)&device->inq_data,
5751			       (caddr_t)xpt_quirk_table,
5752			       sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
5753			       sizeof(*xpt_quirk_table), scsi_inquiry_match);
5754
5755	if (match == NULL)
5756		panic("xpt_find_quirk: device didn't match wildcard entry!!");
5757
5758	device->quirk = (struct xpt_quirk_entry *)match;
5759}
5760
5761static void
5762xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
5763			  int async_update)
5764{
5765	struct	cam_sim *sim;
5766	int	qfrozen;
5767
5768	sim = cts->ccb_h.path->bus->sim;
5769	if (async_update == FALSE) {
5770		struct	scsi_inquiry_data *inq_data;
5771		struct	ccb_pathinq cpi;
5772		struct	ccb_trans_settings cur_cts;
5773
5774		if (device == NULL) {
5775			cts->ccb_h.status = CAM_PATH_INVALID;
5776			xpt_done((union ccb *)cts);
5777			return;
5778		}
5779
5780		/*
5781		 * Perform sanity checking against what the
5782		 * controller and device can do.
5783		 */
5784		xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
5785		cpi.ccb_h.func_code = XPT_PATH_INQ;
5786		xpt_action((union ccb *)&cpi);
5787		xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
5788		cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5789		cur_cts.flags = CCB_TRANS_CURRENT_SETTINGS;
5790		xpt_action((union ccb *)&cur_cts);
5791		inq_data = &device->inq_data;
5792
5793		/* Fill in any gaps in what the user gave us */
5794		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0)
5795			cts->sync_period = cur_cts.sync_period;
5796		if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
5797			cts->sync_offset = cur_cts.sync_offset;
5798		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) == 0)
5799			cts->bus_width = cur_cts.bus_width;
5800		if ((cts->valid & CCB_TRANS_DISC_VALID) == 0) {
5801			cts->flags &= ~CCB_TRANS_DISC_ENB;
5802			cts->flags |= cur_cts.flags & CCB_TRANS_DISC_ENB;
5803		}
5804		if ((cts->valid & CCB_TRANS_TQ_VALID) == 0) {
5805			cts->flags &= ~CCB_TRANS_TAG_ENB;
5806			cts->flags |= cur_cts.flags & CCB_TRANS_TAG_ENB;
5807		}
5808		if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
5809		  && (inq_data->flags & SID_Sync) == 0)
5810		 || (cpi.hba_inquiry & PI_SDTR_ABLE) == 0) {
5811			/* Force async */
5812			cts->sync_period = 0;
5813			cts->sync_offset = 0;
5814		}
5815
5816		/*
5817		 * Don't allow DT transmission rates if the
5818		 * device does not support it.
5819		 */
5820		if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
5821		 && (inq_data->spi3data & SID_SPI_CLOCK_DT) == 0
5822		 && cts->sync_period <= 0x9)
5823			cts->sync_period = 0xa;
5824
5825		switch (cts->bus_width) {
5826		case MSG_EXT_WDTR_BUS_32_BIT:
5827			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
5828			  || (inq_data->flags & SID_WBus32) != 0)
5829			 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
5830				break;
5831			/* Fall Through to 16-bit */
5832		case MSG_EXT_WDTR_BUS_16_BIT:
5833			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
5834			  || (inq_data->flags & SID_WBus16) != 0)
5835			 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
5836				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
5837				break;
5838			}
5839			/* Fall Through to 8-bit */
5840		default: /* New bus width?? */
5841		case MSG_EXT_WDTR_BUS_8_BIT:
5842			/* All targets can do this */
5843			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
5844			break;
5845		}
5846
5847		if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) {
5848			/*
5849			 * Can't tag queue without disconnection.
5850			 */
5851			cts->flags &= ~CCB_TRANS_TAG_ENB;
5852			cts->valid |= CCB_TRANS_TQ_VALID;
5853		}
5854
5855		if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
5856		 || (inq_data->flags & SID_CmdQue) == 0
5857		 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
5858		 || (device->quirk->mintags == 0)) {
5859			/*
5860			 * Can't tag on hardware that doesn't support tags,
5861			 * has them disabled, or has broken tag support.
5862			 */
5863			cts->flags &= ~CCB_TRANS_TAG_ENB;
5864		}
5865	}
5866
5867	qfrozen = FALSE;
5868	if ((cts->valid & CCB_TRANS_TQ_VALID) != 0
5869	 && (async_update == FALSE)) {
5870		int device_tagenb;
5871
5872		/*
5873		 * If we are transitioning from tags to no-tags or
5874		 * vice-versa, we need to carefully freeze and restart
5875		 * the queue so that we don't overlap tagged and non-tagged
5876		 * commands.  We also temporarily stop tags if there is
5877		 * a change in transfer negotiation settings to allow
5878		 * "tag-less" negotiation.
5879		 */
5880		if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5881		 || (device->inq_flags & SID_CmdQue) != 0)
5882			device_tagenb = TRUE;
5883		else
5884			device_tagenb = FALSE;
5885
5886		if (((cts->flags & CCB_TRANS_TAG_ENB) != 0
5887		  && device_tagenb == FALSE)
5888		 || ((cts->flags & CCB_TRANS_TAG_ENB) == 0
5889		  && device_tagenb == TRUE)) {
5890
5891			if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
5892				/*
5893				 * Delay change to use tags until after a
5894				 * few commands have gone to this device so
5895				 * the controller has time to perform transfer
5896				 * negotiations without tagged messages getting
5897				 * in the way.
5898				 */
5899				device->tag_delay_count = CAM_TAG_DELAY_COUNT;
5900				device->flags |= CAM_DEV_TAG_AFTER_COUNT;
5901			} else {
5902				xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
5903				qfrozen = TRUE;
5904		  		device->inq_flags &= ~SID_CmdQue;
5905				xpt_dev_ccbq_resize(cts->ccb_h.path,
5906						    sim->max_dev_openings);
5907				device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
5908				device->tag_delay_count = 0;
5909			}
5910		}
5911	}
5912
5913	if (async_update == FALSE) {
5914		/*
5915		 * If we are currently performing tagged transactions to
5916		 * this device and want to change its negotiation parameters,
5917		 * go non-tagged for a bit to give the controller a chance to
5918		 * negotiate unhampered by tag messages.
5919		 */
5920		if ((device->inq_flags & SID_CmdQue) != 0
5921		 && (cts->flags & (CCB_TRANS_SYNC_RATE_VALID|
5922				   CCB_TRANS_SYNC_OFFSET_VALID|
5923				   CCB_TRANS_BUS_WIDTH_VALID)) != 0)
5924			xpt_toggle_tags(cts->ccb_h.path);
5925
5926		(*(sim->sim_action))(sim, (union ccb *)cts);
5927	}
5928
5929	if (qfrozen) {
5930		struct ccb_relsim crs;
5931
5932		xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
5933			      /*priority*/1);
5934		crs.ccb_h.func_code = XPT_REL_SIMQ;
5935		crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
5936		crs.openings
5937		    = crs.release_timeout
5938		    = crs.qfrozen_cnt
5939		    = 0;
5940		xpt_action((union ccb *)&crs);
5941	}
5942}
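
/*
 * xpt_set_transfer_settings() above is normally reached via
 * xpt_action() on an XPT_SET_TRAN_SETTINGS ccb, as the probe code's
 * comments describe.  A minimal caller-side sketch (helper name
 * hypothetical; field usage follows xpt_toggle_tags() below):
 */
static void
sketch_set_tags(struct cam_path *path, int enable)
{
	struct ccb_trans_settings cts;

	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
	cts.flags = enable ? CCB_TRANS_TAG_ENB : 0;
	cts.valid = CCB_TRANS_TQ_VALID;	/* only the TQ field is valid */
	xpt_action((union ccb *)&cts);
}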
5943
5944static void
5945xpt_toggle_tags(struct cam_path *path)
5946{
5947	struct cam_ed *dev;
5948
5949	/*
5950	 * Give controllers a chance to renegotiate
5951	 * before starting tag operations.  We
5952	 * "toggle" tagged queuing off then on
5953	 * which causes the tag enable command delay
5954	 * counter to come into effect.
5955	 */
5956	dev = path->device;
5957	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5958	 || ((dev->inq_flags & SID_CmdQue) != 0
5959	  && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
5960		struct ccb_trans_settings cts;
5961
5962		xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
5963		cts.flags = 0;
5964		cts.valid = CCB_TRANS_TQ_VALID;
5965		xpt_set_transfer_settings(&cts, path->device,
5966					  /*async_update*/TRUE);
5967		cts.flags = CCB_TRANS_TAG_ENB;
5968		xpt_set_transfer_settings(&cts, path->device,
5969					  /*async_update*/TRUE);
5970	}
5971}
5972
5973static void
5974xpt_start_tags(struct cam_path *path)
5975{
5976	struct ccb_relsim crs;
5977	struct cam_ed *device;
5978	struct cam_sim *sim;
5979	int    newopenings;
5980
5981	device = path->device;
5982	sim = path->bus->sim;
5983	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
5984	xpt_freeze_devq(path, /*count*/1);
5985	device->inq_flags |= SID_CmdQue;
5986	newopenings = min(device->quirk->maxtags, sim->max_tagged_dev_openings);
5987	xpt_dev_ccbq_resize(path, newopenings);
5988	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
5989	crs.ccb_h.func_code = XPT_REL_SIMQ;
5990	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
5991	crs.openings
5992	    = crs.release_timeout
5993	    = crs.qfrozen_cnt
5994	    = 0;
5995	xpt_action((union ccb *)&crs);
5996}
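
/*
 * The XPT_REL_SIMQ sequence at the end of xpt_start_tags() also
 * appears verbatim in xpt_set_transfer_settings() above.  The shared
 * idiom, factored into a hypothetical helper: thaw the frozen device
 * queue once all outstanding ccbs drain, changing no openings or
 * timeouts.
 */
static void
sketch_release_after_qempty(struct cam_path *path)
{
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
	crs.openings = 0;
	crs.release_timeout = 0;
	crs.qfrozen_cnt = 0;
	xpt_action((union ccb *)&crs);
}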
5997
5998static int busses_to_config;
5999static int busses_to_reset;
6000
6001static int
6002xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
6003{
6004	if (bus->path_id != CAM_XPT_PATH_ID) {
6005		struct cam_path path;
6006		struct ccb_pathinq cpi;
6007		int can_negotiate;
6008
6009		busses_to_config++;
6010		xpt_compile_path(&path, NULL, bus->path_id,
6011				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
6012		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
6013		cpi.ccb_h.func_code = XPT_PATH_INQ;
6014		xpt_action((union ccb *)&cpi);
6015		can_negotiate = cpi.hba_inquiry;
6016		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6017		if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
6018		 && can_negotiate)
6019			busses_to_reset++;
6020		xpt_release_path(&path);
6021	}
6022
6023	return(1);
6024}
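
/*
 * The reset-worthiness test above is repeated in xptconfigfunc()
 * below.  As a sketch, the predicate both sites evaluate (helper name
 * hypothetical): only reset busses whose controllers permit bus resets
 * and can actually negotiate something afterwards.
 */
static int
sketch_bus_wants_reset(struct ccb_pathinq *cpi)
{
	return ((cpi->hba_misc & PIM_NOBUSRESET) == 0
	     && (cpi->hba_inquiry
	       & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0);
}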
6025
6026static int
6027xptconfigfunc(struct cam_eb *bus, void *arg)
6028{
6029	struct	cam_path *path;
6030	union	ccb *work_ccb;
6031
6032	if (bus->path_id != CAM_XPT_PATH_ID) {
6033		cam_status status;
6034		int can_negotiate;
6035
6036		work_ccb = xpt_alloc_ccb();
6037		if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
6038					      CAM_TARGET_WILDCARD,
6039					      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
6040			printf("xptconfigfunc: xpt_create_path failed with "
6041			       "status %#x for bus %d\n", status, bus->path_id);
6042			printf("xptconfigfunc: halting bus configuration\n");
6043			xpt_free_ccb(work_ccb);
6044			busses_to_config--;
6045			xpt_finishconfig(xpt_periph, NULL);
6046			return(0);
6047		}
6048		xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6049		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
6050		xpt_action(work_ccb);
6051		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
6052			printf("xptconfigfunc: CPI failed on bus %d "
6053			       "with status %d\n", bus->path_id,
6054			       work_ccb->ccb_h.status);
6055			xpt_finishconfig(xpt_periph, work_ccb);
6056			return(1);
6057		}
6058
6059		can_negotiate = work_ccb->cpi.hba_inquiry;
6060		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6061		if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
6062		 && (can_negotiate != 0)) {
6063			xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6064			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6065			work_ccb->ccb_h.cbfcnp = NULL;
6066			CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
6067				  ("Resetting Bus\n"));
6068			xpt_action(work_ccb);
6069			xpt_finishconfig(xpt_periph, work_ccb);
6070		} else {
6071			/* Act as though we performed a successful BUS RESET */
6072			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6073			xpt_finishconfig(xpt_periph, work_ccb);
6074		}
6075	}
6076
6077	return(1);
6078}
6079
6080static void
6081xpt_config(void *arg)
6082{
6083	/* Now that interrupts are enabled, go find our devices */
6084
6085#ifdef CAMDEBUG
6086	/* Setup debugging flags and path */
6087#ifdef CAM_DEBUG_FLAGS
6088	cam_dflags = CAM_DEBUG_FLAGS;
6089#else /* !CAM_DEBUG_FLAGS */
6090	cam_dflags = CAM_DEBUG_NONE;
6091#endif /* CAM_DEBUG_FLAGS */
6092#ifdef CAM_DEBUG_BUS
6093	if (cam_dflags != CAM_DEBUG_NONE) {
6094		if (xpt_create_path(&cam_dpath, xpt_periph,
6095				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
6096				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
6097			printf("xpt_config: xpt_create_path() failed for debug"
6098			       " target %d:%d:%d, debugging disabled\n",
6099			       CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
6100			cam_dflags = CAM_DEBUG_NONE;
6101		}
6102	} else
6103		cam_dpath = NULL;
6104#else /* !CAM_DEBUG_BUS */
6105	cam_dpath = NULL;
6106#endif /* CAM_DEBUG_BUS */
6107#endif /* CAMDEBUG */
6108
6109	/*
6110	 * Scan all installed busses.
6111	 */
6112	xpt_for_all_busses(xptconfigbuscountfunc, NULL);
6113
6114	if (busses_to_config == 0) {
6115		/* Call manually because we don't have any busses */
6116		xpt_finishconfig(xpt_periph, NULL);
6117	} else  {
6118		if (busses_to_reset > 0 && SCSI_DELAY >= 2000) {
6119			printf("Waiting %d seconds for SCSI "
6120			       "devices to settle\n", SCSI_DELAY/1000);
6121		}
6122		xpt_for_all_busses(xptconfigfunc, NULL);
6123	}
6124}
6125
6126/*
6127 * If the given device only has one peripheral attached to it, and if that
6128 * peripheral is the passthrough driver, announce it.  This ensures that the
6129 * user sees some sort of announcement for every peripheral in their system.
6130 */
6131static int
6132xptpassannouncefunc(struct cam_ed *device, void *arg)
6133{
6134	struct cam_periph *periph;
6135	int i;
6136
6137	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
6138	     periph = SLIST_NEXT(periph, periph_links), i++);
6139
6140	periph = SLIST_FIRST(&device->periphs);
6141	if ((i == 1)
6142	 && (strncmp(periph->periph_name, "pass", 4) == 0))
6143		xpt_announce_periph(periph, NULL);
6144
6145	return(1);
6146}
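
/*
 * xptpassannouncefunc() above and xptconfigbuscountfunc() earlier both
 * follow the traversal-callback shape used by xpt_for_all_devices()
 * and xpt_for_all_busses(): return 1 to continue walking (returning 0
 * presumably stops the traversal -- an assumption here).  A sketch of
 * another callback in the same style, counting devices into a
 * caller-supplied int, e.g. int n = 0;
 * xpt_for_all_devices(sketch_count_device, &n);
 */
static int
sketch_count_device(struct cam_ed *device, void *arg)
{
	(*(int *)arg)++;	/* one more device seen */
	return(1);		/* keep walking */
}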
6147
6148static void
6149xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
6150{
6151	struct	periph_driver **p_drv;
6152	int	i;
6153
6154	if (done_ccb != NULL) {
6155		CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
6156			  ("xpt_finishconfig\n"));
6157		switch(done_ccb->ccb_h.func_code) {
6158		case XPT_RESET_BUS:
6159			if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
6160				done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
6161				done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
6162				xpt_action(done_ccb);
6163				return;
6164			}
6165			/* FALLTHROUGH */
6166		case XPT_SCAN_BUS:
6167		default:
6168			xpt_free_path(done_ccb->ccb_h.path);
6169			busses_to_config--;
6170			break;
6171		}
6172	}
6173
6174	if (busses_to_config == 0) {
6175		/* Register all the peripheral drivers */
6176		/* XXX This will have to change when we have loadable modules */
6177		p_drv = (struct periph_driver **)periphdriver_set.ls_items;
6178		for (i = 0; p_drv[i] != NULL; i++) {
6179			(*p_drv[i]->init)();
6180		}
6181
6182		/*
6183		 * Check for devices with no "standard" peripheral driver
6184		 * attached.  For any devices like that, announce the
6185		 * passthrough driver so the user will see something.
6186		 */
6187		xpt_for_all_devices(xptpassannouncefunc, NULL);
6188
6189		/* Release our hook so that the boot can continue. */
6190		config_intrhook_disestablish(xpt_config_hook);
6191		free(xpt_config_hook, M_TEMP);
6192		xpt_config_hook = NULL;
6193	}
6194	if (done_ccb != NULL)
6195		xpt_free_ccb(done_ccb);
6196}
6197
6198static void
6199xptaction(struct cam_sim *sim, union ccb *work_ccb)
6200{
6201	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
6202
6203	switch (work_ccb->ccb_h.func_code) {
6204	/* Common cases first */
6205	case XPT_PATH_INQ:		/* Path routing inquiry */
6206	{
6207		struct ccb_pathinq *cpi;
6208
6209		cpi = &work_ccb->cpi;
6210		cpi->version_num = 1; /* XXX??? */
6211		cpi->hba_inquiry = 0;
6212		cpi->target_sprt = 0;
6213		cpi->hba_misc = 0;
6214		cpi->hba_eng_cnt = 0;
6215		cpi->max_target = 0;
6216		cpi->max_lun = 0;
6217		cpi->initiator_id = 0;
6218		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
6219		strncpy(cpi->hba_vid, "", HBA_IDLEN);
6220		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
6221		cpi->unit_number = sim->unit_number;
6222		cpi->bus_id = sim->bus_id;
6223		cpi->base_transfer_speed = 0;
6224		cpi->ccb_h.status = CAM_REQ_CMP;
6225		xpt_done(work_ccb);
6226		break;
6227	}
6228	default:
6229		work_ccb->ccb_h.status = CAM_REQ_INVALID;
6230		xpt_done(work_ccb);
6231		break;
6232	}
6233}
6234
6235/*
6236 * The xpt as a "controller" has no interrupt sources, so polling
6237 * is a no-op.
6238 */
6239static void
6240xptpoll(struct cam_sim *sim)
6241{
6242}
6243
6244/*
6245 * Should only be called by the machine interrupt dispatch routines,
6246 * so put these prototypes here instead of in the header.
6247 */
6248
6249static void
6250swi_camnet(void)
6251{
6252	camisr(&cam_netq);
6253}
6254
6255static void
6256swi_cambio(void)
6257{
6258	camisr(&cam_bioq);
6259}
6260
6261static void
6262camisr(cam_isrq_t *queue)
6263{
6264	int	s;
6265	struct	ccb_hdr *ccb_h;
6266
6267	s = splcam();
6268	while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
6269		int	runq;
6270
6271		TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
6272		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
6273		splx(s);
6274
6275		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
6276			  ("camisr"));
6277
6278		runq = FALSE;
6279
6280		if (ccb_h->flags & CAM_HIGH_POWER) {
6281			struct highpowerlist	*hphead;
6282			struct cam_ed		*device;
6283			union ccb		*send_ccb;
6284
6285			hphead = &highpowerq;
6286
6287			send_ccb = (union ccb *)STAILQ_FIRST(hphead);
6288
6289			/*
6290			 * Increment the count since this command is done.
6291			 */
6292			num_highpower++;
6293
6294			/*
6295			 * Any high powered commands queued up?
6296			 */
6297			if (send_ccb != NULL) {
6298				device = send_ccb->ccb_h.path->device;
6299
6300				STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
6301
6302				xpt_release_devq(send_ccb->ccb_h.path,
6303						 /*count*/1, /*runqueue*/TRUE);
6304			}
6305		}
6306		if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
6307			struct cam_ed *dev;
6308
6309			dev = ccb_h->path->device;
6310
6311			s = splcam();
6312			cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
6313
6314			ccb_h->path->bus->sim->devq->send_active--;
6315			ccb_h->path->bus->sim->devq->send_openings++;
6316			splx(s);
6317
6318			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
6319			 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
6320			  && (dev->ccbq.dev_active == 0))) {
6321
6322				xpt_release_devq(ccb_h->path, /*count*/1,
6323						 /*run_queue*/TRUE);
6324			}
6325
6326			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6327			 && (--dev->tag_delay_count == 0))
6328				xpt_start_tags(ccb_h->path);
6329
6330			if ((dev->ccbq.queue.entries > 0)
6331			 && (dev->qfrozen_cnt == 0)
6332			 && (device_is_send_queued(dev) == 0)) {
6333				runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
6334							      dev);
6335			}
6336		}
6337
6338		if (ccb_h->status & CAM_RELEASE_SIMQ) {
6339			xpt_release_simq(ccb_h->path->bus->sim,
6340					 /*run_queue*/TRUE);
6341			ccb_h->status &= ~CAM_RELEASE_SIMQ;
6342			runq = FALSE;
6343		}
6344
6345		if ((ccb_h->flags & CAM_DEV_QFRZDIS)
6346		 && (ccb_h->status & CAM_DEV_QFRZN)) {
6347			xpt_release_devq(ccb_h->path, /*count*/1,
6348					 /*run_queue*/TRUE);
6349			ccb_h->status &= ~CAM_DEV_QFRZN;
6350		} else if (runq) {
6351			xpt_run_dev_sendq(ccb_h->path->bus);
6352		}
6353
6354		/* Call the peripheral driver's callback */
6355		(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
6356
6357		/* Raise IPL for the loop-condition test */
6358		s = splcam();
6359	}
6360	splx(s);
6361}
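
/*
 * camisr() above shows only the release side of the high power command
 * accounting: num_highpower is incremented and one parked ccb, if any,
 * has its device queue released.  The admission side is elsewhere in
 * this file; a sketch of what it presumably looks like (an assumption,
 * with a hypothetical helper name; to be called at splcam):
 */
static int
sketch_highpower_admit(union ccb *ccb)
{
	if (num_highpower <= 0) {
		/* No slots free; park the ccb until a command completes. */
		STAILQ_INSERT_TAIL(&highpowerq, &ccb->ccb_h, xpt_links.stqe);
		return (0);
	}
	num_highpower--;		/* consume a slot */
	return (1);
}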
6362