cam_xpt.c revision 45441
1/*
2 * Implementation of the Common Access Method Transport (XPT) layer.
3 *
4 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
5 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions, and the following disclaimer,
13 *    without modification, immediately at the beginning of the file.
14 * 2. The name of the author may not be used to endorse or promote products
15 *    derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 *      $Id: cam_xpt.c,v 1.49 1999/03/14 05:15:38 ken Exp $
30 */
31#include <sys/param.h>
32#include <sys/systm.h>
33#include <sys/types.h>
34#include <sys/malloc.h>
35#include <sys/device.h>
36#include <sys/kernel.h>
37#include <sys/conf.h>
38#include <sys/fcntl.h>
39#include <sys/md5.h>
40#include <sys/devicestat.h>
41#include <sys/interrupt.h>
42
43#ifdef PC98
44#include <pc98/pc98/pc98_machdep.h>	/* geometry translation */
45#endif
46
47#include <machine/clock.h>
48#include <machine/ipl.h>
49
50#include <cam/cam.h>
51#include <cam/cam_conf.h>
52#include <cam/cam_ccb.h>
53#include <cam/cam_periph.h>
54#include <cam/cam_sim.h>
55#include <cam/cam_xpt.h>
56#include <cam/cam_xpt_sim.h>
57#include <cam/cam_xpt_periph.h>
58#include <cam/cam_debug.h>
59
60#include <cam/scsi/scsi_all.h>
61#include <cam/scsi/scsi_message.h>
62#include <cam/scsi/scsi_pass.h>
63#include "opt_cam.h"
64#include "opt_scsi.h"
65
66extern	void	(*ihandlers[32]) __P((void));
67
68/* Datastructures internal to the xpt layer */
69
70/*
71 * Definition of an async handler callback block.  These are used to add
72 * SIMs and peripherals to the async callback lists.
73 */
74struct async_node {
75	SLIST_ENTRY(async_node)	links;
76	u_int32_t	event_enable;	/* Async Event enables */
77	void		(*callback)(void *arg, u_int32_t code,
78				    struct cam_path *path, void *args);
79	void		*callback_arg;
80};
81
82SLIST_HEAD(async_list, async_node);
83SLIST_HEAD(periph_list, cam_periph);
84static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;
85
86/*
87 * This is the maximum number of high powered commands (e.g. start unit)
88 * that can be outstanding at a particular time.
89 */
90#ifndef CAM_MAX_HIGHPOWER
91#define CAM_MAX_HIGHPOWER  4
92#endif
93
94/*
95 * This is the number of seconds we wait for devices to settle after a SCSI
96 * bus reset.
97 */
98#ifndef SCSI_DELAY
99#define SCSI_DELAY 2000
100#endif
101/*
102 * If someone sets this to 0, we assume that they want the minimum
103 * allowable bus settle delay.  All devices need _some_ sort of bus settle
104 * delay, so we'll set it to a minimum value of 100ms.
105 */
106#if (SCSI_DELAY == 0)
107#undef SCSI_DELAY
108#define SCSI_DELAY 100
109#endif
110
111/*
112 * Make sure the user isn't using seconds instead of milliseconds.
113 */
114#if (SCSI_DELAY < 100)
115#error "SCSI_DELAY is in milliseconds, not seconds!  Please use a larger value"
116#endif
117
118/* number of high powered commands that can go through right now */
119static int num_highpower = CAM_MAX_HIGHPOWER;
120
121/*
122 * Structure for queueing a device in a run queue.
123 * There is one run queue for allocating new ccbs,
124 * and another for sending ccbs to the controller.
125 */
126struct cam_ed_qinfo {
127	cam_pinfo pinfo;
128	struct	  cam_ed *device;
129};
130
131/*
132 * The CAM EDT (Existing Device Table) contains the device information for
133 * all devices for all busses in the system.  The table contains a
134 * cam_ed structure for each device on the bus.
135 */
136struct cam_ed {
137	TAILQ_ENTRY(cam_ed) links;
138	struct	cam_ed_qinfo alloc_ccb_entry;
139	struct	cam_ed_qinfo send_ccb_entry;
140	struct	cam_et	 *target;
141	lun_id_t	 lun_id;
142	struct	camq drvq;		/*
143					 * Queue of type drivers wanting to do
144					 * work on this device.
145					 */
146	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
147	struct	async_list asyncs;	/* Async callback info for this B/T/L */
148	struct	periph_list periphs;	/* All attached devices */
149	u_int	generation;		/* Generation number */
150	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
151	struct	xpt_quirk_entry *quirk;	/* Oddities about this device */
152					/* Storage for the inquiry data */
153	struct	scsi_inquiry_data inq_data;
154	u_int8_t	 inq_flags;	/*
155					 * Current settings for inquiry flags.
156					 * This allows us to override settings
157					 * like disconnection and tagged
158					 * queuing for a device.
159					 */
160	u_int8_t	 queue_flags;	/* Queue flags from the control page */
161	u_int8_t	 *serial_num;
162	u_int8_t	 serial_num_len;
163	u_int32_t	 qfrozen_cnt;
164	u_int32_t	 flags;
165#define CAM_DEV_UNCONFIGURED	 	0x01
166#define CAM_DEV_REL_TIMEOUT_PENDING	0x02
167#define CAM_DEV_REL_ON_COMPLETE		0x04
168#define CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
169#define CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
170#define CAM_DEV_TAG_AFTER_COUNT		0x20
171	u_int32_t	 tag_delay_count;
172#define	CAM_TAG_DELAY_COUNT		5
173	u_int32_t	 refcount;
174	struct		 callout_handle c_handle;
175};
176
177/*
178 * Each target is represented by an ET (Existing Target).  These
179 * entries are created when a target is successfully probed with an
180 * identify, and removed when a device fails to respond after a number
181 * of retries, or a bus rescan finds the device missing.
182 */
183struct cam_et {
184	TAILQ_HEAD(, cam_ed) ed_entries;
185	TAILQ_ENTRY(cam_et) links;
186	struct	cam_eb	*bus;
187	target_id_t	target_id;
188	u_int32_t	refcount;
189	u_int		generation;
190};
191
192/*
193 * Each bus is represented by an EB (Existing Bus).  These entries
194 * are created by calls to xpt_bus_register and deleted by calls to
195 * xpt_bus_deregister.
196 */
197struct cam_eb {
198	TAILQ_HEAD(, cam_et) et_entries;
199	TAILQ_ENTRY(cam_eb)  links;
200	path_id_t	     path_id;
201	struct cam_sim	     *sim;
202	u_int32_t	     flags;
203#define	CAM_EB_RUNQ_SCHEDULED	0x01
204	u_int32_t	     refcount;
205	u_int		     generation;
206};
207
208struct cam_path {
209	struct cam_periph *periph;
210	struct cam_eb	  *bus;
211	struct cam_et	  *target;
212	struct cam_ed	  *device;
213};
214
215struct xpt_quirk_entry {
216	struct scsi_inquiry_pattern inq_pat;
217	u_int8_t quirks;
218#define	CAM_QUIRK_NOLUNS	0x01
219#define	CAM_QUIRK_NOSERIAL	0x02
220	u_int8_t mintags;
221	u_int8_t maxtags;
222};
223
224typedef enum {
225	XPT_FLAG_OPEN		= 0x01
226} xpt_flags;
227
228struct xpt_softc {
229	xpt_flags	flags;
230	u_int32_t	generation;
231#ifdef DEVFS
232	void		*xpt_devfs_token;
233	void		*ctl_devfs_token;
234#endif
235};
236
237static const char quantum[] = "QUANTUM";
238static const char sony[] = "SONY";
239static const char west_digital[] = "WDIGTL";
240static const char samsung[] = "SAMSUNG";
241static const char seagate[] = "SEAGATE";
242
243static struct xpt_quirk_entry xpt_quirk_table[] =
244{
245	{
246		/* Reports QUEUE FULL for temporary resource shortages */
247		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
248		/*quirks*/0, /*mintags*/24, /*maxtags*/32
249	},
250	{
251		/* Reports QUEUE FULL for temporary resource shortages */
252		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
253		/*quirks*/0, /*mintags*/24, /*maxtags*/32
254	},
255	{
256		/* Reports QUEUE FULL for temporary resource shortages */
257		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
258		/*quirks*/0, /*mintags*/24, /*maxtags*/32
259	},
260	{
261		/* Broken tagged queuing drive */
262		{ T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
263		/*quirks*/0, /*mintags*/0, /*maxtags*/0
264	},
265	{
266		/* Broken tagged queuing drive */
267		{ T_DIRECT, SIP_MEDIA_FIXED, "MICROP", "3391*", "x43h" },
268		/*quirks*/0, /*mintags*/0, /*maxtags*/0
269	},
270	{
271		/*
272		 * Unfortunately, the Quantum Atlas III has the same
273		 * problem as the Atlas II drives above.
274		 * Reported by: "Johan Granlund" <johan@granlund.nu>
275		 *
276		 * For future reference, the drive with the problem was:
277		 * QUANTUM QM39100TD-SW N1B0
278		 *
279		 * It's possible that Quantum will fix the problem in later
280		 * firmware revisions.  If that happens, the quirk entry
281		 * will need to be made specific to the firmware revisions
282		 * with the problem.
283		 *
284		 */
285		/* Reports QUEUE FULL for temporary resource shortages */
286		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
287		/*quirks*/0, /*mintags*/24, /*maxtags*/32
288	},
289	{
290		/*
291		 * 18 Gig Atlas III, same problem as the 9G version.
292		 * Reported by: Andre Albsmeier
293		 *		<andre.albsmeier@mchp.siemens.de>
294		 *
295		 * For future reference, the drive with the problem was:
296		 * QUANTUM QM318000TD-S N491
297		 */
298		/* Reports QUEUE FULL for temporary resource shortages */
299		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
300		/*quirks*/0, /*mintags*/24, /*maxtags*/32
301	},
302	{
303		/*
304		 * Broken tagged queuing drive
305		 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
306		 *         and: Martin Renters <martin@tdc.on.ca>
307		 */
308		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
309		/*quirks*/0, /*mintags*/0, /*maxtags*/0
310	},
311		/*
312		 * The Seagate Medalist Pro drives have very poor write
313		 * performance with anything more than 2 tags.
314		 *
315		 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
316		 * Drive:  <SEAGATE ST36530N 1444>
317		 *
318		 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
319		 * Drive:  <SEAGATE ST34520W 1281>
320		 *
321		 * No one has actually reported that the 9G version
322		 * (ST39140*) of the Medalist Pro has the same problem, but
323		 * we're assuming that it does because the 4G and 6.5G
324		 * versions of the drive are broken.
325		 */
326	{
327		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
328		/*quirks*/0, /*mintags*/2, /*maxtags*/2
329	},
330	{
331		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
332		/*quirks*/0, /*mintags*/2, /*maxtags*/2
333	},
334	{
335		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
336		/*quirks*/0, /*mintags*/2, /*maxtags*/2
337	},
338	{
339		/*
340		 * Slow when tagged queueing is enabled.  Write performance
341		 * steadily drops off with more and more concurrent
342		 * transactions.  Best sequential write performance with
343		 * tagged queueing turned off and write caching turned on.
344		 *
345		 * PR:  kern/10398
346		 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
347		 * Drive:  DCAS-34330 w/ "S65A" firmware.
348		 *
349		 * The drive with the problem had the "S65A" firmware
350		 * revision, and has also been reported (by Stephen J.
351		 * Roznowski <sjr@home.net>) for a drive with the "S61A"
352		 * firmware revision.
353		 *
354		 * Although no one has reported problems with the 2 gig
355		 * version of the DCAS drive, the assumption is that it
356		 * has the same problems as the 4 gig version.  Therefore
357		 * this quirk entries disables tagged queueing for all
358		 * DCAS drives.
359		 */
360		{ T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
361		/*quirks*/0, /*mintags*/0, /*maxtags*/0
362	},
363	{
364		/* Broken tagged queuing drive */
365		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
366		/*quirks*/0, /*mintags*/0, /*maxtags*/0
367	},
368	{
369		/* Broken tagged queuing drive */
370		{ T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
371		/*quirks*/0, /*mintags*/0, /*maxtags*/0
372	},
373	{
374		/*
375		 * Broken tagged queuing drive.
376		 * Submitted by:
377		 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
378		 * in PR kern/9535
379		 */
380		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
381		/*quirks*/0, /*mintags*/0, /*maxtags*/0
382	},
383        {
384		/*
385		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
386		 * 8MB/sec.)
387		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
388		 * Best performance with these drives is achieved with
389		 * tagged queueing turned off, and write caching turned on.
390		 */
391		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
392		/*quirks*/0, /*mintags*/0, /*maxtags*/0
393        },
394        {
395		/*
396		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
397		 * 8MB/sec.)
398		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
399		 * Best performance with these drives is achieved with
400		 * tagged queueing turned off, and write caching turned on.
401		 */
402		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
403		/*quirks*/0, /*mintags*/0, /*maxtags*/0
404        },
405	{
406		/*
407		 * Doesn't handle queue full condition correctly,
408		 * so we need to limit maxtags to what the device
409		 * can handle instead of determining this automatically.
410		 */
411		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
412		/*quirks*/0, /*mintags*/2, /*maxtags*/32
413	},
414	{
415		/* Really only one LUN */
416		{
417			T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA*", "*"
418		},
419		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
420	},
421	{
422		/* I can't believe we need a quirk for DPT volumes. */
423		{
424			T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE,
425			"DPT", "*", "*"
426		},
427		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
428		/*mintags*/0, /*maxtags*/255
429	},
430	{
431		/*
432		 * Many Sony CDROM drives don't like multi-LUN probing.
433		 */
434		{
435			T_CDROM, SIP_MEDIA_REMOVABLE, sony,
436			"CD-ROM CDU*", "*"
437		},
438		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
439	},
440	{
441		/*
442		 * This drive doesn't like multiple LUN probing.
443		 * Submitted by:  Parag Patel <parag@cgt.com>
444		 */
445		{
446			T_WORM, SIP_MEDIA_REMOVABLE, sony,
447			"CD-R   CDU9*", "*"
448		},
449		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
450	},
451	{
452		/*
453		 * The 8200 doesn't like multi-lun probing, and probably
454		 * don't like serial number requests either.
455		 */
456		{
457			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
458			"EXB-8200*", "*"
459		},
460		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
461	},
462	{
463		/*
464		 * This old revision of the TDC3600 is also SCSI-1, and
465		 * hangs upon serial number probing.
466		 */
467		{
468			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
469			" TDC 3600", "U07:"
470		},
471		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
472	},
473	{
474		/* Default tagged queuing parameters for all devices */
475		{
476		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
477		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
478		},
479		/*quirks*/0, /*mintags*/2, /*maxtags*/255
480	},
481};
482
483typedef enum {
484	DM_RET_COPY		= 0x01,
485	DM_RET_FLAG_MASK	= 0x0f,
486	DM_RET_NONE		= 0x00,
487	DM_RET_STOP		= 0x10,
488	DM_RET_DESCEND		= 0x20,
489	DM_RET_ERROR		= 0x30,
490	DM_RET_ACTION_MASK	= 0xf0
491} dev_match_ret;
492
493typedef enum {
494	XPT_DEPTH_BUS,
495	XPT_DEPTH_TARGET,
496	XPT_DEPTH_DEVICE,
497	XPT_DEPTH_PERIPH
498} xpt_traverse_depth;
499
500struct xpt_traverse_config {
501	xpt_traverse_depth	depth;
502	void			*tr_func;
503	void			*tr_arg;
504};
505
506typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
507typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
508typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
509typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
510typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
511
512/* Transport layer configuration information */
513static struct xpt_softc xsoftc;
514
515/* Queues for our software interrupt handler */
516typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
517static cam_isrq_t cam_bioq;
518static cam_isrq_t cam_netq;
519
520/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
521static SLIST_HEAD(,ccb_hdr) ccb_freeq;
522static u_int xpt_max_ccbs;	/*
523				 * Maximum size of ccb pool.  Modified as
524				 * devices are added/removed or have their
525				 * opening counts changed.
526				 */
527static u_int xpt_ccb_count;	/* Current count of allocated ccbs */
528
529static struct cam_periph *xpt_periph;
530
531static periph_init_t xpt_periph_init;
532
533static periph_init_t probe_periph_init;
534
535static struct periph_driver xpt_driver =
536{
537	xpt_periph_init, "xpt",
538	TAILQ_HEAD_INITIALIZER(xpt_driver.units)
539};
540
541static struct periph_driver probe_driver =
542{
543	probe_periph_init, "probe",
544	TAILQ_HEAD_INITIALIZER(probe_driver.units)
545};
546
547DATA_SET(periphdriver_set, xpt_driver);
548DATA_SET(periphdriver_set, probe_driver);
549
550#define XPT_CDEV_MAJOR 104
551
552static d_open_t xptopen;
553static d_close_t xptclose;
554static d_ioctl_t xptioctl;
555
556static struct cdevsw xpt_cdevsw =
557{
558	/*d_open*/	xptopen,
559	/*d_close*/	xptclose,
560	/*d_read*/	noread,
561	/*d_write*/	nowrite,
562	/*d_ioctl*/	xptioctl,
563	/*d_stop*/	nostop,
564	/*d_reset*/	noreset,
565	/*d_devtotty*/	nodevtotty,
566	/*d_poll*/	NULL,
567	/*d_mmap*/	nommap,
568	/*d_strategy*/	nostrategy,
569	/*d_name*/	"xpt",
570	/*d_spare*/	NULL,
571	/*d_maj*/	-1,
572	/*d_dump*/	nodump,
573	/*d_psize*/	nopsize,
574	/*d_flags*/	0,
575	/*d_maxio*/	0,
576	/*b_maj*/	-1
577};
578
579static struct intr_config_hook *xpt_config_hook;
580
581/* Registered busses */
582static TAILQ_HEAD(,cam_eb) xpt_busses;
583static u_int bus_generation;
584
585/* Storage for debugging datastructures */
586#ifdef	CAMDEBUG
587struct cam_path *cam_dpath;
588u_int32_t cam_dflags;
589#endif
590
591#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
592#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
593#endif
594
595/*
596 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
597 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
598 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
599 */
600#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
601    || defined(CAM_DEBUG_LUN)
602#ifdef CAMDEBUG
603#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
604    || !defined(CAM_DEBUG_LUN)
605#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
606        and CAM_DEBUG_LUN"
607#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
608#else /* !CAMDEBUG */
609#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
610#endif /* CAMDEBUG */
611#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
612
613/* Our boot-time initialization hook */
614static void	xpt_init(void *);
615SYSINIT(cam, SI_SUB_CONFIGURE, SI_ORDER_SECOND, xpt_init, NULL);
616
617static cam_status	xpt_compile_path(struct cam_path *new_path,
618					 struct cam_periph *perph,
619					 path_id_t path_id,
620					 target_id_t target_id,
621					 lun_id_t lun_id);
622
623static void		xpt_release_path(struct cam_path *path);
624
625static void		xpt_async_bcast(struct async_list *async_head,
626					u_int32_t async_code,
627					struct cam_path *path,
628					void *async_arg);
629static int 	 xptnextfreebus(path_id_t startbus);
630static int	 xptpathid(const char *sim_name, int sim_unit, int sim_bus,
631			   path_id_t *nextpath);
632static union ccb *xpt_get_ccb(struct cam_ed *device);
633static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
634				  u_int32_t new_priority);
635static void	 xpt_run_dev_allocq(struct cam_eb *bus);
636static void	 xpt_run_dev_sendq(struct cam_eb *bus);
637static timeout_t xpt_release_devq_timeout;
638static timeout_t xpt_release_simq_timeout;
639static void	 xpt_release_bus(struct cam_eb *bus);
640static struct cam_et*
641		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
642static void	 xpt_release_target(struct cam_eb *bus, struct cam_et *target);
643static struct cam_ed*
644		 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
645				  lun_id_t lun_id);
646static void	 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
647				    struct cam_ed *device);
648static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
649static struct cam_eb*
650		 xpt_find_bus(path_id_t path_id);
651static struct cam_et*
652		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
653static struct cam_ed*
654		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
655static void	 xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
656static void	 xpt_scan_lun(struct cam_periph *periph,
657			      struct cam_path *path, cam_flags flags,
658			      union ccb *ccb);
659static void	 xptscandone(struct cam_periph *periph, union ccb *done_ccb);
660static xpt_busfunc_t	xptconfigbuscountfunc;
661static xpt_busfunc_t	xptconfigfunc;
662static void	 xpt_config(void *arg);
663static xpt_devicefunc_t xptpassannouncefunc;
664static void	 xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
665static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
666static swihand_t swi_camnet;
667static swihand_t swi_cambio;
668static void	 camisr(cam_isrq_t *queue);
669#if 0
670static void	 xptstart(struct cam_periph *periph, union ccb *work_ccb);
671static void	 xptasync(struct cam_periph *periph,
672			  u_int32_t code, cam_path *path);
673#endif
674static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
675				    int num_patterns, struct cam_eb *bus);
676static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
677				       int num_patterns, struct cam_ed *device);
678static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
679				       int num_patterns,
680				       struct cam_periph *periph);
681static xpt_busfunc_t	xptedtbusfunc;
682static xpt_targetfunc_t	xptedttargetfunc;
683static xpt_devicefunc_t	xptedtdevicefunc;
684static xpt_periphfunc_t	xptedtperiphfunc;
685static xpt_pdrvfunc_t	xptplistpdrvfunc;
686static xpt_periphfunc_t	xptplistperiphfunc;
687static int		xptedtmatch(struct ccb_dev_match *cdm);
688static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
689static int		xptbustraverse(struct cam_eb *start_bus,
690				       xpt_busfunc_t *tr_func, void *arg);
691static int		xpttargettraverse(struct cam_eb *bus,
692					  struct cam_et *start_target,
693					  xpt_targetfunc_t *tr_func, void *arg);
694static int		xptdevicetraverse(struct cam_et *target,
695					  struct cam_ed *start_device,
696					  xpt_devicefunc_t *tr_func, void *arg);
697static int		xptperiphtraverse(struct cam_ed *device,
698					  struct cam_periph *start_periph,
699					  xpt_periphfunc_t *tr_func, void *arg);
700static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
701					xpt_pdrvfunc_t *tr_func, void *arg);
702static int		xptpdperiphtraverse(struct periph_driver **pdrv,
703					    struct cam_periph *start_periph,
704					    xpt_periphfunc_t *tr_func,
705					    void *arg);
706static xpt_busfunc_t	xptdefbusfunc;
707static xpt_targetfunc_t	xptdeftargetfunc;
708static xpt_devicefunc_t	xptdefdevicefunc;
709static xpt_periphfunc_t	xptdefperiphfunc;
710static int		xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
711#ifdef notusedyet
712static int		xpt_for_all_targets(xpt_targetfunc_t *tr_func,
713					    void *arg);
714#endif
715static int		xpt_for_all_devices(xpt_devicefunc_t *tr_func,
716					    void *arg);
717#ifdef notusedyet
718static int		xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
719					    void *arg);
720#endif
721static xpt_devicefunc_t	xptsetasyncfunc;
722static xpt_busfunc_t	xptsetasyncbusfunc;
723static cam_status	xptregister(struct cam_periph *periph,
724				    void *arg);
725static cam_status	proberegister(struct cam_periph *periph,
726				      void *arg);
727static void	 probeschedule(struct cam_periph *probe_periph);
728static void	 probestart(struct cam_periph *periph, union ccb *start_ccb);
729static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
730static void	 probecleanup(struct cam_periph *periph);
731static void	 xpt_find_quirk(struct cam_ed *device);
732static void	 xpt_set_transfer_settings(struct ccb_trans_settings *cts,
733					   struct cam_ed *device,
734					   int async_update);
735static void	 xpt_toggle_tags(struct cam_path *path);
736static void	 xpt_start_tags(struct cam_path *path);
737static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
738					    struct cam_ed *dev);
739static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
740					   struct cam_ed *dev);
741static __inline int periph_is_queued(struct cam_periph *periph);
742static __inline int device_is_alloc_queued(struct cam_ed *device);
743static __inline int device_is_send_queued(struct cam_ed *device);
744static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
745
746static __inline int
747xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
748{
749	int retval;
750
751	if (dev->ccbq.devq_openings > 0) {
752		if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
753			cam_ccbq_resize(&dev->ccbq,
754					dev->ccbq.dev_openings
755					+ dev->ccbq.dev_active);
756			dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
757		}
758		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
759					  &dev->alloc_ccb_entry.pinfo,
760					  dev->drvq.queue_array[0]->priority);
761	} else {
762		retval = 0;
763	}
764
765	return (retval);
766}
767
768static __inline int
769xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
770{
771	int	retval;
772
773	if (dev->ccbq.dev_openings > 0) {
774		retval = xpt_schedule_dev(&bus->sim->devq->send_queue,
775					  &dev->send_ccb_entry.pinfo,
776					  dev->ccbq.queue.queue_array[0]->priority);
777	} else {
778		retval = 0;
779	}
780	return (retval);
781}
782
783static __inline int
784periph_is_queued(struct cam_periph *periph)
785{
786	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
787}
788
789static __inline int
790device_is_alloc_queued(struct cam_ed *device)
791{
792	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
793}
794
795static __inline int
796device_is_send_queued(struct cam_ed *device)
797{
798	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
799}
800
801static __inline int
802dev_allocq_is_runnable(struct cam_devq *devq)
803{
804	/*
805	 * Have work to do.
806	 * Have space to do more work.
807	 * Allowed to do work.
808	 */
809	return ((devq->alloc_queue.qfrozen_cnt == 0)
810	     && (devq->alloc_queue.entries > 0)
811	     && (devq->alloc_openings > 0));
812}
813
814static void
815xpt_periph_init()
816{
817	dev_t dev;
818
819	dev = makedev(XPT_CDEV_MAJOR, 0);
820	cdevsw_add(&dev, &xpt_cdevsw, NULL);
821}
822
823static void
824probe_periph_init()
825{
826}
827
828
829static void
830xptdone(struct cam_periph *periph, union ccb *done_ccb)
831{
832	/* Caller will release the CCB */
833	wakeup(&done_ccb->ccb_h.cbfcnp);
834}
835
836static int
837xptopen(dev_t dev, int flags, int fmt, struct proc *p)
838{
839	int unit;
840
841	unit = minor(dev) & 0xff;
842
843	/*
844	 * Only allow read-write access.
845	 */
846	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
847		return(EPERM);
848
849	/*
850	 * We don't allow nonblocking access.
851	 */
852	if ((flags & O_NONBLOCK) != 0) {
853		printf("xpt%d: can't do nonblocking accesss\n", unit);
854		return(ENODEV);
855	}
856
857	/*
858	 * We only have one transport layer right now.  If someone accesses
859	 * us via something other than minor number 1, point out their
860	 * mistake.
861	 */
862	if (unit != 0) {
863		printf("xptopen: got invalid xpt unit %d\n", unit);
864		return(ENXIO);
865	}
866
867	/* Mark ourselves open */
868	xsoftc.flags |= XPT_FLAG_OPEN;
869
870	return(0);
871}
872
873static int
874xptclose(dev_t dev, int flag, int fmt, struct proc *p)
875{
876	int unit;
877
878	unit = minor(dev) & 0xff;
879
880	/*
881	 * We only have one transport layer right now.  If someone accesses
882	 * us via something other than minor number 1, point out their
883	 * mistake.
884	 */
885	if (unit != 0) {
886		printf("xptclose: got invalid xpt unit %d\n", unit);
887		return(ENXIO);
888	}
889
890	/* Mark ourselves closed */
891	xsoftc.flags &= ~XPT_FLAG_OPEN;
892
893	return(0);
894}
895
896static int
897xptioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
898{
899	int unit, error;
900
901	error = 0;
902	unit = minor(dev) & 0xff;
903
904	/*
905	 * We only have one transport layer right now.  If someone accesses
906	 * us via something other than minor number 1, point out their
907	 * mistake.
908	 */
909	if (unit != 0) {
910		printf("xptioctl: got invalid xpt unit %d\n", unit);
911		return(ENXIO);
912	}
913
914	switch(cmd) {
915	/*
916	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
917	 * to accept CCB types that don't quite make sense to send through a
918	 * passthrough driver.
919	 */
920	case CAMIOCOMMAND: {
921		union ccb *ccb;
922		union ccb *inccb;
923
924		inccb = (union ccb *)addr;
925
926		switch(inccb->ccb_h.func_code) {
927		case XPT_SCAN_BUS:
928		case XPT_RESET_BUS:
929			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
930			 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
931				error = EINVAL;
932				break;
933			}
934			/* FALLTHROUGH */
935		case XPT_SCAN_LUN:
936		case XPT_ENG_INQ:  /* XXX not implemented yet */
937		case XPT_ENG_EXEC:
938
939			ccb = xpt_alloc_ccb();
940
941			/*
942			 * Create a path using the bus, target, and lun the
943			 * user passed in.
944			 */
945			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
946					    inccb->ccb_h.path_id,
947					    inccb->ccb_h.target_id,
948					    inccb->ccb_h.target_lun) !=
949					    CAM_REQ_CMP){
950				error = EINVAL;
951				xpt_free_ccb(ccb);
952				break;
953			}
954			/* Ensure all of our fields are correct */
955			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
956				      inccb->ccb_h.pinfo.priority);
957			xpt_merge_ccb(ccb, inccb);
958			ccb->ccb_h.cbfcnp = xptdone;
959			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
960			bcopy(ccb, inccb, sizeof(union ccb));
961			xpt_free_path(ccb->ccb_h.path);
962			xpt_free_ccb(ccb);
963			break;
964
965		case XPT_DEBUG: {
966			union ccb ccb;
967
968			/*
969			 * This is an immediate CCB, so it's okay to
970			 * allocate it on the stack.
971			 */
972
973			/*
974			 * Create a path using the bus, target, and lun the
975			 * user passed in.
976			 */
977			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
978					    inccb->ccb_h.path_id,
979					    inccb->ccb_h.target_id,
980					    inccb->ccb_h.target_lun) !=
981					    CAM_REQ_CMP){
982				error = EINVAL;
983				break;
984			}
985			/* Ensure all of our fields are correct */
986			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
987				      inccb->ccb_h.pinfo.priority);
988			xpt_merge_ccb(&ccb, inccb);
989			ccb.ccb_h.cbfcnp = xptdone;
990			xpt_action(&ccb);
991			bcopy(&ccb, inccb, sizeof(union ccb));
992			xpt_free_path(ccb.ccb_h.path);
993			break;
994
995		}
996		case XPT_DEV_MATCH: {
997			struct cam_periph_map_info mapinfo;
998			struct cam_path *old_path;
999
1000			/*
1001			 * We can't deal with physical addresses for this
1002			 * type of transaction.
1003			 */
1004			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
1005				error = EINVAL;
1006				break;
1007			}
1008
1009			/*
1010			 * Save this in case the caller had it set to
1011			 * something in particular.
1012			 */
1013			old_path = inccb->ccb_h.path;
1014
1015			/*
1016			 * We really don't need a path for the matching
1017			 * code.  The path is needed because of the
1018			 * debugging statements in xpt_action().  They
1019			 * assume that the CCB has a valid path.
1020			 */
1021			inccb->ccb_h.path = xpt_periph->path;
1022
1023			bzero(&mapinfo, sizeof(mapinfo));
1024
1025			/*
1026			 * Map the pattern and match buffers into kernel
1027			 * virtual address space.
1028			 */
1029			error = cam_periph_mapmem(inccb, &mapinfo);
1030
1031			if (error) {
1032				inccb->ccb_h.path = old_path;
1033				break;
1034			}
1035
1036			/*
1037			 * This is an immediate CCB, we can send it on directly.
1038			 */
1039			xpt_action(inccb);
1040
1041			/*
1042			 * Map the buffers back into user space.
1043			 */
1044			cam_periph_unmapmem(inccb, &mapinfo);
1045
1046			inccb->ccb_h.path = old_path;
1047
1048			error = 0;
1049			break;
1050		}
1051		default:
1052			error = EINVAL;
1053			break;
1054		}
1055		break;
1056	}
1057	/*
1058	 * This is the getpassthru ioctl. It takes a XPT_GDEVLIST ccb as input,
1059	 * with the periphal driver name and unit name filled in.  The other
1060	 * fields don't really matter as input.  The passthrough driver name
1061	 * ("pass"), and unit number are passed back in the ccb.  The current
1062	 * device generation number, and the index into the device peripheral
1063	 * driver list, and the status are also passed back.  Note that
1064	 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
1065	 * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
1066	 * (or rather should be) impossible for the device peripheral driver
1067	 * list to change since we look at the whole thing in one pass, and
1068	 * we do it with splcam protection.
1069	 *
1070	 */
1071	case CAMGETPASSTHRU: {
1072		union ccb *ccb;
1073		struct cam_periph *periph;
1074		struct periph_driver **p_drv;
1075		char   *name;
1076		int unit;
1077		int cur_generation;
1078		int base_periph_found;
1079		int splbreaknum;
1080		int s;
1081
1082		ccb = (union ccb *)addr;
1083		unit = ccb->cgdl.unit_number;
1084		name = ccb->cgdl.periph_name;
1085		/*
1086		 * Every 100 devices, we want to drop our spl protection to
1087		 * give the software interrupt handler a chance to run.
1088		 * Most systems won't run into this check, but this should
1089		 * avoid starvation in the software interrupt handler in
1090		 * large systems.
1091		 */
1092		splbreaknum = 100;
1093
1094		ccb = (union ccb *)addr;
1095
1096		base_periph_found = 0;
1097
1098		/*
1099		 * Sanity check -- make sure we don't get a null peripheral
1100		 * driver name.
1101		 */
1102		if (*ccb->cgdl.periph_name == '\0') {
1103			error = EINVAL;
1104			break;
1105		}
1106
1107		/* Keep the list from changing while we traverse it */
1108		s = splcam();
1109ptstartover:
1110		cur_generation = xsoftc.generation;
1111
1112		/* first find our driver in the list of drivers */
1113		for (p_drv = (struct periph_driver **)periphdriver_set.ls_items;
1114		     *p_drv != NULL; p_drv++)
1115			if (strcmp((*p_drv)->driver_name, name) == 0)
1116				break;
1117
1118		if (*p_drv == NULL) {
1119			splx(s);
1120			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1121			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1122			*ccb->cgdl.periph_name = '\0';
1123			ccb->cgdl.unit_number = 0;
1124			error = ENOENT;
1125			break;
1126		}
1127
1128		/*
1129		 * Run through every peripheral instance of this driver
1130		 * and check to see whether it matches the unit passed
1131		 * in by the user.  If it does, get out of the loops and
1132		 * find the passthrough driver associated with that
1133		 * peripheral driver.
1134		 */
1135		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
1136		     periph = TAILQ_NEXT(periph, unit_links)) {
1137
1138			if (periph->unit_number == unit) {
1139				break;
1140			} else if (--splbreaknum == 0) {
1141				splx(s);
1142				s = splcam();
1143				splbreaknum = 100;
1144				if (cur_generation != xsoftc.generation)
1145				       goto ptstartover;
1146			}
1147		}
1148		/*
1149		 * If we found the peripheral driver that the user passed
1150		 * in, go through all of the peripheral drivers for that
1151		 * particular device and look for a passthrough driver.
1152		 */
1153		if (periph != NULL) {
1154			struct cam_ed *device;
1155			int i;
1156
1157			base_periph_found = 1;
1158			device = periph->path->device;
1159			for (i = 0, periph = device->periphs.slh_first;
1160			     periph != NULL;
1161			     periph = periph->periph_links.sle_next, i++) {
1162				/*
1163				 * Check to see whether we have a
1164				 * passthrough device or not.
1165				 */
1166				if (strcmp(periph->periph_name, "pass") == 0) {
1167					/*
1168					 * Fill in the getdevlist fields.
1169					 */
1170					strcpy(ccb->cgdl.periph_name,
1171					       periph->periph_name);
1172					ccb->cgdl.unit_number =
1173						periph->unit_number;
1174					if (periph->periph_links.sle_next)
1175						ccb->cgdl.status =
1176							CAM_GDEVLIST_MORE_DEVS;
1177					else
1178						ccb->cgdl.status =
1179						       CAM_GDEVLIST_LAST_DEVICE;
1180					ccb->cgdl.generation =
1181						device->generation;
1182					ccb->cgdl.index = i;
1183					/*
1184					 * Fill in some CCB header fields
1185					 * that the user may want.
1186					 */
1187					ccb->ccb_h.path_id =
1188						periph->path->bus->path_id;
1189					ccb->ccb_h.target_id =
1190						periph->path->target->target_id;
1191					ccb->ccb_h.target_lun =
1192						periph->path->device->lun_id;
1193					ccb->ccb_h.status = CAM_REQ_CMP;
1194					break;
1195				}
1196			}
1197		}
1198
1199		/*
1200		 * If the periph is null here, one of two things has
1201		 * happened.  The first possibility is that we couldn't
1202		 * find the unit number of the particular peripheral driver
1203		 * that the user is asking about.  e.g. the user asks for
1204		 * the passthrough driver for "da11".  We find the list of
1205		 * "da" peripherals all right, but there is no unit 11.
1206		 * The other possibility is that we went through the list
1207		 * of peripheral drivers attached to the device structure,
1208		 * but didn't find one with the name "pass".  Either way,
1209		 * we return ENOENT, since we couldn't find something.
1210		 */
1211		if (periph == NULL) {
1212			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1213			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1214			*ccb->cgdl.periph_name = '\0';
1215			ccb->cgdl.unit_number = 0;
1216			error = ENOENT;
1217			/*
1218			 * It is unfortunate that this is even necessary,
1219			 * but there are many, many clueless users out there.
1220			 * If this is true, the user is looking for the
1221			 * passthrough driver, but doesn't have one in his
1222			 * kernel.
1223			 */
1224			if (base_periph_found == 1) {
1225				printf("xptioctl: pass driver is not in the "
1226				       "kernel\n");
1227				printf("xptioctl: put \"device pass0\" in "
1228				       "your kernel config file\n");
1229			}
1230		}
1231		splx(s);
1232		break;
1233		}
1234	default:
1235		error = ENOTTY;
1236		break;
1237	}
1238
1239	return(error);
1240}
1241
1242/* Functions accessed by the peripheral drivers */
1243static void
1244xpt_init(dummy)
1245	void *dummy;
1246{
1247	struct cam_sim *xpt_sim;
1248	struct cam_path *path;
1249	struct cam_devq;
1250	cam_status status;
1251
1252	TAILQ_INIT(&xpt_busses);
1253	TAILQ_INIT(&cam_bioq);
1254	TAILQ_INIT(&cam_netq);
1255	SLIST_INIT(&ccb_freeq);
1256	STAILQ_INIT(&highpowerq);
1257
1258	/*
1259	 * The xpt layer is, itself, the equivelent of a SIM.
1260	 * Allow 16 ccbs in the ccb pool for it.  This should
1261	 * give decent parallelism when we probe busses and
1262	 * perform other XPT functions.
1263	 */
1264	xpt_sim = (struct cam_sim *)malloc(sizeof(*xpt_sim),
1265					   M_DEVBUF, M_WAITOK);
1266	xpt_sim->sim_action = xptaction;
1267	xpt_sim->sim_name = "xpt";
1268	xpt_sim->path_id = CAM_XPT_PATH_ID;
1269	xpt_sim->bus_id = 0;
1270	xpt_sim->max_tagged_dev_openings = 0;
1271	xpt_sim->max_dev_openings = 0;
1272	xpt_sim->devq = cam_simq_alloc(16);
1273	xpt_max_ccbs = 16;
1274
1275	xpt_bus_register(xpt_sim, 0);
1276
1277	/*
1278	 * Looking at the XPT from the SIM layer, the XPT is
1279	 * the equivelent of a peripheral driver.  Allocate
1280	 * a peripheral driver entry for us.
1281	 */
1282	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
1283				      CAM_TARGET_WILDCARD,
1284				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
1285		printf("xpt_init: xpt_create_path failed with status %#x,"
1286		       " failing attach\n", status);
1287		return;
1288	}
1289
1290	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
1291			 path, NULL, 0, NULL);
1292	xpt_free_path(path);
1293
1294	xpt_sim->softc = xpt_periph;
1295
1296	/*
1297	 * Register a callback for when interrupts are enabled.
1298	 */
1299	xpt_config_hook =
1300	    (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
1301					      M_TEMP, M_NOWAIT);
1302	if (xpt_config_hook == NULL) {
1303		printf("xpt_init: Cannot malloc config hook "
1304		       "- failing attach\n");
1305		return;
1306	}
1307	bzero(xpt_config_hook, sizeof(*xpt_config_hook));
1308
1309	xpt_config_hook->ich_func = xpt_config;
1310	if (config_intrhook_establish(xpt_config_hook) != 0) {
1311		free (xpt_config_hook, M_TEMP);
1312		printf("xpt_init: config_intrhook_establish failed "
1313		       "- failing attach\n");
1314	}
1315
1316	/* Install our software interrupt handlers */
1317	register_swi(SWI_CAMNET, swi_camnet);
1318	register_swi(SWI_CAMBIO, swi_cambio);
1319}
1320
1321static cam_status
1322xptregister(struct cam_periph *periph, void *arg)
1323{
1324	if (periph == NULL) {
1325		printf("xptregister: periph was NULL!!\n");
1326		return(CAM_REQ_CMP_ERR);
1327	}
1328
1329	periph->softc = NULL;
1330
1331	xpt_periph = periph;
1332
1333	return(CAM_REQ_CMP);
1334}
1335
1336int32_t
1337xpt_add_periph(struct cam_periph *periph)
1338{
1339	struct cam_ed *device;
1340	int32_t	 status;
1341	struct periph_list *periph_head;
1342
1343	device = periph->path->device;
1344
1345	periph_head = &device->periphs;
1346
1347	status = CAM_REQ_CMP;
1348
1349	if (device != NULL) {
1350		int s;
1351
1352		/*
1353		 * Make room for this peripheral
1354		 * so it will fit in the queue
1355		 * when it's scheduled to run
1356		 */
1357		s = splsoftcam();
1358		status = camq_resize(&device->drvq,
1359				     device->drvq.array_size + 1);
1360
1361		device->generation++;
1362
1363		SLIST_INSERT_HEAD(periph_head, periph, periph_links);
1364
1365		splx(s);
1366	}
1367
1368	xsoftc.generation++;
1369
1370	return (status);
1371}
1372
1373void
1374xpt_remove_periph(struct cam_periph *periph)
1375{
1376	struct cam_ed *device;
1377
1378	device = periph->path->device;
1379
1380	if (device != NULL) {
1381		int s;
1382		struct periph_list *periph_head;
1383
1384		periph_head = &device->periphs;
1385
1386		/* Release the slot for this peripheral */
1387		s = splsoftcam();
1388		camq_resize(&device->drvq, device->drvq.array_size - 1);
1389
1390		device->generation++;
1391
1392		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
1393
1394		splx(s);
1395	}
1396
1397	xsoftc.generation++;
1398
1399}
1400
1401void
1402xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1403{
1404	int s;
1405	u_int mb;
1406	struct cam_path *path;
1407	struct ccb_trans_settings cts;
1408
1409	path = periph->path;
1410	/*
1411	 * To ensure that this is printed in one piece,
1412	 * mask out CAM interrupts.
1413	 */
1414	s = splsoftcam();
1415	printf("%s%d at %s%d bus %d target %d lun %d\n",
1416	       periph->periph_name, periph->unit_number,
1417	       path->bus->sim->sim_name,
1418	       path->bus->sim->unit_number,
1419	       path->bus->sim->bus_id,
1420	       path->target->target_id,
1421	       path->device->lun_id);
1422	printf("%s%d: ", periph->periph_name, periph->unit_number);
1423	scsi_print_inquiry(&path->device->inq_data);
1424	if ((bootverbose)
1425	 && (path->device->serial_num_len > 0)) {
1426		/* Don't wrap the screen  - print only the first 60 chars */
1427		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
1428		       periph->unit_number, path->device->serial_num);
1429	}
1430	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1431	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1432	cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1433	xpt_action((union ccb*)&cts);
1434	if (cts.ccb_h.status == CAM_REQ_CMP) {
1435		u_int speed;
1436		u_int freq;
1437
1438		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1439		  && cts.sync_offset != 0) {
1440			freq = scsi_calc_syncsrate(cts.sync_period);
1441			speed = freq;
1442		} else {
1443			freq = 0;
1444			speed = path->bus->sim->base_transfer_speed;
1445		}
1446		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
1447			speed *= (0x01 << cts.bus_width);
1448		mb = speed / 1000;
1449		if (mb > 0)
1450			printf("%s%d: %d.%03dMB/s transfers",
1451			       periph->periph_name, periph->unit_number,
1452			       mb, speed % 1000);
1453		else
1454			printf("%s%d: %dKB/s transfers", periph->periph_name,
1455			       periph->unit_number, (speed % 1000) * 1000);
1456		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1457		 && cts.sync_offset != 0) {
1458			printf(" (%d.%03dMHz, offset %d", freq / 1000,
1459			       freq % 1000, cts.sync_offset);
1460		}
1461		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
1462		 && cts.bus_width > 0) {
1463			if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1464			 && cts.sync_offset != 0) {
1465				printf(", ");
1466			} else {
1467				printf(" (");
1468			}
1469			printf("%dbit)", 8 * (0x01 << cts.bus_width));
1470		} else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1471			&& cts.sync_offset != 0) {
1472			printf(")");
1473		}
1474
1475		if (path->device->inq_flags & SID_CmdQue
1476		 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1477			printf(", Tagged Queueing Enabled");
1478		}
1479
1480		printf("\n");
1481	} else if (path->device->inq_flags & SID_CmdQue
1482   		|| path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1483		printf("%s%d: Tagged Queueing Enabled\n",
1484		       periph->periph_name, periph->unit_number);
1485	}
1486
1487	/*
1488	 * We only want to print the caller's announce string if they've
1489	 * passed one in..
1490	 */
1491	if (announce_string != NULL)
1492		printf("%s%d: %s\n", periph->periph_name,
1493		       periph->unit_number, announce_string);
1494	splx(s);
1495}
1496
1497
1498static dev_match_ret
1499xptbusmatch(struct dev_match_pattern *patterns, int num_patterns,
1500	    struct cam_eb *bus)
1501{
1502	dev_match_ret retval;
1503	int i;
1504
1505	retval = DM_RET_NONE;
1506
1507	/*
1508	 * If we aren't given something to match against, that's an error.
1509	 */
1510	if (bus == NULL)
1511		return(DM_RET_ERROR);
1512
1513	/*
1514	 * If there are no match entries, then this bus matches no
1515	 * matter what.
1516	 */
1517	if ((patterns == NULL) || (num_patterns == 0))
1518		return(DM_RET_DESCEND | DM_RET_COPY);
1519
1520	for (i = 0; i < num_patterns; i++) {
1521		struct bus_match_pattern *cur_pattern;
1522
1523		/*
1524		 * If the pattern in question isn't for a bus node, we
1525		 * aren't interested.  However, we do indicate to the
1526		 * calling routine that we should continue descending the
1527		 * tree, since the user wants to match against lower-level
1528		 * EDT elements.
1529		 */
1530		if (patterns[i].type != DEV_MATCH_BUS) {
1531			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1532				retval |= DM_RET_DESCEND;
1533			continue;
1534		}
1535
1536		cur_pattern = &patterns[i].pattern.bus_pattern;
1537
1538		/*
1539		 * If they want to match any bus node, we give them any
1540		 * device node.
1541		 */
1542		if (cur_pattern->flags == BUS_MATCH_ANY) {
1543			/* set the copy flag */
1544			retval |= DM_RET_COPY;
1545
1546			/*
1547			 * If we've already decided on an action, go ahead
1548			 * and return.
1549			 */
1550			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1551				return(retval);
1552		}
1553
1554		/*
1555		 * Not sure why someone would do this...
1556		 */
1557		if (cur_pattern->flags == BUS_MATCH_NONE)
1558			continue;
1559
1560		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1561		 && (cur_pattern->path_id != bus->path_id))
1562			continue;
1563
1564		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1565		 && (cur_pattern->bus_id != bus->sim->bus_id))
1566			continue;
1567
1568		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1569		 && (cur_pattern->unit_number != bus->sim->unit_number))
1570			continue;
1571
1572		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1573		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1574			     DEV_IDLEN) != 0))
1575			continue;
1576
1577		/*
1578		 * If we get to this point, the user definitely wants
1579		 * information on this bus.  So tell the caller to copy the
1580		 * data out.
1581		 */
1582		retval |= DM_RET_COPY;
1583
1584		/*
1585		 * If the return action has been set to descend, then we
1586		 * know that we've already seen a non-bus matching
1587		 * expression, therefore we need to further descend the tree.
1588		 * This won't change by continuing around the loop, so we
1589		 * go ahead and return.  If we haven't seen a non-bus
1590		 * matching expression, we keep going around the loop until
1591		 * we exhaust the matching expressions.  We'll set the stop
1592		 * flag once we fall out of the loop.
1593		 */
1594		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1595			return(retval);
1596	}
1597
1598	/*
1599	 * If the return action hasn't been set to descend yet, that means
1600	 * we haven't seen anything other than bus matching patterns.  So
1601	 * tell the caller to stop descending the tree -- the user doesn't
1602	 * want to match against lower level tree elements.
1603	 */
1604	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1605		retval |= DM_RET_STOP;
1606
1607	return(retval);
1608}
1609
1610static dev_match_ret
1611xptdevicematch(struct dev_match_pattern *patterns, int num_patterns,
1612	       struct cam_ed *device)
1613{
1614	dev_match_ret retval;
1615	int i;
1616
1617	retval = DM_RET_NONE;
1618
1619	/*
1620	 * If we aren't given something to match against, that's an error.
1621	 */
1622	if (device == NULL)
1623		return(DM_RET_ERROR);
1624
1625	/*
1626	 * If there are no match entries, then this device matches no
1627	 * matter what.
1628	 */
1629	if ((patterns == NULL) || (patterns == 0))
1630		return(DM_RET_DESCEND | DM_RET_COPY);
1631
1632	for (i = 0; i < num_patterns; i++) {
1633		struct device_match_pattern *cur_pattern;
1634
1635		/*
1636		 * If the pattern in question isn't for a device node, we
1637		 * aren't interested.
1638		 */
1639		if (patterns[i].type != DEV_MATCH_DEVICE) {
1640			if ((patterns[i].type == DEV_MATCH_PERIPH)
1641			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1642				retval |= DM_RET_DESCEND;
1643			continue;
1644		}
1645
1646		cur_pattern = &patterns[i].pattern.device_pattern;
1647
1648		/*
1649		 * If they want to match any device node, we give them any
1650		 * device node.
1651		 */
1652		if (cur_pattern->flags == DEV_MATCH_ANY) {
1653			/* set the copy flag */
1654			retval |= DM_RET_COPY;
1655
1656
1657			/*
1658			 * If we've already decided on an action, go ahead
1659			 * and return.
1660			 */
1661			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1662				return(retval);
1663		}
1664
1665		/*
1666		 * Not sure why someone would do this...
1667		 */
1668		if (cur_pattern->flags == DEV_MATCH_NONE)
1669			continue;
1670
1671		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1672		 && (cur_pattern->path_id != device->target->bus->path_id))
1673			continue;
1674
1675		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1676		 && (cur_pattern->target_id != device->target->target_id))
1677			continue;
1678
1679		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1680		 && (cur_pattern->target_lun != device->lun_id))
1681			continue;
1682
1683		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1684		 && (cam_quirkmatch((caddr_t)&device->inq_data,
1685				    (caddr_t)&cur_pattern->inq_pat,
1686				    1, sizeof(cur_pattern->inq_pat),
1687				    scsi_static_inquiry_match) == NULL))
1688			continue;
1689
1690		/*
1691		 * If we get to this point, the user definitely wants
1692		 * information on this device.  So tell the caller to copy
1693		 * the data out.
1694		 */
1695		retval |= DM_RET_COPY;
1696
1697		/*
1698		 * If the return action has been set to descend, then we
1699		 * know that we've already seen a peripheral matching
1700		 * expression, therefore we need to further descend the tree.
1701		 * This won't change by continuing around the loop, so we
1702		 * go ahead and return.  If we haven't seen a peripheral
1703		 * matching expression, we keep going around the loop until
1704		 * we exhaust the matching expressions.  We'll set the stop
1705		 * flag once we fall out of the loop.
1706		 */
1707		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1708			return(retval);
1709	}
1710
1711	/*
1712	 * If the return action hasn't been set to descend yet, that means
1713	 * we haven't seen any peripheral matching patterns.  So tell the
1714	 * caller to stop descending the tree -- the user doesn't want to
1715	 * match against lower level tree elements.
1716	 */
1717	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1718		retval |= DM_RET_STOP;
1719
1720	return(retval);
1721}
1722
1723/*
1724 * Match a single peripheral against any number of match patterns.
1725 */
1726static dev_match_ret
1727xptperiphmatch(struct dev_match_pattern *patterns, int num_patterns,
1728	       struct cam_periph *periph)
1729{
1730	dev_match_ret retval;
1731	int i;
1732
1733	/*
1734	 * If we aren't given something to match against, that's an error.
1735	 */
1736	if (periph == NULL)
1737		return(DM_RET_ERROR);
1738
1739	/*
1740	 * If there are no match entries, then this peripheral matches no
1741	 * matter what.
1742	 */
1743	if ((patterns == NULL) || (num_patterns == 0))
1744		return(DM_RET_STOP | DM_RET_COPY);
1745
1746	/*
1747	 * There aren't any nodes below a peripheral node, so there's no
1748	 * reason to descend the tree any further.
1749	 */
1750	retval = DM_RET_STOP;
1751
1752	for (i = 0; i < num_patterns; i++) {
1753		struct periph_match_pattern *cur_pattern;
1754
1755		/*
1756		 * If the pattern in question isn't for a peripheral, we
1757		 * aren't interested.
1758		 */
1759		if (patterns[i].type != DEV_MATCH_PERIPH)
1760			continue;
1761
1762		cur_pattern = &patterns[i].pattern.periph_pattern;
1763
1764		/*
1765		 * If they want to match on anything, then we will do so.
1766		 */
1767		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
1768			/* set the copy flag */
1769			retval |= DM_RET_COPY;
1770
1771			/*
1772			 * We've already set the return action to stop,
1773			 * since there are no nodes below peripherals in
1774			 * the tree.
1775			 */
1776			return(retval);
1777		}
1778
1779		/*
1780		 * Not sure why someone would do this...
1781		 */
1782		if (cur_pattern->flags == PERIPH_MATCH_NONE)
1783			continue;
1784
1785		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1786		 && (cur_pattern->path_id != periph->path->bus->path_id))
1787			continue;
1788
1789		/*
1790		 * For the target and lun id's, we have to make sure the
1791		 * target and lun pointers aren't NULL.  The xpt peripheral
1792		 * has a wildcard target and device.
1793		 */
1794		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
1795		 && ((periph->path->target == NULL)
1796		 ||(cur_pattern->target_id != periph->path->target->target_id)))
1797			continue;
1798
1799		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
1800		 && ((periph->path->device == NULL)
1801		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
1802			continue;
1803
1804		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
1805		 && (cur_pattern->unit_number != periph->unit_number))
1806			continue;
1807
1808		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
1809		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
1810			     DEV_IDLEN) != 0))
1811			continue;
1812
1813		/*
1814		 * If we get to this point, the user definitely wants
1815		 * information on this peripheral.  So tell the caller to
1816		 * copy the data out.
1817		 */
1818		retval |= DM_RET_COPY;
1819
1820		/*
1821		 * The return action has already been set to stop, since
1822		 * peripherals don't have any nodes below them in the EDT.
1823		 */
1824		return(retval);
1825	}
1826
1827	/*
1828	 * If we get to this point, the peripheral that was passed in
1829	 * doesn't match any of the patterns.
1830	 */
1831	return(retval);
1832}
1833
1834static int
1835xptedtbusfunc(struct cam_eb *bus, void *arg)
1836{
1837	struct ccb_dev_match *cdm;
1838	dev_match_ret retval;
1839
1840	cdm = (struct ccb_dev_match *)arg;
1841
1842	/*
1843	 * If our position is for something deeper in the tree, that means
1844	 * that we've already seen this node.  So, we keep going down.
1845	 */
1846	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1847	 && (cdm->pos.cookie.bus == bus)
1848	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1849	 && (cdm->pos.cookie.target != NULL))
1850		retval = DM_RET_DESCEND;
1851	else
1852		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
1853
1854	/*
1855	 * If we got an error, bail out of the search.
1856	 */
1857	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1858		cdm->status = CAM_DEV_MATCH_ERROR;
1859		return(0);
1860	}
1861
1862	/*
1863	 * If the copy flag is set, copy this bus out.
1864	 */
1865	if (retval & DM_RET_COPY) {
1866		int spaceleft, j;
1867
1868		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1869			sizeof(struct dev_match_result));
1870
1871		/*
1872		 * If we don't have enough space to put in another
1873		 * match result, save our position and tell the
1874		 * user there are more devices to check.
1875		 */
1876		if (spaceleft < sizeof(struct dev_match_result)) {
1877			bzero(&cdm->pos, sizeof(cdm->pos));
1878			cdm->pos.position_type =
1879				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
1880
1881			cdm->pos.cookie.bus = bus;
1882			cdm->pos.generations[CAM_BUS_GENERATION]=
1883				bus_generation;
1884			cdm->status = CAM_DEV_MATCH_MORE;
1885			return(0);
1886		}
1887		j = cdm->num_matches;
1888		cdm->num_matches++;
1889		cdm->matches[j].type = DEV_MATCH_BUS;
1890		cdm->matches[j].result.bus_result.path_id = bus->path_id;
1891		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
1892		cdm->matches[j].result.bus_result.unit_number =
1893			bus->sim->unit_number;
1894		strncpy(cdm->matches[j].result.bus_result.dev_name,
1895			bus->sim->sim_name, DEV_IDLEN);
1896	}
1897
1898	/*
1899	 * If the user is only interested in busses, there's no
1900	 * reason to descend to the next level in the tree.
1901	 */
1902	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1903		return(1);
1904
1905	/*
1906	 * If there is a target generation recorded, check it to
1907	 * make sure the target list hasn't changed.
1908	 */
1909	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1910	 && (bus == cdm->pos.cookie.bus)
1911	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1912	 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
1913	 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
1914	     bus->generation)) {
1915		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1916		return(0);
1917	}
1918
1919	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1920	 && (cdm->pos.cookie.bus == bus)
1921	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1922	 && (cdm->pos.cookie.target != NULL))
1923		return(xpttargettraverse(bus,
1924					(struct cam_et *)cdm->pos.cookie.target,
1925					 xptedttargetfunc, arg));
1926	else
1927		return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
1928}
1929
1930static int
1931xptedttargetfunc(struct cam_et *target, void *arg)
1932{
1933	struct ccb_dev_match *cdm;
1934
1935	cdm = (struct ccb_dev_match *)arg;
1936
1937	/*
1938	 * If there is a device list generation recorded, check it to
1939	 * make sure the device list hasn't changed.
1940	 */
1941	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1942	 && (cdm->pos.cookie.bus == target->bus)
1943	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1944	 && (cdm->pos.cookie.target == target)
1945	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1946	 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
1947	 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
1948	     target->generation)) {
1949		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1950		return(0);
1951	}
1952
1953	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1954	 && (cdm->pos.cookie.bus == target->bus)
1955	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1956	 && (cdm->pos.cookie.target == target)
1957	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1958	 && (cdm->pos.cookie.device != NULL))
1959		return(xptdevicetraverse(target,
1960					(struct cam_ed *)cdm->pos.cookie.device,
1961					 xptedtdevicefunc, arg));
1962	else
1963		return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
1964}
1965
1966static int
1967xptedtdevicefunc(struct cam_ed *device, void *arg)
1968{
1969
1970	struct ccb_dev_match *cdm;
1971	dev_match_ret retval;
1972
1973	cdm = (struct ccb_dev_match *)arg;
1974
1975	/*
1976	 * If our position is for something deeper in the tree, that means
1977	 * that we've already seen this node.  So, we keep going down.
1978	 */
1979	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1980	 && (cdm->pos.cookie.device == device)
1981	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1982	 && (cdm->pos.cookie.periph != NULL))
1983		retval = DM_RET_DESCEND;
1984	else
1985		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
1986					device);
1987
1988	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1989		cdm->status = CAM_DEV_MATCH_ERROR;
1990		return(0);
1991	}
1992
1993	/*
1994	 * If the copy flag is set, copy this device out.
1995	 */
1996	if (retval & DM_RET_COPY) {
1997		int spaceleft, j;
1998
1999		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2000			sizeof(struct dev_match_result));
2001
2002		/*
2003		 * If we don't have enough space to put in another
2004		 * match result, save our position and tell the
2005		 * user there are more devices to check.
2006		 */
2007		if (spaceleft < sizeof(struct dev_match_result)) {
2008			bzero(&cdm->pos, sizeof(cdm->pos));
2009			cdm->pos.position_type =
2010				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2011				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2012
2013			cdm->pos.cookie.bus = device->target->bus;
2014			cdm->pos.generations[CAM_BUS_GENERATION]=
2015				bus_generation;
2016			cdm->pos.cookie.target = device->target;
2017			cdm->pos.generations[CAM_TARGET_GENERATION] =
2018				device->target->bus->generation;
2019			cdm->pos.cookie.device = device;
2020			cdm->pos.generations[CAM_DEV_GENERATION] =
2021				device->target->generation;
2022			cdm->status = CAM_DEV_MATCH_MORE;
2023			return(0);
2024		}
2025		j = cdm->num_matches;
2026		cdm->num_matches++;
2027		cdm->matches[j].type = DEV_MATCH_DEVICE;
2028		cdm->matches[j].result.device_result.path_id =
2029			device->target->bus->path_id;
2030		cdm->matches[j].result.device_result.target_id =
2031			device->target->target_id;
2032		cdm->matches[j].result.device_result.target_lun =
2033			device->lun_id;
2034		bcopy(&device->inq_data,
2035		      &cdm->matches[j].result.device_result.inq_data,
2036		      sizeof(struct scsi_inquiry_data));
2037	}
2038
2039	/*
2040	 * If the user isn't interested in peripherals, don't descend
2041	 * the tree any further.
2042	 */
2043	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2044		return(1);
2045
2046	/*
2047	 * If there is a peripheral list generation recorded, make sure
2048	 * it hasn't changed.
2049	 */
2050	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2051	 && (device->target->bus == cdm->pos.cookie.bus)
2052	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2053	 && (device->target == cdm->pos.cookie.target)
2054	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2055	 && (device == cdm->pos.cookie.device)
2056	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2057	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2058	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2059	     device->generation)){
2060		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2061		return(0);
2062	}
2063
2064	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2065	 && (cdm->pos.cookie.bus == device->target->bus)
2066	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2067	 && (cdm->pos.cookie.target == device->target)
2068	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2069	 && (cdm->pos.cookie.device == device)
2070	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2071	 && (cdm->pos.cookie.periph != NULL))
2072		return(xptperiphtraverse(device,
2073				(struct cam_periph *)cdm->pos.cookie.periph,
2074				xptedtperiphfunc, arg));
2075	else
2076		return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2077}
2078
2079static int
2080xptedtperiphfunc(struct cam_periph *periph, void *arg)
2081{
2082	struct ccb_dev_match *cdm;
2083	dev_match_ret retval;
2084
2085	cdm = (struct ccb_dev_match *)arg;
2086
2087	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2088
2089	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2090		cdm->status = CAM_DEV_MATCH_ERROR;
2091		return(0);
2092	}
2093
2094	/*
2095	 * If the copy flag is set, copy this peripheral out.
2096	 */
2097	if (retval & DM_RET_COPY) {
2098		int spaceleft, j;
2099
2100		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2101			sizeof(struct dev_match_result));
2102
2103		/*
2104		 * If we don't have enough space to put in another
2105		 * match result, save our position and tell the
2106		 * user there are more devices to check.
2107		 */
2108		if (spaceleft < sizeof(struct dev_match_result)) {
2109			bzero(&cdm->pos, sizeof(cdm->pos));
2110			cdm->pos.position_type =
2111				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2112				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2113				CAM_DEV_POS_PERIPH;
2114
2115			cdm->pos.cookie.bus = periph->path->bus;
2116			cdm->pos.generations[CAM_BUS_GENERATION]=
2117				bus_generation;
2118			cdm->pos.cookie.target = periph->path->target;
2119			cdm->pos.generations[CAM_TARGET_GENERATION] =
2120				periph->path->bus->generation;
2121			cdm->pos.cookie.device = periph->path->device;
2122			cdm->pos.generations[CAM_DEV_GENERATION] =
2123				periph->path->target->generation;
2124			cdm->pos.cookie.periph = periph;
2125			cdm->pos.generations[CAM_PERIPH_GENERATION] =
2126				periph->path->device->generation;
2127			cdm->status = CAM_DEV_MATCH_MORE;
2128			return(0);
2129		}
2130
2131		j = cdm->num_matches;
2132		cdm->num_matches++;
2133		cdm->matches[j].type = DEV_MATCH_PERIPH;
2134		cdm->matches[j].result.periph_result.path_id =
2135			periph->path->bus->path_id;
2136		cdm->matches[j].result.periph_result.target_id =
2137			periph->path->target->target_id;
2138		cdm->matches[j].result.periph_result.target_lun =
2139			periph->path->device->lun_id;
2140		cdm->matches[j].result.periph_result.unit_number =
2141			periph->unit_number;
2142		strncpy(cdm->matches[j].result.periph_result.periph_name,
2143			periph->periph_name, DEV_IDLEN);
2144	}
2145
2146	return(1);
2147}
2148
2149static int
2150xptedtmatch(struct ccb_dev_match *cdm)
2151{
2152	int ret;
2153
2154	cdm->num_matches = 0;
2155
2156	/*
2157	 * Check the bus list generation.  If it has changed, the user
2158	 * needs to reset everything and start over.
2159	 */
2160	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2161	 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2162	 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
2163		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2164		return(0);
2165	}
2166
2167	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2168	 && (cdm->pos.cookie.bus != NULL))
2169		ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2170				     xptedtbusfunc, cdm);
2171	else
2172		ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2173
2174	/*
2175	 * If we get back 0, that means that we had to stop before fully
2176	 * traversing the EDT.  It also means that one of the subroutines
2177	 * has set the status field to the proper value.  If we get back 1,
2178	 * we've fully traversed the EDT and copied out any matching entries.
2179	 */
2180	if (ret == 1)
2181		cdm->status = CAM_DEV_MATCH_LAST;
2182
2183	return(ret);
2184}
2185
2186static int
2187xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2188{
2189	struct ccb_dev_match *cdm;
2190
2191	cdm = (struct ccb_dev_match *)arg;
2192
2193	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2194	 && (cdm->pos.cookie.pdrv == pdrv)
2195	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2196	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2197	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2198	     (*pdrv)->generation)) {
2199		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2200		return(0);
2201	}
2202
2203	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2204	 && (cdm->pos.cookie.pdrv == pdrv)
2205	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2206	 && (cdm->pos.cookie.periph != NULL))
2207		return(xptpdperiphtraverse(pdrv,
2208				(struct cam_periph *)cdm->pos.cookie.periph,
2209				xptplistperiphfunc, arg));
2210	else
2211		return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg));
2212}
2213
2214static int
2215xptplistperiphfunc(struct cam_periph *periph, void *arg)
2216{
2217	struct ccb_dev_match *cdm;
2218	dev_match_ret retval;
2219
2220	cdm = (struct ccb_dev_match *)arg;
2221
2222	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2223
2224	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2225		cdm->status = CAM_DEV_MATCH_ERROR;
2226		return(0);
2227	}
2228
2229	/*
2230	 * If the copy flag is set, copy this peripheral out.
2231	 */
2232	if (retval & DM_RET_COPY) {
2233		int spaceleft, j;
2234
2235		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2236			sizeof(struct dev_match_result));
2237
2238		/*
2239		 * If we don't have enough space to put in another
2240		 * match result, save our position and tell the
2241		 * user there are more devices to check.
2242		 */
2243		if (spaceleft < sizeof(struct dev_match_result)) {
2244			struct periph_driver **pdrv;
2245
2246			pdrv = NULL;
2247			bzero(&cdm->pos, sizeof(cdm->pos));
2248			cdm->pos.position_type =
2249				CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2250				CAM_DEV_POS_PERIPH;
2251
2252			/*
2253			 * This may look a bit non-sensical, but it is
2254			 * actually quite logical.  There are very few
2255			 * peripheral drivers, and bloating every peripheral
2256			 * structure with a pointer back to its parent
2257			 * peripheral driver linker set entry would cost
2258			 * more in the long run than doing this quick lookup.
2259			 */
2260			for (pdrv =
2261			     (struct periph_driver **)periphdriver_set.ls_items;
2262			     *pdrv != NULL; pdrv++) {
2263				if (strcmp((*pdrv)->driver_name,
2264				    periph->periph_name) == 0)
2265					break;
2266			}
2267
2268			if (pdrv == NULL) {
2269				cdm->status = CAM_DEV_MATCH_ERROR;
2270				return(0);
2271			}
2272
2273			cdm->pos.cookie.pdrv = pdrv;
2274			/*
2275			 * The periph generation slot does double duty, as
2276			 * does the periph pointer slot.  They are used for
2277			 * both edt and pdrv lookups and positioning.
2278			 */
2279			cdm->pos.cookie.periph = periph;
2280			cdm->pos.generations[CAM_PERIPH_GENERATION] =
2281				(*pdrv)->generation;
2282			cdm->status = CAM_DEV_MATCH_MORE;
2283			return(0);
2284		}
2285
2286		j = cdm->num_matches;
2287		cdm->num_matches++;
2288		cdm->matches[j].type = DEV_MATCH_PERIPH;
2289		cdm->matches[j].result.periph_result.path_id =
2290			periph->path->bus->path_id;
2291
2292		/*
2293		 * The transport layer peripheral doesn't have a target or
2294		 * lun.
2295		 */
2296		if (periph->path->target)
2297			cdm->matches[j].result.periph_result.target_id =
2298				periph->path->target->target_id;
2299		else
2300			cdm->matches[j].result.periph_result.target_id = -1;
2301
2302		if (periph->path->device)
2303			cdm->matches[j].result.periph_result.target_lun =
2304				periph->path->device->lun_id;
2305		else
2306			cdm->matches[j].result.periph_result.target_lun = -1;
2307
2308		cdm->matches[j].result.periph_result.unit_number =
2309			periph->unit_number;
2310		strncpy(cdm->matches[j].result.periph_result.periph_name,
2311			periph->periph_name, DEV_IDLEN);
2312	}
2313
2314	return(1);
2315}
2316
2317static int
2318xptperiphlistmatch(struct ccb_dev_match *cdm)
2319{
2320	int ret;
2321
2322	cdm->num_matches = 0;
2323
2324	/*
2325	 * At this point in the edt traversal function, we check the bus
2326	 * list generation to make sure that no busses have been added or
2327	 * removed since the user last sent a XPT_DEV_MATCH ccb through.
2328	 * For the peripheral driver list traversal function, however, we
2329	 * don't have to worry about new peripheral driver types coming or
2330	 * going; they're in a linker set, and therefore can't change
2331	 * without a recompile.
2332	 */
2333
2334	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2335	 && (cdm->pos.cookie.pdrv != NULL))
2336		ret = xptpdrvtraverse(
2337			        (struct periph_driver **)cdm->pos.cookie.pdrv,
2338				xptplistpdrvfunc, cdm);
2339	else
2340		ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2341
2342	/*
2343	 * If we get back 0, that means that we had to stop before fully
2344	 * traversing the peripheral driver tree.  It also means that one of
2345	 * the subroutines has set the status field to the proper value.  If
2346	 * we get back 1, we've fully traversed the EDT and copied out any
2347	 * matching entries.
2348	 */
2349	if (ret == 1)
2350		cdm->status = CAM_DEV_MATCH_LAST;
2351
2352	return(ret);
2353}
2354
2355static int
2356xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2357{
2358	struct cam_eb *bus, *next_bus;
2359	int retval;
2360
2361	retval = 1;
2362
2363	for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
2364	     bus != NULL;
2365	     bus = next_bus) {
2366		next_bus = TAILQ_NEXT(bus, links);
2367
2368		retval = tr_func(bus, arg);
2369		if (retval == 0)
2370			return(retval);
2371	}
2372
2373	return(retval);
2374}
2375
2376static int
2377xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2378		  xpt_targetfunc_t *tr_func, void *arg)
2379{
2380	struct cam_et *target, *next_target;
2381	int retval;
2382
2383	retval = 1;
2384	for (target = (start_target ? start_target :
2385		       TAILQ_FIRST(&bus->et_entries));
2386	     target != NULL; target = next_target) {
2387
2388		next_target = TAILQ_NEXT(target, links);
2389
2390		retval = tr_func(target, arg);
2391
2392		if (retval == 0)
2393			return(retval);
2394	}
2395
2396	return(retval);
2397}
2398
2399static int
2400xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2401		  xpt_devicefunc_t *tr_func, void *arg)
2402{
2403	struct cam_ed *device, *next_device;
2404	int retval;
2405
2406	retval = 1;
2407	for (device = (start_device ? start_device :
2408		       TAILQ_FIRST(&target->ed_entries));
2409	     device != NULL;
2410	     device = next_device) {
2411
2412		next_device = TAILQ_NEXT(device, links);
2413
2414		retval = tr_func(device, arg);
2415
2416		if (retval == 0)
2417			return(retval);
2418	}
2419
2420	return(retval);
2421}
2422
2423static int
2424xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2425		  xpt_periphfunc_t *tr_func, void *arg)
2426{
2427	struct cam_periph *periph, *next_periph;
2428	int retval;
2429
2430	retval = 1;
2431
2432	for (periph = (start_periph ? start_periph :
2433		       SLIST_FIRST(&device->periphs));
2434	     periph != NULL;
2435	     periph = next_periph) {
2436
2437		next_periph = SLIST_NEXT(periph, periph_links);
2438
2439		retval = tr_func(periph, arg);
2440		if (retval == 0)
2441			return(retval);
2442	}
2443
2444	return(retval);
2445}
2446
2447static int
2448xptpdrvtraverse(struct periph_driver **start_pdrv,
2449		xpt_pdrvfunc_t *tr_func, void *arg)
2450{
2451	struct periph_driver **pdrv;
2452	int retval;
2453
2454	retval = 1;
2455
2456	/*
2457	 * We don't traverse the peripheral driver list like we do the
2458	 * other lists, because it is a linker set, and therefore cannot be
2459	 * changed during runtime.  If the peripheral driver list is ever
2460	 * re-done to be something other than a linker set (i.e. it can
2461	 * change while the system is running), the list traversal should
2462	 * be modified to work like the other traversal functions.
2463	 */
2464	for (pdrv = (start_pdrv ? start_pdrv :
2465	     (struct periph_driver **)periphdriver_set.ls_items);
2466	     *pdrv != NULL; pdrv++) {
2467		retval = tr_func(pdrv, arg);
2468
2469		if (retval == 0)
2470			return(retval);
2471	}
2472
2473	return(retval);
2474}
2475
2476static int
2477xptpdperiphtraverse(struct periph_driver **pdrv,
2478		    struct cam_periph *start_periph,
2479		    xpt_periphfunc_t *tr_func, void *arg)
2480{
2481	struct cam_periph *periph, *next_periph;
2482	int retval;
2483
2484	retval = 1;
2485
2486	for (periph = (start_periph ? start_periph :
2487	     TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2488	     periph = next_periph) {
2489
2490		next_periph = TAILQ_NEXT(periph, unit_links);
2491
2492		retval = tr_func(periph, arg);
2493		if (retval == 0)
2494			return(retval);
2495	}
2496	return(retval);
2497}
2498
2499static int
2500xptdefbusfunc(struct cam_eb *bus, void *arg)
2501{
2502	struct xpt_traverse_config *tr_config;
2503
2504	tr_config = (struct xpt_traverse_config *)arg;
2505
2506	if (tr_config->depth == XPT_DEPTH_BUS) {
2507		xpt_busfunc_t *tr_func;
2508
2509		tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2510
2511		return(tr_func(bus, tr_config->tr_arg));
2512	} else
2513		return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2514}
2515
2516static int
2517xptdeftargetfunc(struct cam_et *target, void *arg)
2518{
2519	struct xpt_traverse_config *tr_config;
2520
2521	tr_config = (struct xpt_traverse_config *)arg;
2522
2523	if (tr_config->depth == XPT_DEPTH_TARGET) {
2524		xpt_targetfunc_t *tr_func;
2525
2526		tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2527
2528		return(tr_func(target, tr_config->tr_arg));
2529	} else
2530		return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2531}
2532
2533static int
2534xptdefdevicefunc(struct cam_ed *device, void *arg)
2535{
2536	struct xpt_traverse_config *tr_config;
2537
2538	tr_config = (struct xpt_traverse_config *)arg;
2539
2540	if (tr_config->depth == XPT_DEPTH_DEVICE) {
2541		xpt_devicefunc_t *tr_func;
2542
2543		tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2544
2545		return(tr_func(device, tr_config->tr_arg));
2546	} else
2547		return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2548}
2549
2550static int
2551xptdefperiphfunc(struct cam_periph *periph, void *arg)
2552{
2553	struct xpt_traverse_config *tr_config;
2554	xpt_periphfunc_t *tr_func;
2555
2556	tr_config = (struct xpt_traverse_config *)arg;
2557
2558	tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2559
2560	/*
2561	 * Unlike the other default functions, we don't check for depth
2562	 * here.  The peripheral driver level is the last level in the EDT,
2563	 * so if we're here, we should execute the function in question.
2564	 */
2565	return(tr_func(periph, tr_config->tr_arg));
2566}
2567
2568/*
2569 * Execute the given function for every bus in the EDT.
2570 */
2571static int
2572xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2573{
2574	struct xpt_traverse_config tr_config;
2575
2576	tr_config.depth = XPT_DEPTH_BUS;
2577	tr_config.tr_func = tr_func;
2578	tr_config.tr_arg = arg;
2579
2580	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2581}
2582
2583#ifdef notusedyet
2584/*
2585 * Execute the given function for every target in the EDT.
2586 */
2587static int
2588xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
2589{
2590	struct xpt_traverse_config tr_config;
2591
2592	tr_config.depth = XPT_DEPTH_TARGET;
2593	tr_config.tr_func = tr_func;
2594	tr_config.tr_arg = arg;
2595
2596	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2597}
2598#endif /* notusedyet */
2599
2600/*
2601 * Execute the given function for every device in the EDT.
2602 */
2603static int
2604xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2605{
2606	struct xpt_traverse_config tr_config;
2607
2608	tr_config.depth = XPT_DEPTH_DEVICE;
2609	tr_config.tr_func = tr_func;
2610	tr_config.tr_arg = arg;
2611
2612	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2613}
2614
2615#ifdef notusedyet
2616/*
2617 * Execute the given function for every peripheral in the EDT.
2618 */
2619static int
2620xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
2621{
2622	struct xpt_traverse_config tr_config;
2623
2624	tr_config.depth = XPT_DEPTH_PERIPH;
2625	tr_config.tr_func = tr_func;
2626	tr_config.tr_arg = arg;
2627
2628	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2629}
2630#endif /* notusedyet */
2631
2632static int
2633xptsetasyncfunc(struct cam_ed *device, void *arg)
2634{
2635	struct cam_path path;
2636	struct ccb_getdev cgd;
2637	struct async_node *cur_entry;
2638
2639	cur_entry = (struct async_node *)arg;
2640
2641	/*
2642	 * Don't report unconfigured devices (Wildcard devs,
2643	 * devices only for target mode, device instances
2644	 * that have been invalidated but are waiting for
2645	 * their last reference count to be released).
2646	 */
2647	if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2648		return (1);
2649
2650	xpt_compile_path(&path,
2651			 NULL,
2652			 device->target->bus->path_id,
2653			 device->target->target_id,
2654			 device->lun_id);
2655	xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2656	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2657	xpt_action((union ccb *)&cgd);
2658	cur_entry->callback(cur_entry->callback_arg,
2659			    AC_FOUND_DEVICE,
2660			    &path, &cgd);
2661	xpt_release_path(&path);
2662
2663	return(1);
2664}
2665
2666static int
2667xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2668{
2669	struct cam_path path;
2670	struct ccb_pathinq cpi;
2671	struct async_node *cur_entry;
2672
2673	cur_entry = (struct async_node *)arg;
2674
2675	xpt_compile_path(&path, /*periph*/NULL,
2676			 bus->sim->path_id,
2677			 CAM_TARGET_WILDCARD,
2678			 CAM_LUN_WILDCARD);
2679	xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2680	cpi.ccb_h.func_code = XPT_PATH_INQ;
2681	xpt_action((union ccb *)&cpi);
2682	cur_entry->callback(cur_entry->callback_arg,
2683			    AC_PATH_REGISTERED,
2684			    &path, &cpi);
2685	xpt_release_path(&path);
2686
2687	return(1);
2688}
2689
2690void
2691xpt_action(union ccb *start_ccb)
2692{
2693	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2694
2695	start_ccb->ccb_h.status = CAM_REQ_INPROG;
2696
2697	switch (start_ccb->ccb_h.func_code) {
2698	case XPT_SCSI_IO:
2699	{
2700#ifdef CAMDEBUG
2701		char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2702		struct cam_path *path;
2703
2704		path = start_ccb->ccb_h.path;
2705#endif
2706
2707		/*
2708		 * For the sake of compatibility with SCSI-1
2709		 * devices that may not understand the identify
2710		 * message, we include lun information in the
2711		 * second byte of all commands.  SCSI-1 specifies
2712		 * that luns are a 3 bit value and reserves only 3
2713		 * bits for lun information in the CDB.  Later
2714		 * revisions of the SCSI spec allow for more than 8
2715		 * luns, but have deprecated lun information in the
2716		 * CDB.  So, if the lun won't fit, we must omit.
2717		 *
2718		 * Also be aware that during initial probing for devices,
2719		 * the inquiry information is unknown but initialized to 0.
2720		 * This means that this code will be exercised while probing
2721		 * devices with an ANSI revision greater than 2.
2722		 */
2723		if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
2724		 && start_ccb->ccb_h.target_lun < 8
2725		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2726
2727			start_ccb->csio.cdb_io.cdb_bytes[1] |=
2728			    start_ccb->ccb_h.target_lun << 5;
2729		}
2730		start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2731		start_ccb->csio.sense_resid = 0;
2732		start_ccb->csio.resid = 0;
2733		CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
2734			  scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
2735			  	       &path->device->inq_data),
2736			  scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
2737					  cdb_str, sizeof(cdb_str))));
2738		/* FALLTRHOUGH */
2739	}
2740	case XPT_TARGET_IO:
2741	case XPT_CONT_TARGET_IO:
2742	case XPT_ENG_EXEC:
2743	{
2744		struct cam_path *path;
2745		int s;
2746		int runq;
2747
2748		path = start_ccb->ccb_h.path;
2749		s = splsoftcam();
2750
2751		cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2752		if (path->device->qfrozen_cnt == 0)
2753			runq = xpt_schedule_dev_sendq(path->bus, path->device);
2754		else
2755			runq = 0;
2756		splx(s);
2757		if (runq != 0)
2758			xpt_run_dev_sendq(path->bus);
2759		break;
2760	}
2761	case XPT_SET_TRAN_SETTINGS:
2762	{
2763		xpt_set_transfer_settings(&start_ccb->cts,
2764					  start_ccb->ccb_h.path->device,
2765					  /*async_update*/FALSE);
2766		break;
2767	}
2768	case XPT_CALC_GEOMETRY:
2769		/* Filter out garbage */
2770		if (start_ccb->ccg.block_size == 0
2771		 || start_ccb->ccg.volume_size == 0) {
2772			start_ccb->ccg.cylinders = 0;
2773			start_ccb->ccg.heads = 0;
2774			start_ccb->ccg.secs_per_track = 0;
2775			start_ccb->ccb_h.status = CAM_REQ_CMP;
2776			break;
2777		}
2778#ifdef PC98
2779		/*
2780		 * In a PC-98 system, geometry translation depens on
2781		 * the "real" device geometry obtained from mode page 4.
2782		 * SCSI geometry translation is performed in the
2783		 * initialization routine of the SCSI BIOS and the result
2784		 * stored in host memory.  If the translation is available
2785		 * in host memory, use it.  If not, rely on the default
2786		 * translation the device driver performs.
2787		 */
2788		if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
2789			start_ccb->ccb_h.status = CAM_REQ_CMP;
2790			break;
2791		}
2792		/* FALLTHROUGH */
2793#endif
2794	case XPT_ABORT:
2795	case XPT_RESET_DEV:
2796	case XPT_ACCEPT_TARGET_IO:
2797	case XPT_EN_LUN:
2798	case XPT_IMMED_NOTIFY:
2799	case XPT_NOTIFY_ACK:
2800	case XPT_GET_TRAN_SETTINGS:
2801	case XPT_PATH_INQ:
2802	case XPT_RESET_BUS:
2803	{
2804		struct cam_sim *sim;
2805
2806		sim = start_ccb->ccb_h.path->bus->sim;
2807		(*(sim->sim_action))(sim, start_ccb);
2808		break;
2809	}
2810	case XPT_GDEV_TYPE:
2811	{
2812		int s;
2813
2814		s = splcam();
2815		if ((start_ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) != 0) {
2816			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2817		} else {
2818			struct ccb_getdev *cgd;
2819			struct cam_et *tar;
2820			struct cam_ed *dev;
2821
2822			cgd = &start_ccb->cgd;
2823			tar = cgd->ccb_h.path->target;
2824			dev = cgd->ccb_h.path->device;
2825			cgd->inq_data = dev->inq_data;
2826			cgd->pd_type = SID_TYPE(&dev->inq_data);
2827			cgd->dev_openings = dev->ccbq.dev_openings;
2828			cgd->dev_active = dev->ccbq.dev_active;
2829			cgd->devq_openings = dev->ccbq.devq_openings;
2830			cgd->devq_queued = dev->ccbq.queue.entries;
2831			cgd->held = dev->ccbq.held;
2832			cgd->maxtags = dev->quirk->maxtags;
2833			cgd->mintags = dev->quirk->mintags;
2834			cgd->ccb_h.status = CAM_REQ_CMP;
2835			cgd->serial_num_len = dev->serial_num_len;
2836			if ((dev->serial_num_len > 0)
2837			 && (dev->serial_num != NULL))
2838				bcopy(dev->serial_num, cgd->serial_num,
2839				      dev->serial_num_len);
2840		}
2841		splx(s);
2842		break;
2843	}
2844	case XPT_GDEVLIST:
2845	{
2846		struct cam_periph	*nperiph;
2847		struct periph_list	*periph_head;
2848		struct ccb_getdevlist	*cgdl;
2849		int			i;
2850		int			s;
2851		struct cam_ed		*device;
2852		int			found;
2853
2854
2855		found = 0;
2856
2857		/*
2858		 * Don't want anyone mucking with our data.
2859		 */
2860		s = splcam();
2861		device = start_ccb->ccb_h.path->device;
2862		periph_head = &device->periphs;
2863		cgdl = &start_ccb->cgdl;
2864
2865		/*
2866		 * Check and see if the list has changed since the user
2867		 * last requested a list member.  If so, tell them that the
2868		 * list has changed, and therefore they need to start over
2869		 * from the beginning.
2870		 */
2871		if ((cgdl->index != 0) &&
2872		    (cgdl->generation != device->generation)) {
2873			cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
2874			splx(s);
2875			break;
2876		}
2877
2878		/*
2879		 * Traverse the list of peripherals and attempt to find
2880		 * the requested peripheral.
2881		 */
2882		for (nperiph = periph_head->slh_first, i = 0;
2883		     (nperiph != NULL) && (i <= cgdl->index);
2884		     nperiph = nperiph->periph_links.sle_next, i++) {
2885			if (i == cgdl->index) {
2886				strncpy(cgdl->periph_name,
2887					nperiph->periph_name,
2888					DEV_IDLEN);
2889				cgdl->unit_number = nperiph->unit_number;
2890				found = 1;
2891			}
2892		}
2893		if (found == 0) {
2894			cgdl->status = CAM_GDEVLIST_ERROR;
2895			splx(s);
2896			break;
2897		}
2898
2899		if (nperiph == NULL)
2900			cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
2901		else
2902			cgdl->status = CAM_GDEVLIST_MORE_DEVS;
2903
2904		cgdl->index++;
2905		cgdl->generation = device->generation;
2906
2907		splx(s);
2908		cgdl->ccb_h.status = CAM_REQ_CMP;
2909		break;
2910	}
2911	case XPT_DEV_MATCH:
2912	{
2913		int s;
2914		dev_pos_type position_type;
2915		struct ccb_dev_match *cdm;
2916		int ret;
2917
2918		cdm = &start_ccb->cdm;
2919
2920		/*
2921		 * Prevent EDT changes while we traverse it.
2922		 */
2923		s = splcam();
2924		/*
2925		 * There are two ways of getting at information in the EDT.
2926		 * The first way is via the primary EDT tree.  It starts
2927		 * with a list of busses, then a list of targets on a bus,
2928		 * then devices/luns on a target, and then peripherals on a
2929		 * device/lun.  The "other" way is by the peripheral driver
2930		 * lists.  The peripheral driver lists are organized by
2931		 * peripheral driver.  (obviously)  So it makes sense to
2932		 * use the peripheral driver list if the user is looking
2933		 * for something like "da1", or all "da" devices.  If the
2934		 * user is looking for something on a particular bus/target
2935		 * or lun, it's generally better to go through the EDT tree.
2936		 */
2937
2938		if (cdm->pos.position_type != CAM_DEV_POS_NONE)
2939			position_type = cdm->pos.position_type;
2940		else {
2941			int i;
2942
2943			position_type = CAM_DEV_POS_NONE;
2944
2945			for (i = 0; i < cdm->num_patterns; i++) {
2946				if ((cdm->patterns[i].type == DEV_MATCH_BUS)
2947				 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
2948					position_type = CAM_DEV_POS_EDT;
2949					break;
2950				}
2951			}
2952
2953			if (cdm->num_patterns == 0)
2954				position_type = CAM_DEV_POS_EDT;
2955			else if (position_type == CAM_DEV_POS_NONE)
2956				position_type = CAM_DEV_POS_PDRV;
2957		}
2958
2959		switch(position_type & CAM_DEV_POS_TYPEMASK) {
2960		case CAM_DEV_POS_EDT:
2961			ret = xptedtmatch(cdm);
2962			break;
2963		case CAM_DEV_POS_PDRV:
2964			ret = xptperiphlistmatch(cdm);
2965			break;
2966		default:
2967			cdm->status = CAM_DEV_MATCH_ERROR;
2968			break;
2969		}
2970
2971		splx(s);
2972
2973		if (cdm->status == CAM_DEV_MATCH_ERROR)
2974			start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2975		else
2976			start_ccb->ccb_h.status = CAM_REQ_CMP;
2977
2978		break;
2979	}
2980	case XPT_SASYNC_CB:
2981	{
2982		struct ccb_setasync *csa;
2983		struct async_node *cur_entry;
2984		struct async_list *async_head;
2985		u_int32_t added;
2986		int s;
2987
2988		csa = &start_ccb->csa;
2989		added = csa->event_enable;
2990		async_head = &csa->ccb_h.path->device->asyncs;
2991
2992		/*
2993		 * If there is already an entry for us, simply
2994		 * update it.
2995		 */
2996		s = splcam();
2997		cur_entry = SLIST_FIRST(async_head);
2998		while (cur_entry != NULL) {
2999			if ((cur_entry->callback_arg == csa->callback_arg)
3000			 && (cur_entry->callback == csa->callback))
3001				break;
3002			cur_entry = SLIST_NEXT(cur_entry, links);
3003		}
3004
3005		if (cur_entry != NULL) {
3006		 	/*
3007			 * If the request has no flags set,
3008			 * remove the entry.
3009			 */
3010			added &= ~cur_entry->event_enable;
3011			if (csa->event_enable == 0) {
3012				SLIST_REMOVE(async_head, cur_entry,
3013					     async_node, links);
3014				csa->ccb_h.path->device->refcount--;
3015				free(cur_entry, M_DEVBUF);
3016			} else {
3017				cur_entry->event_enable = csa->event_enable;
3018			}
3019		} else {
3020			cur_entry = malloc(sizeof(*cur_entry), M_DEVBUF,
3021					   M_NOWAIT);
3022			if (cur_entry == NULL) {
3023				splx(s);
3024				csa->ccb_h.status = CAM_RESRC_UNAVAIL;
3025				break;
3026			}
3027			cur_entry->callback_arg = csa->callback_arg;
3028			cur_entry->callback = csa->callback;
3029			cur_entry->event_enable = csa->event_enable;
3030			SLIST_INSERT_HEAD(async_head, cur_entry, links);
3031			csa->ccb_h.path->device->refcount++;
3032		}
3033
3034		if ((added & AC_FOUND_DEVICE) != 0) {
3035			/*
3036			 * Get this peripheral up to date with all
3037			 * the currently existing devices.
3038			 */
3039			xpt_for_all_devices(xptsetasyncfunc, cur_entry);
3040		}
3041		if ((added & AC_PATH_REGISTERED) != 0) {
3042			/*
3043			 * Get this peripheral up to date with all
3044			 * the currently existing busses.
3045			 */
3046			xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
3047		}
3048		splx(s);
3049		start_ccb->ccb_h.status = CAM_REQ_CMP;
3050		break;
3051	}
3052	case XPT_REL_SIMQ:
3053	{
3054		struct ccb_relsim *crs;
3055		struct cam_ed *dev;
3056		int s;
3057
3058		crs = &start_ccb->crs;
3059		dev = crs->ccb_h.path->device;
3060		if (dev == NULL) {
3061
3062			crs->ccb_h.status = CAM_DEV_NOT_THERE;
3063			break;
3064		}
3065
3066		s = splcam();
3067
3068		if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3069
3070 			if ((dev->inq_data.flags & SID_CmdQue) != 0) {
3071
3072				/* Don't ever go below one opening */
3073				if (crs->openings > 0) {
3074					xpt_dev_ccbq_resize(crs->ccb_h.path,
3075							    crs->openings);
3076
3077					if (bootverbose) {
3078						xpt_print_path(crs->ccb_h.path);
3079						printf("tagged openings "
3080						       "now %d\n",
3081						       crs->openings);
3082					}
3083				}
3084			}
3085		}
3086
3087		if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3088
3089			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3090
3091				/*
3092				 * Just extend the old timeout and decrement
3093				 * the freeze count so that a single timeout
3094				 * is sufficient for releasing the queue.
3095				 */
3096				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3097				untimeout(xpt_release_devq_timeout,
3098					  dev, dev->c_handle);
3099			} else {
3100
3101				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3102			}
3103
3104			dev->c_handle =
3105				timeout(xpt_release_devq_timeout,
3106					dev,
3107					(crs->release_timeout * hz) / 1000);
3108
3109			dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3110
3111		}
3112
3113		if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3114
3115			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3116				/*
3117				 * Decrement the freeze count so that a single
3118				 * completion is still sufficient to unfreeze
3119				 * the queue.
3120				 */
3121				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3122			} else {
3123
3124				dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3125				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3126			}
3127		}
3128
3129		if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3130
3131			if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3132			 || (dev->ccbq.dev_active == 0)) {
3133
3134				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3135			} else {
3136
3137				dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3138				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3139			}
3140		}
3141		splx(s);
3142
3143		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3144
3145			xpt_release_devq(crs->ccb_h.path->device,
3146					 /*run_queue*/TRUE);
3147		}
3148		start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3149		start_ccb->ccb_h.status = CAM_REQ_CMP;
3150		break;
3151	}
3152	case XPT_SCAN_BUS:
3153		xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3154		break;
3155	case XPT_SCAN_LUN:
3156		xpt_scan_lun(start_ccb->ccb_h.path->periph,
3157			     start_ccb->ccb_h.path, start_ccb->crcn.flags,
3158			     start_ccb);
3159		break;
3160	case XPT_DEBUG: {
3161#ifdef CAMDEBUG
3162		int s;
3163
3164		s = splcam();
3165		cam_dflags = start_ccb->cdbg.flags;
3166		if (cam_dpath != NULL) {
3167			xpt_free_path(cam_dpath);
3168			cam_dpath = NULL;
3169		}
3170
3171		if (cam_dflags != CAM_DEBUG_NONE) {
3172			if (xpt_create_path(&cam_dpath, xpt_periph,
3173					    start_ccb->ccb_h.path_id,
3174					    start_ccb->ccb_h.target_id,
3175					    start_ccb->ccb_h.target_lun) !=
3176					    CAM_REQ_CMP) {
3177				start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3178				cam_dflags = CAM_DEBUG_NONE;
3179			} else {
3180				start_ccb->ccb_h.status = CAM_REQ_CMP;
3181				xpt_print_path(cam_dpath);
3182				printf("debugging flags now %x\n", cam_dflags);
3183			}
3184		} else {
3185			cam_dpath = NULL;
3186			start_ccb->ccb_h.status = CAM_REQ_CMP;
3187		}
3188		splx(s);
3189#else /* !CAMDEBUG */
3190		start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3191#endif /* CAMDEBUG */
3192		break;
3193	}
3194	case XPT_NOOP:
3195		start_ccb->ccb_h.status = CAM_REQ_CMP;
3196		break;
3197	default:
3198	case XPT_SDEV_TYPE:
3199	case XPT_TERM_IO:
3200	case XPT_ENG_INQ:
3201		/* XXX Implement */
3202		start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3203		break;
3204	}
3205}
3206
3207void
3208xpt_polled_action(union ccb *start_ccb)
3209{
3210	int	  s;
3211	u_int32_t timeout;
3212	struct	  cam_sim *sim;
3213	struct	  cam_devq *devq;
3214	struct	  cam_ed *dev;
3215
3216	timeout = start_ccb->ccb_h.timeout;
3217	sim = start_ccb->ccb_h.path->bus->sim;
3218	devq = sim->devq;
3219	dev = start_ccb->ccb_h.path->device;
3220
3221	s = splcam();
3222
3223	/*
3224	 * Steal an opening so that no other queued requests
3225	 * can get it before us while we simulate interrupts.
3226	 */
3227	dev->ccbq.devq_openings--;
3228	dev->ccbq.dev_openings--;
3229
3230	while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0)
3231	   && (--timeout > 0)) {
3232		DELAY(1000);
3233		(*(sim->sim_poll))(sim);
3234		swi_camnet();
3235		swi_cambio();
3236	}
3237
3238	dev->ccbq.devq_openings++;
3239	dev->ccbq.dev_openings++;
3240
3241	if (timeout != 0) {
3242		xpt_action(start_ccb);
3243		while(--timeout > 0) {
3244			(*(sim->sim_poll))(sim);
3245			swi_camnet();
3246			swi_cambio();
3247			if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
3248			    != CAM_REQ_INPROG)
3249				break;
3250			DELAY(1000);
3251		}
3252		if (timeout == 0) {
3253			/*
3254			 * XXX Is it worth adding a sim_timeout entry
3255			 * point so we can attempt recovery?  If
3256			 * this is only used for dumps, I don't think
3257			 * it is.
3258			 */
3259			start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3260		}
3261	} else {
3262		start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3263	}
3264	splx(s);
3265}
3266
3267/*
3268 * Schedule a peripheral driver to receive a ccb when it's
3269 * target device has space for more transactions.
3270 */
3271void
3272xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3273{
3274	struct cam_ed *device;
3275	int s;
3276	int runq;
3277
3278	CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3279	device = perph->path->device;
3280	s = splsoftcam();
3281	if (periph_is_queued(perph)) {
3282		/* Simply reorder based on new priority */
3283		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3284			  ("   change priority to %d\n", new_priority));
3285		if (new_priority < perph->pinfo.priority) {
3286			camq_change_priority(&device->drvq,
3287					     perph->pinfo.index,
3288					     new_priority);
3289		}
3290		runq = 0;
3291	} else {
3292		/* New entry on the queue */
3293		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3294			  ("   added periph to queue\n"));
3295		perph->pinfo.priority = new_priority;
3296		perph->pinfo.generation = ++device->drvq.generation;
3297		camq_insert(&device->drvq, &perph->pinfo);
3298		runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3299	}
3300	splx(s);
3301	if (runq != 0) {
3302		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3303			  ("   calling xpt_run_devq\n"));
3304		xpt_run_dev_allocq(perph->path->bus);
3305	}
3306}
3307
3308
3309/*
3310 * Schedule a device to run on a given queue.
3311 * If the device was inserted as a new entry on the queue,
3312 * return 1 meaning the device queue should be run. If we
3313 * were already queued, implying someone else has already
3314 * started the queue, return 0 so the caller doesn't attempt
3315 * to run the queue.  Must be run at either splsoftcam
3316 * (or splcam since that encompases splsoftcam).
3317 */
3318static int
3319xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3320		 u_int32_t new_priority)
3321{
3322	int retval;
3323	u_int32_t old_priority;
3324
3325	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3326
3327	old_priority = pinfo->priority;
3328
3329	/*
3330	 * Are we already queued?
3331	 */
3332	if (pinfo->index != CAM_UNQUEUED_INDEX) {
3333		/* Simply reorder based on new priority */
3334		if (new_priority < old_priority) {
3335			camq_change_priority(queue, pinfo->index,
3336					     new_priority);
3337			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3338					("changed priority to %d\n",
3339					 new_priority));
3340		}
3341		retval = 0;
3342	} else {
3343		/* New entry on the queue */
3344		if (new_priority < old_priority)
3345			pinfo->priority = new_priority;
3346
3347		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3348				("Inserting onto queue\n"));
3349		pinfo->generation = ++queue->generation;
3350		camq_insert(queue, pinfo);
3351		retval = 1;
3352	}
3353	return (retval);
3354}
3355
3356static void
3357xpt_run_dev_allocq(struct cam_eb *bus)
3358{
3359	struct	cam_devq *devq;
3360	int	s;
3361
3362	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3363	devq = bus->sim->devq;
3364
3365	CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3366			("   qfrozen_cnt == 0x%x, entries == %d, "
3367			 "openings == %d, active == %d\n",
3368			 devq->alloc_queue.qfrozen_cnt,
3369			 devq->alloc_queue.entries,
3370			 devq->alloc_openings,
3371			 devq->alloc_active));
3372
3373	s = splsoftcam();
3374	devq->alloc_queue.qfrozen_cnt++;
3375	while ((devq->alloc_queue.entries > 0)
3376	    && (devq->alloc_openings > 0)
3377	    && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3378		struct	cam_ed_qinfo *qinfo;
3379		struct	cam_ed *device;
3380		union	ccb *work_ccb;
3381		struct	cam_periph *drv;
3382		struct	camq *drvq;
3383
3384		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3385							   /*position*/0);
3386		device = qinfo->device;
3387
3388		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3389				("running device %p\n", device));
3390
3391		drvq = &device->drvq;
3392
3393#ifdef CAMDEBUG
3394		if (drvq->entries <= 0) {
3395			panic("xpt_run_dev_allocq: "
3396			      "Device on queue without any work to do");
3397		}
3398#endif
3399		if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3400			devq->alloc_openings--;
3401			devq->alloc_active++;
3402			drv = (struct cam_periph*)camq_remove(drvq,
3403							      /*pos*/0);
3404			/* Update priority */
3405			if (drvq->entries > 0) {
3406				qinfo->pinfo.priority = drvq->queue_array[0]->priority;
3407			} else {
3408				qinfo->pinfo.priority = CAM_PRIORITY_NONE;
3409			}
3410			splx(s);
3411			xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3412				      drv->pinfo.priority);
3413			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3414					("calling periph start\n"));
3415			drv->periph_start(drv, work_ccb);
3416		} else {
3417			/*
3418			 * Malloc failure in alloc_ccb
3419			 */
3420			/*
3421			 * XXX add us to a list to be run from free_ccb
3422			 * if we don't have any ccbs active on this
3423			 * device queue otherwise we may never get run
3424			 * again.
3425			 */
3426			break;
3427		}
3428
3429		/* Raise IPL for possible insertion and test at top of loop */
3430		s = splsoftcam();
3431
3432		if (drvq->entries > 0) {
3433			/* We have more work.  Attempt to reschedule */
3434			xpt_schedule_dev_allocq(bus, device);
3435		}
3436	}
3437	devq->alloc_queue.qfrozen_cnt--;
3438	splx(s);
3439}
3440
3441static void
3442xpt_run_dev_sendq(struct cam_eb *bus)
3443{
3444	struct	cam_devq *devq;
3445	int	s;
3446
3447	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3448
3449	devq = bus->sim->devq;
3450
3451	s = splcam();
3452	devq->send_queue.qfrozen_cnt++;
3453	splx(s);
3454	s = splsoftcam();
3455	while ((devq->send_queue.entries > 0)
3456	    && (devq->send_openings > 0)) {
3457		struct	cam_ed_qinfo *qinfo;
3458		struct	cam_ed *device;
3459		union ccb *work_ccb;
3460		struct	cam_sim *sim;
3461		int	ospl;
3462
3463		ospl = splcam();
3464	    	if (devq->send_queue.qfrozen_cnt > 1) {
3465			splx(ospl);
3466			break;
3467		}
3468
3469		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3470							   /*position*/0);
3471		device = qinfo->device;
3472
3473		/*
3474		 * If the device has been "frozen", don't attempt
3475		 * to run it.
3476		 */
3477		if (device->qfrozen_cnt > 0) {
3478			splx(ospl);
3479			continue;
3480		}
3481
3482		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3483				("running device %p\n", device));
3484
3485		work_ccb = cam_ccbq_peek_ccb(&device->ccbq, 0);
3486		if (work_ccb == NULL) {
3487			printf("device on run queue with no ccbs???");
3488			splx(ospl);
3489			continue;
3490		}
3491
3492		if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3493
3494		 	if (num_highpower <= 0) {
3495				/*
3496				 * We got a high power command, but we
3497				 * don't have any available slots.  Freeze
3498				 * the device queue until we have a slot
3499				 * available.
3500				 */
3501				device->qfrozen_cnt++;
3502				STAILQ_INSERT_TAIL(&highpowerq,
3503						   &work_ccb->ccb_h,
3504						   xpt_links.stqe);
3505
3506				splx(ospl);
3507				continue;
3508			} else {
3509				/*
3510				 * Consume a high power slot while
3511				 * this ccb runs.
3512				 */
3513				num_highpower--;
3514			}
3515		}
3516		devq->active_dev = device;
3517		cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3518
3519		cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3520		splx(ospl);
3521
3522		devq->send_openings--;
3523		devq->send_active++;
3524
3525		if (device->ccbq.queue.entries > 0) {
3526			qinfo->pinfo.priority =
3527			    device->ccbq.queue.queue_array[0]->priority;
3528			xpt_schedule_dev_sendq(bus, device);
3529		} else {
3530			qinfo->pinfo.priority = CAM_PRIORITY_NONE;
3531		}
3532
3533		if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
3534			/*
3535			 * The client wants to freeze the queue
3536			 * after this CCB is sent.
3537			 */
3538			ospl = splcam();
3539			device->qfrozen_cnt++;
3540			splx(ospl);
3541		}
3542
3543		splx(s);
3544
3545		if ((device->inq_flags & SID_CmdQue) != 0)
3546			work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3547		else
3548			/*
3549			 * Clear this in case of a retried CCB that failed
3550			 * due to a rejected tag.
3551			 */
3552			work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3553
3554		/*
3555		 * Device queues can be shared among multiple sim instances
3556		 * that reside on different busses.  Use the SIM in the queue
3557		 * CCB's path, rather than the one in the bus that was passed
3558		 * into this function.
3559		 */
3560		sim = work_ccb->ccb_h.path->bus->sim;
3561		(*(sim->sim_action))(sim, work_ccb);
3562
3563		ospl = splcam();
3564		devq->active_dev = NULL;
3565		splx(ospl);
3566		/* Raise IPL for possible insertion and test at top of loop */
3567		s = splsoftcam();
3568	}
3569	splx(s);
3570	s = splcam();
3571	devq->send_queue.qfrozen_cnt--;
3572	splx(s);
3573}
3574
3575/*
3576 * This function merges stuff from the slave ccb into the master ccb, while
3577 * keeping important fields in the master ccb constant.
3578 */
3579void
3580xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3581{
3582	/*
3583	 * Pull fields that are valid for peripheral drivers to set
3584	 * into the master CCB along with the CCB "payload".
3585	 */
3586	master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3587	master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3588	master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3589	master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
3590	bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3591	      sizeof(union ccb) - sizeof(struct ccb_hdr));
3592}
3593
3594void
3595xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3596{
3597	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3598	ccb_h->pinfo.priority = priority;
3599	ccb_h->path = path;
3600	ccb_h->path_id = path->bus->path_id;
3601	if (path->target)
3602		ccb_h->target_id = path->target->target_id;
3603	else
3604		ccb_h->target_id = CAM_TARGET_WILDCARD;
3605	if (path->device) {
3606		ccb_h->target_lun = path->device->lun_id;
3607		ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3608	} else {
3609		ccb_h->target_lun = CAM_TARGET_WILDCARD;
3610	}
3611	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3612	ccb_h->flags = 0;
3613}
3614
3615/* Path manipulation functions */
3616cam_status
3617xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3618		path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3619{
3620	struct	   cam_path *path;
3621	cam_status status;
3622
3623	path = (struct cam_path *)malloc(sizeof(*path), M_DEVBUF, M_NOWAIT);
3624
3625	if (path == NULL) {
3626		status = CAM_RESRC_UNAVAIL;
3627		return(status);
3628	}
3629	status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3630	if (status != CAM_REQ_CMP) {
3631		free(path, M_DEVBUF);
3632		path = NULL;
3633	}
3634	*new_path_ptr = path;
3635	return (status);
3636}
3637
3638static cam_status
3639xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3640		 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3641{
3642	struct	     cam_eb *bus;
3643	struct	     cam_et *target;
3644	struct	     cam_ed *device;
3645	cam_status   status;
3646	int	     s;
3647
3648	status = CAM_REQ_CMP;	/* Completed without error */
3649	target = NULL;		/* Wildcarded */
3650	device = NULL;		/* Wildcarded */
3651
3652	/*
3653	 * We will potentially modify the EDT, so block interrupts
3654	 * that may attempt to create cam paths.
3655	 */
3656	s = splcam();
3657	bus = xpt_find_bus(path_id);
3658	if (bus == NULL) {
3659		status = CAM_PATH_INVALID;
3660	} else {
3661		target = xpt_find_target(bus, target_id);
3662		if (target == NULL) {
3663			/* Create one */
3664			struct cam_et *new_target;
3665
3666			new_target = xpt_alloc_target(bus, target_id);
3667			if (new_target == NULL) {
3668				status = CAM_RESRC_UNAVAIL;
3669			} else {
3670				target = new_target;
3671			}
3672		}
3673		if (target != NULL) {
3674			device = xpt_find_device(target, lun_id);
3675			if (device == NULL) {
3676				/* Create one */
3677				struct cam_ed *new_device;
3678
3679				new_device = xpt_alloc_device(bus,
3680							      target,
3681							      lun_id);
3682				if (new_device == NULL) {
3683					status = CAM_RESRC_UNAVAIL;
3684				} else {
3685					device = new_device;
3686				}
3687			}
3688		}
3689	}
3690	splx(s);
3691
3692	/*
3693	 * Only touch the user's data if we are successful.
3694	 */
3695	if (status == CAM_REQ_CMP) {
3696		new_path->periph = perph;
3697		new_path->bus = bus;
3698		new_path->target = target;
3699		new_path->device = device;
3700		CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3701	} else {
3702		if (device != NULL)
3703			xpt_release_device(bus, target, device);
3704		if (target != NULL)
3705			xpt_release_target(bus, target);
3706		if (bus != NULL)
3707			xpt_release_bus(bus);
3708	}
3709	return (status);
3710}
3711
3712static void
3713xpt_release_path(struct cam_path *path)
3714{
3715	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3716	if (path->device != NULL)
3717		xpt_release_device(path->bus, path->target, path->device);
3718	if (path->target != NULL)
3719		xpt_release_target(path->bus, path->target);
3720}
3721
3722void
3723xpt_free_path(struct cam_path *path)
3724{
3725	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3726	xpt_release_path(path);
3727	free(path, M_DEVBUF);
3728}
3729
3730
3731/*
3732 * Return -1 for failure, 0 for exact match, 1 for match with wildcards.
3733 */
3734int
3735xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3736{
3737	int retval = 0;
3738
3739	if (path1->bus != path2->bus) {
3740		if ((path1->bus == NULL)
3741		 || (path2->bus == NULL))
3742			retval = 1;
3743		else
3744			return (-1);
3745	}
3746	if (path1->target != path2->target) {
3747		if ((path1->target == NULL)
3748		 || (path2->target == NULL))
3749			retval = 1;
3750		else
3751			return (-1);
3752	}
3753	if (path1->device != path2->device) {
3754		if ((path1->device == NULL)
3755		 || (path2->device == NULL))
3756			retval = 1;
3757		else
3758			return (-1);
3759	}
3760	return (retval);
3761}
3762
3763void
3764xpt_print_path(struct cam_path *path)
3765{
3766	if (path == NULL)
3767		printf("(nopath): ");
3768	else {
3769		if (path->periph != NULL)
3770			printf("(%s%d:", path->periph->periph_name,
3771			       path->periph->unit_number);
3772		else
3773			printf("(noperiph:");
3774
3775		if (path->bus != NULL)
3776			printf("%s%d:%d:", path->bus->sim->sim_name,
3777			       path->bus->sim->unit_number,
3778			       path->bus->sim->bus_id);
3779		else
3780			printf("nobus:");
3781
3782		if (path->target != NULL)
3783			printf("%d:", path->target->target_id);
3784		else
3785			printf("X:");
3786
3787		if (path->device != NULL)
3788			printf("%d): ", path->device->lun_id);
3789		else
3790			printf("X): ");
3791	}
3792}
3793
3794path_id_t
3795xpt_path_path_id(struct cam_path *path)
3796{
3797	return(path->bus->path_id);
3798}
3799
3800target_id_t
3801xpt_path_target_id(struct cam_path *path)
3802{
3803	if (path->target != NULL)
3804		return (path->target->target_id);
3805	else
3806		return (CAM_TARGET_WILDCARD);
3807}
3808
3809lun_id_t
3810xpt_path_lun_id(struct cam_path *path)
3811{
3812	if (path->device != NULL)
3813		return (path->device->lun_id);
3814	else
3815		return (CAM_LUN_WILDCARD);
3816}
3817
3818struct cam_sim *
3819xpt_path_sim(struct cam_path *path)
3820{
3821	return (path->bus->sim);
3822}
3823
3824struct cam_periph*
3825xpt_path_periph(struct cam_path *path)
3826{
3827	return (path->periph);
3828}
3829
3830/*
3831 * Release a CAM control block for the caller.  Remit the cost of the structure
3832 * to the device referenced by the path.  If the this device had no 'credits'
3833 * and peripheral drivers have registered async callbacks for this notification
3834 * call them now.
3835 */
3836void
3837xpt_release_ccb(union ccb *free_ccb)
3838{
3839	int	 s;
3840	struct	 cam_path *path;
3841	struct	 cam_ed *device;
3842	struct	 cam_eb *bus;
3843
3844	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
3845	path = free_ccb->ccb_h.path;
3846	device = path->device;
3847	bus = path->bus;
3848	s = splsoftcam();
3849	cam_ccbq_release_opening(&device->ccbq);
3850	if (xpt_ccb_count > xpt_max_ccbs) {
3851		xpt_free_ccb(free_ccb);
3852		xpt_ccb_count--;
3853	} else {
3854		SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
3855	}
3856	bus->sim->devq->alloc_openings++;
3857	bus->sim->devq->alloc_active--;
3858	/* XXX Turn this into an inline function - xpt_run_device?? */
3859	if ((device_is_alloc_queued(device) == 0)
3860	 && (device->drvq.entries > 0)) {
3861		xpt_schedule_dev_allocq(bus, device);
3862	}
3863	splx(s);
3864	if (dev_allocq_is_runnable(bus->sim->devq))
3865		xpt_run_dev_allocq(bus);
3866}
3867
3868/* Functions accessed by SIM drivers */
3869
3870/*
3871 * A sim structure, listing the SIM entry points and instance
3872 * identification info is passed to xpt_bus_register to hook the SIM
3873 * into the CAM framework.  xpt_bus_register creates a cam_eb entry
3874 * for this new bus and places it in the array of busses and assigns
3875 * it a path_id.  The path_id may be influenced by "hard wiring"
3876 * information specified by the user.  Once interrupt services are
3877 * availible, the bus will be probed.
3878 */
3879int32_t
3880xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
3881{
3882	static path_id_t buscount;
3883	struct cam_eb *new_bus;
3884	struct ccb_pathinq cpi;
3885	int s;
3886
3887	sim->bus_id = bus;
3888	new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
3889					  M_DEVBUF, M_NOWAIT);
3890	if (new_bus == NULL) {
3891		/* Couldn't satisfy request */
3892		return (CAM_RESRC_UNAVAIL);
3893	}
3894
3895	bzero(new_bus, sizeof(*new_bus));
3896
3897	if (strcmp(sim->sim_name, "xpt") != 0) {
3898
3899		sim->path_id = xptpathid(sim->sim_name, sim->unit_number,
3900					 sim->bus_id, &buscount);
3901	}
3902
3903	new_bus->path_id = sim->path_id;
3904	new_bus->sim = sim;
3905	TAILQ_INIT(&new_bus->et_entries);
3906	new_bus->refcount = 1;	/* Held until a bus_deregister event */
3907	s = splcam();
3908	TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
3909	bus_generation++;
3910	splx(s);
3911
3912	/* Notify interested parties */
3913	if (sim->path_id != CAM_XPT_PATH_ID) {
3914		struct cam_path path;
3915
3916		xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
3917			         CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3918		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
3919		cpi.ccb_h.func_code = XPT_PATH_INQ;
3920		xpt_action((union ccb *)&cpi);
3921		xpt_async(AC_PATH_REGISTERED, xpt_periph->path, &cpi);
3922		xpt_release_path(&path);
3923	}
3924	return (CAM_SUCCESS);
3925}
3926
3927static int
3928xptnextfreebus(path_id_t startbus)
3929{
3930	struct cam_sim_config *sim_conf;
3931
3932	sim_conf = cam_sinit;
3933	while (sim_conf->sim_name != NULL) {
3934
3935		if (IS_SPECIFIED(sim_conf->pathid)
3936		 && (startbus == sim_conf->pathid)) {
3937			++startbus;
3938			/* Start the search over */
3939			sim_conf = cam_sinit;
3940		} else {
3941			sim_conf++;
3942		}
3943	}
3944	return (startbus);
3945}
3946
3947static int
3948xptpathid(const char *sim_name, int sim_unit,
3949	  int sim_bus, path_id_t *nextpath)
3950{
3951	struct cam_sim_config *sim_conf;
3952	path_id_t pathid;
3953
3954	pathid = CAM_XPT_PATH_ID;
3955	for (sim_conf = cam_sinit; sim_conf->sim_name != NULL; sim_conf++) {
3956
3957		if (!IS_SPECIFIED(sim_conf->pathid))
3958			continue;
3959
3960		if (!strcmp(sim_name, sim_conf->sim_name)
3961		 && (sim_unit == sim_conf->sim_unit)) {
3962
3963			if (IS_SPECIFIED(sim_conf->sim_bus)) {
3964				if (sim_bus == sim_conf->sim_bus) {
3965					pathid = sim_conf->pathid;
3966					break;
3967				}
3968			} else if (sim_bus == 0) {
3969				/* Unspecified matches bus 0 */
3970				pathid = sim_conf->pathid;
3971				break;
3972			} else {
3973				printf("Ambiguous scbus configuration for %s%d "
3974				       "bus %d, cannot wire down.  The kernel "
3975				       "config entry for scbus%d should "
3976				       "specify a controller bus.\n"
3977				       "Scbus will be assigned dynamically.\n",
3978				       sim_name, sim_unit, sim_bus,
3979				       sim_conf->pathid);
3980                             break;
3981			}
3982		}
3983	}
3984
3985	if (pathid == CAM_XPT_PATH_ID) {
3986		pathid = xptnextfreebus(*nextpath);
3987		*nextpath = pathid + 1;
3988	}
3989	return (pathid);
3990}
3991
3992int32_t
3993xpt_bus_deregister(path_id)
3994	u_int8_t path_id;
3995{
3996	/* XXX */
3997	return (CAM_SUCCESS);
3998}
3999
4000void
4001xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4002{
4003	struct cam_eb *bus;
4004	struct cam_et *target, *next_target;
4005	struct cam_ed *device, *next_device;
4006	int s;
4007
4008	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
4009
4010	/*
4011	 * Most async events come from a CAM interrupt context.  In
4012	 * a few cases, the error recovery code at the peripheral layer,
4013	 * which may run from our SWI or a process context, may signal
4014	 * deferred events with a call to xpt_async. Ensure async
4015	 * notifications are serialized by blocking cam interrupts.
4016	 */
4017	s = splcam();
4018
4019	bus = path->bus;
4020
4021	/*
4022	 * Freeze the SIM queue for SCSI_DELAY ms to
4023	 * allow the bus to settle.
4024	 */
4025	if (async_code == AC_BUS_RESET) {
4026		struct cam_sim *sim;
4027
4028		sim = bus->sim;
4029
4030		/*
4031		 * If there isn't already another timeout pending, go ahead
4032		 * and freeze the simq and set the timeout flag.  If there
4033		 * is another timeout pending, replace it with this
4034		 * timeout.  There could be two bus reset async broadcasts
4035		 * sent for some dual-channel controllers.
4036		 */
4037		if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) == 0) {
4038			xpt_freeze_simq(sim, 1);
4039			sim->flags |= CAM_SIM_REL_TIMEOUT_PENDING;
4040		} else
4041			untimeout(xpt_release_simq_timeout, sim, sim->c_handle);
4042
4043		sim->c_handle = timeout(xpt_release_simq_timeout,
4044					sim, (SCSI_DELAY * hz) / 1000);
4045	}
4046
4047	for (target = TAILQ_FIRST(&bus->et_entries);
4048	     target != NULL;
4049	     target = next_target) {
4050
4051		next_target = TAILQ_NEXT(target, links);
4052
4053		if (path->target != target
4054		 && path->target != NULL)
4055			continue;
4056
4057		for (device = TAILQ_FIRST(&target->ed_entries);
4058		     device != NULL;
4059		     device = next_device) {
4060			cam_status status;
4061			struct cam_path newpath;
4062
4063			next_device = TAILQ_NEXT(device, links);
4064
4065			if (path->device != device
4066			 && path->device != NULL)
4067				continue;
4068
4069			/*
4070			 * We need our own path with wildcards expanded to
4071			 * handle certain types of events.
4072			 */
4073			if ((async_code == AC_SENT_BDR)
4074			 || (async_code == AC_BUS_RESET)
4075			 || (async_code == AC_INQ_CHANGED))
4076				status = xpt_compile_path(&newpath, NULL,
4077							  bus->path_id,
4078							  target->target_id,
4079							  device->lun_id);
4080			else
4081				status = CAM_REQ_CMP_ERR;
4082
4083			if (status == CAM_REQ_CMP) {
4084
4085				/*
4086				 * Allow transfer negotiation to occur in a
4087				 * tag free environment.
4088				 */
4089				if (async_code == AC_SENT_BDR
4090				  || async_code == AC_BUS_RESET)
4091					xpt_toggle_tags(&newpath);
4092
4093				/*
4094				 * If we send a BDR, freeze the device queue
4095				 * for SCSI_DELAY ms to allow it to settle
4096				 * down.
4097				 */
4098				if (async_code == AC_SENT_BDR) {
4099					xpt_freeze_devq(&newpath, 1);
4100					/*
4101					 * Although this looks bad, it
4102					 * isn't as bad as it seems.  We're
4103					 * passing in a stack-allocated path
4104					 * that we then immediately release
4105					 * after scheduling a timeout to
4106					 * release the device queue.  So
4107					 * the path won't be around when
4108					 * the timeout fires, right?  Right.
4109					 * But it doesn't matter, since
4110					 * xpt_release_devq and its timeout
4111					 * function both take the device as
4112					 * an argument.  Theoretically, the
4113					 * device will still be there when
4114					 * the timeout fires, even though
4115					 * the path will be gone.
4116					 */
4117					cam_release_devq(
4118						   &newpath,
4119						   /*relsim_flags*/
4120						   RELSIM_RELEASE_AFTER_TIMEOUT,
4121						   /*reduction*/0,
4122						   /*timeout*/SCSI_DELAY,
4123						   /*getcount_only*/0);
4124				} else if (async_code == AC_INQ_CHANGED) {
4125					/*
4126					 * We've sent a start unit command, or
4127					 * something similar to a device that
4128					 * may have caused its inquiry data to
4129					 * change. So we re-scan the device to
4130					 * refresh the inquiry data for it.
4131					 */
4132					xpt_scan_lun(newpath.periph, &newpath,
4133						     CAM_EXPECT_INQ_CHANGE,
4134						     NULL);
4135				}
4136				xpt_release_path(&newpath);
4137			} else if (async_code == AC_LOST_DEVICE) {
4138				device->flags |= CAM_DEV_UNCONFIGURED;
4139			} else if (async_code == AC_TRANSFER_NEG) {
4140				struct ccb_trans_settings *settings;
4141
4142				settings =
4143				    (struct ccb_trans_settings *)async_arg;
4144				xpt_set_transfer_settings(settings, device,
4145							  /*async_update*/TRUE);
4146			}
4147
4148			xpt_async_bcast(&device->asyncs,
4149					async_code,
4150					path,
4151					async_arg);
4152		}
4153	}
4154
4155	/*
4156	 * If this wasn't a fully wildcarded async, tell all
4157	 * clients that want all async events.
4158	 */
4159	if (bus != xpt_periph->path->bus)
4160		xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
4161				path, async_arg);
4162	splx(s);
4163}
4164
4165static void
4166xpt_async_bcast(struct async_list *async_head,
4167		u_int32_t async_code,
4168		struct cam_path *path, void *async_arg)
4169{
4170	struct async_node *cur_entry;
4171
4172	cur_entry = SLIST_FIRST(async_head);
4173	while (cur_entry != NULL) {
4174		struct async_node *next_entry;
4175		/*
4176		 * Grab the next list entry before we call the current
4177		 * entry's callback.  This is because the callback function
4178		 * can delete its async callback entry.
4179		 */
4180		next_entry = SLIST_NEXT(cur_entry, links);
4181		if ((cur_entry->event_enable & async_code) != 0)
4182			cur_entry->callback(cur_entry->callback_arg,
4183					    async_code, path,
4184					    async_arg);
4185		cur_entry = next_entry;
4186	}
4187}
4188
4189u_int32_t
4190xpt_freeze_devq(struct cam_path *path, u_int count)
4191{
4192	int s;
4193	struct ccb_hdr *ccbh;
4194
4195	s = splcam();
4196	path->device->qfrozen_cnt += count;
4197
4198	/*
4199	 * Mark the last CCB in the queue as needing
4200	 * to be requeued if the driver hasn't
4201	 * changed it's state yet.  This fixes a race
4202	 * where a ccb is just about to be queued to
4203	 * a controller driver when it's interrupt routine
4204	 * freezes the queue.  To completly close the
4205	 * hole, controller drives must check to see
4206	 * if a ccb's status is still CAM_REQ_INPROG
4207	 * under spl protection just before they queue
4208	 * the CCB.  See ahc_action/ahc_freeze_devq for
4209	 * an example.
4210	 */
4211	ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
4212	if (ccbh && ccbh->status == CAM_REQ_INPROG)
4213		ccbh->status = CAM_REQUEUE_REQ;
4214	splx(s);
4215	return (path->device->qfrozen_cnt);
4216}
4217
4218u_int32_t
4219xpt_freeze_simq(struct cam_sim *sim, u_int count)
4220{
4221	sim->devq->send_queue.qfrozen_cnt += count;
4222	if (sim->devq->active_dev != NULL) {
4223		struct ccb_hdr *ccbh;
4224
4225		ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
4226				  ccb_hdr_tailq);
4227		if (ccbh && ccbh->status == CAM_REQ_INPROG)
4228			ccbh->status = CAM_REQUEUE_REQ;
4229	}
4230	return (sim->devq->send_queue.qfrozen_cnt);
4231}
4232
4233static void
4234xpt_release_devq_timeout(void *arg)
4235{
4236	struct cam_ed *device;
4237
4238	device = (struct cam_ed *)arg;
4239
4240	xpt_release_devq(device, /*run_queue*/TRUE);
4241}
4242
4243void
4244xpt_release_devq(struct cam_ed *dev, int run_queue)
4245{
4246	int	rundevq;
4247	int	s;
4248
4249	rundevq = 0;
4250	s = splcam();
4251	if (dev->qfrozen_cnt > 0) {
4252
4253		dev->qfrozen_cnt--;
4254		if (dev->qfrozen_cnt == 0) {
4255
4256			/*
4257			 * No longer need to wait for a successful
4258			 * command completion.
4259			 */
4260			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4261
4262			/*
4263			 * Remove any timeouts that might be scheduled
4264			 * to release this queue.
4265			 */
4266			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4267				untimeout(xpt_release_devq_timeout, dev,
4268					  dev->c_handle);
4269				dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4270			}
4271
4272			/*
4273			 * Now that we are unfrozen schedule the
4274			 * device so any pending transactions are
4275			 * run.
4276			 */
4277			if ((dev->ccbq.queue.entries > 0)
4278			 && (xpt_schedule_dev_sendq(dev->target->bus, dev))
4279			 && (run_queue != 0)) {
4280				rundevq = 1;
4281			}
4282		}
4283	}
4284	splx(s);
4285	if (rundevq != 0)
4286		xpt_run_dev_sendq(dev->target->bus);
4287}
4288
4289void
4290xpt_release_simq(struct cam_sim *sim, int run_queue)
4291{
4292	int	s;
4293	struct	camq *sendq;
4294
4295	sendq = &(sim->devq->send_queue);
4296	s = splcam();
4297	if (sendq->qfrozen_cnt > 0) {
4298
4299		sendq->qfrozen_cnt--;
4300		if (sendq->qfrozen_cnt == 0) {
4301			struct cam_eb *bus;
4302
4303			/*
4304			 * If there is a timeout scheduled to release this
4305			 * sim queue, remove it.  The queue frozen count is
4306			 * already at 0.
4307			 */
4308			if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4309				untimeout(xpt_release_simq_timeout, sim,
4310					  sim->c_handle);
4311				sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4312			}
4313			bus = xpt_find_bus(sim->path_id);
4314			splx(s);
4315
4316			if (run_queue) {
4317				/*
4318				 * Now that we are unfrozen run the send queue.
4319				 */
4320				xpt_run_dev_sendq(bus);
4321			}
4322			xpt_release_bus(bus);
4323		} else
4324			splx(s);
4325	} else
4326		splx(s);
4327}
4328
4329static void
4330xpt_release_simq_timeout(void *arg)
4331{
4332	struct cam_sim *sim;
4333
4334	sim = (struct cam_sim *)arg;
4335	xpt_release_simq(sim, /* run_queue */ TRUE);
4336}
4337
4338void
4339xpt_done(union ccb *done_ccb)
4340{
4341	int s;
4342
4343	s = splcam();
4344
4345	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4346	switch (done_ccb->ccb_h.func_code) {
4347	case XPT_SCSI_IO:
4348	case XPT_ENG_EXEC:
4349	case XPT_TARGET_IO:
4350	case XPT_ACCEPT_TARGET_IO:
4351	case XPT_CONT_TARGET_IO:
4352	case XPT_IMMED_NOTIFY:
4353	case XPT_SCAN_BUS:
4354	case XPT_SCAN_LUN:
4355	{
4356		/*
4357		 * Queue up the request for handling by our SWI handler
4358		 * any of the "non-immediate" type of ccbs.
4359		 */
4360		switch (done_ccb->ccb_h.path->periph->type) {
4361		case CAM_PERIPH_BIO:
4362			TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
4363					  sim_links.tqe);
4364			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4365			setsoftcambio();
4366			break;
4367		case CAM_PERIPH_NET:
4368			TAILQ_INSERT_TAIL(&cam_netq, &done_ccb->ccb_h,
4369					  sim_links.tqe);
4370			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4371			setsoftcamnet();
4372			break;
4373		}
4374		break;
4375	}
4376	default:
4377		break;
4378	}
4379	splx(s);
4380}
4381
4382union ccb *
4383xpt_alloc_ccb()
4384{
4385	union ccb *new_ccb;
4386
4387	new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_WAITOK);
4388	return (new_ccb);
4389}
4390
4391void
4392xpt_free_ccb(union ccb *free_ccb)
4393{
4394	free(free_ccb, M_DEVBUF);
4395}
4396
4397
4398
4399/* Private XPT functions */
4400
4401/*
4402 * Get a CAM control block for the caller. Charge the structure to the device
4403 * referenced by the path.  If the this device has no 'credits' then the
4404 * device already has the maximum number of outstanding operations under way
4405 * and we return NULL. If we don't have sufficient resources to allocate more
4406 * ccbs, we also return NULL.
4407 */
4408static union ccb *
4409xpt_get_ccb(struct cam_ed *device)
4410{
4411	union ccb *new_ccb;
4412	int s;
4413
4414	s = splsoftcam();
4415	if ((new_ccb = (union ccb *)ccb_freeq.slh_first) == NULL) {
4416		new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_NOWAIT);
4417                if (new_ccb == NULL) {
4418			splx(s);
4419			return (NULL);
4420		}
4421		callout_handle_init(&new_ccb->ccb_h.timeout_ch);
4422		SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
4423				  xpt_links.sle);
4424		xpt_ccb_count++;
4425	}
4426	cam_ccbq_take_opening(&device->ccbq);
4427	SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
4428	splx(s);
4429	return (new_ccb);
4430}
4431
4432static void
4433xpt_release_bus(struct cam_eb *bus)
4434{
4435	int s;
4436
4437	s = splcam();
4438	if ((--bus->refcount == 0)
4439	 && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
4440		TAILQ_REMOVE(&xpt_busses, bus, links);
4441		bus_generation++;
4442		splx(s);
4443		free(bus, M_DEVBUF);
4444	} else
4445		splx(s);
4446}
4447
4448static struct cam_et *
4449xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4450{
4451	struct cam_et *target;
4452
4453	target = (struct cam_et *)malloc(sizeof(*target), M_DEVBUF, M_NOWAIT);
4454	if (target != NULL) {
4455		struct cam_et *cur_target;
4456
4457		target->bus = bus;
4458		target->target_id = target_id;
4459		target->refcount = 1;
4460		/*
4461		 * Hold a reference to our parent bus so it
4462		 * will not go away before we do.
4463		 */
4464		bus->refcount++;
4465		TAILQ_INIT(&target->ed_entries);
4466
4467		/* Insertion sort into our bus's target list */
4468		cur_target = TAILQ_FIRST(&bus->et_entries);
4469		while (cur_target != NULL && cur_target->target_id < target_id)
4470			cur_target = TAILQ_NEXT(cur_target, links);
4471
4472		if (cur_target != NULL) {
4473			TAILQ_INSERT_BEFORE(cur_target, target, links);
4474		} else {
4475			TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4476		}
4477		bus->generation++;
4478	}
4479	return (target);
4480}
4481
4482static void
4483xpt_release_target(struct cam_eb *bus, struct cam_et *target)
4484{
4485	int s;
4486
4487	s = splcam();
4488	if ((--target->refcount == 0)
4489	 && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
4490		TAILQ_REMOVE(&bus->et_entries, target, links);
4491		bus->generation++;
4492		splx(s);
4493		free(target, M_DEVBUF);
4494		xpt_release_bus(bus);
4495	} else
4496		splx(s);
4497}
4498
4499static struct cam_ed *
4500xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4501{
4502	struct	   cam_ed *device;
4503	struct	   cam_devq *devq;
4504	cam_status status;
4505
4506	/* Make space for us in the device queue on our bus */
4507	devq = bus->sim->devq;
4508	status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
4509
4510	if (status != CAM_REQ_CMP) {
4511		device = NULL;
4512	} else {
4513		device = (struct cam_ed *)malloc(sizeof(*device),
4514						 M_DEVBUF, M_NOWAIT);
4515	}
4516
4517	if (device != NULL) {
4518		struct cam_ed *cur_device;
4519
4520		bzero(device, sizeof(*device));
4521
4522		SLIST_INIT(&device->asyncs);
4523		SLIST_INIT(&device->periphs);
4524		callout_handle_init(&device->c_handle);
4525		device->refcount = 1;
4526		device->flags |= CAM_DEV_UNCONFIGURED;
4527
4528		cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
4529		device->alloc_ccb_entry.device = device;
4530		cam_init_pinfo(&device->send_ccb_entry.pinfo);
4531		device->send_ccb_entry.device = device;
4532
4533		device->target = target;
4534		/*
4535		 * Hold a reference to our parent target so it
4536		 * will not go away before we do.
4537		 */
4538		target->refcount++;
4539
4540		device->lun_id = lun_id;
4541
4542		/* Initialize our queues */
4543		if (camq_init(&device->drvq, 0) != 0) {
4544			free(device, M_DEVBUF);
4545			return (NULL);
4546		}
4547
4548		if (cam_ccbq_init(&device->ccbq,
4549				  bus->sim->max_dev_openings) != 0) {
4550			camq_fini(&device->drvq);
4551			free(device, M_DEVBUF);
4552			return (NULL);
4553		}
4554		/*
4555		 * XXX should be limited by number of CCBs this bus can
4556		 * do.
4557		 */
4558		xpt_max_ccbs += device->ccbq.devq_openings;
4559		/* Insertion sort into our target's device list */
4560		cur_device = TAILQ_FIRST(&target->ed_entries);
4561		while (cur_device != NULL && cur_device->lun_id < lun_id)
4562			cur_device = TAILQ_NEXT(cur_device, links);
4563		if (cur_device != NULL) {
4564			TAILQ_INSERT_BEFORE(cur_device, device, links);
4565		} else {
4566			TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4567		}
4568		target->generation++;
4569	}
4570	return (device);
4571}
4572
4573static void
4574xpt_release_device(struct cam_eb *bus, struct cam_et *target,
4575		   struct cam_ed *device)
4576{
4577	int s;
4578
4579	s = splcam();
4580	if ((--device->refcount == 0)
4581	 && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
4582		struct cam_devq *devq;
4583
4584		if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
4585		 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
4586			panic("Removing device while still queued for ccbs");
4587		TAILQ_REMOVE(&target->ed_entries, device,links);
4588		target->generation++;
4589		xpt_max_ccbs -= device->ccbq.devq_openings;
4590		/* Release our slot in the devq */
4591		devq = bus->sim->devq;
4592		cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
4593		splx(s);
4594		free(device, M_DEVBUF);
4595	} else
4596		splx(s);
4597}
4598
4599static u_int32_t
4600xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
4601{
4602	int	s;
4603	int	diff;
4604	int	result;
4605	struct	cam_ed *dev;
4606
4607	dev = path->device;
4608	s = splsoftcam();
4609
4610	diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
4611	result = cam_ccbq_resize(&dev->ccbq, newopenings);
4612	if (result == CAM_REQ_CMP && (diff < 0)) {
4613		dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
4614	}
4615	/* Adjust the global limit */
4616	xpt_max_ccbs += diff;
4617	splx(s);
4618	return (result);
4619}
4620
4621static struct cam_eb *
4622xpt_find_bus(path_id_t path_id)
4623{
4624	struct cam_eb *bus;
4625
4626	for (bus = TAILQ_FIRST(&xpt_busses);
4627	     bus != NULL;
4628	     bus = TAILQ_NEXT(bus, links)) {
4629		if (bus->path_id == path_id) {
4630			bus->refcount++;
4631			break;
4632		}
4633	}
4634	return (bus);
4635}
4636
4637static struct cam_et *
4638xpt_find_target(struct cam_eb *bus, target_id_t	target_id)
4639{
4640	struct cam_et *target;
4641
4642	for (target = TAILQ_FIRST(&bus->et_entries);
4643	     target != NULL;
4644	     target = TAILQ_NEXT(target, links)) {
4645		if (target->target_id == target_id) {
4646			target->refcount++;
4647			break;
4648		}
4649	}
4650	return (target);
4651}
4652
4653static struct cam_ed *
4654xpt_find_device(struct cam_et *target, lun_id_t lun_id)
4655{
4656	struct cam_ed *device;
4657
4658	for (device = TAILQ_FIRST(&target->ed_entries);
4659	     device != NULL;
4660	     device = TAILQ_NEXT(device, links)) {
4661		if (device->lun_id == lun_id) {
4662			device->refcount++;
4663			break;
4664		}
4665	}
4666	return (device);
4667}
4668
4669typedef struct {
4670	union	ccb *request_ccb;
4671	struct 	ccb_pathinq *cpi;
4672	int	pending_count;
4673} xpt_scan_bus_info;
4674
4675/*
4676 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
4677 * As the scan progresses, xpt_scan_bus is used as the
4678 * callback on completion function.
4679 */
4680static void
4681xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
4682{
4683	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
4684		  ("xpt_scan_bus\n"));
4685	switch (request_ccb->ccb_h.func_code) {
4686	case XPT_SCAN_BUS:
4687	{
4688		xpt_scan_bus_info *scan_info;
4689		union	ccb *work_ccb;
4690		struct	cam_path *path;
4691		u_int	i;
4692		u_int	max_target;
4693		u_int	initiator_id;
4694
4695		/* Find out the characteristics of the bus */
4696		work_ccb = xpt_alloc_ccb();
4697		xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
4698			      request_ccb->ccb_h.pinfo.priority);
4699		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
4700		xpt_action(work_ccb);
4701		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
4702			request_ccb->ccb_h.status = work_ccb->ccb_h.status;
4703			xpt_free_ccb(work_ccb);
4704			xpt_done(request_ccb);
4705			return;
4706		}
4707
4708		if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
4709			/*
4710			 * Can't scan the bus on an adapter that
4711			 * cannot perform the initiator role.
4712			 */
4713			request_ccb->ccb_h.status = CAM_REQ_CMP;
4714			xpt_free_ccb(work_ccb);
4715			xpt_done(request_ccb);
4716			return;
4717		}
4718
4719		/* Save some state for use while we probe for devices */
4720		scan_info = (xpt_scan_bus_info *)
4721		    malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_WAITOK);
4722		scan_info->request_ccb = request_ccb;
4723		scan_info->cpi = &work_ccb->cpi;
4724
4725		/* Cache on our stack so we can work asynchronously */
4726		max_target = scan_info->cpi->max_target;
4727		initiator_id = scan_info->cpi->initiator_id;
4728
4729		/*
4730		 * Don't count the initiator if the
4731		 * initiator is addressable.
4732		 */
4733		scan_info->pending_count = max_target + 1;
4734		if (initiator_id <= max_target)
4735			scan_info->pending_count--;
4736
4737		for (i = 0; i <= max_target; i++) {
4738			cam_status status;
4739		 	if (i == initiator_id)
4740				continue;
4741
4742			status = xpt_create_path(&path, xpt_periph,
4743						 request_ccb->ccb_h.path_id,
4744						 i, 0);
4745			if (status != CAM_REQ_CMP) {
4746				printf("xpt_scan_bus: xpt_create_path failed"
4747				       " with status %#x, bus scan halted\n",
4748				       status);
4749				break;
4750			}
4751			work_ccb = xpt_alloc_ccb();
4752			xpt_setup_ccb(&work_ccb->ccb_h, path,
4753				      request_ccb->ccb_h.pinfo.priority);
4754			work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
4755			work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
4756			work_ccb->ccb_h.ppriv_ptr0 = scan_info;
4757			work_ccb->crcn.flags = request_ccb->crcn.flags;
4758#if 0
4759			printf("xpt_scan_bus: probing %d:%d:%d\n",
4760				request_ccb->ccb_h.path_id, i, 0);
4761#endif
4762			xpt_action(work_ccb);
4763		}
4764		break;
4765	}
4766	case XPT_SCAN_LUN:
4767	{
4768		xpt_scan_bus_info *scan_info;
4769		path_id_t path_id;
4770		target_id_t target_id;
4771		lun_id_t lun_id;
4772
4773		/* Reuse the same CCB to query if a device was really found */
4774		scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
4775		xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
4776			      request_ccb->ccb_h.pinfo.priority);
4777		request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
4778
4779		path_id = request_ccb->ccb_h.path_id;
4780		target_id = request_ccb->ccb_h.target_id;
4781		lun_id = request_ccb->ccb_h.target_lun;
4782		xpt_action(request_ccb);
4783
4784#if 0
4785		printf("xpt_scan_bus: got back probe from %d:%d:%d\n",
4786			path_id, target_id, lun_id);
4787#endif
4788
4789		if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
4790			struct cam_ed *device;
4791			struct cam_et *target;
4792			int s;
4793
4794			/*
4795			 * If we already probed lun 0 successfully, or
4796			 * we have additional configured luns on this
4797			 * target that might have "gone away", go onto
4798			 * the next lun.
4799			 */
4800			target = request_ccb->ccb_h.path->target;
4801			s = splcam();
4802			device = TAILQ_FIRST(&target->ed_entries);
4803			if (device != NULL)
4804				device = TAILQ_NEXT(device, links);
4805			splx(s);
4806
4807			if ((lun_id != 0) || (device != NULL)) {
4808				/* Try the next lun */
4809				lun_id++;
4810			}
4811		} else {
4812			struct cam_ed *device;
4813
4814			device = request_ccb->ccb_h.path->device;
4815
4816			if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
4817				/* Try the next lun */
4818				lun_id++;
4819			}
4820		}
4821
4822		xpt_free_path(request_ccb->ccb_h.path);
4823
4824		/* Check Bounds */
4825		if ((lun_id == request_ccb->ccb_h.target_lun)
4826		 || lun_id > scan_info->cpi->max_lun) {
4827			/* We're done */
4828
4829			xpt_free_ccb(request_ccb);
4830			scan_info->pending_count--;
4831			if (scan_info->pending_count == 0) {
4832				xpt_free_ccb((union ccb *)scan_info->cpi);
4833				request_ccb = scan_info->request_ccb;
4834				free(scan_info, M_TEMP);
4835				request_ccb->ccb_h.status = CAM_REQ_CMP;
4836				xpt_done(request_ccb);
4837			}
4838		} else {
4839			/* Try the next device */
4840			struct cam_path *path;
4841			cam_status status;
4842
4843			path = request_ccb->ccb_h.path;
4844			status = xpt_create_path(&path, xpt_periph,
4845						 path_id, target_id, lun_id);
4846			if (status != CAM_REQ_CMP) {
4847				printf("xpt_scan_bus: xpt_create_path failed "
4848				       "with status %#x, halting LUN scan\n",
4849			 	       status);
4850				xpt_free_ccb(request_ccb);
4851				scan_info->pending_count--;
4852				if (scan_info->pending_count == 0) {
4853					xpt_free_ccb(
4854						(union ccb *)scan_info->cpi);
4855					request_ccb = scan_info->request_ccb;
4856					free(scan_info, M_TEMP);
4857					request_ccb->ccb_h.status = CAM_REQ_CMP;
4858					xpt_done(request_ccb);
4859					break;
4860				}
4861			}
4862			xpt_setup_ccb(&request_ccb->ccb_h, path,
4863				      request_ccb->ccb_h.pinfo.priority);
4864			request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
4865			request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
4866			request_ccb->ccb_h.ppriv_ptr0 = scan_info;
4867			request_ccb->crcn.flags =
4868				scan_info->request_ccb->crcn.flags;
4869#if 0
4870			xpt_print_path(path);
4871			printf("xpt_scan bus probing\n");
4872#endif
4873			xpt_action(request_ccb);
4874		}
4875		break;
4876	}
4877	default:
4878		break;
4879	}
4880}
4881
4882typedef enum {
4883	PROBE_TUR,
4884	PROBE_INQUIRY,
4885	PROBE_MODE_SENSE,
4886	PROBE_SERIAL_NUM,
4887	PROBE_TUR_FOR_NEGOTIATION
4888} probe_action;
4889
4890typedef enum {
4891	PROBE_INQUIRY_CKSUM	= 0x01,
4892	PROBE_SERIAL_CKSUM	= 0x02,
4893	PROBE_NO_ANNOUNCE	= 0x04
4894} probe_flags;
4895
4896typedef struct {
4897	TAILQ_HEAD(, ccb_hdr) request_ccbs;
4898	probe_action	action;
4899	union ccb	saved_ccb;
4900	probe_flags	flags;
4901	MD5_CTX		context;
4902	u_int8_t	digest[16];
4903} probe_softc;
4904
4905static void
4906xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
4907	     cam_flags flags, union ccb *request_ccb)
4908{
4909	struct ccb_pathinq cpi;
4910	cam_status status;
4911	struct cam_path *new_path;
4912	struct cam_periph *old_periph;
4913	int s;
4914
4915	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
4916		  ("xpt_scan_lun\n"));
4917
4918	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
4919	cpi.ccb_h.func_code = XPT_PATH_INQ;
4920	xpt_action((union ccb *)&cpi);
4921
4922	if (cpi.ccb_h.status != CAM_REQ_CMP) {
4923		if (request_ccb != NULL) {
4924			request_ccb->ccb_h.status = cpi.ccb_h.status;
4925			xpt_done(request_ccb);
4926		}
4927		return;
4928	}
4929
4930	if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
4931		/*
4932		 * Can't scan the bus on an adapter that
4933		 * cannot perform the initiator role.
4934		 */
4935		if (request_ccb != NULL) {
4936			request_ccb->ccb_h.status = CAM_REQ_CMP;
4937			xpt_done(request_ccb);
4938		}
4939		return;
4940	}
4941
4942	if (request_ccb == NULL) {
4943		request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT);
4944		if (request_ccb == NULL) {
4945			xpt_print_path(path);
4946			printf("xpt_scan_lun: can't allocate CCB, can't "
4947			       "continue\n");
4948			return;
4949		}
4950		new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT);
4951		if (new_path == NULL) {
4952			xpt_print_path(path);
4953			printf("xpt_scan_lun: can't allocate path, can't "
4954			       "continue\n");
4955			free(request_ccb, M_TEMP);
4956			return;
4957		}
4958		status = xpt_compile_path(new_path, xpt_periph,
4959					  path->bus->path_id,
4960					  path->target->target_id,
4961					  path->device->lun_id);
4962
4963		if (status != CAM_REQ_CMP) {
4964			xpt_print_path(path);
4965			printf("xpt_scan_lun: can't compile path, can't "
4966			       "continue\n");
4967			free(request_ccb, M_TEMP);
4968			free(new_path, M_TEMP);
4969			return;
4970		}
4971		xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
4972		request_ccb->ccb_h.cbfcnp = xptscandone;
4973		request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
4974		request_ccb->crcn.flags = flags;
4975	}
4976
4977	s = splsoftcam();
4978	if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
4979		probe_softc *softc;
4980
4981		softc = (probe_softc *)old_periph->softc;
4982		TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
4983				  periph_links.tqe);
4984	} else {
4985		status = cam_periph_alloc(proberegister, NULL, probecleanup,
4986					  probestart, "probe",
4987					  CAM_PERIPH_BIO,
4988					  request_ccb->ccb_h.path, NULL, 0,
4989					  request_ccb);
4990
4991		if (status != CAM_REQ_CMP) {
4992			xpt_print_path(path);
4993			printf("xpt_scan_lun: cam_alloc_periph returned an "
4994			       "error, can't continue probe\n");
4995			request_ccb->ccb_h.status = status;
4996			xpt_done(request_ccb);
4997		}
4998	}
4999	splx(s);
5000}
5001
5002static void
5003xptscandone(struct cam_periph *periph, union ccb *done_ccb)
5004{
5005	xpt_release_path(done_ccb->ccb_h.path);
5006	free(done_ccb->ccb_h.path, M_TEMP);
5007	free(done_ccb, M_TEMP);
5008}
5009
5010static cam_status
5011proberegister(struct cam_periph *periph, void *arg)
5012{
5013	struct ccb_getdev *cgd;
5014	probe_softc *softc;
5015	union ccb *ccb;
5016
5017	cgd = (struct ccb_getdev *)arg;
5018	if (periph == NULL) {
5019		printf("proberegister: periph was NULL!!\n");
5020		return(CAM_REQ_CMP_ERR);
5021	}
5022
5023	if (cgd == NULL) {
5024		printf("proberegister: no getdev CCB, can't register device\n");
5025		return(CAM_REQ_CMP_ERR);
5026	}
5027
5028	softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT);
5029
5030	if (softc == NULL) {
5031		printf("proberegister: Unable to probe new device. "
5032		       "Unable to allocate softc\n");
5033		return(CAM_REQ_CMP_ERR);
5034	}
5035	ccb = (union ccb *)cgd;
5036	TAILQ_INIT(&softc->request_ccbs);
5037	TAILQ_INSERT_TAIL(&softc->request_ccbs, &ccb->ccb_h, periph_links.tqe);
5038	softc->flags = 0;
5039	periph->softc = softc;
5040	cam_periph_acquire(periph);
5041	probeschedule(periph);
5042	return(CAM_REQ_CMP);
5043}
5044
5045static void
5046probeschedule(struct cam_periph *periph)
5047{
5048	union ccb *ccb;
5049	probe_softc *softc;
5050
5051	softc = (probe_softc *)periph->softc;
5052	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5053
5054	/*
5055	 * If a device has gone away and another device, or the same one,
5056	 * is back in the same place, it should have a unit attention
5057	 * condition pending.  It will not report the unit attention in
5058	 * response to an inquiry, which may leave invalid transfer
5059	 * negotiations in effect.  The TUR will reveal the unit attention
5060	 * condition.  Only send the TUR for lun 0, since some devices
5061	 * will get confused by commands other than inquiry to non-existent
5062	 * luns.  If you think a device has gone away start your scan from
5063	 * lun 0.  This will insure that any bogus transfer settings are
5064	 * invalidated.
5065	 */
5066	if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5067	 && (ccb->ccb_h.target_lun == 0))
5068		softc->action = PROBE_TUR;
5069	else
5070		softc->action = PROBE_INQUIRY;
5071
5072	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
5073		softc->flags |= PROBE_NO_ANNOUNCE;
5074	else
5075		softc->flags &= ~PROBE_NO_ANNOUNCE;
5076
5077	xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
5078}
5079
5080static void
5081probestart(struct cam_periph *periph, union ccb *start_ccb)
5082{
5083	/* Probe the device that our peripheral driver points to */
5084	struct ccb_scsiio *csio;
5085	probe_softc *softc;
5086
5087	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
5088
5089	softc = (probe_softc *)periph->softc;
5090	csio = &start_ccb->csio;
5091
5092	switch (softc->action) {
5093	case PROBE_TUR:
5094	case PROBE_TUR_FOR_NEGOTIATION:
5095	{
5096		scsi_test_unit_ready(csio,
5097				     /*retries*/4,
5098				     probedone,
5099				     MSG_SIMPLE_Q_TAG,
5100				     SSD_FULL_SIZE,
5101				     /*timeout*/60000);
5102		break;
5103	}
5104	case PROBE_INQUIRY:
5105	{
5106		struct scsi_inquiry_data *inq_buf;
5107
5108		inq_buf = &periph->path->device->inq_data;
5109		/*
5110		 * If the device is currently configured, we calculate an
5111		 * MD5 checksum of the inquiry data, and if the serial number
5112		 * length is greater than 0, add the serial number data
5113		 * into the checksum as well.  Once the inquiry and the
5114		 * serial number check finish, we attempt to figure out
5115		 * whether we still have the same device.
5116		 */
5117		if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5118
5119			MD5Init(&softc->context);
5120			MD5Update(&softc->context, (unsigned char *)inq_buf,
5121				  sizeof(struct scsi_inquiry_data));
5122			softc->flags |= PROBE_INQUIRY_CKSUM;
5123			if (periph->path->device->serial_num_len > 0) {
5124				MD5Update(&softc->context,
5125					  periph->path->device->serial_num,
5126					  periph->path->device->serial_num_len);
5127				softc->flags |= PROBE_SERIAL_CKSUM;
5128			}
5129			MD5Final(softc->digest, &softc->context);
5130		}
5131
5132		scsi_inquiry(csio,
5133			     /*retries*/4,
5134			     probedone,
5135			     MSG_SIMPLE_Q_TAG,
5136			     (u_int8_t *)inq_buf,
5137			     sizeof(*inq_buf),
5138			     /*evpd*/FALSE,
5139			     /*page_code*/0,
5140			     SSD_MIN_SIZE,
5141			     /*timeout*/60 * 1000);
5142		break;
5143	}
5144	case PROBE_MODE_SENSE:
5145	{
5146		void  *mode_buf;
5147		int    mode_buf_len;
5148
5149		mode_buf_len = sizeof(struct scsi_mode_header_6)
5150			     + sizeof(struct scsi_mode_blk_desc)
5151			     + sizeof(struct scsi_control_page);
5152		mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT);
5153		if (mode_buf != NULL) {
5154	                scsi_mode_sense(csio,
5155					/*retries*/4,
5156					probedone,
5157					MSG_SIMPLE_Q_TAG,
5158					/*dbd*/FALSE,
5159					SMS_PAGE_CTRL_CURRENT,
5160					SMS_CONTROL_MODE_PAGE,
5161					mode_buf,
5162					mode_buf_len,
5163					SSD_FULL_SIZE,
5164					/*timeout*/60000);
5165			break;
5166		}
5167		xpt_print_path(periph->path);
5168		printf("Unable to mode sense control page - malloc failure\n");
5169		softc->action = PROBE_SERIAL_NUM;
5170		/* FALLTHROUGH */
5171	}
5172	case PROBE_SERIAL_NUM:
5173	{
5174		struct scsi_vpd_unit_serial_number *serial_buf;
5175		struct cam_ed* device;
5176
5177		serial_buf = NULL;
5178		device = periph->path->device;
5179		device->serial_num = NULL;
5180		device->serial_num_len = 0;
5181
5182		if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0)
5183			serial_buf = (struct scsi_vpd_unit_serial_number *)
5184				malloc(sizeof(*serial_buf), M_TEMP, M_NOWAIT);
5185
5186		if (serial_buf != NULL) {
5187			bzero(serial_buf, sizeof(*serial_buf));
5188			scsi_inquiry(csio,
5189				     /*retries*/4,
5190				     probedone,
5191				     MSG_SIMPLE_Q_TAG,
5192				     (u_int8_t *)serial_buf,
5193				     sizeof(*serial_buf),
5194				     /*evpd*/TRUE,
5195				     SVPD_UNIT_SERIAL_NUMBER,
5196				     SSD_MIN_SIZE,
5197				     /*timeout*/60 * 1000);
5198			break;
5199		}
5200		/*
5201		 * We'll have to do without, let our probedone
5202		 * routine finish up for us.
5203		 */
5204		start_ccb->csio.data_ptr = NULL;
5205		probedone(periph, start_ccb);
5206		return;
5207	}
5208	}
5209	xpt_action(start_ccb);
5210}
5211
5212static void
5213probedone(struct cam_periph *periph, union ccb *done_ccb)
5214{
5215	probe_softc *softc;
5216	struct cam_path *path;
5217	u_int32_t  priority;
5218
5219	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
5220
5221	softc = (probe_softc *)periph->softc;
5222	path = done_ccb->ccb_h.path;
5223	priority = done_ccb->ccb_h.pinfo.priority;
5224
5225	switch (softc->action) {
5226	case PROBE_TUR:
5227	{
5228		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5229
5230			if (cam_periph_error(done_ccb, 0,
5231					     SF_NO_PRINT, NULL) == ERESTART)
5232				return;
5233			else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5234				/* Don't wedge the queue */
5235				xpt_release_devq(done_ccb->ccb_h.path->device,
5236						 /*run_queue*/TRUE);
5237		}
5238		softc->action = PROBE_INQUIRY;
5239		xpt_release_ccb(done_ccb);
5240		xpt_schedule(periph, priority);
5241		return;
5242	}
5243	case PROBE_INQUIRY:
5244	{
5245		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5246			struct scsi_inquiry_data *inq_buf;
5247			u_int8_t periph_qual;
5248			u_int8_t periph_dtype;
5249
5250			inq_buf = &path->device->inq_data;
5251
5252			periph_qual = SID_QUAL(inq_buf);
5253			periph_dtype = SID_TYPE(inq_buf);
5254			if (periph_dtype != T_NODEVICE) {
5255				switch(periph_qual) {
5256				case SID_QUAL_LU_CONNECTED:
5257				{
5258					xpt_find_quirk(path->device);
5259
5260					if ((inq_buf->flags & SID_CmdQue) != 0)
5261						softc->action =
5262						    PROBE_MODE_SENSE;
5263					else
5264						softc->action =
5265						    PROBE_SERIAL_NUM;
5266
5267					path->device->flags &=
5268						~CAM_DEV_UNCONFIGURED;
5269
5270					xpt_release_ccb(done_ccb);
5271					xpt_schedule(periph, priority);
5272					return;
5273				}
5274				default:
5275					break;
5276				}
5277			}
5278		} else if (cam_periph_error(done_ccb, 0,
5279					    done_ccb->ccb_h.target_lun > 0
5280					    ? SF_RETRY_UA|SF_QUIET_IR
5281					    : SF_RETRY_UA,
5282					    &softc->saved_ccb) == ERESTART) {
5283			return;
5284		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5285			/* Don't wedge the queue */
5286			xpt_release_devq(done_ccb->ccb_h.path->device,
5287					 /*run_queue*/TRUE);
5288		}
5289		/*
5290		 * If we get to this point, we got an error status back
5291		 * from the inquiry and the error status doesn't require
5292		 * automatically retrying the command.  Therefore, the
5293		 * inquiry failed.  If we had inquiry information before
5294		 * for this device, but this latest inquiry command failed,
5295		 * the device has probably gone away.  If this device isn't
5296		 * already marked unconfigured, notify the peripheral
5297		 * drivers that this device is no more.
5298		 */
5299		if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5300			/* Send the async notification. */
5301			xpt_async(AC_LOST_DEVICE, path, NULL);
5302
5303		xpt_release_ccb(done_ccb);
5304		break;
5305	}
5306	case PROBE_MODE_SENSE:
5307	{
5308		struct ccb_scsiio *csio;
5309		struct scsi_mode_header_6 *mode_hdr;
5310
5311		csio = &done_ccb->csio;
5312		mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
5313		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5314			struct scsi_control_page *page;
5315			u_int8_t *offset;
5316
5317			offset = ((u_int8_t *)&mode_hdr[1])
5318			    + mode_hdr->blk_desc_len;
5319			page = (struct scsi_control_page *)offset;
5320			path->device->queue_flags = page->queue_flags;
5321		} else if (cam_periph_error(done_ccb, 0,
5322					    SF_RETRY_UA|SF_NO_PRINT,
5323					    &softc->saved_ccb) == ERESTART) {
5324			return;
5325		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5326			/* Don't wedge the queue */
5327			xpt_release_devq(done_ccb->ccb_h.path->device,
5328					 /*run_queue*/TRUE);
5329		}
5330		xpt_release_ccb(done_ccb);
5331		free(mode_hdr, M_TEMP);
5332		softc->action = PROBE_SERIAL_NUM;
5333		xpt_schedule(periph, priority);
5334		return;
5335	}
5336	case PROBE_SERIAL_NUM:
5337	{
5338		struct ccb_scsiio *csio;
5339		struct scsi_vpd_unit_serial_number *serial_buf;
5340		u_int32_t  priority;
5341		int changed;
5342		int have_serialnum;
5343
5344		changed = 1;
5345		have_serialnum = 0;
5346		csio = &done_ccb->csio;
5347		priority = done_ccb->ccb_h.pinfo.priority;
5348		serial_buf =
5349		    (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
5350
5351		/* Clean up from previous instance of this device */
5352		if (path->device->serial_num != NULL) {
5353			free(path->device->serial_num, M_DEVBUF);
5354			path->device->serial_num = NULL;
5355			path->device->serial_num_len = 0;
5356		}
5357
5358		if (serial_buf == NULL) {
5359			/*
5360			 * Don't process the command as it was never sent
5361			 */
5362		} else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
5363		      && (serial_buf->length > 0)) {
5364
5365			have_serialnum = 1;
5366			path->device->serial_num =
5367				(u_int8_t *)malloc((serial_buf->length + 1),
5368						   M_DEVBUF, M_NOWAIT);
5369			if (path->device->serial_num != NULL) {
5370				bcopy(serial_buf->serial_num,
5371				      path->device->serial_num,
5372				      serial_buf->length);
5373				path->device->serial_num_len =
5374				    serial_buf->length;
5375				path->device->serial_num[serial_buf->length]
5376				    = '\0';
5377			}
5378		} else if (cam_periph_error(done_ccb, 0,
5379					    SF_RETRY_UA|SF_NO_PRINT,
5380					    &softc->saved_ccb) == ERESTART) {
5381			return;
5382		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5383			/* Don't wedge the queue */
5384			xpt_release_devq(done_ccb->ccb_h.path->device,
5385					 /*run_queue*/TRUE);
5386		}
5387
5388		/*
5389		 * Let's see if we have seen this device before.
5390		 */
5391		if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
5392			MD5_CTX context;
5393			u_int8_t digest[16];
5394
5395			MD5Init(&context);
5396
5397			MD5Update(&context,
5398				  (unsigned char *)&path->device->inq_data,
5399				  sizeof(struct scsi_inquiry_data));
5400
5401			if (have_serialnum)
5402				MD5Update(&context, serial_buf->serial_num,
5403					  serial_buf->length);
5404
5405			MD5Final(digest, &context);
5406			if (bcmp(softc->digest, digest, 16) == 0)
5407				changed = 0;
5408
5409			/*
5410			 * XXX Do we need to do a TUR in order to ensure
5411			 *     that the device really hasn't changed???
5412			 */
5413			if ((changed != 0)
5414			 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
5415				xpt_async(AC_LOST_DEVICE, path, NULL);
5416		}
5417		if (serial_buf != NULL)
5418			free(serial_buf, M_TEMP);
5419
5420		if (changed != 0) {
5421			/*
5422			 * Now that we have all the necessary
5423			 * information to safely perform transfer
5424			 * negotiations... Controllers don't perform
5425			 * any negotiation or tagged queuing until
5426			 * after the first XPT_SET_TRAN_SETTINGS ccb is
5427			 * received.  So, on a new device, just retreive
5428			 * the user settings, and set them as the current
5429			 * settings to set the device up.
5430			 */
5431			done_ccb->ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5432			done_ccb->cts.flags = CCB_TRANS_USER_SETTINGS;
5433			xpt_action(done_ccb);
5434			done_ccb->ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5435			done_ccb->cts.flags &= ~CCB_TRANS_USER_SETTINGS;
5436			done_ccb->cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
5437			xpt_action(done_ccb);
5438			xpt_release_ccb(done_ccb);
5439
5440			/*
5441			 * Perform a TUR to allow the controller to
5442			 * perform any necessary transfer negotiation.
5443			 */
5444			softc->action = PROBE_TUR_FOR_NEGOTIATION;
5445			xpt_schedule(periph, priority);
5446			return;
5447		}
5448		xpt_release_ccb(done_ccb);
5449		break;
5450	}
5451	case PROBE_TUR_FOR_NEGOTIATION:
5452		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5453			/* Don't wedge the queue */
5454			xpt_release_devq(done_ccb->ccb_h.path->device,
5455					 /*run_queue*/TRUE);
5456		}
5457
5458		path->device->flags &= ~CAM_DEV_UNCONFIGURED;
5459
5460		if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
5461			/* Inform the XPT that a new device has been found */
5462			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5463			xpt_action(done_ccb);
5464
5465			xpt_async(AC_FOUND_DEVICE, xpt_periph->path, done_ccb);
5466		}
5467		xpt_release_ccb(done_ccb);
5468		break;
5469	}
5470	done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5471	TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
5472	done_ccb->ccb_h.status = CAM_REQ_CMP;
5473	xpt_done(done_ccb);
5474	if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
5475		cam_periph_invalidate(periph);
5476		cam_periph_release(periph);
5477	} else {
5478		probeschedule(periph);
5479	}
5480}
5481
5482static void
5483probecleanup(struct cam_periph *periph)
5484{
5485	free(periph->softc, M_TEMP);
5486}
5487
5488static void
5489xpt_find_quirk(struct cam_ed *device)
5490{
5491	caddr_t	match;
5492
5493	match = cam_quirkmatch((caddr_t)&device->inq_data,
5494			       (caddr_t)xpt_quirk_table,
5495			       sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
5496			       sizeof(*xpt_quirk_table), scsi_inquiry_match);
5497
5498	if (match == NULL)
5499		panic("xpt_find_quirk: device didn't match wildcard entry!!");
5500
5501	device->quirk = (struct xpt_quirk_entry *)match;
5502}
5503
5504static void
5505xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
5506			  int async_update)
5507{
5508	struct	cam_sim *sim;
5509	int	qfrozen;
5510
5511	sim = cts->ccb_h.path->bus->sim;
5512	if (async_update == FALSE) {
5513		struct	scsi_inquiry_data *inq_data;
5514		struct	ccb_pathinq cpi;
5515
5516		if (device == NULL) {
5517			cts->ccb_h.status = CAM_PATH_INVALID;
5518			xpt_done((union ccb *)cts);
5519			return;
5520		}
5521
5522		/*
5523		 * Perform sanity checking against what the
5524		 * controller and device can do.
5525		 */
5526		xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
5527		cpi.ccb_h.func_code = XPT_PATH_INQ;
5528		xpt_action((union ccb *)&cpi);
5529
5530		inq_data = &device->inq_data;
5531		if ((inq_data->flags & SID_Sync) == 0
5532		 || (cpi.hba_inquiry & PI_SDTR_ABLE) == 0) {
5533			/* Force async */
5534			cts->sync_period = 0;
5535			cts->sync_offset = 0;
5536		}
5537
5538		switch (cts->bus_width) {
5539		case MSG_EXT_WDTR_BUS_32_BIT:
5540			if ((inq_data->flags & SID_WBus32) != 0
5541			 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
5542				break;
5543			/* Fall Through to 16-bit */
5544		case MSG_EXT_WDTR_BUS_16_BIT:
5545			if ((inq_data->flags & SID_WBus16) != 0
5546			 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
5547				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
5548				break;
5549			}
5550			/* Fall Through to 8-bit */
5551		default: /* New bus width?? */
5552		case MSG_EXT_WDTR_BUS_8_BIT:
5553			/* All targets can do this */
5554			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
5555			break;
5556		}
5557
5558		if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) {
5559			/*
5560			 * Can't tag queue without disconnection.
5561			 */
5562			cts->flags &= ~CCB_TRANS_TAG_ENB;
5563			cts->valid |= CCB_TRANS_TQ_VALID;
5564		}
5565
5566		if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
5567		 || (inq_data->flags & SID_CmdQue) == 0
5568		 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
5569		 || (device->quirk->mintags == 0)) {
5570			/*
5571			 * Can't tag on hardware that doesn't support,
5572			 * doesn't have it enabled, or has broken tag support.
5573			 */
5574			cts->flags &= ~CCB_TRANS_TAG_ENB;
5575		}
5576	}
5577
5578	qfrozen = FALSE;
5579	if ((cts->valid & CCB_TRANS_TQ_VALID) != 0
5580	 && (async_update == FALSE)) {
5581		int device_tagenb;
5582
5583		/*
5584		 * If we are transitioning from tags to no-tags or
5585		 * vice-versa, we need to carefully freeze and restart
5586		 * the queue so that we don't overlap tagged and non-tagged
5587		 * commands.  We also temporarily stop tags if there is
5588		 * a change in transfer negotiation settings to allow
5589		 * "tag-less" negotiation.
5590		 */
5591		if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5592		 || (device->inq_flags & SID_CmdQue) != 0)
5593			device_tagenb = TRUE;
5594		else
5595			device_tagenb = FALSE;
5596
5597		if (((cts->flags & CCB_TRANS_TAG_ENB) != 0
5598		  && device_tagenb == FALSE)
5599		 || ((cts->flags & CCB_TRANS_TAG_ENB) == 0
5600		  && device_tagenb == TRUE)) {
5601
5602			if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
5603				/*
5604				 * Delay change to use tags until after a
5605				 * few commands have gone to this device so
5606				 * the controller has time to perform transfer
5607				 * negotiations without tagged messages getting
5608				 * in the way.
5609				 */
5610				device->tag_delay_count = CAM_TAG_DELAY_COUNT;
5611				device->flags |= CAM_DEV_TAG_AFTER_COUNT;
5612			} else {
5613				xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
5614				qfrozen = TRUE;
5615		  		device->inq_flags &= ~SID_CmdQue;
5616				xpt_dev_ccbq_resize(cts->ccb_h.path,
5617						    sim->max_dev_openings);
5618				device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
5619				device->tag_delay_count = 0;
5620			}
5621		}
5622	}
5623
5624	if (async_update == FALSE) {
5625		/*
5626		 * If we are currently performing tagged transactions to
5627		 * this device and want to change its negotiation parameters,
5628		 * go non-tagged for a bit to give the controller a chance to
5629		 * negotiate unhampered by tag messages.
5630		 */
5631		if ((device->inq_flags & SID_CmdQue) != 0
5632		 && (cts->flags & (CCB_TRANS_SYNC_RATE_VALID|
5633				   CCB_TRANS_SYNC_OFFSET_VALID|
5634				   CCB_TRANS_BUS_WIDTH_VALID)) != 0)
5635			xpt_toggle_tags(cts->ccb_h.path);
5636
5637		(*(sim->sim_action))(sim, (union ccb *)cts);
5638	}
5639
5640	if (qfrozen) {
5641		struct ccb_relsim crs;
5642
5643		xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
5644			      /*priority*/1);
5645		crs.ccb_h.func_code = XPT_REL_SIMQ;
5646		crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
5647		crs.openings
5648		    = crs.release_timeout
5649		    = crs.qfrozen_cnt
5650		    = 0;
5651		xpt_action((union ccb *)&crs);
5652	}
5653}
5654
5655static void
5656xpt_toggle_tags(struct cam_path *path)
5657{
5658	/*
5659	 * Give controllers a chance to renegotiate
5660	 * before starting tag operations.  We
5661	 * "toggle" tagged queuing off then on
5662	 * which causes the tag enable command delay
5663	 * counter to come into effect.
5664	 */
5665	if ((path->device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5666	 || (path->device->inq_flags & SID_CmdQue) != 0) {
5667		struct ccb_trans_settings cts;
5668
5669		xpt_setup_ccb(&cts.ccb_h, path, 1);
5670		cts.flags = 0;
5671		cts.valid = CCB_TRANS_TQ_VALID;
5672		xpt_set_transfer_settings(&cts, path->device,
5673					  /*async_update*/TRUE);
5674		cts.flags = CCB_TRANS_TAG_ENB;
5675		xpt_set_transfer_settings(&cts, path->device,
5676					  /*async_update*/TRUE);
5677	}
5678}
5679
5680static void
5681xpt_start_tags(struct cam_path *path)
5682{
5683	struct ccb_relsim crs;
5684	struct cam_ed *device;
5685	struct cam_sim *sim;
5686	int    newopenings;
5687
5688	device = path->device;
5689	sim = path->bus->sim;
5690	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
5691	xpt_freeze_devq(path, /*count*/1);
5692	device->inq_flags |= SID_CmdQue;
5693	newopenings = min(device->quirk->maxtags, sim->max_tagged_dev_openings);
5694	xpt_dev_ccbq_resize(path, newopenings);
5695	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
5696	crs.ccb_h.func_code = XPT_REL_SIMQ;
5697	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
5698	crs.openings
5699	    = crs.release_timeout
5700	    = crs.qfrozen_cnt
5701	    = 0;
5702	xpt_action((union ccb *)&crs);
5703}
5704
5705static int busses_to_config;
5706
5707static int
5708xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
5709{
5710	if (bus->path_id != CAM_XPT_PATH_ID)
5711		busses_to_config++;
5712
5713	return(1);
5714}
5715
5716static int
5717xptconfigfunc(struct cam_eb *bus, void *arg)
5718{
5719	struct	cam_path *path;
5720	union	ccb *work_ccb;
5721
5722	if (bus->path_id != CAM_XPT_PATH_ID) {
5723		cam_status status;
5724
5725		work_ccb = xpt_alloc_ccb();
5726		if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
5727					      CAM_TARGET_WILDCARD,
5728					      CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){
5729			printf("xptconfigfunc: xpt_create_path failed with "
5730			       "status %#x for bus %d\n", status, bus->path_id);
5731			printf("xptconfigfunc: halting bus configuration\n");
5732			xpt_free_ccb(work_ccb);
5733			busses_to_config--;
5734			xpt_finishconfig(xpt_periph, NULL);
5735			return(0);
5736		}
5737		xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
5738		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
5739		xpt_action(work_ccb);
5740		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
5741			printf("xptconfigfunc: CPI failed on bus %d "
5742			       "with status %d\n", bus->path_id,
5743			       work_ccb->ccb_h.status);
5744			xpt_finishconfig(xpt_periph, work_ccb);
5745			return(1);
5746		}
5747
5748		if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0) {
5749			xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
5750			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
5751			work_ccb->ccb_h.cbfcnp = NULL;
5752			CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
5753				  ("Resetting Bus\n"));
5754			xpt_action(work_ccb);
5755			xpt_finishconfig(xpt_periph, work_ccb);
5756		} else {
5757			/* Act as though we performed a successful BUS RESET */
5758			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
5759			xpt_finishconfig(xpt_periph, work_ccb);
5760		}
5761	}
5762
5763	return(1);
5764}
5765
5766static void
5767xpt_config(void *arg)
5768{
5769	/* Now that interrupts are enabled, go find our devices */
5770
5771#ifdef CAMDEBUG
5772	/* Setup debugging flags and path */
5773#ifdef CAM_DEBUG_FLAGS
5774	cam_dflags = CAM_DEBUG_FLAGS;
5775#else /* !CAM_DEBUG_FLAGS */
5776	cam_dflags = CAM_DEBUG_NONE;
5777#endif /* CAM_DEBUG_FLAGS */
5778#ifdef CAM_DEBUG_BUS
5779	if (cam_dflags != CAM_DEBUG_NONE) {
5780		if (xpt_create_path(&cam_dpath, xpt_periph,
5781				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
5782				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
5783			printf("xpt_config: xpt_create_path() failed for debug"
5784			       " target %d:%d:%d, debugging disabled\n",
5785			       CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
5786			cam_dflags = CAM_DEBUG_NONE;
5787		}
5788	} else
5789		cam_dpath = NULL;
5790#else /* !CAM_DEBUG_BUS */
5791	cam_dpath = NULL;
5792#endif /* CAM_DEBUG_BUS */
5793#endif /* CAMDEBUG */
5794
5795	/*
5796	 * Scan all installed busses.
5797	 */
5798	xpt_for_all_busses(xptconfigbuscountfunc, NULL);
5799
5800	if (busses_to_config == 0) {
5801		/* Call manually because we don't have any busses */
5802		xpt_finishconfig(xpt_periph, NULL);
5803	} else  {
5804		if (SCSI_DELAY >= 2000) {
5805			printf("Waiting %d seconds for SCSI "
5806			       "devices to settle\n", SCSI_DELAY/1000);
5807		}
5808		xpt_for_all_busses(xptconfigfunc, NULL);
5809	}
5810}
5811
5812/*
5813 * If the given device only has one peripheral attached to it, and if that
5814 * peripheral is the passthrough driver, announce it.  This insures that the
5815 * user sees some sort of announcement for every peripheral in their system.
5816 */
5817static int
5818xptpassannouncefunc(struct cam_ed *device, void *arg)
5819{
5820	struct cam_periph *periph;
5821	int i;
5822
5823	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
5824	     periph = SLIST_NEXT(periph, periph_links), i++);
5825
5826	periph = SLIST_FIRST(&device->periphs);
5827	if ((i == 1)
5828	 && (strncmp(periph->periph_name, "pass", 4) == 0))
5829		xpt_announce_periph(periph, NULL);
5830
5831	return(1);
5832}
5833
5834static void
5835xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
5836{
5837	struct	periph_driver **p_drv;
5838	int	i;
5839
5840	if (done_ccb != NULL) {
5841		CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5842			  ("xpt_finishconfig\n"));
5843		switch(done_ccb->ccb_h.func_code) {
5844		case XPT_RESET_BUS:
5845			if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
5846				done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
5847				done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
5848				xpt_action(done_ccb);
5849				return;
5850			}
5851			/* FALLTHROUGH */
5852		case XPT_SCAN_BUS:
5853		default:
5854			xpt_free_path(done_ccb->ccb_h.path);
5855			busses_to_config--;
5856			break;
5857		}
5858	}
5859
5860	if (busses_to_config == 0) {
5861		/* Register all the peripheral drivers */
5862		/* XXX This will have to change when we have LKMs */
5863		p_drv = (struct periph_driver **)periphdriver_set.ls_items;
5864		for (i = 0; p_drv[i] != NULL; i++) {
5865			(*p_drv[i]->init)();
5866		}
5867
5868		/*
5869		 * Check for devices with no "standard" peripheral driver
5870		 * attached.  For any devices like that, announce the
5871		 * passthrough driver so the user will see something.
5872		 */
5873		xpt_for_all_devices(xptpassannouncefunc, NULL);
5874
5875		/* Release our hook so that the boot can continue. */
5876		config_intrhook_disestablish(xpt_config_hook);
5877	}
5878	if (done_ccb != NULL)
5879		xpt_free_ccb(done_ccb);
5880}
5881
5882static void
5883xptaction(struct cam_sim *sim, union ccb *work_ccb)
5884{
5885	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
5886
5887	switch (work_ccb->ccb_h.func_code) {
5888	/* Common cases first */
5889	case XPT_PATH_INQ:		/* Path routing inquiry */
5890	{
5891		struct ccb_pathinq *cpi;
5892
5893		cpi = &work_ccb->cpi;
5894		cpi->version_num = 1; /* XXX??? */
5895		cpi->hba_inquiry = 0;
5896		cpi->target_sprt = 0;
5897		cpi->hba_misc = 0;
5898		cpi->hba_eng_cnt = 0;
5899		cpi->max_target = 0;
5900		cpi->max_lun = 0;
5901		cpi->initiator_id = 0;
5902		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
5903		strncpy(cpi->hba_vid, "", HBA_IDLEN);
5904		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
5905		cpi->unit_number = sim->unit_number;
5906		cpi->bus_id = sim->bus_id;
5907		cpi->ccb_h.status = CAM_REQ_CMP;
5908		xpt_done(work_ccb);
5909		break;
5910	}
5911	default:
5912		work_ccb->ccb_h.status = CAM_REQ_INVALID;
5913		xpt_done(work_ccb);
5914		break;
5915	}
5916}
5917
5918/*
5919 * Should only be called by the machine interrupt dispatch routines,
5920 * so put these prototypes here instead of in the header.
5921 */
5922
5923static void
5924swi_camnet(void)
5925{
5926	camisr(&cam_netq);
5927}
5928
5929static void
5930swi_cambio(void)
5931{
5932	camisr(&cam_bioq);
5933}
5934
5935static void
5936camisr(cam_isrq_t *queue)
5937{
5938	int	s;
5939	struct	ccb_hdr *ccb_h;
5940
5941	s = splcam();
5942	while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
5943		int	runq;
5944
5945		TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
5946		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
5947		splx(s);
5948
5949		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
5950			  ("camisr"));
5951
5952		runq = FALSE;
5953
5954		if (ccb_h->flags & CAM_HIGH_POWER) {
5955			struct highpowerlist	*hphead;
5956			struct cam_ed		*device;
5957			union ccb		*send_ccb;
5958
5959			hphead = &highpowerq;
5960
5961			send_ccb = (union ccb *)STAILQ_FIRST(hphead);
5962
5963			/*
5964			 * Increment the count since this command is done.
5965			 */
5966			num_highpower++;
5967
5968			/*
5969			 * Any high powered commands queued up?
5970			 */
5971			if (send_ccb != NULL) {
5972				device = send_ccb->ccb_h.path->device;
5973
5974				STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
5975
5976				xpt_release_devq(send_ccb->ccb_h.path->device,
5977						 TRUE);
5978			}
5979		}
5980		if ((ccb_h->func_code != XPT_ACCEPT_TARGET_IO)
5981		 && (ccb_h->func_code != XPT_IMMED_NOTIFY)
5982		 && (ccb_h->func_code != XPT_SCAN_LUN)
5983		 && (ccb_h->func_code != XPT_SCAN_BUS)) {
5984			struct cam_ed *dev;
5985
5986			dev = ccb_h->path->device;
5987
5988			s = splcam();
5989			cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
5990
5991			ccb_h->path->bus->sim->devq->send_active--;
5992			ccb_h->path->bus->sim->devq->send_openings++;
5993			splx(s);
5994
5995			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
5996			 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
5997			  && (dev->ccbq.dev_active == 0))) {
5998
5999				xpt_release_devq(ccb_h->path->device,
6000						 /*run_queue*/TRUE);
6001			}
6002
6003			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6004			 && (--dev->tag_delay_count == 0))
6005				xpt_start_tags(ccb_h->path);
6006
6007			if ((dev->ccbq.queue.entries > 0)
6008			 && (dev->qfrozen_cnt == 0)
6009			 && (device_is_send_queued(dev) == 0)) {
6010				runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
6011							      dev);
6012			}
6013		}
6014
6015		if (ccb_h->status & CAM_RELEASE_SIMQ) {
6016			xpt_release_simq(ccb_h->path->bus->sim,
6017					 /*run_queue*/TRUE);
6018		} else if ((ccb_h->flags & CAM_DEV_QFRZDIS)
6019			&& (ccb_h->status & CAM_DEV_QFRZN)) {
6020			xpt_release_devq(ccb_h->path->device,
6021					 /*run_queue*/TRUE);
6022			ccb_h->status &= ~CAM_DEV_QFRZN;
6023		} else if (runq) {
6024			xpt_run_dev_sendq(ccb_h->path->bus);
6025		}
6026
6027		/* Call the peripheral driver's callback */
6028		(*ccb_h->cbfcnp)(ccb_h->path->periph,
6029				 (union ccb *)ccb_h);
6030
6031		/* Raise IPL for while test */
6032		s = splcam();
6033	}
6034	splx(s);
6035}
6036