cam_xpt.c revision 40025
/*
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 1997, 1998 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      $Id: cam_xpt.c,v 1.16 1998/10/06 19:27:19 ken Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
#include <sys/devicestat.h>

#ifdef PC98
#include <pc98/pc98/pc98_machdep.h>	/* geometry translation */
#endif

#include <machine/clock.h>
#include <machine/ipl.h>

#include <cam/cam.h>
#include <cam/cam_conf.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>
#include "opt_cam.h"
#include "opt_scsi.h"
extern	void	(*ihandlers[32]) __P((void));

/* Datastructures internal to the xpt layer */

/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
	SLIST_ENTRY(async_node)	links;
	u_int32_t	event_enable;	/* Async Event enables */
	void		(*callback)(void *arg, u_int32_t code,
				    struct cam_path *path, void *args);
	void		*callback_arg;
};
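
/*
 * For illustration only: consumers do not build async_node entries by
 * hand.  A peripheral or SIM typically registers its callback by filling
 * in a ccb_setasync and handing it to xpt_action(), roughly as sketched
 * below ("mydriver_async" and "softc" are hypothetical names, and error
 * handling is omitted):
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, path, 5);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = mydriver_async;
 *	csa.callback_arg = softc;
 *	xpt_action((union ccb *)&csa);
 */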

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif

/*
 * This is the number of milliseconds we wait for devices to settle after a
 * SCSI bus reset.
 */
#ifndef SCSI_DELAY
#define SCSI_DELAY 2000
#endif
/*
 * If someone sets this to 0, we assume that they want the minimum
 * allowable bus settle delay.  All devices need _some_ sort of bus settle
 * delay, so we'll set it to a minimum value of 100ms.
 */
#if (SCSI_DELAY == 0)
#undef SCSI_DELAY
#define SCSI_DELAY 100
#endif

/*
 * Make sure the user isn't using seconds instead of milliseconds.
 */
#if (SCSI_DELAY < 100)
#error "SCSI_DELAY is in milliseconds, not seconds!  Please use a larger value"
#endif

/* number of high powered commands that can go through right now */
static int num_highpower = CAM_MAX_HIGHPOWER;

/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
	cam_pinfo pinfo;
	struct	  cam_ed *device;
};

/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on the bus.
 */
struct cam_ed {
	TAILQ_ENTRY(cam_ed) links;
	struct	cam_ed_qinfo alloc_ccb_entry;
	struct	cam_ed_qinfo send_ccb_entry;
	struct	cam_et	 *target;
	lun_id_t	 lun_id;
	struct	camq drvq;		/*
					 * Queue of type drivers wanting to do
					 * work on this device.
					 */
	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
	struct	async_list asyncs;	/* Async callback info for this B/T/L */
	struct	periph_list periphs;	/* All attached peripherals */
	u_int	generation;		/* Generation number */
	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
	struct	xpt_quirk_entry *quirk;	/* Oddities about this device */
					/* Storage for the inquiry data */
	struct	scsi_inquiry_data inq_data;
	u_int8_t	 inq_flags;	/*
					 * Current settings for inquiry flags.
					 * This allows us to override settings
					 * like disconnection and tagged
					 * queuing for a device.
					 */
	u_int8_t	 queue_flags;	/* Queue flags from the control page */
	u_int8_t	 *serial_num;
	u_int8_t	 serial_num_len;
	u_int32_t	 qfrozen_cnt;
	u_int32_t	 flags;
#define CAM_DEV_UNCONFIGURED	 	0x01
#define CAM_DEV_REL_TIMEOUT_PENDING	0x02
#define CAM_DEV_REL_ON_COMPLETE		0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
#define CAM_DEV_TAG_AFTER_COUNT		0x20
	u_int32_t	 tag_delay_count;
#define	CAM_TAG_DELAY_COUNT		5
	u_int32_t	 refcount;
	struct		 callout_handle c_handle;
};

/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
	TAILQ_HEAD(, cam_ed) ed_entries;
	TAILQ_ENTRY(cam_et) links;
	struct	cam_eb	*bus;
	target_id_t	target_id;
	u_int32_t	refcount;
	u_int		generation;
};

/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
	TAILQ_HEAD(, cam_et) et_entries;
	TAILQ_ENTRY(cam_eb)  links;
	struct async_list    asyncs;	/* Async callback info for this B/T/L */
	path_id_t	     path_id;
	struct cam_sim	     *sim;
	u_int32_t	     flags;
#define	CAM_EB_RUNQ_SCHEDULED	0x01
	u_int		     generation;
};

struct cam_path {
	struct cam_periph *periph;
	struct cam_eb	  *bus;
	struct cam_et	  *target;
	struct cam_ed	  *device;
};

struct xpt_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define	CAM_QUIRK_NOLUNS	0x01
#define	CAM_QUIRK_NOSERIAL	0x02
	u_int8_t mintags;
	u_int8_t maxtags;
};

typedef enum {
	XPT_FLAG_OPEN		= 0x01
} xpt_flags;

struct xpt_softc {
	xpt_flags	flags;
	u_int32_t	generation;
#ifdef DEVFS
	void		*xpt_devfs_token;
	void		*ctl_devfs_token;
#endif
};

static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";

static struct xpt_quirk_entry xpt_quirk_table[] =
{
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "MICROP", "3391*", "x43h" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "CONNER", "CFP2107*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Doesn't handle queue full condition correctly,
		 * so we need to limit maxtags to what the device
		 * can handle instead of determining this automatically.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "WN321010S*", "*" },
		/*quirks*/0, /*mintags*/2, /*maxtags*/32
	},
	{
		/*
		 * Hack until multiple-luns are supported by
		 * the target mode code.
		 */
		{
			T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
			"FreeBSD", "TM-PT", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Really only one LUN */
		{
			T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* I can't believe we need a quirk for DPT volumes. */
		{
			T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE,
			"DPT", "*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
		/*mintags*/0, /*maxtags*/255
	},
	{
		/*
		 * This drive doesn't like multiple LUN probing.
		 * Verified by: Jean-Marc Zucconi <jmz@FreeBSD.ORG>
		 */
		{
			T_CDROM, SIP_MEDIA_REMOVABLE, sony,
			"CD-ROM CDU-80*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * This drive doesn't like multiple LUN probing.
		 * Submitted by: Parag Patel <parag@cgt.com>
		 */
		{
			T_WORM, SIP_MEDIA_REMOVABLE, sony,
			"CD-R   CDU9*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * The 8200 doesn't like multi-lun probing, and probably
		 * doesn't like serial number requests either.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"EXB-8200*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Default tagged queuing parameters for all devices */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0, /*mintags*/2, /*maxtags*/255
	},
};
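
/*
 * A note on table order: quirk entries are matched against a device's
 * inquiry data from first to last (see xpt_find_quirk() below), so more
 * specific vendor/product/revision patterns must precede the catch-all
 * "*" entry, which supplies the default tagged queuing parameters for
 * every device not matched earlier.
 */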

typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;
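
/*
 * A dev_match_ret packs two independent fields: the low nibble
 * (DM_RET_FLAG_MASK) holds flags, currently just DM_RET_COPY, and the
 * high nibble (DM_RET_ACTION_MASK) holds one action.  For example, a bus
 * that matched a pattern while deeper patterns remain outstanding yields:
 *
 *	retval = DM_RET_COPY | DM_RET_DESCEND;
 *
 * Callers always test the two fields separately, e.g.
 * (retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND.
 */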

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
static cam_isrq_t cam_bioq;
static cam_isrq_t cam_netq;

/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
SLIST_HEAD(,ccb_hdr) ccb_freeq;
static u_int xpt_max_ccbs;	/*
				 * Maximum size of ccb pool.  Modified as
				 * devices are added/removed or have their
				 * opening counts changed.
				 */
static u_int xpt_ccb_count;	/* Current count of allocated ccbs */

static struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
	probe_periph_init, "probe",
	TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

DATA_SET(periphdriver_set, xpt_driver);
DATA_SET(periphdriver_set, probe_driver);

#define XPT_CDEV_MAJOR 104

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct cdevsw xpt_cdevsw =
{
	/*d_open*/	xptopen,
	/*d_close*/	xptclose,
	/*d_read*/	noread,
	/*d_write*/	nowrite,
	/*d_ioctl*/	xptioctl,
	/*d_stop*/	nostop,
	/*d_reset*/	noreset,
	/*d_devtotty*/	nodevtotty,
	/*d_poll*/	NULL,
	/*d_mmap*/	nommap,
	/*d_strategy*/	nostrategy,
	/*d_name*/	"xpt",
	/*d_spare*/	NULL,
	/*d_maj*/	-1,
	/*d_dump*/	nodump,
	/*d_psize*/	nopsize,
	/*d_flags*/	0,
	/*d_maxio*/	0,
	/*b_maj*/	-1
};

static struct intr_config_hook *xpt_config_hook;

/* Registered busses */
TAILQ_HEAD(,cam_eb) xpt_busses;
static u_int bus_generation;

/* Storage for debugging datastructures */
#ifdef	CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
#endif

#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */

/* Forward declarations for private functions */
void	xpt_init(void);

static cam_status	xpt_compile_path(struct cam_path *new_path,
					 struct cam_periph *perph,
					 path_id_t path_id,
					 target_id_t target_id,
					 lun_id_t lun_id);

static void		xpt_release_path(struct cam_path *path);

static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static int	 xptnextfreebus(path_id_t startbus);
static int	 xptpathid(const char *sim_name, int sim_unit, int sim_bus,
			   path_id_t *nextpath);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				  u_int32_t new_priority);
static void	 xpt_run_dev_allocq(struct cam_eb *bus);
static void	 xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static timeout_t xpt_release_simq_timeout;
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
		 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
				  lun_id_t lun_id);
static void	 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
				    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void	 xpt_scan_lun(struct cam_periph *periph,
			      struct cam_path *path, cam_flags flags,
			      union ccb *ccb);
static void	 xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t	xptconfigbuscountfunc;
static xpt_busfunc_t	xptconfigfunc;
static void	 xpt_config(void *arg);
static xpt_devicefunc_t	xptfinishconfigfunc;
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
       void	 swi_camnet(void);
       void	 swi_cambio(void);
static void	 camisr(cam_isrq_t *queue);
#if 0
static void	 xptstart(struct cam_periph *periph, union ccb *work_ccb);
static void	 xptasync(struct cam_periph *periph,
			  u_int32_t code, cam_path *path);
#endif
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       int num_patterns, struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static int		xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
static int		xpt_for_all_targets(xpt_targetfunc_t *tr_func,
					    void *arg);
static int		xpt_for_all_devices(xpt_devicefunc_t *tr_func,
					    void *arg);
static int		xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static cam_status	proberegister(struct cam_periph *periph,
				      void *arg);
static void	 probeschedule(struct cam_periph *probe_periph);
static void	 probestart(struct cam_periph *periph, union ccb *start_ccb);
static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
static void	 probecleanup(struct cam_periph *periph);
static void	 xpt_find_quirk(struct cam_ed *device);
static void	 xpt_set_transfer_settings(struct ccb_trans_settings *cts,
					   struct cam_ed *device,
					   int async_update);
static void	 xpt_toggle_tags(struct cam_path *path);
static void	 xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
					    struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
					   struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);

static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	if (dev->ccbq.devq_openings > 0) {
		if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
			cam_ccbq_resize(&dev->ccbq,
					dev->ccbq.dev_openings
					+ dev->ccbq.dev_active);
			dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
		}
		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
					  &dev->alloc_ccb_entry.pinfo,
					  dev->drvq.queue_array[0]->priority);
	} else {
		retval = 0;
	}

	return (retval);
}

static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
	int	retval;

	if (dev->ccbq.dev_openings > 0) {
		retval = xpt_schedule_dev(&bus->sim->devq->send_queue,
					  &dev->send_ccb_entry.pinfo,
					  dev->ccbq.queue.queue_array[0]->priority);
	} else {
		retval = 0;
	}
	return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
	/*
	 * Have work to do.
	 * Have space to do more work.
	 * Allowed to do work.
	 */
	return ((devq->alloc_queue.qfrozen_cnt == 0)
	     && (devq->alloc_queue.entries > 0)
	     && (devq->alloc_openings > 0));
}
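
/*
 * A brief sketch of the scheduling model behind the helpers above: each
 * SIM's cam_devq holds two priority queues of devices.  The alloc queue
 * orders devices whose peripheral drivers are waiting for a CCB (drained
 * by xpt_run_dev_allocq()), while the send queue orders devices with
 * filled CCBs awaiting controller openings (drained by
 * xpt_run_dev_sendq()).  A device is (re)queued at the priority of the
 * highest priority entry it holds, found at queue_array[0].
 */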

static void
xpt_periph_init()
{
	dev_t dev;

	dev = makedev(XPT_CDEV_MAJOR, 0);
	cdevsw_add(&dev, &xpt_cdevsw, NULL);
}

static void
probe_periph_init()
{
}


static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* Caller will release the CCB */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xptopen(dev_t dev, int flags, int fmt, struct proc *p)
{
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * Only allow read-write access.
	 */
	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((flags & O_NONBLOCK) != 0) {
		printf("xpt%d: can't do nonblocking access\n", unit);
		return(ENODEV);
	}

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptopen: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves open */
	xsoftc.flags |= XPT_FLAG_OPEN;

	return(0);
}

static int
xptclose(dev_t dev, int flag, int fmt, struct proc *p)
{
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptclose: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves closed */
	xsoftc.flags &= ~XPT_FLAG_OPEN;

	return(0);
}

static int
xptioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
	int unit, error;

	error = 0;
	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptioctl: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	switch(cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;

		inccb = (union ccb *)addr;

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
			 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
				error = EINVAL;
				break;
			}
			/* FALLTHROUGH */
		case XPT_SCAN_LUN:
		case XPT_ENG_INQ:  /* XXX not implemented yet */
		case XPT_ENG_EXEC:

			ccb = xpt_alloc_ccb();

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			ccb->ccb_h.cbfcnp = xptdone;
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			ccb.ccb_h.cbfcnp = xptdone;
			xpt_action(&ccb);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;

		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
				error = EINVAL;
				break;
			}
			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo);

			if (error)
				break;

			/*
			 * This is an immediate CCB, so we can send it on
			 * directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			error = 0;
			break;
		}
		default:
			error = EINVAL;
			break;
		}
		break;
	}
	/*
	 * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as
	 * input, with the peripheral driver name and unit number filled in.
	 * The other fields don't really matter as input.  The passthrough
	 * driver name ("pass") and unit number are passed back in the ccb.
	 * The current device generation number, the index into the device
	 * peripheral driver list, and the status are also passed back.  Note
	 * that since we do everything in one pass, unlike the XPT_GDEVLIST
	 * ccb, we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
	 * (or rather should be) impossible for the device peripheral driver
	 * list to change since we look at the whole thing in one pass, and
	 * we do it with splsoftcam protection.
	 */
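	/*
	 * For illustration, a userland caller would drive this roughly as
	 * follows (a sketch only; "da" and unit 1 are example values, and
	 * error handling is omitted):
	 *
	 *	union ccb ccb;
	 *	int fd = open("/dev/xpt0", O_RDWR);
	 *
	 *	bzero(&ccb, sizeof(ccb));
	 *	ccb.ccb_h.func_code = XPT_GDEVLIST;
	 *	strcpy(ccb.cgdl.periph_name, "da");
	 *	ccb.cgdl.unit_number = 1;
	 *	ioctl(fd, CAMGETPASSTHRU, &ccb);
	 *
	 * On success, ccb.cgdl then names the matching "pass" unit.
	 */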
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char   *name;
		int unit;
		int cur_generation;
		int splbreaknum;
		int s;
		int i;

		ccb = (union ccb *)addr;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		/*
		 * Every 100 devices, we want to drop our spl protection to
		 * give the software interrupt handler a chance to run.
		 * Most systems won't run into this check, but this should
		 * avoid starvation in the software interrupt handler in
		 * large systems.
		 */
		splbreaknum = 100;

		ccb = (union ccb *)addr;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		s = splsoftcam();
ptstartover:
		cur_generation = xsoftc.generation;

		/* first find our driver in the list of drivers */
		for (p_drv = (struct periph_driver **)periphdriver_set.ls_items;
		     *p_drv != NULL; p_drv++)
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;

		if (*p_drv == NULL) {
			splx(s);
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
		     periph = TAILQ_NEXT(periph, unit_links)) {

			if (periph->unit_number == unit) {
				break;
			} else if (--splbreaknum == 0) {
				splx(s);
				s = splsoftcam();
				splbreaknum = 100;
				if (cur_generation != xsoftc.generation)
				       goto ptstartover;
			}
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			device = periph->path->device;
			for (i = 0, periph = device->periphs.slh_first;
			     periph != NULL;
			     periph = periph->periph_links.sle_next, i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (periph->periph_links.sle_next)
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
		}
		splx(s);
		break;
		}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}
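
/*
 * Example of driving the CAMIOCOMMAND path above from userland to rescan
 * an entire bus (a sketch under the same rules the ioctl enforces: bus 0
 * is an example value, wildcards are required for XPT_SCAN_BUS, and
 * error checks are omitted):
 *
 *	union ccb ccb;
 *	int fd = open("/dev/xpt0", O_RDWR);
 *
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.func_code = XPT_SCAN_BUS;
 *	ccb.ccb_h.path_id = 0;
 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
 *	ccb.ccb_h.timeout = 5 * 60 * 1000;
 *	ioctl(fd, CAMIOCOMMAND, &ccb);
 */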

/* Functions accessed by the peripheral drivers */
void
xpt_init()
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq;
	cam_status status;

	TAILQ_INIT(&xpt_busses);
	TAILQ_INIT(&cam_bioq);
	TAILQ_INIT(&cam_netq);
	SLIST_INIT(&ccb_freeq);
	STAILQ_INIT(&highpowerq);

	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe busses and
	 * perform other XPT functions.
	 */
	xpt_sim = (struct cam_sim *)malloc(sizeof(*xpt_sim),
					   M_DEVBUF, M_WAITOK);
	xpt_sim->sim_action = xptaction;
	xpt_sim->sim_name = "xpt";
	xpt_sim->path_id = CAM_XPT_PATH_ID;
	xpt_sim->bus_id = 0;
	xpt_sim->max_tagged_dev_openings = 0;
	xpt_sim->max_dev_openings = 0;
	xpt_sim->devq = cam_simq_alloc(16);
	xpt_max_ccbs = 16;

	xpt_bus_register(xpt_sim, 0);

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		printf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return;
	}

	cam_periph_alloc(xptregister, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, NULL);
	xpt_free_path(path);

	xpt_sim->softc = xpt_periph;

	/*
	 * Register a callback for when interrupts are enabled.
	 */
	xpt_config_hook =
	    (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
					      M_TEMP, M_NOWAIT);
	if (xpt_config_hook == NULL) {
		printf("xpt_init: Cannot malloc config hook "
		       "- failing attach\n");
		return;
	}
	bzero(xpt_config_hook, sizeof(*xpt_config_hook));

	xpt_config_hook->ich_func = xpt_config;
	if (config_intrhook_establish(xpt_config_hook) != 0) {
		free (xpt_config_hook, M_TEMP);
		printf("xpt_init: config_intrhook_establish failed "
		       "- failing attach\n");
	}

	/* Install our software interrupt handlers */
	/* XXX Should call some MI function to do this */
#ifdef __i386__
	ihandlers[SWI_CAMNET] = swi_camnet;
	ihandlers[SWI_CAMBIO] = swi_cambio;
#endif
}
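
/*
 * Boot-time flow implied above, for orientation: xpt_init() runs before
 * interrupts are enabled, registers the XPT's own virtual bus, and hangs
 * xpt_config() off an intr_config_hook.  Once interrupts are live,
 * xpt_config() (later in this file) walks the registered busses and
 * kicks off the initial probe of their targets and luns.
 */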

static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	if (periph == NULL) {
		printf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	periph->softc = NULL;

	xpt_periph = periph;

	return(CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t	 status;
	struct periph_list *periph_head;

	device = periph->path->device;

	periph_head = &device->periphs;

	status = CAM_REQ_CMP;

	if (device != NULL) {
		int s;

		/*
		 * Make room for this peripheral
		 * so it will fit in the queue
		 * when it's scheduled to run
		 */
		s = splsoftcam();
		status = camq_resize(&device->drvq,
				     device->drvq.array_size + 1);

		device->generation++;

		SLIST_INSERT_HEAD(periph_head, periph, periph_links);

		splx(s);
	}

	xsoftc.generation++;

	return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
	struct cam_ed *device;

	device = periph->path->device;

	if (device != NULL) {
		int s;
		struct periph_list *periph_head;

		periph_head = &device->periphs;

		/* Release the slot for this peripheral */
		s = splsoftcam();
		camq_resize(&device->drvq, device->drvq.array_size - 1);

		device->generation++;

		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);

		splx(s);
	}

	xsoftc.generation++;

}

void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	int s;
	u_int mb;
	struct cam_path *path;
	struct ccb_trans_settings cts;

	path = periph->path;
	/*
	 * To ensure that this is printed in one piece,
	 * mask out CAM interrupts.
	 */
	s = splsoftcam();
	printf("%s%d at %s%d bus %d target %d lun %d\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->target->target_id,
	       path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	scsi_print_inquiry(&path->device->inq_data);
	if ((bootverbose)
	 && (path->device->serial_num_len > 0)) {
		/* Don't wrap the screen - print only the first 60 chars */
		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.flags = CCB_TRANS_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);
	if (cts.ccb_h.status == CAM_REQ_CMP) {
		u_int speed;
		u_int freq;

		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		  && cts.sync_offset != 0) {
			freq = scsi_calc_syncsrate(cts.sync_period);
			speed = freq;
		} else {
			freq = 0;
			speed = path->bus->sim->base_transfer_speed;
		}
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
			speed *= (0x01 << cts.bus_width);
		mb = speed / 1000;
		if (mb > 0)
			printf("%s%d: %d.%03dMB/s transfers",
			       periph->periph_name, periph->unit_number,
			       mb, speed % 1000);
		else
			printf("%s%d: %dKB/s transfers", periph->periph_name,
			       periph->unit_number, speed);
		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		 && cts.sync_offset != 0) {
			printf(" (%d.%03dMHz, offset %d", freq / 1000,
			       freq % 1000, cts.sync_offset);
		}
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
		 && cts.bus_width > 0) {
			if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			 && cts.sync_offset != 0) {
				printf(", ");
			} else {
				printf(" (");
			}
			printf("%dbit)", 8 * (0x01 << cts.bus_width));
		} else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			&& cts.sync_offset != 0) {
			printf(")");
		}

		if (path->device->inq_flags & SID_CmdQue
		 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
			printf(", Tagged Queueing Enabled");
		}

		printf("\n");
	} else if (path->device->inq_flags & SID_CmdQue
		|| path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		printf("%s%d: Tagged Queueing Enabled\n",
		       periph->periph_name, periph->unit_number);
	}

	/*
	 * We only want to print the caller's announce string if they've
	 * passed one in.
	 */
	if (announce_string != NULL)
		printf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
	splx(s);
}
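
/*
 * Worked example of the transfer speed math above: a wide (bus_width 1)
 * device that negotiated Fast-10 sync transfers gets freq = 10000 (kHz)
 * from scsi_calc_syncsrate(), so speed = 10000 * (1 << 1) = 20000 KB/s
 * and mb = 20, producing an announcement like
 * "20.000MB/s transfers (10.000MHz, offset 8, 16bit)".
 */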


static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		/*
		 * If they want to match any bus node, we give them any
		 * bus node.
		 */
		if (cur_pattern->flags == BUS_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == BUS_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != bus->path_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
		 && (cur_pattern->bus_id != bus->sim->bus_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != bus->sim->unit_number))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this bus.  So tell the caller to copy the
		 * data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a non-bus matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a non-bus
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen anything other than bus matching patterns.  So
	 * tell the caller to stop descending the tree -- the user doesn't
	 * want to match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}

static dev_match_ret
xptdevicematch(struct dev_match_pattern *patterns, int num_patterns,
	       struct cam_ed *device)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (device == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this device matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct device_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a device node, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_DEVICE) {
			if ((patterns[i].type == DEV_MATCH_PERIPH)
			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.device_pattern;

		/*
		 * If they want to match any device node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == DEV_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == DEV_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != device->target->bus->path_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
		 && (cur_pattern->target_id != device->target->target_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
		 && (cur_pattern->target_lun != device->lun_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
		 && (cam_quirkmatch((caddr_t)&device->inq_data,
				    (caddr_t)&cur_pattern->inq_pat,
				    1, sizeof(cur_pattern->inq_pat),
				    scsi_static_inquiry_match) == NULL))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this device.  So tell the caller to copy
		 * the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a peripheral matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a peripheral
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen any peripheral matching patterns.  So tell the
	 * caller to stop descending the tree -- the user doesn't want to
	 * match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}

/*
 * Match a single peripheral against any number of match patterns.
 */
static dev_match_ret
xptperiphmatch(struct dev_match_pattern *patterns, int num_patterns,
	       struct cam_periph *periph)
{
	dev_match_ret retval;
	int i;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (periph == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this peripheral matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_STOP | DM_RET_COPY);

	/*
	 * There aren't any nodes below a peripheral node, so there's no
	 * reason to descend the tree any further.
	 */
	retval = DM_RET_STOP;

	for (i = 0; i < num_patterns; i++) {
		struct periph_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a peripheral, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_PERIPH)
			continue;

		cur_pattern = &patterns[i].pattern.periph_pattern;

		/*
		 * If they want to match on anything, then we will do so.
		 */
		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * We've already set the return action to stop,
			 * since there are no nodes below peripherals in
			 * the tree.
			 */
			return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == PERIPH_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != periph->path->bus->path_id))
			continue;

		/*
		 * For the target and lun IDs, we have to make sure the
		 * target and lun pointers aren't NULL.  The xpt peripheral
		 * has a wildcard target and device.
		 */
		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
		 && ((periph->path->target == NULL)
		 ||(cur_pattern->target_id != periph->path->target->target_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
		 && ((periph->path->device == NULL)
		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != periph->unit_number))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this peripheral.  So tell the caller to
		 * copy the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * The return action has already been set to stop, since
		 * peripherals don't have any nodes below them in the EDT.
		 */
		return(retval);
	}

	/*
	 * If we get to this point, the peripheral that was passed in
	 * doesn't match any of the patterns.
	 */
	return(retval);
}

static int
xptedtbusfunc(struct cam_eb *bus, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);

	/*
	 * If we got an error, bail out of the search.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this bus out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;

			cdm->pos.cookie.bus = bus;
			cdm->pos.generations[CAM_BUS_GENERATION] =
				bus_generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_BUS;
		cdm->matches[j].result.bus_result.path_id = bus->path_id;
		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
		cdm->matches[j].result.bus_result.unit_number =
			bus->sim->unit_number;
		strncpy(cdm->matches[j].result.bus_result.dev_name,
			bus->sim->sim_name, DEV_IDLEN);
	}

	/*
	 * If the user is only interested in busses, there's no
	 * reason to descend to the next level in the tree.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a target generation recorded, check it to
	 * make sure the target list hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (bus == cdm->pos.cookie.bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
	     bus->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		return(xpttargettraverse(bus,
					(struct cam_et *)cdm->pos.cookie.target,
					 xptedttargetfunc, arg));
	else
		return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
}
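
/*
 * A note on the resume protocol shared by the xptedt*func routines: when
 * the match buffer fills, the traversal records its place in cdm->pos
 * (cookies pointing at the current bus, target, and device) along with
 * the generation counts in force at that time.  On the next call each
 * level first validates its generation, returning
 * CAM_DEV_MATCH_LIST_CHANGED if the EDT mutated in between; otherwise
 * the cookies are used to restart the walk exactly where it left off.
 */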

static int
xptedttargetfunc(struct cam_et *target, void *arg)
{
	struct ccb_dev_match *cdm;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If there is a device list generation recorded, check it to
	 * make sure the device list hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
	     target->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device != NULL))
		return(xptdevicetraverse(target,
					(struct cam_ed *)cdm->pos.cookie.device,
					 xptedtdevicefunc, arg));
	else
		return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
}

static int
xptedtdevicefunc(struct cam_ed *device, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;
	u_int dev_gen;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
					device);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this device out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;

			cdm->pos.cookie.bus = device->target->bus;
			cdm->pos.generations[CAM_BUS_GENERATION] =
				bus_generation;
			cdm->pos.cookie.target = device->target;
			cdm->pos.generations[CAM_TARGET_GENERATION] =
				device->target->bus->generation;
			cdm->pos.cookie.device = device;
			cdm->pos.generations[CAM_DEV_GENERATION] =
				device->target->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_DEVICE;
		cdm->matches[j].result.device_result.path_id =
			device->target->bus->path_id;
		cdm->matches[j].result.device_result.target_id =
			device->target->target_id;
		cdm->matches[j].result.device_result.target_lun =
			device->lun_id;
		bcopy(&device->inq_data,
		      &cdm->matches[j].result.device_result.inq_data,
		      sizeof(struct scsi_inquiry_data));
	}

	/*
	 * If the user isn't interested in peripherals, don't descend
	 * the tree any further.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a peripheral list generation recorded, make sure
	 * it hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (device->target->bus == cdm->pos.cookie.bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (device->target == cdm->pos.cookie.target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (device == cdm->pos.cookie.device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
	     device->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == device->target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == device->target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		return(xptperiphtraverse(device,
				(struct cam_periph *)cdm->pos.cookie.periph,
				xptedtperiphfunc, arg));
	else
		return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
}
1930
1931static int
1932xptedtperiphfunc(struct cam_periph *periph, void *arg)
1933{
1934	struct ccb_dev_match *cdm;
1935	dev_match_ret retval;
1936
1937	cdm = (struct ccb_dev_match *)arg;
1938
1939	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1940
1941	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1942		cdm->status = CAM_DEV_MATCH_ERROR;
1943		return(0);
1944	}
1945
1946	/*
1947	 * If the copy flag is set, copy this peripheral out.
1948	 */
1949	if (retval & DM_RET_COPY) {
1950		int spaceleft, j;
1951
1952		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1953			sizeof(struct dev_match_result));
1954
1955		/*
1956		 * If we don't have enough space to put in another
1957		 * match result, save our position and tell the
1958		 * user there are more devices to check.
1959		 */
1960		if (spaceleft < sizeof(struct dev_match_result)) {
1961			bzero(&cdm->pos, sizeof(cdm->pos));
1962			cdm->pos.position_type =
1963				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1964				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
1965				CAM_DEV_POS_PERIPH;
1966
1967			cdm->pos.cookie.bus = periph->path->bus;
1968			cdm->pos.generations[CAM_BUS_GENERATION]=
1969				bus_generation;
1970			cdm->pos.cookie.target = periph->path->target;
1971			cdm->pos.generations[CAM_TARGET_GENERATION] =
1972				periph->path->bus->generation;
1973			cdm->pos.cookie.device = periph->path->device;
1974			cdm->pos.generations[CAM_DEV_GENERATION] =
1975				periph->path->target->generation;
1976			cdm->pos.cookie.periph = periph;
1977			cdm->pos.generations[CAM_PERIPH_GENERATION] =
1978				periph->path->device->generation;
1979			cdm->status = CAM_DEV_MATCH_MORE;
1980			return(0);
1981		}
1982
1983		j = cdm->num_matches;
1984		cdm->num_matches++;
1985		cdm->matches[j].type = DEV_MATCH_PERIPH;
1986		cdm->matches[j].result.periph_result.path_id =
1987			periph->path->bus->path_id;
1988		cdm->matches[j].result.periph_result.target_id =
1989			periph->path->target->target_id;
1990		cdm->matches[j].result.periph_result.target_lun =
1991			periph->path->device->lun_id;
1992		cdm->matches[j].result.periph_result.unit_number =
1993			periph->unit_number;
1994		strncpy(cdm->matches[j].result.periph_result.periph_name,
1995			periph->periph_name, DEV_IDLEN);
1996	}
1997
1998	return(1);
1999}
2000
2001static int
2002xptedtmatch(struct ccb_dev_match *cdm)
2003{
2004	int ret;
2005
2006	cdm->num_matches = 0;
2007
2008	/*
2009	 * Check the bus list generation.  If it has changed, the user
2010	 * needs to reset everything and start over.
2011	 */
2012	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2013	 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2014	 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
2015		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2016		return(0);
2017	}
2018
2019	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2020	 && (cdm->pos.cookie.bus != NULL))
2021		ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2022				     xptedtbusfunc, cdm);
2023	else
2024		ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2025
2026	/*
2027	 * If we get back 0, that means that we had to stop before fully
2028	 * traversing the EDT.  It also means that one of the subroutines
2029	 * has set the status field to the proper value.  If we get back 1,
2030	 * we've fully traversed the EDT and copied out any matching entries.
2031	 */
2032	if (ret == 1)
2033		cdm->status = CAM_DEV_MATCH_LAST;
2034
2035	return(ret);
2036}
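
/*
 * A minimal sketch of how a consumer might drive an XPT_DEV_MATCH
 * query to completion.  As long as the status comes back
 * CAM_DEV_MATCH_MORE, the position saved in cdm.pos lets the next
 * call resume where the previous one stopped.  The buffer size and
 * the CAMIOCOMMAND ioctl through a hypothetical xpt file descriptor
 * (fd) are illustrative assumptions, not taken from this file.
 */
#ifdef notdef
	union ccb ccb;
	struct dev_match_result matches[64];

	bzero(&ccb, sizeof(ccb));
	ccb.ccb_h.func_code = XPT_DEV_MATCH;
	ccb.cdm.match_buf_len = sizeof(matches);
	ccb.cdm.matches = matches;
	ccb.cdm.num_patterns = 0;	/* no patterns: match everything */

	do {
		if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
			break;
		/* consume ccb.cdm.num_matches entries of matches[] here */
	} while (ccb.cdm.status == CAM_DEV_MATCH_MORE);
#endif /* notdef */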
2037
2038static int
2039xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2040{
2041	struct ccb_dev_match *cdm;
2042
2043	cdm = (struct ccb_dev_match *)arg;
2044
2045	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2046	 && (cdm->pos.cookie.pdrv == pdrv)
2047	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2048	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2049	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2050	     (*pdrv)->generation)) {
2051		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2052		return(0);
2053	}
2054
2055	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2056	 && (cdm->pos.cookie.pdrv == pdrv)
2057	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2058	 && (cdm->pos.cookie.periph != NULL))
2059		return(xptpdperiphtraverse(pdrv,
2060				(struct cam_periph *)cdm->pos.cookie.periph,
2061				xptplistperiphfunc, arg));
2062	else
2063		return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
2064}
2065
2066static int
2067xptplistperiphfunc(struct cam_periph *periph, void *arg)
2068{
2069	struct ccb_dev_match *cdm;
2070	dev_match_ret retval;
2071
2072	cdm = (struct ccb_dev_match *)arg;
2073
2074	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2075
2076	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2077		cdm->status = CAM_DEV_MATCH_ERROR;
2078		return(0);
2079	}
2080
2081	/*
2082	 * If the copy flag is set, copy this peripheral out.
2083	 */
2084	if (retval & DM_RET_COPY) {
2085		int spaceleft, j;
2086
2087		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2088			sizeof(struct dev_match_result));
2089
2090		/*
2091		 * If we don't have enough space to put in another
2092		 * match result, save our position and tell the
2093		 * user there are more devices to check.
2094		 */
2095		if (spaceleft < sizeof(struct dev_match_result)) {
2096			struct periph_driver **pdrv;
2097
2098			pdrv = NULL;
2099			bzero(&cdm->pos, sizeof(cdm->pos));
2100			cdm->pos.position_type =
2101				CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2102				CAM_DEV_POS_PERIPH;
2103
2104			/*
2105			 * This may look a bit nonsensical, but it is
2106			 * actually quite logical.  There are very few
2107			 * peripheral drivers, and bloating every peripheral
2108			 * structure with a pointer back to its parent
2109			 * peripheral driver linker set entry would cost
2110			 * more in the long run than doing this quick lookup.
2111			 */
2112			for (pdrv =
2113			     (struct periph_driver **)periphdriver_set.ls_items;
2114			     *pdrv != NULL; pdrv++) {
2115				if (strcmp((*pdrv)->driver_name,
2116				    periph->periph_name) == 0)
2117					break;
2118			}
2119
2120			if (*pdrv == NULL) {
2121				cdm->status = CAM_DEV_MATCH_ERROR;
2122				return(0);
2123			}
2124
2125			cdm->pos.cookie.pdrv = pdrv;
2126			/*
2127			 * The periph generation slot does double duty, as
2128			 * does the periph pointer slot.  They are used for
2129			 * both edt and pdrv lookups and positioning.
2130			 */
2131			cdm->pos.cookie.periph = periph;
2132			cdm->pos.generations[CAM_PERIPH_GENERATION] =
2133				(*pdrv)->generation;
2134			cdm->status = CAM_DEV_MATCH_MORE;
2135			return(0);
2136		}
2137
2138		j = cdm->num_matches;
2139		cdm->num_matches++;
2140		cdm->matches[j].type = DEV_MATCH_PERIPH;
2141		cdm->matches[j].result.periph_result.path_id =
2142			periph->path->bus->path_id;
2143
2144		/*
2145		 * The transport layer peripheral doesn't have a target or
2146		 * lun.
2147		 */
2148		if (periph->path->target)
2149			cdm->matches[j].result.periph_result.target_id =
2150				periph->path->target->target_id;
2151		else
2152			cdm->matches[j].result.periph_result.target_id = -1;
2153
2154		if (periph->path->device)
2155			cdm->matches[j].result.periph_result.target_lun =
2156				periph->path->device->lun_id;
2157		else
2158			cdm->matches[j].result.periph_result.target_lun = -1;
2159
2160		cdm->matches[j].result.periph_result.unit_number =
2161			periph->unit_number;
2162		strncpy(cdm->matches[j].result.periph_result.periph_name,
2163			periph->periph_name, DEV_IDLEN);
2164	}
2165
2166	return(1);
2167}
2168
2169static int
2170xptperiphlistmatch(struct ccb_dev_match *cdm)
2171{
2172	int ret;
2173
2174	cdm->num_matches = 0;
2175
2176	/*
2177	 * At this point in the edt traversal function, we check the bus
2178	 * list generation to make sure that no busses have been added or
2179	 * removed since the user last sent an XPT_DEV_MATCH ccb through.
2180	 * For the peripheral driver list traversal function, however, we
2181	 * don't have to worry about new peripheral driver types coming or
2182	 * going; they're in a linker set, and therefore can't change
2183	 * without a recompile.
2184	 */
2185
2186	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2187	 && (cdm->pos.cookie.pdrv != NULL))
2188		ret = xptpdrvtraverse(
2189			        (struct periph_driver **)cdm->pos.cookie.pdrv,
2190				xptplistpdrvfunc, cdm);
2191	else
2192		ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2193
2194	/*
2195	 * If we get back 0, that means that we had to stop before fully
2196	 * traversing the peripheral driver tree.  It also means that one of
2197	 * the subroutines has set the status field to the proper value.  If
2198	 * we get back 1, we've fully traversed the peripheral driver list
2199	 * and copied out any matching entries.
2200	 */
2201	if (ret == 1)
2202		cdm->status = CAM_DEV_MATCH_LAST;
2203
2204	return(ret);
2205}
2206
2207static int
2208xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2209{
2210	struct cam_eb *bus, *next_bus;
2211	int retval;
2212
2213	retval = 1;
2214
2215	for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
2216	     bus != NULL;
2217	     bus = next_bus) {
2218		next_bus = TAILQ_NEXT(bus, links);
2219
2220		retval = tr_func(bus, arg);
2221		if (retval == 0)
2222			return(retval);
2223	}
2224
2225	return(retval);
2226}
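
/*
 * All of the traversal functions here follow the same idiom: the next
 * element is fetched *before* tr_func runs, so that the callback may
 * safely remove (or free) the current element.  For contrast, a
 * sketch of the unsafe version:
 */
#ifdef notdef
	for (bus = TAILQ_FIRST(&xpt_busses); bus != NULL;
	     bus = TAILQ_NEXT(bus, links))	/* WRONG: if tr_func frees */
		(void)tr_func(bus, arg);	/* bus, TAILQ_NEXT touches  */
						/* freed memory.           */
#endif /* notdef */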
2227
2228static int
2229xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2230		  xpt_targetfunc_t *tr_func, void *arg)
2231{
2232	struct cam_et *target, *next_target;
2233	int retval;
2234
2235	retval = 1;
2236	for (target = (start_target ? start_target :
2237		       TAILQ_FIRST(&bus->et_entries));
2238	     target != NULL; target = next_target) {
2239
2240		next_target = TAILQ_NEXT(target, links);
2241
2242		retval = tr_func(target, arg);
2243
2244		if (retval == 0)
2245			return(retval);
2246	}
2247
2248	return(retval);
2249}
2250
2251static int
2252xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2253		  xpt_devicefunc_t *tr_func, void *arg)
2254{
2255	struct cam_ed *device, *next_device;
2256	int retval;
2257
2258	retval = 1;
2259	for (device = (start_device ? start_device :
2260		       TAILQ_FIRST(&target->ed_entries));
2261	     device != NULL;
2262	     device = next_device) {
2263
2264		next_device = TAILQ_NEXT(device, links);
2265
2266		retval = tr_func(device, arg);
2267
2268		if (retval == 0)
2269			return(retval);
2270	}
2271
2272	return(retval);
2273}
2274
2275static int
2276xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2277		  xpt_periphfunc_t *tr_func, void *arg)
2278{
2279	struct cam_periph *periph, *next_periph;
2280	int retval;
2281
2282	retval = 1;
2283
2284	for (periph = (start_periph ? start_periph :
2285		       SLIST_FIRST(&device->periphs));
2286	     periph != NULL;
2287	     periph = next_periph) {
2288
2289		next_periph = SLIST_NEXT(periph, periph_links);
2290
2291		retval = tr_func(periph, arg);
2292		if (retval == 0)
2293			return(retval);
2294	}
2295
2296	return(retval);
2297}
2298
2299static int
2300xptpdrvtraverse(struct periph_driver **start_pdrv,
2301		xpt_pdrvfunc_t *tr_func, void *arg)
2302{
2303	struct periph_driver **pdrv;
2304	int retval;
2305
2306	retval = 1;
2307
2308	/*
2309	 * We don't traverse the peripheral driver list like we do the
2310	 * other lists, because it is a linker set, and therefore cannot be
2311	 * changed during runtime.  If the peripheral driver list is ever
2312	 * re-done to be something other than a linker set (i.e. it can
2313	 * change while the system is running), the list traversal should
2314	 * be modified to work like the other traversal functions.
2315	 */
2316	for (pdrv = (start_pdrv ? start_pdrv :
2317	     (struct periph_driver **)periphdriver_set.ls_items);
2318	     *pdrv != NULL; pdrv++) {
2319		retval = tr_func(pdrv, arg);
2320
2321		if (retval == 0)
2322			return(retval);
2323	}
2324
2325	return(retval);
2326}
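
/*
 * For reference, peripheral drivers land in periphdriver_set at link
 * time via the kernel linker set macros.  A sketch; the "xx" driver
 * and its init routine are hypothetical, and the exact field layout
 * of struct periph_driver is assumed:
 */
#ifdef notdef
static struct periph_driver xxdriver =
{
	xxinit, "xx",
	TAILQ_HEAD_INITIALIZER(xxdriver.units), /* generation */ 0
};

DATA_SET(periphdriver_set, xxdriver);
#endif /* notdef */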
2327
2328static int
2329xptpdperiphtraverse(struct periph_driver **pdrv,
2330		    struct cam_periph *start_periph,
2331		    xpt_periphfunc_t *tr_func, void *arg)
2332{
2333	struct cam_periph *periph, *next_periph;
2334	int retval;
2335
2336	retval = 1;
2337
2338	for (periph = (start_periph ? start_periph :
2339	     TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2340	     periph = next_periph) {
2341
2342		next_periph = TAILQ_NEXT(periph, unit_links);
2343
2344		retval = tr_func(periph, arg);
2345		if (retval == 0)
2346			return(retval);
2347	}
2348	return(retval);
2349}
2350
2351static int
2352xptdefbusfunc(struct cam_eb *bus, void *arg)
2353{
2354	struct xpt_traverse_config *tr_config;
2355
2356	tr_config = (struct xpt_traverse_config *)arg;
2357
2358	if (tr_config->depth == XPT_DEPTH_BUS) {
2359		xpt_busfunc_t *tr_func;
2360
2361		tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2362
2363		return(tr_func(bus, tr_config->tr_arg));
2364	} else
2365		return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2366}
2367
2368static int
2369xptdeftargetfunc(struct cam_et *target, void *arg)
2370{
2371	struct xpt_traverse_config *tr_config;
2372
2373	tr_config = (struct xpt_traverse_config *)arg;
2374
2375	if (tr_config->depth == XPT_DEPTH_TARGET) {
2376		xpt_targetfunc_t *tr_func;
2377
2378		tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2379
2380		return(tr_func(target, tr_config->tr_arg));
2381	} else
2382		return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2383}
2384
2385static int
2386xptdefdevicefunc(struct cam_ed *device, void *arg)
2387{
2388	struct xpt_traverse_config *tr_config;
2389
2390	tr_config = (struct xpt_traverse_config *)arg;
2391
2392	if (tr_config->depth == XPT_DEPTH_DEVICE) {
2393		xpt_devicefunc_t *tr_func;
2394
2395		tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2396
2397		return(tr_func(device, tr_config->tr_arg));
2398	} else
2399		return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2400}
2401
2402static int
2403xptdefperiphfunc(struct cam_periph *periph, void *arg)
2404{
2405	struct xpt_traverse_config *tr_config;
2406	xpt_periphfunc_t *tr_func;
2407
2408	tr_config = (struct xpt_traverse_config *)arg;
2409
2410	tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2411
2412	/*
2413	 * Unlike the other default functions, we don't check for depth
2414	 * here.  The peripheral driver level is the last level in the EDT,
2415	 * so if we're here, we should execute the function in question.
2416	 */
2417	return(tr_func(periph, tr_config->tr_arg));
2418}
2419
2420/*
2421 * Execute the given function for every bus in the EDT.
2422 */
2423static int
2424xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2425{
2426	struct xpt_traverse_config tr_config;
2427
2428	tr_config.depth = XPT_DEPTH_BUS;
2429	tr_config.tr_func = tr_func;
2430	tr_config.tr_arg = arg;
2431
2432	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2433}
2434
2435/*
2436 * Execute the given function for every target in the EDT.
2437 */
2438static int
2439xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
2440{
2441	struct xpt_traverse_config tr_config;
2442
2443	tr_config.depth = XPT_DEPTH_TARGET;
2444	tr_config.tr_func = tr_func;
2445	tr_config.tr_arg = arg;
2446
2447	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2448}
2449
2450/*
2451 * Execute the given function for every device in the EDT.
2452 */
2453static int
2454xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2455{
2456	struct xpt_traverse_config tr_config;
2457
2458	tr_config.depth = XPT_DEPTH_DEVICE;
2459	tr_config.tr_func = tr_func;
2460	tr_config.tr_arg = arg;
2461
2462	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2463}
2464
2465/*
2466 * Execute the given function for every peripheral in the EDT.
2467 */
2468static int
2469xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
2470{
2471	struct xpt_traverse_config tr_config;
2472
2473	tr_config.depth = XPT_DEPTH_PERIPH;
2474	tr_config.tr_func = tr_func;
2475	tr_config.tr_arg = arg;
2476
2477	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2478}
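
/*
 * Example use of the xpt_for_all_* wrappers: counting every device in
 * the EDT.  A non-zero return from the callback continues the
 * traversal; returning 0 aborts it.  (xptcountfunc and its caller are
 * hypothetical.)
 */
#ifdef notdef
static int
xptcountfunc(struct cam_ed *device, void *arg)
{
	(*(int *)arg)++;
	return (1);		/* keep traversing */
}

	/* ... in the caller ... */
	int count = 0;

	xpt_for_all_devices(xptcountfunc, &count);
#endif /* notdef */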
2479
2480static int
2481xptsetasyncfunc(struct cam_ed *device, void *arg)
2482{
2483	struct cam_path path;
2484	struct ccb_getdev cgd;
2485	struct async_node *cur_entry;
2486
2487	cur_entry = (struct async_node *)arg;
2488
2489	xpt_compile_path(&path,
2490			 NULL,
2491			 device->target->bus->path_id,
2492			 device->target->target_id,
2493			 device->lun_id);
2494	xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2495	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2496	xpt_action((union ccb *)&cgd);
2497	cur_entry->callback(cur_entry->callback_arg,
2498			    AC_FOUND_DEVICE,
2499			    &path, &cgd);
2500	xpt_release_path(&path);
2501
2502	return(1);
2503}
2504static int
2505xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2506{
2507	struct cam_path path;
2508	struct ccb_pathinq cpi;
2509	struct async_node *cur_entry;
2510
2511	cur_entry = (struct async_node *)arg;
2512
2513	xpt_compile_path(&path, /*periph*/NULL,
2514			 bus->sim->path_id,
2515			 CAM_TARGET_WILDCARD,
2516			 CAM_LUN_WILDCARD);
2517	xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2518	cpi.ccb_h.func_code = XPT_PATH_INQ;
2519	xpt_action((union ccb *)&cpi);
2520	cur_entry->callback(cur_entry->callback_arg,
2521			    AC_PATH_REGISTERED,
2522			    &path, &cpi);
2523	xpt_release_path(&path);
2524
2525	return(1);
2526}
2527
2528void
2529xpt_action(union ccb *start_ccb)
2530{
2531	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2532
2533	start_ccb->ccb_h.status = CAM_REQ_INPROG;
2534
2535	switch (start_ccb->ccb_h.func_code) {
2536	case XPT_SCSI_IO:
2537	{
2538#ifdef CAMDEBUG
2539		char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2540		struct cam_path *path;
2541
2542		path = start_ccb->ccb_h.path;
2543#endif
2544
2545		/*
2546		 * For the sake of compatibility with SCSI-1
2547		 * devices that may not understand the identify
2548		 * message, we include lun information in the
2549		 * second byte of all commands.  SCSI-1 specifies
2550		 * that luns are a 3 bit value and reserves only 3
2551		 * bits for lun information in the CDB.  Later
2552		 * revisions of the SCSI spec allow for more than 8
2553		 * luns, but have deprecated lun information in the
2554		 * CDB.  So, if the lun won't fit, we must omit.
2555		 * CDB.  So, if the lun won't fit, we must omit it.
2556		 * Also be aware that during initial probing for devices,
2557		 * the inquiry information is unknown but initialized to 0.
2558		 * This means that this code will be exercised while probing
2559		 * devices with an ANSI revision greater than 2.
2560		 */
2561		if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
2562		 && start_ccb->ccb_h.target_lun < 8
2563		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2564
2565			start_ccb->csio.cdb_io.cdb_bytes[1] |=
2566			    start_ccb->ccb_h.target_lun << 5;
2567		}
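		/*
		 * For example, lun 3 on a SCSI-1 device turns CDB byte 1
		 * from 0x00 into 0x60: the lun occupies bits 5-7 of the
		 * byte, leaving bits 0-4 for command-specific use.
		 */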
2568		start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2569		start_ccb->csio.sense_resid = 0;
2570		start_ccb->csio.resid = 0;
2571		CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
2572			  scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
2573			  	       &path->device->inq_data),
2574			  scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
2575					  cdb_str)));
2576		/* FALLTHROUGH */
2577	}
2578	case XPT_TARGET_IO:
2579	case XPT_CONT_TARGET_IO:
2580	case XPT_ENG_EXEC:
2581	{
2582		struct cam_path *path;
2583		int s;
2584		int runq;
2585
2586		path = start_ccb->ccb_h.path;
2587		s = splsoftcam();
2588
2589		cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2590		if (path->device->qfrozen_cnt == 0)
2591			runq = xpt_schedule_dev_sendq(path->bus, path->device);
2592		else
2593			runq = 0;
2594		splx(s);
2595		if (runq != 0)
2596			xpt_run_dev_sendq(path->bus);
2597		break;
2598	}
2599	case XPT_SET_TRAN_SETTINGS:
2600	{
2601		xpt_set_transfer_settings(&start_ccb->cts,
2602					  start_ccb->ccb_h.path->device,
2603					  /*async_update*/FALSE);
2604		break;
2605	}
2606	case XPT_CALC_GEOMETRY:
2607		/* Filter out garbage */
2608		if (start_ccb->ccg.block_size == 0
2609		 || start_ccb->ccg.volume_size == 0) {
2610			start_ccb->ccg.cylinders = 0;
2611			start_ccb->ccg.heads = 0;
2612			start_ccb->ccg.secs_per_track = 0;
2613			start_ccb->ccb_h.status = CAM_REQ_CMP;
2614			break;
2615		}
2616#ifdef PC98
2617		/*
2618		 * In a PC-98 system, geometry translation depends on
2619		 * the "real" device geometry obtained from mode page 4.
2620		 * SCSI geometry translation is performed in the
2621		 * initialization routine of the SCSI BIOS and the result
2622		 * stored in host memory.  If the translation is available
2623		 * in host memory, use it.  If not, rely on the default
2624		 * translation the device driver performs.
2625		 */
2626		if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
2627			start_ccb->ccb_h.status = CAM_REQ_CMP;
2628			break;
2629		}
2630		/* FALLTHROUGH */
2631#endif
2632	case XPT_ACCEPT_TARGET_IO:
2633	case XPT_EN_LUN:
2634	case XPT_IMMED_NOTIFY:
2635	case XPT_NOTIFY_ACK:
2636	case XPT_GET_TRAN_SETTINGS:
2637	case XPT_PATH_INQ:
2638	case XPT_RESET_BUS:
2639	{
2640		struct cam_sim *sim;
2641
2642		sim = start_ccb->ccb_h.path->bus->sim;
2643		(*(sim->sim_action))(sim, start_ccb);
2644		break;
2645	}
2646	case XPT_GDEV_TYPE:
2647		if ((start_ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) != 0) {
2648			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2649		} else {
2650			struct ccb_getdev *cgd;
2651			struct cam_et *tar;
2652			struct cam_ed *dev;
2653			int s;
2654
2655			s = splsoftcam();
2656			cgd = &start_ccb->cgd;
2657			tar = cgd->ccb_h.path->target;
2658			dev = cgd->ccb_h.path->device;
2659			cgd->inq_data = dev->inq_data;
2660			cgd->pd_type = SID_TYPE(&dev->inq_data);
2661			cgd->dev_openings = dev->ccbq.dev_openings;
2662			cgd->dev_active = dev->ccbq.dev_active;
2663			cgd->devq_openings = dev->ccbq.devq_openings;
2664			cgd->devq_queued = dev->ccbq.queue.entries;
2665			cgd->held = dev->ccbq.held;
2666			cgd->maxtags = dev->quirk->maxtags;
2667			cgd->mintags = dev->quirk->mintags;
2668			cgd->ccb_h.status = CAM_REQ_CMP;
2669			cgd->serial_num_len = dev->serial_num_len;
2670			if ((dev->serial_num_len > 0)
2671			 && (dev->serial_num != NULL))
2672				bcopy(dev->serial_num, cgd->serial_num,
2673				      dev->serial_num_len);
2674			splx(s);
2675		}
2676		break;
2677	case XPT_GDEVLIST:
2678	{
2679		struct cam_periph	*nperiph;
2680		struct periph_list	*periph_head;
2681		struct ccb_getdevlist	*cgdl;
2682		int			i;
2683		int			s;
2684		struct cam_ed		*device;
2685		int			found;
2686
2687
2688		found = 0;
2689
2690		/*
2691		 * Don't want anyone mucking with our data.
2692		 */
2693		s = splsoftcam();
2694		device = start_ccb->ccb_h.path->device;
2695		periph_head = &device->periphs;
2696		cgdl = &start_ccb->cgdl;
2697
2698		/*
2699		 * Check and see if the list has changed since the user
2700		 * last requested a list member.  If so, tell them that the
2701		 * list has changed, and therefore they need to start over
2702		 * from the beginning.
2703		 */
2704		if ((cgdl->index != 0) &&
2705		    (cgdl->generation != device->generation)) {
2706			cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
2707			splx(s);
2708			break;
2709		}
2710
2711		/*
2712		 * Traverse the list of peripherals and attempt to find
2713		 * the requested peripheral.
2714		 */
2715		for (nperiph = periph_head->slh_first, i = 0;
2716		     (nperiph != NULL) && (i <= cgdl->index);
2717		     nperiph = nperiph->periph_links.sle_next, i++) {
2718			if (i == cgdl->index) {
2719				strncpy(cgdl->periph_name,
2720					nperiph->periph_name,
2721					DEV_IDLEN);
2722				cgdl->unit_number = nperiph->unit_number;
2723				found = 1;
2724			}
2725		}
2726		if (found == 0) {
2727			cgdl->status = CAM_GDEVLIST_ERROR;
2728			splx(s);
2729			break;
2730		}
2731
2732		if (nperiph == NULL)
2733			cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
2734		else
2735			cgdl->status = CAM_GDEVLIST_MORE_DEVS;
2736
2737		cgdl->index++;
2738		cgdl->generation = device->generation;
2739
2740		splx(s);
2741		cgdl->ccb_h.status = CAM_REQ_CMP;
2742		break;
2743	}
2744	case XPT_DEV_MATCH:
2745	{
2746		int s;
2747		dev_pos_type position_type;
2748		struct ccb_dev_match *cdm;
2749		int ret;
2750
2751		cdm = &start_ccb->cdm;
2752
2753		/*
2754		 * Prevent EDT changes while we traverse it.
2755		 */
2756		s = splsoftcam();
2757		/*
2758		 * There are two ways of getting at information in the EDT.
2759		 * The first way is via the primary EDT tree.  It starts
2760		 * with a list of busses, then a list of targets on a bus,
2761		 * then devices/luns on a target, and then peripherals on a
2762		 * device/lun.  The "other" way is by the peripheral driver
2763		 * lists.  The peripheral driver lists are organized by
2764		 * peripheral driver (obviously).  So it makes sense to
2765		 * use the peripheral driver list if the user is looking
2766		 * for something like "da1", or all "da" devices.  If the
2767		 * user is looking for something on a particular bus/target
2768		 * or lun, it's generally better to go through the EDT tree.
2769		 */
2770
2771		if (cdm->pos.position_type != CAM_DEV_POS_NONE)
2772			position_type = cdm->pos.position_type;
2773		else {
2774			int i;
2775
2776			position_type = CAM_DEV_POS_NONE;
2777
2778			for (i = 0; i < cdm->num_patterns; i++) {
2779				if ((cdm->patterns[i].type == DEV_MATCH_BUS)
2780				 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
2781					position_type = CAM_DEV_POS_EDT;
2782					break;
2783				}
2784			}
2785
2786			if (cdm->num_patterns == 0)
2787				position_type = CAM_DEV_POS_EDT;
2788			else if (position_type == CAM_DEV_POS_NONE)
2789				position_type = CAM_DEV_POS_PDRV;
2790		}
2791
2792		switch(position_type & CAM_DEV_POS_TYPEMASK) {
2793		case CAM_DEV_POS_EDT:
2794			ret = xptedtmatch(cdm);
2795			break;
2796		case CAM_DEV_POS_PDRV:
2797			ret = xptperiphlistmatch(cdm);
2798			break;
2799		default:
2800			cdm->status = CAM_DEV_MATCH_ERROR;
2801			break;
2802		}
2803
2804		splx(s);
2805
2806		if (cdm->status == CAM_DEV_MATCH_ERROR)
2807			start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2808		else
2809			start_ccb->ccb_h.status = CAM_REQ_CMP;
2810
2811		break;
2812	}
2813	case XPT_SASYNC_CB:
2814	{
2815		/*
2816		 * First off, determine the list we want to
2817		 * be inserted into.
2818		 */
2819		struct ccb_setasync *csa;
2820		struct async_node *cur_entry;
2821		struct async_list *async_head;
2822		u_int32_t added;
2823		int s;
2824
2825		csa = &start_ccb->csa;
2826		added = csa->event_enable;
2827		if (csa->ccb_h.path->device != NULL) {
2828			async_head = &csa->ccb_h.path->device->asyncs;
2829		} else {
2830			async_head = &csa->ccb_h.path->bus->asyncs;
2831		}
2832
2833		/*
2834		 * If there is already an entry for us, simply
2835		 * update it.
2836		 */
2837		s = splsoftcam();
2838		cur_entry = SLIST_FIRST(async_head);
2839		while (cur_entry != NULL) {
2840			if ((cur_entry->callback_arg == csa->callback_arg)
2841			 && (cur_entry->callback == csa->callback))
2842				break;
2843			cur_entry = SLIST_NEXT(cur_entry, links);
2844		}
2845
2846		if (cur_entry != NULL) {
2847			/*
2848			 * If the request has no flags set,
2849			 * remove the entry.
2850			 */
2851			added &= ~cur_entry->event_enable;
2852			if (csa->event_enable == 0) {
2853				SLIST_REMOVE(async_head, cur_entry,
2854					     async_node, links);
2855				free(cur_entry, M_DEVBUF);
2856			} else {
2857				cur_entry->event_enable = csa->event_enable;
2858			}
2859		} else {
2860			cur_entry = malloc(sizeof(*cur_entry), M_DEVBUF,
2861					   M_NOWAIT);
2862			if (cur_entry == NULL) {
2863				splx(s);
2864				csa->ccb_h.status = CAM_RESRC_UNAVAIL;
2865				break;
2866			}
2867			cur_entry->callback_arg = csa->callback_arg;
2868			cur_entry->callback = csa->callback;
2869			cur_entry->event_enable = csa->event_enable;
2870			SLIST_INSERT_HEAD(async_head, cur_entry, links);
2871		}
2872
2873		if ((added & AC_FOUND_DEVICE) != 0) {
2874			/*
2875			 * Get this peripheral up to date with all
2876			 * the currently existing devices.
2877			 */
2878			xpt_for_all_devices(xptsetasyncfunc, cur_entry);
2879		}
2880		if ((added & AC_PATH_REGISTERED) != 0) {
2881			/*
2882			 * Get this peripheral up to date with all
2883			 * the currently existing busses.
2884			 */
2885			xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
2886		}
2887		splx(s);
2888		start_ccb->ccb_h.status = CAM_REQ_CMP;
2889		break;
2890	}
2891	case XPT_REL_SIMQ:
2892	{
2893		struct ccb_relsim *crs;
2894		struct cam_ed *dev;
2895		int s;
2896
2897		crs = &start_ccb->crs;
2898		dev = crs->ccb_h.path->device;
2899		if (dev == NULL) {
2900
2901			crs->ccb_h.status = CAM_DEV_NOT_THERE;
2902			break;
2903		}
2904
2905		s = splcam();
2906
2907		if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
2908
2909			if ((dev->inq_data.flags & SID_CmdQue) != 0) {
2910				int reduction;
2911
2912				/* Don't ever go below one opening */
2913				if (crs->openings > 0) {
2914					xpt_dev_ccbq_resize(crs->ccb_h.path,
2915							    crs->openings);
2916
2917					if (bootverbose) {
2918						xpt_print_path(crs->ccb_h.path);
2919						printf("tagged openings "
2920						       "now %d\n",
2921						       crs->openings);
2922					}
2923				}
2924			}
2925		}
2926
2927		if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
2928
2929			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
2930
2931				/*
2932				 * Just extend the old timeout and decrement
2933				 * the freeze count so that a single timeout
2934				 * is sufficient for releasing the queue.
2935				 */
2936				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2937				untimeout(xpt_release_devq_timeout,
2938					  dev, dev->c_handle);
2939			} else {
2940
2941				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2942			}
2943
2944			dev->c_handle =
2945				timeout(xpt_release_devq_timeout,
2946					dev,
2947					(crs->release_timeout * hz) / 1000);
2948
2949			dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
2950
2951		}
2952
2953		if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
2954
2955			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
2956				/*
2957				 * Decrement the freeze count so that a single
2958				 * completion is still sufficient to unfreeze
2959				 * the queue.
2960				 */
2961				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2962			} else {
2963
2964				dev->flags |= CAM_DEV_REL_ON_COMPLETE;
2965				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2966			}
2967		}
2968
2969		if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
2970
2971			if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
2972			 || (dev->ccbq.dev_active == 0)) {
2973
2974				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2975			} else {
2976
2977				dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
2978				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2979			}
2980		}
2981		splx(s);
2982
2983		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
2984
2985			xpt_release_devq(crs->ccb_h.path->device,
2986					 /*run_queue*/TRUE);
2987		}
2988		start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
2989		start_ccb->ccb_h.status = CAM_REQ_CMP;
2990		break;
2991	}
2992	case XPT_SCAN_BUS:
2993		xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
2994		break;
2995	case XPT_SCAN_LUN:
2996		xpt_scan_lun(start_ccb->ccb_h.path->periph,
2997			     start_ccb->ccb_h.path, start_ccb->crcn.flags,
2998			     start_ccb);
2999		break;
3000	case XPT_DEBUG: {
3001#ifdef CAMDEBUG
3002		int s;
3003
3004		s = splcam();
3005		cam_dflags = start_ccb->cdbg.flags;
3006		if (cam_dpath != NULL) {
3007			xpt_free_path(cam_dpath);
3008			cam_dpath = NULL;
3009		}
3010
3011		if (cam_dflags != CAM_DEBUG_NONE) {
3012			if (xpt_create_path(&cam_dpath, xpt_periph,
3013					    start_ccb->ccb_h.path_id,
3014					    start_ccb->ccb_h.target_id,
3015					    start_ccb->ccb_h.target_lun) !=
3016					    CAM_REQ_CMP) {
3017				start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3018				cam_dflags = CAM_DEBUG_NONE;
3019			} else
3020				start_ccb->ccb_h.status = CAM_REQ_CMP;
3021		} else {
3022			cam_dpath = NULL;
3023			start_ccb->ccb_h.status = CAM_REQ_CMP;
3024		}
3025		splx(s);
3026#else /* !CAMDEBUG */
3027		start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3028#endif /* CAMDEBUG */
3029		break;
3030	}
3031	case XPT_NOOP:
3032		start_ccb->ccb_h.status = CAM_REQ_CMP;
3033		break;
3034	default:
3035	case XPT_SDEV_TYPE:
3036	case XPT_ABORT:
3037	case XPT_RESET_DEV:
3038	case XPT_TERM_IO:
3039	case XPT_ENG_INQ:
3040		/* XXX Implement */
3041		start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3042		break;
3043	}
3044}
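
/*
 * Sketch of a peripheral driver using XPT_REL_SIMQ to grow its tagged
 * opening count once a device has proven it can handle more
 * transactions.  The path and the count of 32 are illustrative
 * assumptions:
 */
#ifdef notdef
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, periph->path, /*priority*/1);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.release_flags = RELSIM_ADJUST_OPENINGS;
	crs.openings = 32;
	crs.release_timeout = 0;
	xpt_action((union ccb *)&crs);
#endif /* notdef */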
3045
3046void
3047xpt_polled_action(union ccb *start_ccb)
3048{
3049	int	  s;
3050	u_int32_t timeout;
3051	struct	  cam_sim *sim;
3052	struct	  cam_devq *devq;
3053	struct	  cam_ed *dev;
3054
3055	timeout = start_ccb->ccb_h.timeout;
3056	sim = start_ccb->ccb_h.path->bus->sim;
3057	devq = sim->devq;
3058	dev = start_ccb->ccb_h.path->device;
3059
3060	s = splcam();
3061
3062	/*
3063	 * Steal an opening so that no other queued requests
3064	 * can get it before us while we simulate interrupts.
3065	 */
3066	dev->ccbq.devq_openings--;
3067	dev->ccbq.dev_openings--;
3068
3069	while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0)
3070	   && (--timeout > 0)) {
3071		DELAY(1000);
3072		(*(sim->sim_poll))(sim);
3073		swi_camnet();
3074		swi_cambio();
3075	}
3076
3077	dev->ccbq.devq_openings++;
3078	dev->ccbq.dev_openings++;
3079
3080	if (timeout != 0) {
3081		xpt_action(start_ccb);
3082		while(--timeout > 0) {
3083			(*(sim->sim_poll))(sim);
3084			swi_camnet();
3085			swi_cambio();
3086			if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
3087			    != CAM_REQ_INPROG)
3088				break;
3089			DELAY(1000);
3090		}
3091		if (timeout == 0) {
3092			/*
3093			 * XXX Is it worth adding a sim_timeout entry
3094			 * point so we can attempt recovery?  If
3095			 * this is only used for dumps, I don't think
3096			 * it is.
3097			 */
3098			start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3099		}
3100	} else {
3101		start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3102	}
3103	splx(s);
3104}
3105
3106/*
3107 * Schedule a peripheral driver to receive a ccb when it's
3108 * Schedule a peripheral driver to receive a ccb when its
3109 */
3110void
3111xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3112{
3113	struct cam_ed *device;
3114	int s;
3115	int runq;
3116
3117	CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3118	device = perph->path->device;
3119	s = splsoftcam();
3120	if (periph_is_queued(perph)) {
3121		/* Simply reorder based on new priority */
3122		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3123			  ("   change priority to %d\n", new_priority));
3124		if (new_priority < perph->pinfo.priority) {
3125			camq_change_priority(&device->drvq,
3126					     perph->pinfo.index,
3127					     new_priority);
3128		}
3129		runq = 0;
3130	} else {
3131		/* New entry on the queue */
3132		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3133			  ("   added periph to queue\n"));
3134		if (device->drvq.generation++ == 0) {
3135			/* Generation wrap, regen all entries */
3136			camq_regen(&device->drvq);
3137		}
3138		perph->pinfo.priority = new_priority;
3139		perph->pinfo.generation = device->drvq.generation;
3140		camq_insert(&device->drvq, &perph->pinfo);
3141		runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3142	}
3143	splx(s);
3144	if (runq != 0) {
3145		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3146			  ("   calling xpt_run_devq\n"));
3147		xpt_run_dev_allocq(perph->path->bus);
3148	}
3149}
3150
3151
3152/*
3153 * Schedule a device to run on a given queue.
3154 * If the device was inserted as a new entry on the queue,
3155 * return 1 meaning the device queue should be run. If we
3156 * were already queued, implying someone else has already
3157 * started the queue, return 0 so the caller doesn't attempt
3158 * to run the queue.  Must be run at splsoftcam or splcam
3159 * (since splcam encompasses splsoftcam).
3160 */
3161static int
3162xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3163		 u_int32_t new_priority)
3164{
3165	int retval;
3166	u_int32_t old_priority;
3167
3168	CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("xpt_schedule_dev\n"));
3169
3170	old_priority = pinfo->priority;
3171
3172	/*
3173	 * Are we already queued?
3174	 */
3175	if (pinfo->index != CAM_UNQUEUED_INDEX) {
3176		/* Simply reorder based on new priority */
3177		if (new_priority < old_priority) {
3178			camq_change_priority(queue, pinfo->index,
3179					     new_priority);
3180			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
3181					("changed priority to %d\n",
3182					 new_priority));
3183		}
3184		retval = 0;
3185	} else {
3186		/* New entry on the queue */
3187		if (new_priority < old_priority)
3188			pinfo->priority = new_priority;
3189
3190		CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
3191				("Inserting onto queue\n"));
3192		if (queue->generation++ == 0) {
3193			/* Generation wrap, regen all entries */
3194			camq_regen(queue);
3195		}
3196		pinfo->generation = queue->generation;
3197		camq_insert(queue, pinfo);
3198		retval = 1;
3199	}
3200	return (retval);
3201}
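
/*
 * The contract above shapes every caller into the same pattern; this
 * sketch mirrors the XPT_SCSI_IO handling in xpt_action():
 */
#ifdef notdef
	s = splsoftcam();
	runq = xpt_schedule_dev_sendq(path->bus, path->device);
	splx(s);
	if (runq != 0)
		xpt_run_dev_sendq(path->bus);
#endif /* notdef */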
3202
3203static void
3204xpt_run_dev_allocq(struct cam_eb *bus)
3205{
3206	struct	cam_devq *devq;
3207	int	s;
3208
3209	CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("xpt_run_dev_allocq\n"));
3210	devq = bus->sim->devq;
3211
3212	CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
3213			("   qfrozen_cnt == 0x%x, entries == %d, "
3214			 "openings == %d, active == %d\n",
3215			 devq->alloc_queue.qfrozen_cnt,
3216			 devq->alloc_queue.entries,
3217			 devq->alloc_openings,
3218			 devq->alloc_active));
3219
3220	s = splsoftcam();
3221	devq->alloc_queue.qfrozen_cnt++;
3222	while ((devq->alloc_queue.entries > 0)
3223	    && (devq->alloc_openings > 0)
3224	    && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3225		struct	cam_ed_qinfo *qinfo;
3226		struct	cam_ed *device;
3227		union	ccb *work_ccb;
3228		struct	cam_periph *drv;
3229		struct	camq *drvq;
3230
3231		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3232							   /*position*/0);
3233		device = qinfo->device;
3234
3235		CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
3236				("running device 0x%x\n", device));
3237
3238		drvq = &device->drvq;
3239
3240#ifdef CAMDEBUG
3241		if (drvq->entries <= 0) {
3242			panic("xpt_run_dev_allocq: "
3243			      "Device on queue without any work to do");
3244		}
3245#endif
3246		if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3247			devq->alloc_openings--;
3248			devq->alloc_active++;
3249			drv = (struct cam_periph*)camq_remove(drvq,
3250							      /*pos*/0);
3251			/* Update priority */
3252			if (drvq->entries > 0) {
3253				qinfo->pinfo.priority = drvq->queue_array[0]->priority;
3254			} else {
3255				qinfo->pinfo.priority = CAM_PRIORITY_NONE;
3256			}
3257			splx(s);
3258			xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3259				      drv->pinfo.priority);
3260			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
3261					("calling periph start\n"));
3262			drv->periph_start(drv, work_ccb);
3263		} else {
3264			/*
3265			 * Malloc failure in alloc_ccb
3266			 */
3267			/*
3268			 * XXX add us to a list to be run from free_ccb
3269			 * if we don't have any ccbs active on this
3270			 * device queue otherwise we may never get run
3271			 * again.
3272			 */
3273			break;
3274		}
3275
3276		/* Raise IPL for possible insertion and test at top of loop */
3277		s = splsoftcam();
3278
3279		if (drvq->entries > 0) {
3280			/* We have more work.  Attempt to reschedule */
3281			xpt_schedule_dev_allocq(bus, device);
3282		}
3283	}
3284	devq->alloc_queue.qfrozen_cnt--;
3285	splx(s);
3286}
3287
3288static void
3289xpt_run_dev_sendq(struct cam_eb *bus)
3290{
3291	struct	cam_devq *devq;
3292	int	s;
3293
3294	CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("xpt_run_dev_sendq\n"));
3295
3296	devq = bus->sim->devq;
3297
3298	s = splcam();
3299	devq->send_queue.qfrozen_cnt++;
3300	splx(s);
3301	s = splsoftcam();
3302	while ((devq->send_queue.entries > 0)
3303	    && (devq->send_openings > 0)) {
3304		struct	cam_ed_qinfo *qinfo;
3305		struct	cam_ed *device;
3306		union ccb *work_ccb;
3307		struct	cam_sim *sim;
3308		int	ospl;
3309
3310		ospl = splcam();
3311		if (devq->send_queue.qfrozen_cnt > 1) {
3312			splx(ospl);
3313			break;
3314		}
3315
3316		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3317							   /*position*/0);
3318		device = qinfo->device;
3319
3320		/*
3321		 * If the device has been "frozen", don't attempt
3322		 * to run it.
3323		 */
3324		if (device->qfrozen_cnt > 0) {
3325			splx(ospl);
3326			continue;
3327		}
3328
3329		CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
3330				("running device 0x%x\n", device));
3331
3332		work_ccb = cam_ccbq_peek_ccb(&device->ccbq, 0);
3333		if (work_ccb == NULL) {
3334			printf("device on run queue with no ccbs???\n");
3335			splx(ospl);
3336			continue;
3337		}
3338
3339		if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3340
3341			if (num_highpower <= 0) {
3342				/*
3343				 * We got a high power command, but we
3344				 * don't have any available slots.  Freeze
3345				 * the device queue until we have a slot
3346				 * available.
3347				 */
3348				device->qfrozen_cnt++;
3349				STAILQ_INSERT_TAIL(&highpowerq,
3350						   &work_ccb->ccb_h,
3351						   xpt_links.stqe);
3352
3353				splx(ospl);
3354				continue;
3355			} else {
3356				/*
3357				 * Consume a high power slot while
3358				 * this ccb runs.
3359				 */
3360				num_highpower--;
3361			}
3362		}
3363		devq->active_dev = device;
3364		cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3365
3366		cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3367		splx(ospl);
3368
3369		devq->send_openings--;
3370		devq->send_active++;
3371
3372		if (device->ccbq.queue.entries > 0) {
3373			qinfo->pinfo.priority =
3374			    device->ccbq.queue.queue_array[0]->priority;
3375			xpt_schedule_dev_sendq(bus, device);
3376		} else {
3377			qinfo->pinfo.priority = CAM_PRIORITY_NONE;
3378		}
3379
3380		if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
3381			/*
3382			 * The client wants to freeze the queue
3383			 * after this CCB is sent.
3384			 */
3385			ospl = splcam();
3386			device->qfrozen_cnt++;
3387			splx(ospl);
3388		}
3389
3390		splx(s);
3391
3392		if ((device->inq_flags & SID_CmdQue) != 0)
3393			work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3394		else
3395			/*
3396			 * Clear this in case of a retried CCB that failed
3397			 * due to a rejected tag.
3398			 */
3399			work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3400
3401		/*
3402		 * Device queues can be shared among multiple sim instances
3403		 * that reside on different busses.  Use the SIM in the queue
3404		 * CCB's path, rather than the one in the bus that was passed
3405		 * into this function.
3406		 */
3407		sim = work_ccb->ccb_h.path->bus->sim;
3408		(*(sim->sim_action))(sim, work_ccb);
3409
3410		ospl = splcam();
3411		devq->active_dev = NULL;
3412		splx(ospl);
3413		/* Raise IPL for possible insertion and test at top of loop */
3414		s = splsoftcam();
3415	}
3416	splx(s);
3417	s = splcam();
3418	devq->send_queue.qfrozen_cnt--;
3419	splx(s);
3420}
3421
3422/*
3423 * This function merges stuff from the slave ccb into the master ccb, while
3424 * keeping important fields in the master ccb constant.
3425 */
3426void
3427xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3428{
3429	/*
3430	 * Pull fields that are valid for peripheral drivers to set
3431	 * into the master CCB along with the CCB "payload".
3432	 */
3433	master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3434	master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3435	master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3436	master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
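	/*
	 * The copy below relies on &hdr[1] pointing at the first byte
	 * past the ccb_hdr: copying sizeof(union ccb) -
	 * sizeof(struct ccb_hdr) bytes from there moves the entire CCB
	 * "payload" while leaving the master's header fields (path,
	 * pinfo, xpt_links, ...) intact beyond those assigned above.
	 */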
3437	bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3438	      sizeof(union ccb) - sizeof(struct ccb_hdr));
3439}
3440
3441void
3442xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3443{
3444	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3445	ccb_h->pinfo.priority = priority;
3446	ccb_h->path = path;
3447	ccb_h->path_id = path->bus->path_id;
3448	if (path->target)
3449		ccb_h->target_id = path->target->target_id;
3450	else
3451		ccb_h->target_id = CAM_TARGET_WILDCARD;
3452	if (path->device) {
3453		if (path->device->ccbq.queue.generation++ == 0) {
3454			/* Generation wrap, regen all entries */
3455			cam_ccbq_regen(&path->device->ccbq);
3456		}
3457		ccb_h->target_lun = path->device->lun_id;
3458		ccb_h->pinfo.generation = path->device->ccbq.queue.generation;
3459	} else {
3460		ccb_h->target_lun = CAM_LUN_WILDCARD;
3461	}
3462	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3463	ccb_h->flags = 0;
3464}
3465
3466/* Path manipulation functions */
3467cam_status
3468xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3469		path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3470{
3471	struct	   cam_path *path;
3472	cam_status status;
3473
3474	path = (struct cam_path *)malloc(sizeof(*path), M_DEVBUF, M_NOWAIT);
3475
3476	if (path == NULL) {
3477		status = CAM_RESRC_UNAVAIL;
3478		return(status);
3479	}
3480	status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3481	if (status != CAM_REQ_CMP) {
3482		free(path, M_DEVBUF);
3483		path = NULL;
3484	}
3485	*new_path_ptr = path;
3486	return (status);
3487}
3488
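
/*
 * Typical life cycle of a dynamically allocated path; a sketch with
 * hypothetical ids and error handling:
 */
#ifdef notdef
	struct cam_path *path;

	if (xpt_create_path(&path, /*periph*/NULL, path_id,
			    target_id, lun_id) != CAM_REQ_CMP)
		return;
	/* ... use the path in CCBs or xpt_async() calls ... */
	xpt_free_path(path);	/* drops device/target refs, frees path */
#endif /* notdef */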
3489static cam_status
3490xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3491		 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3492{
3493	struct	     cam_eb *bus;
3494	struct	     cam_et *target;
3495	struct	     cam_ed *device;
3496	cam_status   status;
3497	int	     s;
3498
3499	status = CAM_REQ_CMP;	/* Completed without error */
3500	target = NULL;		/* Wildcarded */
3501	device = NULL;		/* Wildcarded */
3502	s = splsoftcam();
3503	bus = xpt_find_bus(path_id);
3504	if (bus == NULL) {
3505		status = CAM_PATH_INVALID;
3506	} else if (target_id != CAM_TARGET_WILDCARD) {
3507		target = xpt_find_target(bus, target_id);
3508		if (target == NULL) {
3509			if (path_id == CAM_XPT_PATH_ID) {
3510				status = CAM_TID_INVALID;
3511			} else {
3512				/* Create one */
3513				struct cam_et *new_target;
3514
3515				new_target = xpt_alloc_target(bus, target_id);
3516				if (new_target == NULL) {
3517					status = CAM_RESRC_UNAVAIL;
3518				} else {
3519					target = new_target;
3520				}
3521			}
3522		}
3523		if (target != NULL && lun_id != CAM_LUN_WILDCARD) {
3524			device = xpt_find_device(target, lun_id);
3525			if (device == NULL) {
3526				if (path_id == CAM_XPT_PATH_ID) {
3527					status = CAM_LUN_INVALID;
3528				} else {
3529					/* Create one */
3530					struct cam_ed *new_device;
3531
3532					new_device = xpt_alloc_device(bus,
3533								      target,
3534								      lun_id);
3535					if (new_device == NULL) {
3536						status = CAM_RESRC_UNAVAIL;
3537					} else {
3538						device = new_device;
3539					}
3540				}
3541			}
3542		}
3543	} else if (lun_id != CAM_LUN_WILDCARD) {
3544		/*
3545		 * Specific luns are not allowed if the
3546		 * target is wildcarded
3547		 */
3548		status = CAM_LUN_INVALID;
3549	}
3550
3551	/*
3552	 * Only touch the user's data if we are successful.
3553	 */
3554	if (status == CAM_REQ_CMP) {
3555		new_path->periph = perph;
3556		new_path->bus = bus;
3557		new_path->target = target;
3558		new_path->device = device;
3559		CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3560	} else {
3561		if (device != NULL)
3562			xpt_release_device(bus, target, device);
3563		if (target != NULL)
3564			xpt_release_target(bus, target);
3565	}
3566	splx(s);
3567	return (status);
3568}
3569
3570static void
3571xpt_release_path(struct cam_path *path)
3572{
3573	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3574	if (path->device != NULL)
3575		xpt_release_device(path->bus, path->target, path->device);
3576	if (path->target != NULL)
3577		xpt_release_target(path->bus, path->target);
3578}
3579
3580void
3581xpt_free_path(struct cam_path *path)
3582{
3583	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3584	xpt_release_path(path);
3585	free(path, M_DEVBUF);
3586}
3587
3588
3589/*
3590 * Return -1 for failure, 0 for exact match, 1 for match with wildcards.
3591 */
3592int
3593xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3594{
3595	int retval = 0;
3596
3597	if (path1->bus != path2->bus) {
3598		if ((path1->bus == NULL)
3599		 || (path2->bus == NULL))
3600			retval = 1;
3601		else
3602			return (-1);
3603	}
3604	if (path1->target != path2->target) {
3605		if ((path1->target == NULL)
3606		 || (path2->target == NULL))
3607			retval = 1;
3608		else
3609			return (-1);
3610	}
3611	if (path1->device != path2->device) {
3612		if ((path1->device == NULL)
3613		 || (path2->device == NULL))
3614			retval = 1;
3615		else
3616			return (-1);
3617	}
3618	return (retval);
3619}
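
/*
 * Example: deciding whether an async event delivered on event_path
 * applies to a peripheral's own path.  Both 0 (exact match) and 1
 * (match through wildcards) count as "applies"; -1 does not.
 * (event_path is a hypothetical name.)
 */
#ifdef notdef
	if (xpt_path_comp(event_path, periph->path) >= 0) {
		/* handle the event */
	}
#endif /* notdef */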
3620
3621void
3622xpt_print_path(struct cam_path *path)
3623{
3624	if (path == NULL)
3625		printf("(nopath): ");
3626	else {
3627		if (path->periph != NULL)
3628			printf("(%s%d:", path->periph->periph_name,
3629			       path->periph->unit_number);
3630		else
3631			printf("(noperiph:");
3632
3633		if (path->bus != NULL)
3634			printf("%s%d:%d:", path->bus->sim->sim_name,
3635			       path->bus->sim->unit_number,
3636			       path->bus->sim->bus_id);
3637		else
3638			printf("nobus:");
3639
3640		if (path->target != NULL)
3641			printf("%d:", path->target->target_id);
3642		else
3643			printf("X:");
3644
3645		if (path->device != NULL)
3646			printf("%d): ", path->device->lun_id);
3647		else
3648			printf("X): ");
3649	}
3650}
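
/*
 * For a fully specified path the output of xpt_print_path() looks
 * like "(da0:ahc0:0:3:0): " -- periph name and unit, sim name and
 * unit, bus id, target id, and lun; missing components print as
 * "nopath", "noperiph", "nobus", or "X".
 */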
3651
3652path_id_t
3653xpt_path_path_id(struct cam_path *path)
3654{
3655	return(path->bus->path_id);
3656}
3657
3658target_id_t
3659xpt_path_target_id(struct cam_path *path)
3660{
3661	if (path->target != NULL)
3662		return (path->target->target_id);
3663	else
3664		return (CAM_TARGET_WILDCARD);
3665}
3666
3667lun_id_t
3668xpt_path_lun_id(struct cam_path *path)
3669{
3670	if (path->device != NULL)
3671		return (path->device->lun_id);
3672	else
3673		return (CAM_LUN_WILDCARD);
3674}
3675
3676struct cam_sim *
3677xpt_path_sim(struct cam_path *path)
3678{
3679	return (path->bus->sim);
3680}
3681
3682struct cam_periph*
3683xpt_path_periph(struct cam_path *path)
3684{
3685	return (path->periph);
3686}
3687
3688/*
3689 * Release a CAM control block for the caller.  Remit the cost of the structure
3690 * to the device referenced by the path.  If this device had no 'credits'
3691 * and peripheral drivers have registered async callbacks for this
3692 * notification, call them now.
3693 */
3694void
3695xpt_release_ccb(union ccb *free_ccb)
3696{
3697	int	 s;
3698	struct	 cam_path *path;
3699	struct	 cam_ed *device;
3700	struct	 cam_eb *bus;
3701
3702	CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("xpt_release_ccb\n"));
3703	path = free_ccb->ccb_h.path;
3704	device = path->device;
3705	bus = path->bus;
3706	s = splsoftcam();
3707	cam_ccbq_release_opening(&device->ccbq);
3708	if (xpt_ccb_count > xpt_max_ccbs) {
3709		xpt_free_ccb(free_ccb);
3710		xpt_ccb_count--;
3711	} else {
3712		SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
3713	}
3714	bus->sim->devq->alloc_openings++;
3715	bus->sim->devq->alloc_active--;
3716	/* XXX Turn this into an inline function - xpt_run_device?? */
3717	if ((device_is_alloc_queued(device) == 0)
3718	 && (device->drvq.entries > 0)) {
3719		xpt_schedule_dev_allocq(bus, device);
3720	}
3721	splx(s);
3722	if (dev_allocq_is_runnable(bus->sim->devq))
3723		xpt_run_dev_allocq(bus);
3724}
3725
3726/* Functions accessed by SIM drivers */
3727
3728/*
3729 * A sim structure, listing the SIM entry points and instance
3730 * identification info, is passed to xpt_bus_register to hook the SIM
3731 * into the CAM framework.  xpt_bus_register creates a cam_eb entry
3732 * for this new bus, places it in the list of busses, and assigns
3733 * it a path_id.  The path_id may be influenced by "hard wiring"
3734 * information specified by the user.  Once interrupt services are
3735 * available, the bus will be probed.
3736 */
3737int32_t
3738xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
3739{
3740	static path_id_t buscount;
3741	struct cam_eb *new_bus;
3742	struct ccb_pathinq cpi;
3743	int s;
3744
3745	sim->bus_id = bus;
3746	new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
3747					  M_DEVBUF, M_NOWAIT);
3748	if (new_bus == NULL) {
3749		/* Couldn't satisfy request */
3750		return (CAM_RESRC_UNAVAIL);
3751	}
3752
3753	bzero(new_bus, sizeof(*new_bus));
3754
3755	if (strcmp(sim->sim_name, "xpt") != 0) {
3756
3757		sim->path_id = xptpathid(sim->sim_name, sim->unit_number,
3758					 sim->bus_id, &buscount);
3759	}
3760
3761	new_bus->path_id = sim->path_id;
3762	new_bus->sim = sim;
3763	SLIST_INIT(&new_bus->asyncs);
3764	TAILQ_INIT(&new_bus->et_entries);
3765	s = splsoftcam();
3766	TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
3767	bus_generation++;
3768
3769	/* Notify interested parties */
3770	if (sim->path_id != CAM_XPT_PATH_ID) {
3771		struct cam_path path;
3772
3773		xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
3774			         CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3775		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
3776		cpi.ccb_h.func_code = XPT_PATH_INQ;
3777		xpt_action((union ccb *)&cpi);
3778		xpt_async(AC_PATH_REGISTERED, xpt_periph->path, &cpi);
3779		xpt_release_path(&path);
3780	}
3781	splx(s);
3782	return (CAM_SUCCESS);
3783}
3784
3785static int
3786xptnextfreebus(path_id_t startbus)
3787{
3788	struct cam_sim_config *sim_conf;
3789
3790	sim_conf = cam_sinit;
3791	while (sim_conf->sim_name != NULL) {
3792
3793		if (IS_SPECIFIED(sim_conf->pathid)
3794		 && (startbus == sim_conf->pathid)) {
3795			++startbus;
3796			/* Start the search over */
3797			sim_conf = cam_sinit;
3798		} else {
3799			sim_conf++;
3800		}
3801	}
3802	return (startbus);
3803}
3804
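/*
 * Map a SIM name/unit/bus tuple to a path_id, honoring any "hard
 * wired" scbus configuration entries.  If no entry matches, hand out
 * the next bus number that isn't wired down.
 */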
3805static int
3806xptpathid(const char *sim_name, int sim_unit,
3807	  int sim_bus, path_id_t *nextpath)
3808{
3809	struct cam_sim_config *sim_conf;
3810	path_id_t pathid;
3811
3812	pathid = CAM_XPT_PATH_ID;
3813	for (sim_conf = cam_sinit; sim_conf->sim_name != NULL; sim_conf++) {
3814
3815		if (!IS_SPECIFIED(sim_conf->pathid))
3816			continue;
3817
3818		if (!strcmp(sim_name, sim_conf->sim_name)
3819		 && (sim_unit == sim_conf->sim_unit)) {
3820
3821			if (IS_SPECIFIED(sim_conf->sim_bus)) {
3822				if (sim_bus == sim_conf->sim_bus) {
3823					pathid = sim_conf->pathid;
3824					break;
3825				}
3826			} else if (sim_bus == 0) {
3827				/* Unspecified matches bus 0 */
3828				pathid = sim_conf->pathid;
3829				break;
3830			} else {
3831				printf("Ambiguous scbus configuration for %s%d "
3832				       "bus %d, cannot wire down.  The kernel "
3833				       "config entry for scbus%d should "
3834				       "specify a controller bus.\n"
3835				       "Scbus will be assigned dynamically.\n",
3836				       sim_name, sim_unit, sim_bus,
3837				       sim_conf->pathid);
3838				break;
3839			}
3840		}
3841	}
3842
3843	if (pathid == CAM_XPT_PATH_ID) {
3844		pathid = xptnextfreebus(*nextpath);
3845		*nextpath = pathid + 1;
3846	}
3847	return (pathid);
3848}
3849
3850int32_t
3851xpt_bus_deregister(u_int8_t path_id)
3853{
3854	/* XXX */
3855	return (CAM_SUCCESS);
3856}
3857
3858void
3859xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
3860{
3861	struct cam_eb *bus;
3862	struct cam_et *target, *next_target;
3863	struct cam_ed *device, *next_device;
3864	int s;
3865
3866	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
3867
3868	s = splsoftcam();
3869
3870	bus = path->bus;
3871
3872	/*
3873	 * Freeze the SIM queue for SCSI_DELAY ms to
3874	 * allow the bus to settle.
3875	 */
3876	if (async_code == AC_BUS_RESET) {
3877		struct cam_sim *sim;
3878
3879		sim = bus->sim;
3880
3881		/*
3882		 * If there isn't already another timeout pending, go ahead
3883		 * and freeze the simq and set the timeout flag.  If there
3884		 * is another timeout pending, replace it with this
3885		 * timeout.  There could be two bus reset async broadcasts
3886		 * sent for some dual-channel controllers.
3887		 */
3888		if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) == 0) {
3889			xpt_freeze_simq(sim, 1);
3890			sim->flags |= CAM_SIM_REL_TIMEOUT_PENDING;
3891		} else
3892			untimeout(xpt_release_simq_timeout, sim, sim->c_handle);
3893
3894		sim->c_handle = timeout(xpt_release_simq_timeout,
3895					sim, (SCSI_DELAY * hz) / 1000);
3896	}
3897
3898	for (target = TAILQ_FIRST(&bus->et_entries);
3899	     target != NULL;
3900	     target = next_target) {
3901
3902		next_target = TAILQ_NEXT(target, links);
3903
3904		if (path->target != target
3905		 && path->target != NULL)
3906			continue;
3907
3908		for (device = TAILQ_FIRST(&target->ed_entries);
3909		     device != NULL;
3910		     device = next_device) {
3911			cam_status status;
3912			struct cam_path newpath;
3913
3914			next_device = TAILQ_NEXT(device, links);
3915
3916			if (path->device != device
3917			 && path->device != NULL)
3918				continue;
3919
3920			/*
3921			 * We need our own path with wildcards expanded to
3922			 * handle certain types of events.
3923			 */
3924			if ((async_code == AC_SENT_BDR)
3925			 || (async_code == AC_BUS_RESET)
3926			 || (async_code == AC_INQ_CHANGED))
3927				status = xpt_compile_path(&newpath, NULL,
3928							  bus->path_id,
3929							  target->target_id,
3930							  device->lun_id);
3931			else
3932				status = CAM_REQ_CMP_ERR;
3933
3934			if (status == CAM_REQ_CMP) {
3935
3936				/*
3937				 * Allow transfer negotiation to occur in a
3938				 * tag free environment.
3939				 */
3940				if (async_code == AC_SENT_BDR
3941				  || async_code == AC_BUS_RESET)
3942					xpt_toggle_tags(&newpath);
3943
3944				/*
3945				 * If we send a BDR, freeze the device queue
3946				 * for SCSI_DELAY ms to allow it to settle
3947				 * down.
3948				 */
3949				if (async_code == AC_SENT_BDR) {
3950					xpt_freeze_devq(&newpath, 1);
3951					/*
3952					 * Although this looks bad, it
3953					 * isn't as bad as it seems.  We're
3954					 * passing in a stack-allocated path
3955					 * that we then immediately release
3956					 * after scheduling a timeout to
3957					 * release the device queue.  So
3958					 * the path won't be around when
3959					 * the timeout fires, right?  Right.
3960					 * But it doesn't matter, since
3961					 * xpt_release_devq and its timeout
3962					 * function both take the device as
3963					 * an argument.  Theoretically, the
3964					 * device will still be there when
3965					 * the timeout fires, even though
3966					 * the path will be gone.
3967					 */
3968					cam_release_devq(
3969						   &newpath,
3970						   /*relsim_flags*/
3971						   RELSIM_RELEASE_AFTER_TIMEOUT,
3972						   /*reduction*/0,
3973						   /*timeout*/SCSI_DELAY,
3974						   /*getcount_only*/0);
3975				} else if (async_code == AC_INQ_CHANGED) {
3976					/*
3977					 * We've sent a start unit command, or
3978					 * something similar to a device that
3979					 * may have caused its inquiry data to
3980					 * change. So we re-scan the device to
3981					 * refresh the inquiry data for it.
3982					 */
3983					xpt_scan_lun(newpath.periph, &newpath,
3984						     CAM_EXPECT_INQ_CHANGE,
3985						     NULL);
3986				}
3987				xpt_release_path(&newpath);
3988			} else if (async_code == AC_LOST_DEVICE) {
3989				device->flags |= CAM_DEV_UNCONFIGURED;
3990			} else if (async_code == AC_TRANSFER_NEG) {
3991				struct ccb_trans_settings *settings;
3992
3993				settings =
3994				    (struct ccb_trans_settings *)async_arg;
3995				xpt_set_transfer_settings(settings, device,
3996							  /*async_update*/TRUE);
3997			}
3998
3999
4000			xpt_async_bcast(&device->asyncs,
4001					async_code,
4002					path,
4003					async_arg);
4004		}
4005	}
4006	xpt_async_bcast(&bus->asyncs, async_code,
4007			path, async_arg);
4008	splx(s);
4009}
4010
4011static void
4012xpt_async_bcast(struct async_list *async_head,
4013		u_int32_t async_code,
4014		struct cam_path *path, void *async_arg)
4015{
4016	struct async_node *cur_entry;
4017
4018	cur_entry = SLIST_FIRST(async_head);
4019	while (cur_entry != NULL) {
4020		struct async_node *next_entry;
4021		/*
4022		 * Grab the next list entry before we call the current
4023		 * entry's callback.  This is because the callback function
4024		 * can delete its async callback entry.
4025		 */
4026		next_entry = SLIST_NEXT(cur_entry, links);
4027		if ((cur_entry->event_enable & async_code) != 0)
4028			cur_entry->callback(cur_entry->callback_arg,
4029					    async_code, path,
4030					    async_arg);
4031		cur_entry = next_entry;
4032	}
4033}
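
/*
 * Sketch of how nodes end up on these async lists: a peripheral driver
 * registers its callback with an XPT_SASYNC_CB ccb.  "xxasync" is a
 * hypothetical handler:
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, periph->path, 5);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_SENT_BDR | AC_LOST_DEVICE;
 *	csa.callback = xxasync;
 *	csa.callback_arg = periph;
 *	xpt_action((union ccb *)&csa);
 */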
4034
4035u_int32_t
4036xpt_freeze_devq(struct cam_path *path, u_int count)
4037{
4038	int s;
4039	struct ccb_hdr *ccbh;
4040
4041	s = splcam();
4042	path->device->qfrozen_cnt += count;
4043
4044	/*
4045	 * Mark the last CCB in the queue as needing
4046	 * to be requeued if the driver hasn't
4047	 * changed its state yet.  This fixes a race
4048	 * where a ccb is just about to be queued to
4049	 * a controller driver when its interrupt routine
4050	 * freezes the queue.  To completely close the
4051	 * hole, controller drivers must check to see
4052	 * if a ccb's status is still CAM_REQ_INPROG
4053	 * under spl protection just before they queue
4054	 * the CCB.  See ahc_action/ahc_freeze_devq for
4055	 * an example.
4056	 */
4057	ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_list);
4058	if (ccbh && ccbh->status == CAM_REQ_INPROG)
4059		ccbh->status = CAM_REQUEUE_REQ;
4060	splx(s);
4061	return (path->device->qfrozen_cnt);
4062}
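
/*
 * Controller-side sketch of the check described above, modeled loosely
 * on the ahc driver (names illustrative):
 *
 *	s = splcam();
 *	if (ccb->ccb_h.status == CAM_REQ_INPROG) {
 *		(hand the ccb to the hardware)
 *	} else {
 *		(status is CAM_REQUEUE_REQ; complete the ccb with
 *		 xpt_done() so it is requeued)
 *	}
 *	splx(s);
 */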
4063
4064u_int32_t
4065xpt_freeze_simq(struct cam_sim *sim, u_int count)
4066{
4067	sim->devq->send_queue.qfrozen_cnt += count;
4068	if (sim->devq->active_dev != NULL) {
4069		struct ccb_hdr *ccbh;
4070
4071		ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
4072				  ccb_hdr_list);
4073		if (ccbh && ccbh->status == CAM_REQ_INPROG)
4074			ccbh->status = CAM_REQUEUE_REQ;
4075	}
4076	return (sim->devq->send_queue.qfrozen_cnt);
4077}
4078
4079static void
4080xpt_release_devq_timeout(void *arg)
4081{
4082	struct cam_ed *device;
4083
4084	device = (struct cam_ed *)arg;
4085
4086	xpt_release_devq(device, /*run_queue*/TRUE);
4087}
4088
4089void
4090xpt_release_devq(struct cam_ed *dev, int run_queue)
4091{
4092	int	rundevq;
4093	int	s;
4094
4095	rundevq = 0;
4096	s = splcam();
4097	if (dev->qfrozen_cnt > 0) {
4098
4099		dev->qfrozen_cnt--;
4100		if (dev->qfrozen_cnt == 0) {
4101
4102			/*
4103			 * No longer need to wait for a successful
4104			 * command completion.
4105			 */
4106			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4107
4108			/*
4109			 * Remove any timeouts that might be scheduled
4110			 * to release this queue.
4111			 */
4112			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4113				untimeout(xpt_release_devq_timeout, dev,
4114					  dev->c_handle);
4115				dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4116			}
4117
4118			/*
4119			 * Now that we are unfrozen, schedule the
4120			 * device so any pending transactions are
4121			 * run.
4122			 */
4123			if ((dev->ccbq.queue.entries > 0)
4124			 && (xpt_schedule_dev_sendq(dev->target->bus, dev))
4125			 && (run_queue != 0)) {
4126				rundevq = 1;
4127			}
4128		}
4129	}
4130	splx(s);
4131	if (rundevq != 0)
4132		xpt_run_dev_sendq(dev->target->bus);
4133}
4134
4135void
4136xpt_release_simq(struct cam_sim *sim, int run_queue)
4137{
4138	int	s;
4139	struct	camq *sendq;
4140
4141	sendq = &(sim->devq->send_queue);
4142	s = splcam();
4143	if (sendq->qfrozen_cnt > 0) {
4144
4145		sendq->qfrozen_cnt--;
4146		if (sendq->qfrozen_cnt == 0) {
4147
4148			/*
4149			 * If there is a timeout scheduled to release this
4150			 * sim queue, remove it.  The queue frozen count is
4151			 * already at 0.
4152			 */
4153			if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4154				untimeout(xpt_release_simq_timeout, sim,
4155					  sim->c_handle);
4156				sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4157			}
4158
4159			splx(s);
4160
4161			if (run_queue) {
4162				/*
4163				 * Now that we are unfrozen, run the send queue.
4164				 */
4165				xpt_run_dev_sendq(xpt_find_bus(sim->path_id));
4166			}
4167		} else
4168			splx(s);
4169	} else
4170		splx(s);
4171}
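
/*
 * Freeze/release pairing sketch: a SIM that freezes its queue while
 * recovering from an error releases it afterwards, optionally kicking
 * the send queue:
 *
 *	xpt_freeze_simq(sim, 1);
 *	(perform recovery)
 *	xpt_release_simq(sim, TRUE);
 */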
4172
4173static void
4174xpt_release_simq_timeout(void *arg)
4175{
4176	struct cam_sim *sim;
4177
4178	sim = (struct cam_sim *)arg;
4179	xpt_release_simq(sim, /* run_queue */ TRUE);
4180}
4181
4182void
4183xpt_done(union ccb *done_ccb)
4184{
4185	int s;
4186
4187	s = splcam();
4188
4189	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4190	switch (done_ccb->ccb_h.func_code) {
4191	case XPT_SCSI_IO:
4192	case XPT_ENG_EXEC:
4193	case XPT_TARGET_IO:
4194	case XPT_ACCEPT_TARGET_IO:
4195	case XPT_CONT_TARGET_IO:
4196	case XPT_SCAN_BUS:
4197	case XPT_SCAN_LUN:
4198	{
4199		/*
4200		 * Queue up the request for handling by our SWI handler;
4201		 * this is done for the "non-immediate" types of ccbs.
4202		 */
4203		switch (done_ccb->ccb_h.path->periph->type) {
4204		case CAM_PERIPH_BIO:
4205			TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
4206					  sim_links.tqe);
4207			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4208			setsoftcambio();
4209			break;
4210		case CAM_PERIPH_NET:
4211			TAILQ_INSERT_TAIL(&cam_netq, &done_ccb->ccb_h,
4212					  sim_links.tqe);
4213			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4214			setsoftcamnet();
4215			break;
4216		}
4217		break;
4218	}
4219	default:
4220		break;
4221	}
4222	splx(s);
4223}
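
/*
 * Completion sketch from a SIM's interrupt routine; "frozen" is a
 * hypothetical flag meaning the SIM froze the device queue for this
 * request:
 *
 *	ccb->ccb_h.status = CAM_REQ_CMP;
 *	if (frozen)
 *		ccb->ccb_h.status |= CAM_DEV_QFRZN;
 *	xpt_done(ccb);
 */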
4224
4225union ccb *
4226xpt_alloc_ccb(void)
4227{
4228	union ccb *new_ccb;
4229
4230	new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_WAITOK);
4231	return (new_ccb);
4232}
4233
4234void
4235xpt_free_ccb(union ccb *free_ccb)
4236{
4237	free(free_ccb, M_DEVBUF);
4238}
4239
4240
4241
4242/* Private XPT functions */
4243
4244/*
4245 * Get a CAM control block for the caller. Charge the structure to the device
4246 * referenced by the path.  If the this device has no 'credits' then the
4247 * device already has the maximum number of outstanding operations under way
4248 * and we return NULL. If we don't have sufficient resources to allocate more
4249 * ccbs, we also return NULL.
4250 */
4251static union ccb *
4252xpt_get_ccb(struct cam_ed *device)
4253{
4254	union ccb *new_ccb;
4255	int s;
4256
4257	s = splsoftcam();
4258	if ((new_ccb = (union ccb *)SLIST_FIRST(&ccb_freeq)) == NULL) {
4259		new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_NOWAIT);
4260		if (new_ccb == NULL) {
4261			splx(s);
4262			return (NULL);
4263		}
4264		callout_handle_init(&new_ccb->ccb_h.timeout_ch);
4265		SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
4266				  xpt_links.sle);
4267		xpt_ccb_count++;
4268	}
4269	cam_ccbq_take_opening(&device->ccbq);
4270	SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
4271	splx(s);
4272	return (new_ccb);
4273}
4274
4275
4276static struct cam_et *
4277xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4278{
4279	struct cam_et *target;
4280
4281	target = (struct cam_et *)malloc(sizeof(*target), M_DEVBUF, M_NOWAIT);
4282	if (target != NULL) {
4283		struct cam_et *cur_target;
4284
4285		target->bus = bus;
4286		target->target_id = target_id;
4287		target->refcount = 1;
4288		TAILQ_INIT(&target->ed_entries);
4289
4290		/* Insertion sort into our bus's target list */
4291		cur_target = TAILQ_FIRST(&bus->et_entries);
4292		while (cur_target != NULL && cur_target->target_id < target_id)
4293			cur_target = TAILQ_NEXT(cur_target, links);
4294
4295		if (cur_target != NULL) {
4296			TAILQ_INSERT_BEFORE(cur_target, target, links);
4297		} else {
4298			TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4299		}
4300		bus->generation++;
4301	}
4302	return (target);
4303}
4304
4305void
4306xpt_release_target(struct cam_eb *bus, struct cam_et *target)
4307{
4308	if ((--target->refcount == 0)
4309	 && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
4310		TAILQ_REMOVE(&bus->et_entries, target, links);
4311		bus->generation++;
4312		free(target, M_DEVBUF);
4313	}
4314}
4315
4316static struct cam_ed *
4317xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4318{
4319	struct	 cam_ed *device;
4320	struct 	 cam_devq *devq;
4321	int32_t	 status;
4322	int	 s;
4323
4324	s = splsoftcam();
4325	/* Make space for us in the device queue on our bus */
4326	devq = bus->sim->devq;
4327	status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
4328	splx(s);
4329
4330	if (status != CAM_REQ_CMP) {
4331		device = NULL;
4332	} else {
4333		device = (struct cam_ed *)malloc(sizeof(*device),
4334						 M_DEVBUF, M_NOWAIT);
4335	}
4336
4337	if (device != NULL) {
4338		struct cam_ed *cur_device;
4339
4340		bzero(device, sizeof(*device));
4341
4342		SLIST_INIT(&device->asyncs);
4343		SLIST_INIT(&device->periphs);
4344		callout_handle_init(&device->c_handle);
4345		device->refcount = 1;
4346		device->flags |= CAM_DEV_UNCONFIGURED;
4347
4348		cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
4349		device->alloc_ccb_entry.device = device;
4350		cam_init_pinfo(&device->send_ccb_entry.pinfo);
4351		device->send_ccb_entry.device = device;
4352
4353		device->target = target;
4354
4355		device->lun_id = lun_id;
4356
4357		/* Initialize our queues */
4358		if (camq_init(&device->drvq, 0) != 0) {
4359			free(device, M_DEVBUF);
4360			return (NULL);
4361		}
4362
4363		if (cam_ccbq_init(&device->ccbq,
4364				  bus->sim->max_dev_openings) != 0) {
4365			camq_fini(&device->drvq);
4366			free(device, M_DEVBUF);
4367			return (NULL);
4368		}
4369		s = splsoftcam();
4370		/*
4371		 * XXX should be limited by number of CCBs this bus can
4372		 * do.
4373		 */
4374		xpt_max_ccbs += device->ccbq.devq_openings;
4375		/* Insertion sort into our target's device list */
4376		cur_device = TAILQ_FIRST(&target->ed_entries);
4377		while (cur_device != NULL && cur_device->lun_id < lun_id)
4378			cur_device = TAILQ_NEXT(cur_device, links);
4379		if (cur_device != NULL) {
4380			TAILQ_INSERT_BEFORE(cur_device, device, links);
4381		} else {
4382			TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4383		}
4384		target->generation++;
4385		splx(s);
4386	}
4387	return (device);
4388}
4389
4390static void
4391xpt_release_device(struct cam_eb *bus, struct cam_et *target,
4392		   struct cam_ed *device)
4393{
4394	int s;
4395
4396	if ((--device->refcount == 0)
4397	 && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
4398		struct cam_devq *devq;
4399
4400		s = splsoftcam();
4401		TAILQ_REMOVE(&target->ed_entries, device,links);
4402		target->generation++;
4403		xpt_max_ccbs -= device->ccbq.devq_openings;
4404		free(device, M_DEVBUF);
4405		/* Release our slot in the devq */
4406		devq = bus->sim->devq;
4407		cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
4408		splx(s);
4409	}
4410}
4411
4412static u_int32_t
4413xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
4414{
4415	int	s;
4416	int	diff;
4417	int	result;
4418	struct	cam_ed *dev;
4419
4420	dev = path->device;
4421	s = splsoftcam();
4422
4423	diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
4424	result = cam_ccbq_resize(&dev->ccbq, newopenings);
4425	if (result == CAM_REQ_CMP && (diff < 0)) {
4426		dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
4427	}
4428	/* Adjust the global limit */
4429	xpt_max_ccbs += diff;
4430	splx(s);
4431	return (result);
4432}
4433
4434static struct cam_eb *
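/*
 * Look up a bus by path_id.  Unlike the target and device lookups
 * below, this does not take a reference on the bus.
 */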
4435xpt_find_bus(path_id_t path_id)
4436{
4437	struct cam_eb *bus;
4438
4439	for (bus = TAILQ_FIRST(&xpt_busses);
4440	     bus != NULL;
4441	     bus = TAILQ_NEXT(bus, links)) {
4442		if (bus->path_id == path_id)
4443			break;
4444	}
4445	return (bus);
4446}
4447
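/*
 * Look up a target by id, taking a reference on success.  Callers pair
 * this with xpt_release_target().
 */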
4448static struct cam_et *
4449xpt_find_target(struct cam_eb *bus, target_id_t	target_id)
4450{
4451	struct cam_et *target;
4452
4453	for (target = TAILQ_FIRST(&bus->et_entries);
4454	     target != NULL;
4455	     target = TAILQ_NEXT(target, links)) {
4456		if (target->target_id == target_id) {
4457			target->refcount++;
4458			break;
4459		}
4460	}
4461	return (target);
4462}
4463
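/*
 * Look up a device (lun) on a target, taking a reference on success.
 * Callers pair this with xpt_release_device().
 */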
4464static struct cam_ed *
4465xpt_find_device(struct cam_et *target, lun_id_t lun_id)
4466{
4467	struct cam_ed *device;
4468
4469	for (device = TAILQ_FIRST(&target->ed_entries);
4470	     device != NULL;
4471	     device = TAILQ_NEXT(device, links)) {
4472		if (device->lun_id == lun_id) {
4473			device->refcount++;
4474			break;
4475		}
4476	}
4477	return (device);
4478}
4479
4480typedef struct {
4481	union	ccb *request_ccb;
4482	struct 	ccb_pathinq *cpi;
4483	int	pending_count;
4484} xpt_scan_bus_info;
4485
4486/*
4487 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
4488 * As the scan progresses, xpt_scan_bus is used as the
4489 * callback on completion function.
4490 */
4491static void
4492xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
4493{
4494	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
4495		  ("xpt_scan_bus\n"));
4496	switch (request_ccb->ccb_h.func_code) {
4497	case XPT_SCAN_BUS:
4498	{
4499		xpt_scan_bus_info *scan_info;
4500		union	ccb *work_ccb;
4501		struct	cam_path *path;
4502		u_int	i;
4503		u_int	max_target;
4504		u_int	initiator_id;
4505
4506		/* Find out the characteristics of the bus */
4507		work_ccb = xpt_alloc_ccb();
4508		xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
4509			      request_ccb->ccb_h.pinfo.priority);
4510		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
4511		xpt_action(work_ccb);
4512		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
4513			request_ccb->ccb_h.status = work_ccb->ccb_h.status;
4514			xpt_free_ccb(work_ccb);
4515			xpt_done(request_ccb);
4516			return;
4517		}
4518
4519		/* Save some state for use while we probe for devices */
4520		scan_info = (xpt_scan_bus_info *)
4521		    malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_WAITOK);
4522		scan_info->request_ccb = request_ccb;
4523		scan_info->cpi = &work_ccb->cpi;
4524
4525		/* Cache on our stack so we can work asynchronously */
4526		max_target = scan_info->cpi->max_target;
4527		initiator_id = scan_info->cpi->initiator_id;
4528
4529		/*
4530		 * Don't count the initiator if the
4531		 * initiator is addressable.
4532		 */
4533		scan_info->pending_count = max_target + 1;
4534		if (initiator_id <= max_target)
4535			scan_info->pending_count--;
4536
4537		for (i = 0; i <= max_target; i++) {
4538			cam_status status;
4539			if (i == initiator_id)
4540				continue;
4541
4542			status = xpt_create_path(&path, xpt_periph,
4543						 request_ccb->ccb_h.path_id,
4544						 i, 0);
4545			if (status != CAM_REQ_CMP) {
4546				printf("xpt_scan_bus: xpt_create_path failed"
4547				       " with status %#x, bus scan halted\n",
4548				       status);
4549				break;
4550			}
4551			work_ccb = xpt_alloc_ccb();
4552			xpt_setup_ccb(&work_ccb->ccb_h, path,
4553				      request_ccb->ccb_h.pinfo.priority);
4554			work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
4555			work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
4556			work_ccb->ccb_h.ppriv_ptr0 = scan_info;
4557			work_ccb->crcn.flags = request_ccb->crcn.flags;
4558#if 0
4559			printf("xpt_scan_bus: probing %d:%d:%d\n",
4560				request_ccb->ccb_h.path_id, i, 0);
4561#endif
4562			xpt_action(work_ccb);
4563		}
4564		break;
4565	}
4566	case XPT_SCAN_LUN:
4567	{
4568		xpt_scan_bus_info *scan_info;
4569		path_id_t path_id;
4570		target_id_t target_id;
4571		lun_id_t lun_id;
4572
4573		/* Reuse the same CCB to query if a device was really found */
4574		scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
4575		xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
4576			      request_ccb->ccb_h.pinfo.priority);
4577		request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
4578
4579		path_id = request_ccb->ccb_h.path_id;
4580		target_id = request_ccb->ccb_h.target_id;
4581		lun_id = request_ccb->ccb_h.target_lun;
4582		xpt_action(request_ccb);
4583
4584#if 0
4585		printf("xpt_scan_bus: got back probe from %d:%d:%d\n",
4586			path_id, target_id, lun_id);
4587#endif
4588
4589		if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
4590			struct cam_ed *device;
4591			struct cam_et *target;
4592
4593			/*
4594			 * If we already probed lun 0 successfully, or
4595			 * we have additional configured luns on this
4596			 * target that might have "gone away", go onto
4597			 * the next lun.
4598			 */
4599			target = request_ccb->ccb_h.path->target;
4600			device = TAILQ_FIRST(&target->ed_entries);
4601			if (device != NULL)
4602				device = TAILQ_NEXT(device, links);
4603
4604			if ((lun_id != 0) || (device != NULL)) {
4605				/* Try the next lun */
4606				lun_id++;
4607			}
4608		} else {
4609			struct cam_ed *device;
4610
4611			device = request_ccb->ccb_h.path->device;
4612
4613			if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
4614				/* Try the next lun */
4615				lun_id++;
4616			}
4617		}
4618
4619		xpt_free_path(request_ccb->ccb_h.path);
4620
4621		/* Check Bounds */
4622		if ((lun_id == request_ccb->ccb_h.target_lun)
4623		 || lun_id > scan_info->cpi->max_lun) {
4624			/* We're done */
4625
4626			xpt_free_ccb(request_ccb);
4627			scan_info->pending_count--;
4628			if (scan_info->pending_count == 0) {
4629				xpt_free_ccb((union ccb *)scan_info->cpi);
4630				request_ccb = scan_info->request_ccb;
4631				free(scan_info, M_TEMP);
4632				request_ccb->ccb_h.status = CAM_REQ_CMP;
4633				xpt_done(request_ccb);
4634			}
4635		} else {
4636			/* Try the next device */
4637			struct cam_path *path;
4638			cam_status status;
4639
4640			path = request_ccb->ccb_h.path;
4641			status = xpt_create_path(&path, xpt_periph,
4642						 path_id, target_id, lun_id);
4643			if (status != CAM_REQ_CMP) {
4644				printf("xpt_scan_bus: xpt_create_path failed "
4645				       "with status %#x, halting LUN scan\n",
4646				       status);
4647				xpt_free_ccb(request_ccb);
4648				scan_info->pending_count--;
4649				if (scan_info->pending_count == 0) {
4650					xpt_free_ccb(
4651						(union ccb *)scan_info->cpi);
4652					request_ccb = scan_info->request_ccb;
4653					free(scan_info, M_TEMP);
4654					request_ccb->ccb_h.status = CAM_REQ_CMP;
4655					xpt_done(request_ccb);
4656				}
4657				break;
4658			}
4659			xpt_setup_ccb(&request_ccb->ccb_h, path,
4660				      request_ccb->ccb_h.pinfo.priority);
4661			request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
4662			request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
4663			request_ccb->ccb_h.ppriv_ptr0 = scan_info;
4664			request_ccb->crcn.flags =
4665				scan_info->request_ccb->crcn.flags;
4666#if 0
4667			xpt_print_path(path);
4668			printf("xpt_scan bus probing\n");
4669#endif
4670			xpt_action(request_ccb);
4671		}
4672		break;
4673	}
4674	default:
4675		break;
4676	}
4677}
4678
4679typedef enum {
4680	PROBE_TUR,
4681	PROBE_INQUIRY,
4682	PROBE_MODE_SENSE,
4683	PROBE_SERIAL_NUM,
4684	PROBE_TUR_FOR_NEGOTIATION
4685} probe_action;
4686
4687typedef enum {
4688	PROBE_INQUIRY_CKSUM	= 0x01,
4689	PROBE_SERIAL_CKSUM	= 0x02,
4690	PROBE_NO_ANNOUNCE	= 0x04
4691} probe_flags;
4692
4693typedef struct {
4694	TAILQ_HEAD(, ccb_hdr) request_ccbs;
4695	probe_action	action;
4696	union ccb	saved_ccb;
4697	probe_flags	flags;
4698	MD5_CTX		context;
4699	u_int8_t	digest[16];
4700} probe_softc;
4701
4702static void
4703xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
4704	     cam_flags flags, union ccb *request_ccb)
4705{
4706	u_int32_t unit;
4707	cam_status status;
4708	struct cam_path *new_path;
4709	struct cam_periph *old_periph;
4710	int s;
4711
4712	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
4713		  ("xpt_scan_lun\n"));
4714
4715	if (request_ccb == NULL) {
4716		request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT);
4717		if (request_ccb == NULL) {
4718			xpt_print_path(path);
4719			printf("xpt_scan_lun: can't allocate CCB, can't "
4720			       "continue\n");
4721			return;
4722		}
4723		new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT);
4724		if (new_path == NULL) {
4725			xpt_print_path(path);
4726			printf("xpt_scan_lun: can't allocate path, can't "
4727			       "continue\n");
4728			free(request_ccb, M_TEMP);
4729			return;
4730		}
4731		status = xpt_compile_path(new_path, periph, path->bus->path_id,
4732					  path->target->target_id,
4733					  path->device->lun_id);
4734
4735		if (status != CAM_REQ_CMP) {
4736			xpt_print_path(path);
4737			printf("xpt_scan_lun: can't compile path, can't "
4738			       "continue\n");
4739			free(request_ccb, M_TEMP);
4740			free(new_path, M_TEMP);
4741			return;
4742		}
4743		xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
4744		request_ccb->ccb_h.cbfcnp = xptscandone;
4745		request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
4746		request_ccb->crcn.flags = flags;
4747	}
4748
4749	s = splsoftcam();
4750	if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
4751		probe_softc *softc;
4752
4753		softc = (probe_softc *)old_periph->softc;
4754		TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
4755				  periph_links.tqe);
4756	} else {
4757		status = cam_periph_alloc(proberegister, probecleanup,
4758					  probestart, "probe",
4759					  CAM_PERIPH_BIO,
4760					  request_ccb->ccb_h.path, NULL, 0,
4761					  request_ccb);
4762
4763		if (status != CAM_REQ_CMP) {
4764			xpt_print_path(path);
4765			printf("xpt_scan_lun: cam_alloc_periph returned an "
4766			       "error, can't continue probe\n");
4767			request_ccb->ccb_h.status = status;
4768			xpt_done(request_ccb);
4769		}
4770	}
4771	splx(s);
4772}
4773
4774static void
4775xptscandone(struct cam_periph *periph, union ccb *done_ccb)
4776{
4777	xpt_release_path(done_ccb->ccb_h.path);
4778	free(done_ccb->ccb_h.path, M_TEMP);
4779	free(done_ccb, M_TEMP);
4780}
4781
4782static cam_status
4783proberegister(struct cam_periph *periph, void *arg)
4784{
4785	struct ccb_getdev *cgd;
4786	probe_softc *softc;
4787	union ccb *ccb;
4788
4789	cgd = (struct ccb_getdev *)arg;
4790	if (periph == NULL) {
4791		printf("proberegister: periph was NULL!!\n");
4792		return(CAM_REQ_CMP_ERR);
4793	}
4794
4795	if (cgd == NULL) {
4796		printf("proberegister: no getdev CCB, can't register device\n");
4797		return(CAM_REQ_CMP_ERR);
4798	}
4799
4800	softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT);
4801
4802	if (softc == NULL) {
4803		printf("proberegister: Unable to probe new device. "
4804		       "Unable to allocate softc\n");
4805		return(CAM_REQ_CMP_ERR);
4806	}
4807	ccb = (union ccb *)cgd;
4808	TAILQ_INIT(&softc->request_ccbs);
4809	TAILQ_INSERT_TAIL(&softc->request_ccbs, &ccb->ccb_h, periph_links.tqe);
4810	softc->flags = 0;
4811	periph->softc = softc;
4812	cam_periph_acquire(periph);
4813	probeschedule(periph);
4814	return(CAM_REQ_CMP);
4815}
4816
4817static void
4818probeschedule(struct cam_periph *periph)
4819{
4820	union ccb *ccb;
4821	probe_softc *softc;
4822
4823	softc = (probe_softc *)periph->softc;
4824	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
4825
4826	/*
4827	 * If a device has gone away and another device, or the same one,
4828	 * is back in the same place, it should have a unit attention
4829	 * condition pending.  It will not report the unit attention in
4830	 * response to an inquiry, which may leave invalid transfer
4831	 * negotiations in effect.  The TUR will reveal the unit attention
4832	 * condition.  Only send the TUR for lun 0, since some devices
4833	 * will get confused by commands other than inquiry to non-existent
4834 * luns.  If you think a device has gone away, start your scan from
4835 * lun 0.  This will ensure that any bogus transfer settings are
4836	 * invalidated.
4837	 */
4838	if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED)==0)
4839	 && (ccb->ccb_h.target_lun == 0))
4840		softc->action = PROBE_TUR;
4841	else
4842		softc->action = PROBE_INQUIRY;
4843
4844	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
4845		softc->flags |= PROBE_NO_ANNOUNCE;
4846	else
4847		softc->flags &= ~PROBE_NO_ANNOUNCE;
4848
4849	xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
4850}
4851
4852static void
4853probestart(struct cam_periph *periph, union ccb *start_ccb)
4854{
4855	/* Probe the device that our peripheral driver points to */
4856	struct ccb_scsiio *csio;
4857	probe_softc *softc;
4858
4859	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
4860
4861	softc = (probe_softc *)periph->softc;
4862	csio = &start_ccb->csio;
4863
4864	switch (softc->action) {
4865	case PROBE_TUR:
4866	case PROBE_TUR_FOR_NEGOTIATION:
4867	{
4868		scsi_test_unit_ready(csio,
4869				     /*retries*/4,
4870				     probedone,
4871				     MSG_SIMPLE_Q_TAG,
4872				     SSD_FULL_SIZE,
4873				     /*timeout*/60000);
4874		break;
4875	}
4876	case PROBE_INQUIRY:
4877	{
4878		struct scsi_inquiry_data *inq_buf;
4879
4880		inq_buf = &periph->path->device->inq_data;
4881		/*
4882		 * If the device is currently configured, we calculate an
4883		 * MD5 checksum of the inquiry data, and if the serial number
4884		 * length is greater than 0, add the serial number data
4885		 * into the checksum as well.  Once the inquiry and the
4886		 * serial number check finish, we attempt to figure out
4887		 * whether we still have the same device.
4888		 */
4889		if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
4890
4891			MD5Init(&softc->context);
4892			MD5Update(&softc->context, (unsigned char *)inq_buf,
4893				  sizeof(struct scsi_inquiry_data));
4894			softc->flags |= PROBE_INQUIRY_CKSUM;
4895			if (periph->path->device->serial_num_len > 0) {
4896				MD5Update(&softc->context,
4897					  periph->path->device->serial_num,
4898					  periph->path->device->serial_num_len);
4899				softc->flags |= PROBE_SERIAL_CKSUM;
4900			}
4901			MD5Final(softc->digest, &softc->context);
4902		}
4903
4904		scsi_inquiry(csio,
4905			     /*retries*/4,
4906			     probedone,
4907			     MSG_SIMPLE_Q_TAG,
4908			     (u_int8_t *)inq_buf,
4909			     sizeof(*inq_buf),
4910			     /*evpd*/FALSE,
4911			     /*page_code*/0,
4912			     SSD_MIN_SIZE,
4913			     /*timeout*/60 * 1000);
4914		break;
4915	}
4916	case PROBE_MODE_SENSE:
4917	{
4918		void  *mode_buf;
4919		int    mode_buf_len;
4920
4921		mode_buf_len = sizeof(struct scsi_mode_header_6)
4922			     + sizeof(struct scsi_mode_blk_desc)
4923			     + sizeof(struct scsi_control_page);
4924		mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT);
4925		if (mode_buf != NULL) {
4926			scsi_mode_sense(csio,
4927					/*retries*/4,
4928					probedone,
4929					MSG_SIMPLE_Q_TAG,
4930					/*dbd*/FALSE,
4931					SMS_PAGE_CTRL_CURRENT,
4932					SMS_CONTROL_MODE_PAGE,
4933					mode_buf,
4934					mode_buf_len,
4935					SSD_FULL_SIZE,
4936					/*timeout*/60000);
4937			break;
4938		}
4939		xpt_print_path(periph->path);
4940		printf("Unable to mode sense control page - malloc failure\n");
4941		softc->action = PROBE_SERIAL_NUM;
4942		/* FALLTHROUGH */
4943	}
4944	case PROBE_SERIAL_NUM:
4945	{
4946		struct scsi_vpd_unit_serial_number *serial_buf;
4947		struct cam_ed* device;
4948
4949		serial_buf = NULL;
4950		device = periph->path->device;
4951		device->serial_num = NULL;
4952		device->serial_num_len = 0;
4953
4954		if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0)
4955			serial_buf = (struct scsi_vpd_unit_serial_number *)
4956				malloc(sizeof(*serial_buf), M_TEMP, M_NOWAIT);
4957
4958		if (serial_buf != NULL) {
4959			bzero(serial_buf, sizeof(*serial_buf));
4960			scsi_inquiry(csio,
4961				     /*retries*/4,
4962				     probedone,
4963				     MSG_SIMPLE_Q_TAG,
4964				     (u_int8_t *)serial_buf,
4965				     sizeof(*serial_buf),
4966				     /*evpd*/TRUE,
4967				     SVPD_UNIT_SERIAL_NUMBER,
4968				     SSD_MIN_SIZE,
4969				     /*timeout*/60 * 1000);
4970			break;
4971		}
4972		/*
4973		 * We'll have to do without; let our probedone
4974		 * routine finish up for us.
4975		 */
4976		start_ccb->csio.data_ptr = NULL;
4977		probedone(periph, start_ccb);
4978		return;
4979	}
4980	}
4981	xpt_action(start_ccb);
4982}
4983
4984static void
4985probedone(struct cam_periph *periph, union ccb *done_ccb)
4986{
4987	probe_softc *softc;
4988	struct cam_path *path;
4989	u_int32_t  priority;
4990
4991	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
4992
4993	softc = (probe_softc *)periph->softc;
4994	path = done_ccb->ccb_h.path;
4995	priority = done_ccb->ccb_h.pinfo.priority;
4996
4997	switch (softc->action) {
4998	case PROBE_TUR:
4999	{
5000		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5001
5002			if (cam_periph_error(done_ccb, 0,
5003					     SF_NO_PRINT, NULL) == ERESTART)
5004				return;
5005			else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5006				/* Don't wedge the queue */
5007				xpt_release_devq(done_ccb->ccb_h.path->device,
5008						 /*run_queue*/TRUE);
5009		}
5010		softc->action = PROBE_INQUIRY;
5011		xpt_release_ccb(done_ccb);
5012		xpt_schedule(periph, priority);
5013		return;
5014	}
5015	case PROBE_INQUIRY:
5016	{
5017		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5018			struct scsi_inquiry_data *inq_buf;
5019			u_int8_t periph_qual;
5020			u_int8_t periph_dtype;
5021
5022			inq_buf = &path->device->inq_data;
5023
5024			periph_qual = SID_QUAL(inq_buf);
5025			periph_dtype = SID_TYPE(inq_buf);
5026			if (periph_dtype != T_NODEVICE) {
5027				switch(periph_qual) {
5028				case SID_QUAL_LU_CONNECTED:
5029				{
5030					xpt_find_quirk(path->device);
5031
5032					if ((inq_buf->flags & SID_CmdQue) != 0)
5033						softc->action =
5034						    PROBE_MODE_SENSE;
5035					else
5036						softc->action =
5037						    PROBE_SERIAL_NUM;
5038
5039					path->device->flags &=
5040						~CAM_DEV_UNCONFIGURED;
5041
5042					xpt_release_ccb(done_ccb);
5043					xpt_schedule(periph, priority);
5044					return;
5045				}
5046				default:
5047					break;
5048				}
5049			}
5050		} else if (cam_periph_error(done_ccb, 0,
5051					    done_ccb->ccb_h.target_lun > 0
5052					    ? SF_RETRY_UA|SF_QUIET_IR
5053					    : SF_RETRY_UA,
5054					    &softc->saved_ccb) == ERESTART) {
5055			return;
5056		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5057			/* Don't wedge the queue */
5058			xpt_release_devq(done_ccb->ccb_h.path->device,
5059					 /*run_queue*/TRUE);
5060		}
5061		/*
5062		 * If we get to this point, we got an error status back
5063		 * from the inquiry and the error status doesn't require
5064		 * automatically retrying the command.  Therefore, the
5065		 * inquiry failed.  If we had inquiry information before
5066		 * for this device, but this latest inquiry command failed,
5067		 * the device has probably gone away.  If this device isn't
5068		 * already marked unconfigured, notify the peripheral
5069		 * drivers that this device is no more.
5070		 */
5071		if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5072			/* Send the async notification. */
5073			xpt_async(AC_LOST_DEVICE, path, NULL);
5074
5075		xpt_release_ccb(done_ccb);
5076		break;
5077	}
5078	case PROBE_MODE_SENSE:
5079	{
5080		struct ccb_scsiio *csio;
5081		struct scsi_mode_header_6 *mode_hdr;
5082
5083		csio = &done_ccb->csio;
5084		mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
5085		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5086			struct scsi_control_page *page;
5087			u_int8_t *offset;
5088
5089			offset = ((u_int8_t *)&mode_hdr[1])
5090			    + mode_hdr->blk_desc_len;
5091			page = (struct scsi_control_page *)offset;
5092			path->device->queue_flags = page->queue_flags;
5093		} else if (cam_periph_error(done_ccb, 0,
5094					    SF_RETRY_UA|SF_NO_PRINT,
5095					    &softc->saved_ccb) == ERESTART) {
5096			return;
5097		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5098			/* Don't wedge the queue */
5099			xpt_release_devq(done_ccb->ccb_h.path->device,
5100					 /*run_queue*/TRUE);
5101		}
5102		xpt_release_ccb(done_ccb);
5103		free(mode_hdr, M_TEMP);
5104		softc->action = PROBE_SERIAL_NUM;
5105		xpt_schedule(periph, priority);
5106		return;
5107	}
5108	case PROBE_SERIAL_NUM:
5109	{
5110		struct ccb_scsiio *csio;
5111		struct scsi_vpd_unit_serial_number *serial_buf;
5113		int changed;
5114		int have_serialnum;
5115
5116		changed = 1;
5117		have_serialnum = 0;
5118		csio = &done_ccb->csio;
5120		serial_buf =
5121		    (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
5122
5123		/* Clean up from previous instance of this device */
5124		if (path->device->serial_num != NULL) {
5125			free(path->device->serial_num, M_DEVBUF);
5126			path->device->serial_num = NULL;
5127			path->device->serial_num_len = 0;
5128		}
5129
5130		if (serial_buf == NULL) {
5131			/*
5132			 * Don't process the command, as it was never sent.
5133			 */
5134		} else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
5135		      && (serial_buf->length > 0)) {
5136
5137			have_serialnum = 1;
5138			path->device->serial_num =
5139				(u_int8_t *)malloc((serial_buf->length + 1),
5140						   M_DEVBUF, M_NOWAIT);
5141			if (path->device->serial_num != NULL) {
5142				bcopy(serial_buf->serial_num,
5143				      path->device->serial_num,
5144				      serial_buf->length);
5145				path->device->serial_num_len =
5146				    serial_buf->length;
5147				path->device->serial_num[serial_buf->length]
5148				    = '\0';
5149			}
5150		} else if (cam_periph_error(done_ccb, 0,
5151					    SF_RETRY_UA|SF_NO_PRINT,
5152					    &softc->saved_ccb) == ERESTART) {
5153			return;
5154		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5155			/* Don't wedge the queue */
5156			xpt_release_devq(done_ccb->ccb_h.path->device,
5157					 /*run_queue*/TRUE);
5158		}
5159
5160		/*
5161		 * Let's see if we have seen this device before.
5162		 */
5163		if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
5164			MD5_CTX context;
5165			u_int8_t digest[16];
5166
5167			MD5Init(&context);
5168
5169			MD5Update(&context,
5170				  (unsigned char *)&path->device->inq_data,
5171				  sizeof(struct scsi_inquiry_data));
5172
5173			if (have_serialnum)
5174				MD5Update(&context, serial_buf->serial_num,
5175					  serial_buf->length);
5176
5177			MD5Final(digest, &context);
5178			if (bcmp(softc->digest, digest, 16) == 0)
5179				changed = 0;
5180
5181			/*
5182			 * XXX Do we need to do a TUR in order to ensure
5183			 *     that the device really hasn't changed???
5184			 */
5185			if ((changed != 0)
5186			 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
5187				xpt_async(AC_LOST_DEVICE, path, NULL);
5188		}
5189		if (serial_buf != NULL)
5190			free(serial_buf, M_TEMP);
5191
5192		if (changed != 0) {
5193			/*
5194			 * We now have all the information necessary to
5195			 * safely perform transfer negotiations.  Controllers
5196			 * don't perform any negotiation or tagged queuing
5197			 * until after the first XPT_SET_TRAN_SETTINGS ccb
5198			 * is received.  So, on a new device, just retrieve
5199			 * the user settings, and set them as the current
5200			 * settings to set the device up.
5202			 */
5203			done_ccb->ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5204			done_ccb->cts.flags = CCB_TRANS_USER_SETTINGS;
5205			xpt_action(done_ccb);
5206			done_ccb->ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5207			done_ccb->cts.flags &= ~CCB_TRANS_USER_SETTINGS;
5208			done_ccb->cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
5209			xpt_action(done_ccb);
5210			xpt_release_ccb(done_ccb);
5211
5212			/*
5213			 * Perform a TUR to allow the controller to
5214			 * perform any necessary transfer negotiation.
5215			 */
5216			softc->action = PROBE_TUR_FOR_NEGOTIATION;
5217			xpt_schedule(periph, priority);
5218			return;
5219		}
5220		xpt_release_ccb(done_ccb);
5221		break;
5222	}
5223	case PROBE_TUR_FOR_NEGOTIATION:
5224		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5225			/* Don't wedge the queue */
5226			xpt_release_devq(done_ccb->ccb_h.path->device,
5227					 /*run_queue*/TRUE);
5228		}
5229
5230		path->device->flags &= ~CAM_DEV_UNCONFIGURED;
5231
5232		if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
5233			/* Inform the XPT that a new device has been found */
5234			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5235			xpt_action(done_ccb);
5236
5237			xpt_async(AC_FOUND_DEVICE, xpt_periph->path, done_ccb);
5238		}
5239		xpt_release_ccb(done_ccb);
5240		break;
5241	}
5242	done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5243	TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
5244	done_ccb->ccb_h.status = CAM_REQ_CMP;
5245	xpt_done(done_ccb);
5246	if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
5247		cam_periph_invalidate(periph);
5248		cam_periph_release(periph);
5249	} else {
5250		probeschedule(periph);
5251	}
5252}
5253
5254static void
5255probecleanup(struct cam_periph *periph)
5256{
5257	free(periph->softc, M_TEMP);
5258}
5259
5260static void
5261xpt_find_quirk(struct cam_ed *device)
5262{
5263	caddr_t	match;
5264
5265	match = cam_quirkmatch((caddr_t)&device->inq_data,
5266			       (caddr_t)xpt_quirk_table,
5267			       sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
5268			       sizeof(*xpt_quirk_table), scsi_inquiry_match);
5269
5270	if (match == NULL)
5271		panic("xpt_find_quirk: device didn't match wildcard entry!!");
5272
5273	device->quirk = (struct xpt_quirk_entry *)match;
5274}
5275
5276static void
5277xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
5278			  int async_update)
5279{
5280	struct	cam_sim *sim;
5281	int	qfrozen;
5282
5283	sim = cts->ccb_h.path->bus->sim;
5284	if (async_update == FALSE) {
5285		struct	scsi_inquiry_data *inq_data;
5286		struct	ccb_pathinq cpi;
5287
5288		if (device == NULL) {
5289			cts->ccb_h.status = CAM_PATH_INVALID;
5290			xpt_done((union ccb *)cts);
5291			return;
5292		}
5293
5294		/*
5295		 * Perform sanity checking against what the
5296		 * controller and device can do.
5297		 */
5298		xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
5299		cpi.ccb_h.func_code = XPT_PATH_INQ;
5300		xpt_action((union ccb *)&cpi);
5301
5302		inq_data = &device->inq_data;
5303		if ((inq_data->flags & SID_Sync) == 0
5304		 || (cpi.hba_inquiry & PI_SDTR_ABLE) == 0) {
5305			/* Force async */
5306			cts->sync_period = 0;
5307			cts->sync_offset = 0;
5308		}
5309
5310		switch (cts->bus_width) {
5311		case MSG_EXT_WDTR_BUS_32_BIT:
5312			if ((inq_data->flags & SID_WBus32) != 0
5313			 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
5314				break;
5315			/* Fall Through to 16-bit */
5316		case MSG_EXT_WDTR_BUS_16_BIT:
5317			if ((inq_data->flags & SID_WBus16) != 0
5318			 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
5319				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
5320				break;
5321			}
5322			/* Fall Through to 8-bit */
5323		default: /* New bus width?? */
5324		case MSG_EXT_WDTR_BUS_8_BIT:
5325			/* All targets can do this */
5326			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
5327			break;
5328		}
5329
5330		if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) {
5331			/*
5332			 * Can't tag queue without disconnection.
5333			 */
5334			cts->flags &= ~CCB_TRANS_TAG_ENB;
5335			cts->valid |= CCB_TRANS_TQ_VALID;
5336		}
5337
5338		if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
5339		 || (inq_data->flags & SID_CmdQue) == 0
5340		 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
5341		 || (device->quirk->mintags == 0)) {
5342			/*
5343			 * Can't tag on hardware that doesn't support it,
5344			 * doesn't have it enabled, or has broken tag support.
5345			 */
5346			cts->flags &= ~CCB_TRANS_TAG_ENB;
5347		}
5348	}
5349
5350	qfrozen = FALSE;
5351	if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
5352		int device_tagenb;
5353
5354		/*
5355		 * If we are transitioning from tags to no-tags or
5356		 * vice-versa, we need to carefully freeze and restart
5357		 * the queue so that we don't overlap tagged and non-tagged
5358		 * commands.  We also temporarily stop tags if there is
5359		 * a change in transfer negotiation settings to allow
5360		 * "tag-less" negotiation.
5361		 */
5362		if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5363		 || (device->inq_flags & SID_CmdQue) != 0)
5364			device_tagenb = TRUE;
5365		else
5366			device_tagenb = FALSE;
5367
5368		if (((cts->flags & CCB_TRANS_TAG_ENB) != 0
5369		  && device_tagenb == FALSE)
5370		 || ((cts->flags & CCB_TRANS_TAG_ENB) == 0
5371		  && device_tagenb == TRUE)) {
5372
5373			if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
5374				/*
5375				 * Delay change to use tags until after a
5376				 * few commands have gone to this device so
5377				 * the controller has time to perform transfer
5378				 * negotiations without tagged messages getting
5379				 * in the way.
5380				 */
5381				device->tag_delay_count = CAM_TAG_DELAY_COUNT;
5382				device->flags |= CAM_DEV_TAG_AFTER_COUNT;
5383			} else {
5384				xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
5385				qfrozen = TRUE;
5386		  		device->inq_flags &= ~SID_CmdQue;
5387				xpt_dev_ccbq_resize(cts->ccb_h.path,
5388						    sim->max_dev_openings);
5389				device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
5390				device->tag_delay_count = 0;
5391			}
5392		} else if ((cts->flags & (CCB_TRANS_SYNC_RATE_VALID|
5393					  CCB_TRANS_SYNC_OFFSET_VALID|
5394					  CCB_TRANS_BUS_WIDTH_VALID)) != 0) {
5395			xpt_toggle_tags(cts->ccb_h.path);
5396		}
5397	}
5398
5399	if (async_update == FALSE)
5400		(*(sim->sim_action))(sim, (union ccb *)cts);
5401
5402	if (qfrozen) {
5403		struct ccb_relsim crs;
5404
5405		xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
5406			      /*priority*/1);
5407		crs.ccb_h.func_code = XPT_REL_SIMQ;
5408		crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
5409		crs.openings
5410		    = crs.release_timeout
5411		    = crs.qfrozen_cnt
5412		    = 0;
5413		xpt_action((union ccb *)&crs);
5414	}
5415}
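
/*
 * Requests reach this routine via an XPT_SET_TRAN_SETTINGS ccb; a
 * minimal caller-side sketch, enabling tagged queuing on the current
 * settings:
 *
 *	struct ccb_trans_settings cts;
 *
 *	xpt_setup_ccb(&cts.ccb_h, path, 1);
 *	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
 *	cts.flags = CCB_TRANS_CURRENT_SETTINGS | CCB_TRANS_TAG_ENB;
 *	cts.valid = CCB_TRANS_TQ_VALID;
 *	xpt_action((union ccb *)&cts);
 */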
5416
5417static void
5418xpt_toggle_tags(struct cam_path *path)
5419{
5420	/*
5421	 * Give controllers a chance to renegotiate
5422	 * before starting tag operations.  We
5423	 * "toggle" tagged queuing off then on,
5424	 * which causes the tag enable command delay
5425	 * counter to come into effect.
5426	 */
5427	if ((path->device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5428	 || (path->device->inq_flags & SID_CmdQue) != 0) {
5429		struct ccb_trans_settings cts;
5430
5431		xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
5432		cts.flags = 0;
5433		cts.valid = CCB_TRANS_TQ_VALID;
5434		xpt_set_transfer_settings(&cts, path->device,
5435					  /*async_update*/TRUE);
5436		cts.flags = CCB_TRANS_TAG_ENB;
5437		xpt_set_transfer_settings(&cts, path->device,
5438					  /*async_update*/TRUE);
5439	}
5440}
5441
5442static void
5443xpt_start_tags(struct cam_path *path)
5444{
5445	struct ccb_relsim crs;
5446	struct cam_ed *device;
5447	struct cam_sim *sim;
5448	int    newopenings;
5449
5450	device = path->device;
5451	sim = path->bus->sim;
5452	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
5453	xpt_freeze_devq(path, /*count*/1);
5454	device->inq_flags |= SID_CmdQue;
5455	newopenings = min(device->quirk->maxtags, sim->max_tagged_dev_openings);
5456	xpt_dev_ccbq_resize(path, newopenings);
5457	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
5458	crs.ccb_h.func_code = XPT_REL_SIMQ;
5459	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
5460	crs.openings
5461	    = crs.release_timeout
5462	    = crs.qfrozen_cnt
5463	    = 0;
5464	xpt_action((union ccb *)&crs);
5465}
5466
5467static int busses_to_config;
5468
5469static int
5470xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
5471{
5472	if (bus->path_id != CAM_XPT_PATH_ID)
5473		busses_to_config++;
5474
5475	return(1);
5476}
5477
5478static int
5479xptconfigfunc(struct cam_eb *bus, void *arg)
5480{
5481	struct	cam_path *path;
5482	union	ccb *work_ccb;
5483
5484	if (bus->path_id != CAM_XPT_PATH_ID) {
5485		cam_status status;
5486
5487		work_ccb = xpt_alloc_ccb();
5488		if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
5489					      CAM_TARGET_WILDCARD,
5490					      CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){
5491			printf("xptconfigfunc: xpt_create_path failed with "
5492			       "status %#x for bus %d\n", status, bus->path_id);
5493			printf("xptconfigfunc: halting bus configuration\n");
5494			xpt_free_ccb(work_ccb);
5495			return(0);
5496		}
5497		xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
5498		work_ccb->ccb_h.func_code = XPT_RESET_BUS;
5499		work_ccb->ccb_h.cbfcnp = NULL;
5500		CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
5501			  ("Resetting Bus\n"));
5502		xpt_action(work_ccb);
5503		xpt_finishconfig(xpt_periph, work_ccb);
5504	}
5505
5506	return(1);
5507
5508}
5509
5510static void
5511xpt_config(void *arg)
5512{
5513	/* Now that interrupts are enabled, go find our devices */
5514	struct cam_eb *bus;
5515
5516#ifdef CAMDEBUG
5517	/* Setup debugging flags and path */
5518#ifdef CAM_DEBUG_FLAGS
5519	cam_dflags = CAM_DEBUG_FLAGS;
5520#else /* !CAM_DEBUG_FLAGS */
5521	cam_dflags = CAM_DEBUG_NONE;
5522#endif /* CAM_DEBUG_FLAGS */
5523#ifdef CAM_DEBUG_BUS
5524	if (cam_dflags != CAM_DEBUG_NONE) {
5525		if (xpt_create_path(&cam_dpath, xpt_periph,
5526				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
5527				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
5528			printf("xpt_config: xpt_create_path() failed for debug"
5529			       " target %d:%d:%d, debugging disabled\n",
5530			       CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
5531			cam_dflags = CAM_DEBUG_NONE;
5532		}
5533	} else
5534		cam_dpath = NULL;
5535#else /* !CAM_DEBUG_BUS */
5536	cam_dpath = NULL;
5537#endif /* CAM_DEBUG_BUS */
5538#endif /* CAMDEBUG */
5539
5540	/* Scan all installed busses */
5541	xpt_for_all_busses(xptconfigbuscountfunc, NULL);
5542
5543	if (busses_to_config == 0) {
5544		/* Call manually because we don't have any busses */
5545		xpt_finishconfig(xpt_periph, NULL);
5546	} else
5547		xpt_for_all_busses(xptconfigfunc, NULL);
5548}
5549
5550static int
5551xptfinishconfigfunc(struct cam_ed *device, void *arg)
5552{
5553	union ccb work_ccb;
5554	struct cam_path path;
5555	cam_status status;
5556
5557	if ((status = xpt_compile_path(&path, xpt_periph,
5558				       device->target->bus->path_id,
5559				       device->target->target_id,
5560				       device->lun_id)) != CAM_REQ_CMP) {
5561		printf("xptfinishconfig: xpt_compile_path failed with status"
5562		       " %#x, halting device registration\n", status);
5563		return(0);
5564	}
5565
5566	xpt_setup_ccb(&work_ccb.ccb_h, &path, /*priority*/1);
5567
5568	work_ccb.ccb_h.func_code = XPT_GDEV_TYPE;
5569	xpt_action(&work_ccb);
5570	xpt_async(AC_FOUND_DEVICE, &path, &work_ccb);
5571
5572	xpt_release_path(&path);
5573	return(1);
5574}
5575
5576/*
5577 * If the given device only has one peripheral attached to it, and if that
5578 * peripheral is the passthrough driver, announce it.  This insures that the
5579 * user sees some sort of announcement for every peripheral in their system.
5580 */
5581static int
5582xptpassannouncefunc(struct cam_ed *device, void *arg)
5583{
5584	struct cam_periph *periph;
5585	int i;
5586
5587	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
5588	     periph = SLIST_NEXT(periph, periph_links), i++);
5589
5590	periph = SLIST_FIRST(&device->periphs);
5591	if ((i == 1)
5592	 && (strncmp(periph->periph_name, "pass", 4) == 0))
5593		xpt_announce_periph(periph, NULL);
5594
5595	return(1);
5596}
5597
5598static void
5599xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
5600{
5601	struct	periph_driver **p_drv;
5602	struct	cam_eb *bus;
5603	struct	cam_et *target;
5604	struct	cam_ed *dev;
5605	struct	cam_periph  *nperiph;
5606	struct	periph_list *periph_head;
5607	int	i;
5608
5609	if (done_ccb != NULL) {
5610		CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5611			  ("xpt_finishconfig\n"));
5612		switch(done_ccb->ccb_h.func_code) {
5613		case XPT_RESET_BUS:
5614			if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
5615				done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
5616				done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
5617				xpt_action(done_ccb);
5618				return;
5619			}
5620			/* FALLTHROUGH */
5621		case XPT_SCAN_BUS:
5622			xpt_free_path(done_ccb->ccb_h.path);
5623			busses_to_config--;
5624			break;
5625		default:
5626			break;
5627		}
5628	}
5629
5630	if (busses_to_config == 0) {
5631		/* Register all the peripheral drivers */
5632		/* XXX This will have to change when we have LKMs */
5633		p_drv = (struct periph_driver **)periphdriver_set.ls_items;
5634		for (i = 0; p_drv[i] != NULL; i++) {
5635			(*p_drv[i]->init)();
5636		}
5637
5638		/*
5639		 * Iterate through our devices, announcing
5640		 * them in probed bus order.
5641		 */
5642		xpt_for_all_devices(xptfinishconfigfunc, NULL);
5643
5644		/*
5645		 * Check for devices with no "standard" peripheral driver
5646		 * attached.  For any devices like that, announce the
5647		 * passthrough driver so the user will see something.
5648		 */
5649		xpt_for_all_devices(xptpassannouncefunc, NULL);
5650
5651		/* Release our hook so that the boot can continue. */
5652		config_intrhook_disestablish(xpt_config_hook);
5653	}
5654	if (done_ccb != NULL)
5655		xpt_free_ccb(done_ccb);
5656}
5657
5658static void
5659xptaction(struct cam_sim *sim, union ccb *work_ccb)
5660{
5661	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
5662
5663	switch (work_ccb->ccb_h.func_code) {
5664	/* Common cases first */
5665	case XPT_PATH_INQ:		/* Path routing inquiry */
5666	{
5667		struct ccb_pathinq *cpi;
5668
5669		cpi = &work_ccb->cpi;
5670		cpi->version_num = 1; /* XXX??? */
5671		cpi->hba_inquiry = 0;
5672		cpi->target_sprt = 0;
5673		cpi->hba_misc = 0;
5674		cpi->hba_eng_cnt = 0;
5675		cpi->max_target = 0;
5676		cpi->max_lun = 0;
5677		cpi->initiator_id = 0;
5678		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
5679		strncpy(cpi->hba_vid, "", HBA_IDLEN);
5680		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
5681		cpi->unit_number = sim->unit_number;
5682		cpi->bus_id = sim->bus_id;
5683		cpi->ccb_h.status = CAM_REQ_CMP;
5684		xpt_done(work_ccb);
5685		break;
5686	}
5687	default:
5688		work_ccb->ccb_h.status = CAM_REQ_INVALID;
5689		xpt_done(work_ccb);
5690		break;
5691	}
5692}
5693
5694/*
5695 * Should only be called by the machine interrupt dispatch routines,
5696 * so put these prototypes here instead of in the header.
5697 *
5698 * XXX we should really have a way to dynamically register SWI handlers.
5699 */
5700
5701void
5702swi_camnet(void)
5703{
5704	camisr(&cam_netq);
5705}
5706
5707void
5708swi_cambio(void)
5709{
5710	camisr(&cam_bioq);
5711}
5712
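/*
 * Software interrupt handler: drain a completion queue, retire each
 * ccb from its device's ccb queue, perform any queue release or tag
 * adjustment side effects, and finally invoke the peripheral driver's
 * completion callback.
 */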
5713static void
5714camisr(cam_isrq_t *queue)
5715{
5716	int	s;
5717	struct	ccb_hdr *ccb_h;
5718
5719	s = splcam();
5720	while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
5721		int	runq;
5722
5723		TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
5724		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
5725		splx(s);
5726
5727		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
5728			  ("camisr"));
5729
5730		runq = FALSE;
5731
5732		if (ccb_h->flags & CAM_HIGH_POWER) {
5733			struct highpowerlist	*hphead;
5734			struct cam_ed		*device;
5735			union ccb		*send_ccb;
5736
5737			hphead = &highpowerq;
5738
5739			send_ccb = (union ccb *)STAILQ_FIRST(hphead);
5740
5741			/*
5742			 * Increment the count since this command is done.
5743			 */
5744			num_highpower++;
5745
5746			/*
5747			 * Any high powered commands queued up?
5748			 */
5749			if (send_ccb != NULL) {
5750				device = send_ccb->ccb_h.path->device;
5751
5752				STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
5753
5754				xpt_release_devq(device,
5755						 /*run_queue*/TRUE);
5756			}
5757		}
5758		if ((ccb_h->func_code != XPT_ACCEPT_TARGET_IO)
5759		 && (ccb_h->func_code != XPT_SCAN_LUN)
5760		 && (ccb_h->func_code != XPT_SCAN_BUS)) {
5761			struct cam_ed *dev;
5762
5763			dev = ccb_h->path->device;
5764
5765			s = splcam();
5766			cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
5767
5768			ccb_h->path->bus->sim->devq->send_active--;
5769			ccb_h->path->bus->sim->devq->send_openings++;
5770			splx(s);
5771
5772			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
5773			 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
5774			  && (dev->ccbq.dev_active == 0))) {
5775
5776				xpt_release_devq(ccb_h->path->device,
5777						 /*run_queue*/TRUE);
5778			}
5779
5780			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5781			 && (--dev->tag_delay_count == 0))
5782				xpt_start_tags(ccb_h->path);
5783
5784			if ((dev->ccbq.queue.entries > 0)
5785			 && (dev->qfrozen_cnt == 0)
5786			 && (device_is_send_queued(dev) == 0)) {
5787				runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
5788							      dev);
5789			}
5790		}
5791
5792		if (ccb_h->status & CAM_RELEASE_SIMQ) {
5793			xpt_release_simq(ccb_h->path->bus->sim,
5794					 /*run_queue*/TRUE);
5795		} else if ((ccb_h->flags & CAM_DEV_QFRZDIS)
5796			&& (ccb_h->status & CAM_DEV_QFRZN)) {
5797			xpt_release_devq(ccb_h->path->device,
5798					 /*run_queue*/TRUE);
5799			ccb_h->status &= ~CAM_DEV_QFRZN;
5800		} else if (runq) {
5801			xpt_run_dev_sendq(ccb_h->path->bus);
5802		}
5803
5804		/* Call the peripheral driver's callback */
5805		(*ccb_h->cbfcnp)(ccb_h->path->periph,
5806				 (union ccb *)ccb_h);
5807
5808		/* Raise IPL for the while-condition test */
5809		s = splcam();
5810	}
5811	splx(s);
5812}
5813