1/*-
2 * Implementation of the Common Access Method Transport (XPT) layer.
3 *
4 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
5 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions, and the following disclaimer,
13 *    without modification, immediately at the beginning of the file.
14 * 2. The name of the author may not be used to endorse or promote products
15 *    derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: stable/11/sys/cam/cam_xpt.c 350804 2019-08-08 22:16:19Z mav $");
32
33#include <sys/param.h>
34#include <sys/bus.h>
35#include <sys/systm.h>
36#include <sys/types.h>
37#include <sys/malloc.h>
38#include <sys/kernel.h>
39#include <sys/time.h>
40#include <sys/conf.h>
41#include <sys/fcntl.h>
42#include <sys/proc.h>
43#include <sys/sbuf.h>
44#include <sys/smp.h>
45#include <sys/taskqueue.h>
46
47#include <sys/lock.h>
48#include <sys/mutex.h>
49#include <sys/sysctl.h>
50#include <sys/kthread.h>
51
52#include <cam/cam.h>
53#include <cam/cam_ccb.h>
54#include <cam/cam_periph.h>
55#include <cam/cam_queue.h>
56#include <cam/cam_sim.h>
57#include <cam/cam_xpt.h>
58#include <cam/cam_xpt_sim.h>
59#include <cam/cam_xpt_periph.h>
60#include <cam/cam_xpt_internal.h>
61#include <cam/cam_debug.h>
62#include <cam/cam_compat.h>
63
64#include <cam/scsi/scsi_all.h>
65#include <cam/scsi/scsi_message.h>
66#include <cam/scsi/scsi_pass.h>
67
68#include <machine/md_var.h>	/* geometry translation */
69#include <machine/stdarg.h>	/* for xpt_print below */
70
71#include "opt_cam.h"
72
73/*
74 * This is the maximum number of high powered commands (e.g. start unit)
75 * that can be outstanding at a particular time.
76 */
77#ifndef CAM_MAX_HIGHPOWER
78#define CAM_MAX_HIGHPOWER  4
79#endif
80
/* Data structures internal to the xpt layer */
82MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
83MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices");
84MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs");
85MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths");
86
/* Object for deferring XPT actions to a taskqueue */
88struct xpt_task {
89	struct task	task;
90	void		*data1;
91	uintptr_t	data2;
92};
93
94struct xpt_softc {
95	uint32_t		xpt_generation;
96
97	/* number of high powered commands that can go through right now */
98	struct mtx		xpt_highpower_lock;
99	STAILQ_HEAD(highpowerlist, cam_ed)	highpowerq;
100	int			num_highpower;
101
102	/* queue for handling async rescan requests. */
103	TAILQ_HEAD(, ccb_hdr) ccb_scanq;
104	int buses_to_config;
105	int buses_config_done;
106
107	/* Registered busses */
108	TAILQ_HEAD(,cam_eb)	xpt_busses;
109	u_int			bus_generation;
110
111	struct intr_config_hook	*xpt_config_hook;
112
113	int			boot_delay;
114	struct callout 		boot_callout;
115
116	struct mtx		xpt_topo_lock;
117	struct mtx		xpt_lock;
118	struct taskqueue	*xpt_taskq;
119};
120
121typedef enum {
122	DM_RET_COPY		= 0x01,
123	DM_RET_FLAG_MASK	= 0x0f,
124	DM_RET_NONE		= 0x00,
125	DM_RET_STOP		= 0x10,
126	DM_RET_DESCEND		= 0x20,
127	DM_RET_ERROR		= 0x30,
128	DM_RET_ACTION_MASK	= 0xf0
129} dev_match_ret;
130
131typedef enum {
132	XPT_DEPTH_BUS,
133	XPT_DEPTH_TARGET,
134	XPT_DEPTH_DEVICE,
135	XPT_DEPTH_PERIPH
136} xpt_traverse_depth;
137
138struct xpt_traverse_config {
139	xpt_traverse_depth	depth;
140	void			*tr_func;
141	void			*tr_arg;
142};
143
144typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
145typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
146typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
147typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
148typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
149
150/* Transport layer configuration information */
151static struct xpt_softc xsoftc;
152
153MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF);
154
155SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
156           &xsoftc.boot_delay, 0, "Bus registration wait time");
157SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD,
158	    &xsoftc.xpt_generation, 0, "CAM peripheral generation count");
159
160struct cam_doneq {
161	struct mtx_padalign	cam_doneq_mtx;
162	STAILQ_HEAD(, ccb_hdr)	cam_doneq;
163	int			cam_doneq_sleep;
164};
165
166static struct cam_doneq cam_doneqs[MAXCPU];
167static int cam_num_doneqs;
168static struct proc *cam_proc;
169
170SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
171           &cam_num_doneqs, 0, "Number of completion queues/threads");
172
173struct cam_periph *xpt_periph;
174
175static periph_init_t xpt_periph_init;
176
177static struct periph_driver xpt_driver =
178{
179	xpt_periph_init, "xpt",
180	TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
181	CAM_PERIPH_DRV_EARLY
182};
183
184PERIPHDRIVER_DECLARE(xpt, xpt_driver);
185
186static d_open_t xptopen;
187static d_close_t xptclose;
188static d_ioctl_t xptioctl;
189static d_ioctl_t xptdoioctl;
190
191static struct cdevsw xpt_cdevsw = {
192	.d_version =	D_VERSION,
193	.d_flags =	0,
194	.d_open =	xptopen,
195	.d_close =	xptclose,
196	.d_ioctl =	xptioctl,
197	.d_name =	"xpt",
198};
199
/* Storage for debugging data structures */
201struct cam_path *cam_dpath;
202u_int32_t cam_dflags = CAM_DEBUG_FLAGS;
203SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN,
204	&cam_dflags, 0, "Enabled debug flags");
205u_int32_t cam_debug_delay = CAM_DEBUG_DELAY;
206SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN,
207	&cam_debug_delay, 0, "Delay in us after each debug message");
208
209/* Our boot-time initialization hook */
210static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
211
212static moduledata_t cam_moduledata = {
213	"cam",
214	cam_module_event_handler,
215	NULL
216};
217
218static int	xpt_init(void *);
219
220DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
221MODULE_VERSION(cam, 1);
222
223
224static void		xpt_async_bcast(struct async_list *async_head,
225					u_int32_t async_code,
226					struct cam_path *path,
227					void *async_arg);
228static path_id_t xptnextfreepathid(void);
229static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
230static union ccb *xpt_get_ccb(struct cam_periph *periph);
231static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph);
232static void	 xpt_run_allocq(struct cam_periph *periph, int sleep);
233static void	 xpt_run_allocq_task(void *context, int pending);
234static void	 xpt_run_devq(struct cam_devq *devq);
235static timeout_t xpt_release_devq_timeout;
236static void	 xpt_release_simq_timeout(void *arg) __unused;
237static void	 xpt_acquire_bus(struct cam_eb *bus);
238static void	 xpt_release_bus(struct cam_eb *bus);
239static uint32_t	 xpt_freeze_devq_device(struct cam_ed *dev, u_int count);
240static int	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
241		    int run_queue);
242static struct cam_et*
243		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
244static void	 xpt_acquire_target(struct cam_et *target);
245static void	 xpt_release_target(struct cam_et *target);
246static struct cam_eb*
247		 xpt_find_bus(path_id_t path_id);
248static struct cam_et*
249		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
250static struct cam_ed*
251		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
252static void	 xpt_config(void *arg);
253static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
254				 u_int32_t new_priority);
255static xpt_devicefunc_t xptpassannouncefunc;
256static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
257static void	 xptpoll(struct cam_sim *sim);
258static void	 camisr_runqueue(void);
259static void	 xpt_done_process(struct ccb_hdr *ccb_h);
260static void	 xpt_done_td(void *);
261static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
262				    u_int num_patterns, struct cam_eb *bus);
263static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
264				       u_int num_patterns,
265				       struct cam_ed *device);
266static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
267				       u_int num_patterns,
268				       struct cam_periph *periph);
269static xpt_busfunc_t	xptedtbusfunc;
270static xpt_targetfunc_t	xptedttargetfunc;
271static xpt_devicefunc_t	xptedtdevicefunc;
272static xpt_periphfunc_t	xptedtperiphfunc;
273static xpt_pdrvfunc_t	xptplistpdrvfunc;
274static xpt_periphfunc_t	xptplistperiphfunc;
275static int		xptedtmatch(struct ccb_dev_match *cdm);
276static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
277static int		xptbustraverse(struct cam_eb *start_bus,
278				       xpt_busfunc_t *tr_func, void *arg);
279static int		xpttargettraverse(struct cam_eb *bus,
280					  struct cam_et *start_target,
281					  xpt_targetfunc_t *tr_func, void *arg);
282static int		xptdevicetraverse(struct cam_et *target,
283					  struct cam_ed *start_device,
284					  xpt_devicefunc_t *tr_func, void *arg);
285static int		xptperiphtraverse(struct cam_ed *device,
286					  struct cam_periph *start_periph,
287					  xpt_periphfunc_t *tr_func, void *arg);
288static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
289					xpt_pdrvfunc_t *tr_func, void *arg);
290static int		xptpdperiphtraverse(struct periph_driver **pdrv,
291					    struct cam_periph *start_periph,
292					    xpt_periphfunc_t *tr_func,
293					    void *arg);
294static xpt_busfunc_t	xptdefbusfunc;
295static xpt_targetfunc_t	xptdeftargetfunc;
296static xpt_devicefunc_t	xptdefdevicefunc;
297static xpt_periphfunc_t	xptdefperiphfunc;
298static void		xpt_finishconfig_task(void *context, int pending);
299static void		xpt_dev_async_default(u_int32_t async_code,
300					      struct cam_eb *bus,
301					      struct cam_et *target,
302					      struct cam_ed *device,
303					      void *async_arg);
304static struct cam_ed *	xpt_alloc_device_default(struct cam_eb *bus,
305						 struct cam_et *target,
306						 lun_id_t lun_id);
307static xpt_devicefunc_t	xptsetasyncfunc;
308static xpt_busfunc_t	xptsetasyncbusfunc;
309static cam_status	xptregister(struct cam_periph *periph,
310				    void *arg);
311static const char *	xpt_action_name(uint32_t action);
312static __inline int device_is_queued(struct cam_ed *device);
313
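/*
 * Schedule the device on its controller's send queue if it has CCBs queued,
 * has device openings available, and is not frozen.  The device is queued at
 * the priority of its highest priority pending CCB.
 */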
314static __inline int
315xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
316{
317	int	retval;
318
319	mtx_assert(&devq->send_mtx, MA_OWNED);
320	if ((dev->ccbq.queue.entries > 0) &&
321	    (dev->ccbq.dev_openings > 0) &&
322	    (dev->ccbq.queue.qfrozen_cnt == 0)) {
323		/*
324		 * The priority of a device waiting for controller
325		 * resources is that of the highest priority CCB
326		 * enqueued.
327		 */
328		retval =
329		    xpt_schedule_dev(&devq->send_queue,
330				     &dev->devq_entry,
331				     CAMQ_GET_PRIO(&dev->ccbq.queue));
332	} else {
333		retval = 0;
334	}
335	return (retval);
336}
337
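/* Return non-zero if the device currently holds a slot on the devq send queue. */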
338static __inline int
339device_is_queued(struct cam_ed *device)
340{
341	return (device->devq_entry.index != CAM_UNQUEUED_INDEX);
342}
343
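/* Create the xpt0 control device node used by userland tools such as camcontrol(8). */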
344static void
345xpt_periph_init()
346{
347	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
348}
349
350static int
351xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
352{
353
354	/*
355	 * Only allow read-write access.
356	 */
357	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
358		return(EPERM);
359
360	/*
361	 * We don't allow nonblocking access.
362	 */
363	if ((flags & O_NONBLOCK) != 0) {
364		printf("%s: can't do nonblocking access\n", devtoname(dev));
365		return(ENODEV);
366	}
367
368	return(0);
369}
370
371static int
372xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
373{
374
375	return(0);
376}
377
378/*
379 * Don't automatically grab the xpt softc lock here even though this is going
380 * through the xpt device.  The xpt device is really just a back door for
381 * accessing other devices and SIMs, so the right thing to do is to grab
382 * the appropriate SIM lock once the bus/SIM is located.
383 */
384static int
385xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
386{
387	int error;
388
389	if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
390		error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl);
391	}
392	return (error);
393}
394
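/*
 * Do the real ioctl work.  Only CAMIOCOMMAND (with a restricted set of CCB
 * types) and CAMGETPASSTHRU are handled here; anything else returns ENOTTY
 * so that xptioctl() can fall back to the compatibility shims.
 */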
395static int
396xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
397{
398	int error;
399
400	error = 0;
401
402	switch(cmd) {
403	/*
404	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
405	 * to accept CCB types that don't quite make sense to send through a
406	 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
407	 * in the CAM spec.
408	 */
409	case CAMIOCOMMAND: {
410		union ccb *ccb;
411		union ccb *inccb;
412		struct cam_eb *bus;
413
414		inccb = (union ccb *)addr;
415
416		if (inccb->ccb_h.flags & CAM_UNLOCKED)
417			return (EINVAL);
418
419		bus = xpt_find_bus(inccb->ccb_h.path_id);
420		if (bus == NULL)
421			return (EINVAL);
422
423		switch (inccb->ccb_h.func_code) {
424		case XPT_SCAN_BUS:
425		case XPT_RESET_BUS:
426			if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
427			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
428				xpt_release_bus(bus);
429				return (EINVAL);
430			}
431			break;
432		case XPT_SCAN_TGT:
433			if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
434			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
435				xpt_release_bus(bus);
436				return (EINVAL);
437			}
438			break;
439		default:
440			break;
441		}
442
443		switch(inccb->ccb_h.func_code) {
444		case XPT_SCAN_BUS:
445		case XPT_RESET_BUS:
446		case XPT_PATH_INQ:
447		case XPT_ENG_INQ:
448		case XPT_SCAN_LUN:
449		case XPT_SCAN_TGT:
450
451			ccb = xpt_alloc_ccb();
452
453			/*
454			 * Create a path using the bus, target, and lun the
455			 * user passed in.
456			 */
457			if (xpt_create_path(&ccb->ccb_h.path, NULL,
458					    inccb->ccb_h.path_id,
459					    inccb->ccb_h.target_id,
460					    inccb->ccb_h.target_lun) !=
461					    CAM_REQ_CMP){
462				error = EINVAL;
463				xpt_free_ccb(ccb);
464				break;
465			}
466			/* Ensure all of our fields are correct */
467			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
468				      inccb->ccb_h.pinfo.priority);
469			xpt_merge_ccb(ccb, inccb);
470			xpt_path_lock(ccb->ccb_h.path);
471			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
472			xpt_path_unlock(ccb->ccb_h.path);
473			bcopy(ccb, inccb, sizeof(union ccb));
474			xpt_free_path(ccb->ccb_h.path);
475			xpt_free_ccb(ccb);
476			break;
477
478		case XPT_DEBUG: {
479			union ccb ccb;
480
481			/*
482			 * This is an immediate CCB, so it's okay to
483			 * allocate it on the stack.
484			 */
485
486			/*
487			 * Create a path using the bus, target, and lun the
488			 * user passed in.
489			 */
490			if (xpt_create_path(&ccb.ccb_h.path, NULL,
491					    inccb->ccb_h.path_id,
492					    inccb->ccb_h.target_id,
493					    inccb->ccb_h.target_lun) !=
494					    CAM_REQ_CMP){
495				error = EINVAL;
496				break;
497			}
498			/* Ensure all of our fields are correct */
499			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
500				      inccb->ccb_h.pinfo.priority);
501			xpt_merge_ccb(&ccb, inccb);
502			xpt_action(&ccb);
503			bcopy(&ccb, inccb, sizeof(union ccb));
504			xpt_free_path(ccb.ccb_h.path);
505			break;
506
507		}
508		case XPT_DEV_MATCH: {
509			struct cam_periph_map_info mapinfo;
510			struct cam_path *old_path;
511
512			/*
513			 * We can't deal with physical addresses for this
514			 * type of transaction.
515			 */
516			if ((inccb->ccb_h.flags & CAM_DATA_MASK) !=
517			    CAM_DATA_VADDR) {
518				error = EINVAL;
519				break;
520			}
521
522			/*
523			 * Save this in case the caller had it set to
524			 * something in particular.
525			 */
526			old_path = inccb->ccb_h.path;
527
528			/*
529			 * We really don't need a path for the matching
530			 * code.  The path is needed because of the
531			 * debugging statements in xpt_action().  They
532			 * assume that the CCB has a valid path.
533			 */
534			inccb->ccb_h.path = xpt_periph->path;
535
536			bzero(&mapinfo, sizeof(mapinfo));
537
538			/*
539			 * Map the pattern and match buffers into kernel
540			 * virtual address space.
541			 */
542			error = cam_periph_mapmem(inccb, &mapinfo, MAXPHYS);
543
544			if (error) {
545				inccb->ccb_h.path = old_path;
546				break;
547			}
548
549			/*
550			 * This is an immediate CCB, we can send it on directly.
551			 */
552			xpt_action(inccb);
553
554			/*
555			 * Map the buffers back into user space.
556			 */
557			cam_periph_unmapmem(inccb, &mapinfo);
558
559			inccb->ccb_h.path = old_path;
560
561			error = 0;
562			break;
563		}
564		default:
565			error = ENOTSUP;
566			break;
567		}
568		xpt_release_bus(bus);
569		break;
570	}
	/*
	 * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as
	 * input, with the peripheral driver name and unit number filled in.
	 * The other fields don't really matter as input.  The passthrough
	 * driver name ("pass") and unit number are passed back in the ccb.
	 * The current device generation number, the index into the device
	 * peripheral driver list, and the status are also passed back.  Note
	 * that since we do everything in one pass, unlike the XPT_GDEVLIST
	 * ccb, we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
	 * (or rather should be) impossible for the device peripheral driver
	 * list to change, since we look at the whole thing in one pass and we
	 * do it with lock protection.
	 */
585	case CAMGETPASSTHRU: {
586		union ccb *ccb;
587		struct cam_periph *periph;
588		struct periph_driver **p_drv;
589		char   *name;
590		u_int unit;
591		int base_periph_found;
592
593		ccb = (union ccb *)addr;
594		unit = ccb->cgdl.unit_number;
595		name = ccb->cgdl.periph_name;
596		base_periph_found = 0;
597
598		/*
599		 * Sanity check -- make sure we don't get a null peripheral
600		 * driver name.
601		 */
602		if (*ccb->cgdl.periph_name == '\0') {
603			error = EINVAL;
604			break;
605		}
606
607		/* Keep the list from changing while we traverse it */
608		xpt_lock_buses();
609
610		/* first find our driver in the list of drivers */
611		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
612			if (strcmp((*p_drv)->driver_name, name) == 0)
613				break;
614
615		if (*p_drv == NULL) {
616			xpt_unlock_buses();
617			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
618			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
619			*ccb->cgdl.periph_name = '\0';
620			ccb->cgdl.unit_number = 0;
621			error = ENOENT;
622			break;
623		}
624
625		/*
626		 * Run through every peripheral instance of this driver
627		 * and check to see whether it matches the unit passed
628		 * in by the user.  If it does, get out of the loops and
629		 * find the passthrough driver associated with that
630		 * peripheral driver.
631		 */
632		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
633		     periph = TAILQ_NEXT(periph, unit_links)) {
634
635			if (periph->unit_number == unit)
636				break;
637		}
638		/*
639		 * If we found the peripheral driver that the user passed
640		 * in, go through all of the peripheral drivers for that
641		 * particular device and look for a passthrough driver.
642		 */
643		if (periph != NULL) {
644			struct cam_ed *device;
645			int i;
646
647			base_periph_found = 1;
648			device = periph->path->device;
649			for (i = 0, periph = SLIST_FIRST(&device->periphs);
650			     periph != NULL;
651			     periph = SLIST_NEXT(periph, periph_links), i++) {
652				/*
653				 * Check to see whether we have a
654				 * passthrough device or not.
655				 */
656				if (strcmp(periph->periph_name, "pass") == 0) {
657					/*
658					 * Fill in the getdevlist fields.
659					 */
660					strlcpy(ccb->cgdl.periph_name,
661					       periph->periph_name,
662					       sizeof(ccb->cgdl.periph_name));
663					ccb->cgdl.unit_number =
664						periph->unit_number;
665					if (SLIST_NEXT(periph, periph_links))
666						ccb->cgdl.status =
667							CAM_GDEVLIST_MORE_DEVS;
668					else
669						ccb->cgdl.status =
670						       CAM_GDEVLIST_LAST_DEVICE;
671					ccb->cgdl.generation =
672						device->generation;
673					ccb->cgdl.index = i;
674					/*
675					 * Fill in some CCB header fields
676					 * that the user may want.
677					 */
678					ccb->ccb_h.path_id =
679						periph->path->bus->path_id;
680					ccb->ccb_h.target_id =
681						periph->path->target->target_id;
682					ccb->ccb_h.target_lun =
683						periph->path->device->lun_id;
684					ccb->ccb_h.status = CAM_REQ_CMP;
685					break;
686				}
687			}
688		}
689
690		/*
691		 * If the periph is null here, one of two things has
692		 * happened.  The first possibility is that we couldn't
693		 * find the unit number of the particular peripheral driver
694		 * that the user is asking about.  e.g. the user asks for
695		 * the passthrough driver for "da11".  We find the list of
696		 * "da" peripherals all right, but there is no unit 11.
697		 * The other possibility is that we went through the list
698		 * of peripheral drivers attached to the device structure,
699		 * but didn't find one with the name "pass".  Either way,
700		 * we return ENOENT, since we couldn't find something.
701		 */
702		if (periph == NULL) {
703			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
704			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
705			*ccb->cgdl.periph_name = '\0';
706			ccb->cgdl.unit_number = 0;
707			error = ENOENT;
708			/*
709			 * It is unfortunate that this is even necessary,
710			 * but there are many, many clueless users out there.
711			 * If this is true, the user is looking for the
712			 * passthrough driver, but doesn't have one in his
713			 * kernel.
714			 */
715			if (base_periph_found == 1) {
716				printf("xptioctl: pass driver is not in the "
717				       "kernel\n");
718				printf("xptioctl: put \"device pass\" in "
719				       "your kernel config file\n");
720			}
721		}
722		xpt_unlock_buses();
723		break;
724		}
725	default:
726		error = ENOTTY;
727		break;
728	}
729
730	return(error);
731}
732
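/*
 * Module event handler: initialize the transport layer on load.  CAM cannot
 * be unloaded, so MOD_UNLOAD is refused with EBUSY.
 */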
733static int
734cam_module_event_handler(module_t mod, int what, void *arg)
735{
736	int error;
737
738	switch (what) {
739	case MOD_LOAD:
740		if ((error = xpt_init(NULL)) != 0)
741			return (error);
742		break;
743	case MOD_UNLOAD:
744		return EBUSY;
745	default:
746		return EOPNOTSUPP;
747	}
748
749	return 0;
750}
751
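/* Find the protocol support entry registered in the cam_xpt_proto_set linker set. */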
752static struct xpt_proto *
753xpt_proto_find(cam_proto proto)
754{
755	struct xpt_proto **pp;
756
757	SET_FOREACH(pp, cam_xpt_proto_set) {
758		if ((*pp)->proto == proto)
759			return *pp;
760	}
761
762	return NULL;
763}
764
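/*
 * Completion handler for rescan requests queued by xpt_rescan().  If the
 * submitter supplied a completion callback it is restored and invoked;
 * otherwise the path and CCB allocated for the scan are freed here.  In
 * either case the boot/configuration reference taken for the scan is dropped
 * via xpt_release_boot().
 */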
765static void
766xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
767{
768
769	if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
770		xpt_free_path(done_ccb->ccb_h.path);
771		xpt_free_ccb(done_ccb);
772	} else {
773		done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
774		(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
775	}
776	xpt_release_boot();
777}
778
779/* thread to handle bus rescans */
780static void
781xpt_scanner_thread(void *dummy)
782{
783	union ccb	*ccb;
784	struct cam_path	 path;
785
786	xpt_lock_buses();
787	for (;;) {
788		if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
789			msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
790			       "-", 0);
791		if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
792			TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
793			xpt_unlock_buses();
794
			/*
			 * The lock can be dropped and the path freed by the
			 * completion callback even before xpt_action() returns,
			 * so take our own copy of the path for reference.
			 */
800			xpt_copy_path(&path, ccb->ccb_h.path);
801			xpt_path_lock(&path);
802			xpt_action(ccb);
803			xpt_path_unlock(&path);
804			xpt_release_path(&path);
805
806			xpt_lock_buses();
807		}
808	}
809}
810
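/*
 * Queue an asynchronous rescan of the bus, target, or LUN named by the CCB's
 * path.  The func_code is chosen from how much of the path is wildcarded;
 * requests without a completion callback are dropped if an identical path is
 * already queued, and the request is handed to the scanner thread to execute.
 */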
811void
812xpt_rescan(union ccb *ccb)
813{
814	struct ccb_hdr *hdr;
815
816	/* Prepare request */
817	if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
818	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
819		ccb->ccb_h.func_code = XPT_SCAN_BUS;
820	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
821	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
822		ccb->ccb_h.func_code = XPT_SCAN_TGT;
823	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
824	    ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
825		ccb->ccb_h.func_code = XPT_SCAN_LUN;
826	else {
827		xpt_print(ccb->ccb_h.path, "illegal scan path\n");
828		xpt_free_path(ccb->ccb_h.path);
829		xpt_free_ccb(ccb);
830		return;
831	}
832	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
833	    ("xpt_rescan: func %#x %s\n", ccb->ccb_h.func_code,
834 		xpt_action_name(ccb->ccb_h.func_code)));
835
836	ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
837	ccb->ccb_h.cbfcnp = xpt_rescan_done;
838	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
839	/* Don't make duplicate entries for the same paths. */
840	xpt_lock_buses();
841	if (ccb->ccb_h.ppriv_ptr1 == NULL) {
842		TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
843			if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
844				wakeup(&xsoftc.ccb_scanq);
845				xpt_unlock_buses();
846				xpt_print(ccb->ccb_h.path, "rescan already queued\n");
847				xpt_free_path(ccb->ccb_h.path);
848				xpt_free_ccb(ccb);
849				return;
850			}
851		}
852	}
853	TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
854	xsoftc.buses_to_config++;
855	wakeup(&xsoftc.ccb_scanq);
856	xpt_unlock_buses();
857}
858
859/* Functions accessed by the peripheral drivers */
860static int
861xpt_init(void *dummy)
862{
863	struct cam_sim *xpt_sim;
864	struct cam_path *path;
865	struct cam_devq *devq;
866	cam_status status;
867	int error, i;
868
869	TAILQ_INIT(&xsoftc.xpt_busses);
870	TAILQ_INIT(&xsoftc.ccb_scanq);
871	STAILQ_INIT(&xsoftc.highpowerq);
872	xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
873
874	mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
875	mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF);
876	xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK,
877	    taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq);
878
879#ifdef CAM_BOOT_DELAY
880	/*
881	 * Override this value at compile time to assist our users
	 * who don't use the loader to boot a kernel.
883	 */
884	xsoftc.boot_delay = CAM_BOOT_DELAY;
885#endif
886	/*
887	 * The xpt layer is, itself, the equivalent of a SIM.
888	 * Allow 16 ccbs in the ccb pool for it.  This should
889	 * give decent parallelism when we probe busses and
890	 * perform other XPT functions.
891	 */
892	devq = cam_simq_alloc(16);
893	xpt_sim = cam_sim_alloc(xptaction,
894				xptpoll,
895				"xpt",
896				/*softc*/NULL,
897				/*unit*/0,
898				/*mtx*/&xsoftc.xpt_lock,
899				/*max_dev_transactions*/0,
900				/*max_tagged_dev_transactions*/0,
901				devq);
902	if (xpt_sim == NULL)
903		return (ENOMEM);
904
905	mtx_lock(&xsoftc.xpt_lock);
906	if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
907		mtx_unlock(&xsoftc.xpt_lock);
908		printf("xpt_init: xpt_bus_register failed with status %#x,"
909		       " failing attach\n", status);
910		return (EINVAL);
911	}
912	mtx_unlock(&xsoftc.xpt_lock);
913
914	/*
915	 * Looking at the XPT from the SIM layer, the XPT is
916	 * the equivalent of a peripheral driver.  Allocate
917	 * a peripheral driver entry for us.
918	 */
919	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
920				      CAM_TARGET_WILDCARD,
921				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
922		printf("xpt_init: xpt_create_path failed with status %#x,"
923		       " failing attach\n", status);
924		return (EINVAL);
925	}
926	xpt_path_lock(path);
927	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
928			 path, NULL, 0, xpt_sim);
929	xpt_path_unlock(path);
930	xpt_free_path(path);
931
932	if (cam_num_doneqs < 1)
933		cam_num_doneqs = 1 + mp_ncpus / 6;
934	else if (cam_num_doneqs > MAXCPU)
935		cam_num_doneqs = MAXCPU;
936	for (i = 0; i < cam_num_doneqs; i++) {
937		mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL,
938		    MTX_DEF);
939		STAILQ_INIT(&cam_doneqs[i].cam_doneq);
940		error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i],
941		    &cam_proc, NULL, 0, 0, "cam", "doneq%d", i);
942		if (error != 0) {
943			cam_num_doneqs = i;
944			break;
945		}
946	}
947	if (cam_num_doneqs < 1) {
948		printf("xpt_init: Cannot init completion queues "
949		       "- failing attach\n");
950		return (ENOMEM);
951	}
952	/*
953	 * Register a callback for when interrupts are enabled.
954	 */
955	xsoftc.xpt_config_hook =
956	    (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
957					      M_CAMXPT, M_NOWAIT | M_ZERO);
958	if (xsoftc.xpt_config_hook == NULL) {
959		printf("xpt_init: Cannot malloc config hook "
960		       "- failing attach\n");
961		return (ENOMEM);
962	}
963	xsoftc.xpt_config_hook->ich_func = xpt_config;
964	if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
965		free (xsoftc.xpt_config_hook, M_CAMXPT);
966		printf("xpt_init: config_intrhook_establish failed "
967		       "- failing attach\n");
968	}
969
970	return (0);
971}
972
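/* Peripheral registration callback for the xpt peripheral allocated in xpt_init(). */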
973static cam_status
974xptregister(struct cam_periph *periph, void *arg)
975{
976	struct cam_sim *xpt_sim;
977
978	if (periph == NULL) {
979		printf("xptregister: periph was NULL!!\n");
980		return(CAM_REQ_CMP_ERR);
981	}
982
983	xpt_sim = (struct cam_sim *)arg;
984	xpt_sim->softc = periph;
985	xpt_periph = periph;
986	periph->softc = NULL;
987
988	return(CAM_REQ_CMP);
989}
990
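/*
 * Link a newly created peripheral onto its device's peripheral list and bump
 * the generation counts so that EDT traversals notice the change.
 */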
991int32_t
992xpt_add_periph(struct cam_periph *periph)
993{
994	struct cam_ed *device;
995	int32_t	 status;
996
997	TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph);
998	device = periph->path->device;
999	status = CAM_REQ_CMP;
1000	if (device != NULL) {
1001		mtx_lock(&device->target->bus->eb_mtx);
1002		device->generation++;
1003		SLIST_INSERT_HEAD(&device->periphs, periph, periph_links);
1004		mtx_unlock(&device->target->bus->eb_mtx);
1005		atomic_add_32(&xsoftc.xpt_generation, 1);
1006	}
1007
1008	return (status);
1009}
1010
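/* Unlink a peripheral from its device's peripheral list and bump the generation counts. */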
1011void
1012xpt_remove_periph(struct cam_periph *periph)
1013{
1014	struct cam_ed *device;
1015
1016	device = periph->path->device;
1017	if (device != NULL) {
1018		mtx_lock(&device->target->bus->eb_mtx);
1019		device->generation++;
1020		SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links);
1021		mtx_unlock(&device->target->bus->eb_mtx);
1022		atomic_add_32(&xsoftc.xpt_generation, 1);
1023	}
1024}
1025
1026
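/*
 * Print the standard attach announcement for a peripheral: its position on
 * the bus, protocol and transport details, serial number, command queueing
 * status, and any caller-supplied string.
 */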
1027void
1028xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1029{
1030	struct	cam_path *path = periph->path;
1031	struct  xpt_proto *proto;
1032
1033	cam_periph_assert(periph, MA_OWNED);
1034	periph->flags |= CAM_PERIPH_ANNOUNCED;
1035
1036	printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
1037	       periph->periph_name, periph->unit_number,
1038	       path->bus->sim->sim_name,
1039	       path->bus->sim->unit_number,
1040	       path->bus->sim->bus_id,
1041	       path->bus->path_id,
1042	       path->target->target_id,
1043	       (uintmax_t)path->device->lun_id);
1044	printf("%s%d: ", periph->periph_name, periph->unit_number);
1045	proto = xpt_proto_find(path->device->protocol);
1046	if (proto)
1047		proto->ops->announce(path->device);
1048	else
1049		printf("%s%d: Unknown protocol device %d\n",
1050		    periph->periph_name, periph->unit_number,
1051		    path->device->protocol);
1052	if (path->device->serial_num_len > 0) {
		/* Don't wrap the screen - print only the first 60 chars */
1054		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
1055		       periph->unit_number, path->device->serial_num);
1056	}
1057	/* Announce transport details. */
1058	path->bus->xport->ops->announce(periph);
1059	/* Announce command queueing. */
1060	if (path->device->inq_flags & SID_CmdQue
1061	 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1062		printf("%s%d: Command Queueing enabled\n",
1063		       periph->periph_name, periph->unit_number);
1064	}
	/* Announce the caller's details, if any were passed in. */
1066	if (announce_string != NULL)
1067		printf("%s%d: %s\n", periph->periph_name,
1068		       periph->unit_number, announce_string);
1069}
1070
1071void
1072xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string)
1073{
1074	if (quirks != 0) {
1075		printf("%s%d: quirks=0x%b\n", periph->periph_name,
1076		    periph->unit_number, quirks, bit_string);
1077	}
1078}
1079
1080void
1081xpt_denounce_periph(struct cam_periph *periph)
1082{
1083	struct	cam_path *path = periph->path;
1084	struct  xpt_proto *proto;
1085
1086	cam_periph_assert(periph, MA_OWNED);
1087	printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
1088	       periph->periph_name, periph->unit_number,
1089	       path->bus->sim->sim_name,
1090	       path->bus->sim->unit_number,
1091	       path->bus->sim->bus_id,
1092	       path->bus->path_id,
1093	       path->target->target_id,
1094	       (uintmax_t)path->device->lun_id);
1095	printf("%s%d: ", periph->periph_name, periph->unit_number);
1096	proto = xpt_proto_find(path->device->protocol);
1097	if (proto)
1098		proto->ops->denounce(path->device);
1099	else
1100		printf("%s%d: Unknown protocol device %d\n",
1101		    periph->periph_name, periph->unit_number,
1102		    path->device->protocol);
1103	if (path->device->serial_num_len > 0)
1104		printf(" s/n %.60s", path->device->serial_num);
1105	printf(" detached\n");
1106}
1107
1108
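/*
 * Fetch a named attribute of a device ("GEOM::ident", "GEOM::physpath",
 * "GEOM::lunid", or "GEOM::lunname") via an XPT_DEV_ADVINFO CCB and format it
 * into the supplied buffer.  Returns 0 on success and a non-zero value
 * (EFAULT, ENOMEM, or -1) on failure.
 */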
1109int
1110xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
1111{
1112	int ret = -1, l, o;
1113	struct ccb_dev_advinfo cdai;
1114	struct scsi_vpd_device_id *did;
1115	struct scsi_vpd_id_descriptor *idd;
1116
1117	xpt_path_assert(path, MA_OWNED);
1118
1119	memset(&cdai, 0, sizeof(cdai));
1120	xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
1121	cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
1122	cdai.flags = CDAI_FLAG_NONE;
1123	cdai.bufsiz = len;
1124	cdai.buf = buf;
1125
1126	if (!strcmp(attr, "GEOM::ident"))
1127		cdai.buftype = CDAI_TYPE_SERIAL_NUM;
1128	else if (!strcmp(attr, "GEOM::physpath"))
1129		cdai.buftype = CDAI_TYPE_PHYS_PATH;
1130	else if (strcmp(attr, "GEOM::lunid") == 0 ||
1131		 strcmp(attr, "GEOM::lunname") == 0) {
1132		cdai.buftype = CDAI_TYPE_SCSI_DEVID;
1133		cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
1134		cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT);
1135		if (cdai.buf == NULL) {
1136			ret = ENOMEM;
1137			goto out;
1138		}
1139	} else
1140		goto out;
1141
1142	xpt_action((union ccb *)&cdai); /* can only be synchronous */
1143	if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
1144		cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
1145	if (cdai.provsiz == 0)
1146		goto out;
1147	switch(cdai.buftype) {
1148	case CDAI_TYPE_SCSI_DEVID:
1149		did = (struct scsi_vpd_device_id *)cdai.buf;
1150		if (strcmp(attr, "GEOM::lunid") == 0) {
1151			idd = scsi_get_devid(did, cdai.provsiz,
1152			    scsi_devid_is_lun_naa);
1153			if (idd == NULL)
1154				idd = scsi_get_devid(did, cdai.provsiz,
1155				    scsi_devid_is_lun_eui64);
1156			if (idd == NULL)
1157				idd = scsi_get_devid(did, cdai.provsiz,
1158				    scsi_devid_is_lun_uuid);
1159			if (idd == NULL)
1160				idd = scsi_get_devid(did, cdai.provsiz,
1161				    scsi_devid_is_lun_md5);
1162		} else
1163			idd = NULL;
1164
1165		if (idd == NULL)
1166			idd = scsi_get_devid(did, cdai.provsiz,
1167			    scsi_devid_is_lun_t10);
1168		if (idd == NULL)
1169			idd = scsi_get_devid(did, cdai.provsiz,
1170			    scsi_devid_is_lun_name);
1171		if (idd == NULL)
1172			break;
1173
1174		ret = 0;
1175		if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) ==
1176		    SVPD_ID_CODESET_ASCII) {
1177			if (idd->length < len) {
1178				for (l = 0; l < idd->length; l++)
1179					buf[l] = idd->identifier[l] ?
1180					    idd->identifier[l] : ' ';
1181				buf[l] = 0;
1182			} else
1183				ret = EFAULT;
1184			break;
1185		}
1186		if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) ==
1187		    SVPD_ID_CODESET_UTF8) {
1188			l = strnlen(idd->identifier, idd->length);
1189			if (l < len) {
1190				bcopy(idd->identifier, buf, l);
1191				buf[l] = 0;
1192			} else
1193				ret = EFAULT;
1194			break;
1195		}
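		/*
		 * A binary UUID designator: print bytes 2..n as the usual
		 * 8-4-4-4-12 hexadecimal string.
		 */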
1196		if ((idd->id_type & SVPD_ID_TYPE_MASK) ==
1197		    SVPD_ID_TYPE_UUID && idd->identifier[0] == 0x10) {
1198			if ((idd->length - 2) * 2 + 4 >= len) {
1199				ret = EFAULT;
1200				break;
1201			}
1202			for (l = 2, o = 0; l < idd->length; l++) {
1203				if (l == 6 || l == 8 || l == 10 || l == 12)
1204				    o += sprintf(buf + o, "-");
1205				o += sprintf(buf + o, "%02x",
1206				    idd->identifier[l]);
1207			}
1208			break;
1209		}
1210		if (idd->length * 2 < len) {
1211			for (l = 0; l < idd->length; l++)
1212				sprintf(buf + l * 2, "%02x",
1213				    idd->identifier[l]);
		} else
			ret = EFAULT;
1216		break;
1217	default:
1218		if (cdai.provsiz < len) {
1219			cdai.buf[cdai.provsiz] = 0;
1220			ret = 0;
1221		} else
1222			ret = EFAULT;
1223		break;
1224	}
1225
1226out:
1227	if ((char *)cdai.buf != buf)
1228		free(cdai.buf, M_CAMXPT);
1229	return ret;
1230}
1231
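/*
 * Match a single bus against any number of match patterns.
 */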
1232static dev_match_ret
1233xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1234	    struct cam_eb *bus)
1235{
1236	dev_match_ret retval;
1237	u_int i;
1238
1239	retval = DM_RET_NONE;
1240
1241	/*
1242	 * If we aren't given something to match against, that's an error.
1243	 */
1244	if (bus == NULL)
1245		return(DM_RET_ERROR);
1246
1247	/*
1248	 * If there are no match entries, then this bus matches no
1249	 * matter what.
1250	 */
1251	if ((patterns == NULL) || (num_patterns == 0))
1252		return(DM_RET_DESCEND | DM_RET_COPY);
1253
1254	for (i = 0; i < num_patterns; i++) {
1255		struct bus_match_pattern *cur_pattern;
1256
1257		/*
1258		 * If the pattern in question isn't for a bus node, we
1259		 * aren't interested.  However, we do indicate to the
1260		 * calling routine that we should continue descending the
1261		 * tree, since the user wants to match against lower-level
1262		 * EDT elements.
1263		 */
1264		if (patterns[i].type != DEV_MATCH_BUS) {
1265			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1266				retval |= DM_RET_DESCEND;
1267			continue;
1268		}
1269
1270		cur_pattern = &patterns[i].pattern.bus_pattern;
1271
		/*
		 * If they want to match any bus node, we give them any
		 * bus node.
		 */
1276		if (cur_pattern->flags == BUS_MATCH_ANY) {
1277			/* set the copy flag */
1278			retval |= DM_RET_COPY;
1279
1280			/*
1281			 * If we've already decided on an action, go ahead
1282			 * and return.
1283			 */
1284			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1285				return(retval);
1286		}
1287
1288		/*
1289		 * Not sure why someone would do this...
1290		 */
1291		if (cur_pattern->flags == BUS_MATCH_NONE)
1292			continue;
1293
1294		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1295		 && (cur_pattern->path_id != bus->path_id))
1296			continue;
1297
1298		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1299		 && (cur_pattern->bus_id != bus->sim->bus_id))
1300			continue;
1301
1302		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1303		 && (cur_pattern->unit_number != bus->sim->unit_number))
1304			continue;
1305
1306		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1307		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1308			     DEV_IDLEN) != 0))
1309			continue;
1310
1311		/*
1312		 * If we get to this point, the user definitely wants
1313		 * information on this bus.  So tell the caller to copy the
1314		 * data out.
1315		 */
1316		retval |= DM_RET_COPY;
1317
1318		/*
1319		 * If the return action has been set to descend, then we
1320		 * know that we've already seen a non-bus matching
1321		 * expression, therefore we need to further descend the tree.
1322		 * This won't change by continuing around the loop, so we
1323		 * go ahead and return.  If we haven't seen a non-bus
1324		 * matching expression, we keep going around the loop until
1325		 * we exhaust the matching expressions.  We'll set the stop
1326		 * flag once we fall out of the loop.
1327		 */
1328		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1329			return(retval);
1330	}
1331
1332	/*
1333	 * If the return action hasn't been set to descend yet, that means
1334	 * we haven't seen anything other than bus matching patterns.  So
1335	 * tell the caller to stop descending the tree -- the user doesn't
1336	 * want to match against lower level tree elements.
1337	 */
1338	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1339		retval |= DM_RET_STOP;
1340
1341	return(retval);
1342}
1343
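/*
 * Match a single device against any number of match patterns.
 */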
1344static dev_match_ret
1345xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1346	       struct cam_ed *device)
1347{
1348	dev_match_ret retval;
1349	u_int i;
1350
1351	retval = DM_RET_NONE;
1352
1353	/*
1354	 * If we aren't given something to match against, that's an error.
1355	 */
1356	if (device == NULL)
1357		return(DM_RET_ERROR);
1358
1359	/*
1360	 * If there are no match entries, then this device matches no
1361	 * matter what.
1362	 */
1363	if ((patterns == NULL) || (num_patterns == 0))
1364		return(DM_RET_DESCEND | DM_RET_COPY);
1365
1366	for (i = 0; i < num_patterns; i++) {
1367		struct device_match_pattern *cur_pattern;
1368		struct scsi_vpd_device_id *device_id_page;
1369
1370		/*
1371		 * If the pattern in question isn't for a device node, we
1372		 * aren't interested.
1373		 */
1374		if (patterns[i].type != DEV_MATCH_DEVICE) {
1375			if ((patterns[i].type == DEV_MATCH_PERIPH)
1376			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1377				retval |= DM_RET_DESCEND;
1378			continue;
1379		}
1380
1381		cur_pattern = &patterns[i].pattern.device_pattern;
1382
1383		/* Error out if mutually exclusive options are specified. */
1384		if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
1385		 == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
1386			return(DM_RET_ERROR);
1387
1388		/*
1389		 * If they want to match any device node, we give them any
1390		 * device node.
1391		 */
1392		if (cur_pattern->flags == DEV_MATCH_ANY)
1393			goto copy_dev_node;
1394
1395		/*
1396		 * Not sure why someone would do this...
1397		 */
1398		if (cur_pattern->flags == DEV_MATCH_NONE)
1399			continue;
1400
1401		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1402		 && (cur_pattern->path_id != device->target->bus->path_id))
1403			continue;
1404
1405		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1406		 && (cur_pattern->target_id != device->target->target_id))
1407			continue;
1408
1409		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1410		 && (cur_pattern->target_lun != device->lun_id))
1411			continue;
1412
1413		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1414		 && (cam_quirkmatch((caddr_t)&device->inq_data,
1415				    (caddr_t)&cur_pattern->data.inq_pat,
1416				    1, sizeof(cur_pattern->data.inq_pat),
1417				    scsi_static_inquiry_match) == NULL))
1418			continue;
1419
1420		device_id_page = (struct scsi_vpd_device_id *)device->device_id;
1421		if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
1422		 && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
1423		  || scsi_devid_match((uint8_t *)device_id_page->desc_list,
1424				      device->device_id_len
1425				    - SVPD_DEVICE_ID_HDR_LEN,
1426				      cur_pattern->data.devid_pat.id,
1427				      cur_pattern->data.devid_pat.id_len) != 0))
1428			continue;
1429
1430copy_dev_node:
1431		/*
1432		 * If we get to this point, the user definitely wants
1433		 * information on this device.  So tell the caller to copy
1434		 * the data out.
1435		 */
1436		retval |= DM_RET_COPY;
1437
1438		/*
1439		 * If the return action has been set to descend, then we
1440		 * know that we've already seen a peripheral matching
1441		 * expression, therefore we need to further descend the tree.
1442		 * This won't change by continuing around the loop, so we
1443		 * go ahead and return.  If we haven't seen a peripheral
1444		 * matching expression, we keep going around the loop until
1445		 * we exhaust the matching expressions.  We'll set the stop
1446		 * flag once we fall out of the loop.
1447		 */
1448		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1449			return(retval);
1450	}
1451
1452	/*
1453	 * If the return action hasn't been set to descend yet, that means
1454	 * we haven't seen any peripheral matching patterns.  So tell the
1455	 * caller to stop descending the tree -- the user doesn't want to
1456	 * match against lower level tree elements.
1457	 */
1458	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1459		retval |= DM_RET_STOP;
1460
1461	return(retval);
1462}
1463
1464/*
1465 * Match a single peripheral against any number of match patterns.
1466 */
1467static dev_match_ret
1468xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1469	       struct cam_periph *periph)
1470{
1471	dev_match_ret retval;
1472	u_int i;
1473
1474	/*
1475	 * If we aren't given something to match against, that's an error.
1476	 */
1477	if (periph == NULL)
1478		return(DM_RET_ERROR);
1479
1480	/*
1481	 * If there are no match entries, then this peripheral matches no
1482	 * matter what.
1483	 */
1484	if ((patterns == NULL) || (num_patterns == 0))
1485		return(DM_RET_STOP | DM_RET_COPY);
1486
1487	/*
1488	 * There aren't any nodes below a peripheral node, so there's no
1489	 * reason to descend the tree any further.
1490	 */
1491	retval = DM_RET_STOP;
1492
1493	for (i = 0; i < num_patterns; i++) {
1494		struct periph_match_pattern *cur_pattern;
1495
1496		/*
1497		 * If the pattern in question isn't for a peripheral, we
1498		 * aren't interested.
1499		 */
1500		if (patterns[i].type != DEV_MATCH_PERIPH)
1501			continue;
1502
1503		cur_pattern = &patterns[i].pattern.periph_pattern;
1504
1505		/*
1506		 * If they want to match on anything, then we will do so.
1507		 */
1508		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
1509			/* set the copy flag */
1510			retval |= DM_RET_COPY;
1511
1512			/*
1513			 * We've already set the return action to stop,
1514			 * since there are no nodes below peripherals in
1515			 * the tree.
1516			 */
1517			return(retval);
1518		}
1519
1520		/*
1521		 * Not sure why someone would do this...
1522		 */
1523		if (cur_pattern->flags == PERIPH_MATCH_NONE)
1524			continue;
1525
1526		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1527		 && (cur_pattern->path_id != periph->path->bus->path_id))
1528			continue;
1529
1530		/*
1531		 * For the target and lun id's, we have to make sure the
1532		 * target and lun pointers aren't NULL.  The xpt peripheral
1533		 * has a wildcard target and device.
1534		 */
1535		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
1536		 && ((periph->path->target == NULL)
1537		 ||(cur_pattern->target_id != periph->path->target->target_id)))
1538			continue;
1539
1540		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
1541		 && ((periph->path->device == NULL)
1542		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
1543			continue;
1544
1545		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
1546		 && (cur_pattern->unit_number != periph->unit_number))
1547			continue;
1548
1549		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
1550		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
1551			     DEV_IDLEN) != 0))
1552			continue;
1553
1554		/*
1555		 * If we get to this point, the user definitely wants
1556		 * information on this peripheral.  So tell the caller to
1557		 * copy the data out.
1558		 */
1559		retval |= DM_RET_COPY;
1560
1561		/*
1562		 * The return action has already been set to stop, since
1563		 * peripherals don't have any nodes below them in the EDT.
1564		 */
1565		return(retval);
1566	}
1567
1568	/*
1569	 * If we get to this point, the peripheral that was passed in
1570	 * doesn't match any of the patterns.
1571	 */
1572	return(retval);
1573}
1574
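/*
 * Per-bus callback for an EDT (XPT_DEV_MATCH) traversal.  Copies out a match
 * result for the bus if requested and decides whether to descend to its
 * targets.
 */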
1575static int
1576xptedtbusfunc(struct cam_eb *bus, void *arg)
1577{
1578	struct ccb_dev_match *cdm;
1579	struct cam_et *target;
1580	dev_match_ret retval;
1581
1582	cdm = (struct ccb_dev_match *)arg;
1583
1584	/*
1585	 * If our position is for something deeper in the tree, that means
1586	 * that we've already seen this node.  So, we keep going down.
1587	 */
1588	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1589	 && (cdm->pos.cookie.bus == bus)
1590	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1591	 && (cdm->pos.cookie.target != NULL))
1592		retval = DM_RET_DESCEND;
1593	else
1594		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
1595
1596	/*
1597	 * If we got an error, bail out of the search.
1598	 */
1599	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1600		cdm->status = CAM_DEV_MATCH_ERROR;
1601		return(0);
1602	}
1603
1604	/*
1605	 * If the copy flag is set, copy this bus out.
1606	 */
1607	if (retval & DM_RET_COPY) {
1608		int spaceleft, j;
1609
1610		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1611			sizeof(struct dev_match_result));
1612
1613		/*
1614		 * If we don't have enough space to put in another
1615		 * match result, save our position and tell the
1616		 * user there are more devices to check.
1617		 */
1618		if (spaceleft < sizeof(struct dev_match_result)) {
1619			bzero(&cdm->pos, sizeof(cdm->pos));
1620			cdm->pos.position_type =
1621				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
1622
1623			cdm->pos.cookie.bus = bus;
1624			cdm->pos.generations[CAM_BUS_GENERATION]=
1625				xsoftc.bus_generation;
1626			cdm->status = CAM_DEV_MATCH_MORE;
1627			return(0);
1628		}
1629		j = cdm->num_matches;
1630		cdm->num_matches++;
1631		cdm->matches[j].type = DEV_MATCH_BUS;
1632		cdm->matches[j].result.bus_result.path_id = bus->path_id;
1633		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
1634		cdm->matches[j].result.bus_result.unit_number =
1635			bus->sim->unit_number;
1636		strlcpy(cdm->matches[j].result.bus_result.dev_name,
1637			bus->sim->sim_name,
1638			sizeof(cdm->matches[j].result.bus_result.dev_name));
1639	}
1640
1641	/*
1642	 * If the user is only interested in busses, there's no
1643	 * reason to descend to the next level in the tree.
1644	 */
1645	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1646		return(1);
1647
1648	/*
1649	 * If there is a target generation recorded, check it to
1650	 * make sure the target list hasn't changed.
1651	 */
1652	mtx_lock(&bus->eb_mtx);
1653	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1654	 && (cdm->pos.cookie.bus == bus)
1655	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1656	 && (cdm->pos.cookie.target != NULL)) {
1657		if ((cdm->pos.generations[CAM_TARGET_GENERATION] !=
1658		    bus->generation)) {
1659			mtx_unlock(&bus->eb_mtx);
1660			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1661			return (0);
1662		}
1663		target = (struct cam_et *)cdm->pos.cookie.target;
1664		target->refcount++;
1665	} else
1666		target = NULL;
1667	mtx_unlock(&bus->eb_mtx);
1668
1669	return (xpttargettraverse(bus, target, xptedttargetfunc, arg));
1670}
1671
1672static int
1673xptedttargetfunc(struct cam_et *target, void *arg)
1674{
1675	struct ccb_dev_match *cdm;
1676	struct cam_eb *bus;
1677	struct cam_ed *device;
1678
1679	cdm = (struct ccb_dev_match *)arg;
1680	bus = target->bus;
1681
1682	/*
1683	 * If there is a device list generation recorded, check it to
1684	 * make sure the device list hasn't changed.
1685	 */
1686	mtx_lock(&bus->eb_mtx);
1687	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1688	 && (cdm->pos.cookie.bus == bus)
1689	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1690	 && (cdm->pos.cookie.target == target)
1691	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1692	 && (cdm->pos.cookie.device != NULL)) {
1693		if (cdm->pos.generations[CAM_DEV_GENERATION] !=
1694		    target->generation) {
1695			mtx_unlock(&bus->eb_mtx);
1696			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1697			return(0);
1698		}
1699		device = (struct cam_ed *)cdm->pos.cookie.device;
1700		device->refcount++;
1701	} else
1702		device = NULL;
1703	mtx_unlock(&bus->eb_mtx);
1704
1705	return (xptdevicetraverse(target, device, xptedtdevicefunc, arg));
1706}
1707
1708static int
1709xptedtdevicefunc(struct cam_ed *device, void *arg)
1710{
1711	struct cam_eb *bus;
1712	struct cam_periph *periph;
1713	struct ccb_dev_match *cdm;
1714	dev_match_ret retval;
1715
1716	cdm = (struct ccb_dev_match *)arg;
1717	bus = device->target->bus;
1718
1719	/*
1720	 * If our position is for something deeper in the tree, that means
1721	 * that we've already seen this node.  So, we keep going down.
1722	 */
1723	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1724	 && (cdm->pos.cookie.device == device)
1725	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1726	 && (cdm->pos.cookie.periph != NULL))
1727		retval = DM_RET_DESCEND;
1728	else
1729		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
1730					device);
1731
1732	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1733		cdm->status = CAM_DEV_MATCH_ERROR;
1734		return(0);
1735	}
1736
1737	/*
1738	 * If the copy flag is set, copy this device out.
1739	 */
1740	if (retval & DM_RET_COPY) {
1741		int spaceleft, j;
1742
1743		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1744			sizeof(struct dev_match_result));
1745
1746		/*
1747		 * If we don't have enough space to put in another
1748		 * match result, save our position and tell the
1749		 * user there are more devices to check.
1750		 */
1751		if (spaceleft < sizeof(struct dev_match_result)) {
1752			bzero(&cdm->pos, sizeof(cdm->pos));
1753			cdm->pos.position_type =
1754				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1755				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
1756
1757			cdm->pos.cookie.bus = device->target->bus;
1758			cdm->pos.generations[CAM_BUS_GENERATION]=
1759				xsoftc.bus_generation;
1760			cdm->pos.cookie.target = device->target;
1761			cdm->pos.generations[CAM_TARGET_GENERATION] =
1762				device->target->bus->generation;
1763			cdm->pos.cookie.device = device;
1764			cdm->pos.generations[CAM_DEV_GENERATION] =
1765				device->target->generation;
1766			cdm->status = CAM_DEV_MATCH_MORE;
1767			return(0);
1768		}
1769		j = cdm->num_matches;
1770		cdm->num_matches++;
1771		cdm->matches[j].type = DEV_MATCH_DEVICE;
1772		cdm->matches[j].result.device_result.path_id =
1773			device->target->bus->path_id;
1774		cdm->matches[j].result.device_result.target_id =
1775			device->target->target_id;
1776		cdm->matches[j].result.device_result.target_lun =
1777			device->lun_id;
1778		cdm->matches[j].result.device_result.protocol =
1779			device->protocol;
1780		bcopy(&device->inq_data,
1781		      &cdm->matches[j].result.device_result.inq_data,
1782		      sizeof(struct scsi_inquiry_data));
1783		bcopy(&device->ident_data,
1784		      &cdm->matches[j].result.device_result.ident_data,
1785		      sizeof(struct ata_params));
1786
1787		/* Let the user know whether this device is unconfigured */
1788		if (device->flags & CAM_DEV_UNCONFIGURED)
1789			cdm->matches[j].result.device_result.flags =
1790				DEV_RESULT_UNCONFIGURED;
1791		else
1792			cdm->matches[j].result.device_result.flags =
1793				DEV_RESULT_NOFLAG;
1794	}
1795
1796	/*
1797	 * If the user isn't interested in peripherals, don't descend
1798	 * the tree any further.
1799	 */
1800	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1801		return(1);
1802
1803	/*
1804	 * If there is a peripheral list generation recorded, make sure
1805	 * it hasn't changed.
1806	 */
1807	xpt_lock_buses();
1808	mtx_lock(&bus->eb_mtx);
1809	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1810	 && (cdm->pos.cookie.bus == bus)
1811	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1812	 && (cdm->pos.cookie.target == device->target)
1813	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1814	 && (cdm->pos.cookie.device == device)
1815	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1816	 && (cdm->pos.cookie.periph != NULL)) {
1817		if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1818		    device->generation) {
1819			mtx_unlock(&bus->eb_mtx);
1820			xpt_unlock_buses();
1821			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1822			return(0);
1823		}
1824		periph = (struct cam_periph *)cdm->pos.cookie.periph;
1825		periph->refcount++;
1826	} else
1827		periph = NULL;
1828	mtx_unlock(&bus->eb_mtx);
1829	xpt_unlock_buses();
1830
1831	return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg));
1832}
1833
1834static int
1835xptedtperiphfunc(struct cam_periph *periph, void *arg)
1836{
1837	struct ccb_dev_match *cdm;
1838	dev_match_ret retval;
1839
1840	cdm = (struct ccb_dev_match *)arg;
1841
1842	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1843
1844	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1845		cdm->status = CAM_DEV_MATCH_ERROR;
1846		return(0);
1847	}
1848
1849	/*
1850	 * If the copy flag is set, copy this peripheral out.
1851	 */
1852	if (retval & DM_RET_COPY) {
1853		int spaceleft, j;
1854		size_t l;
1855
1856		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1857			sizeof(struct dev_match_result));
1858
1859		/*
1860		 * If we don't have enough space to put in another
1861		 * match result, save our position and tell the
1862		 * user there are more devices to check.
1863		 */
1864		if (spaceleft < sizeof(struct dev_match_result)) {
1865			bzero(&cdm->pos, sizeof(cdm->pos));
1866			cdm->pos.position_type =
1867				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1868				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
1869				CAM_DEV_POS_PERIPH;
1870
1871			cdm->pos.cookie.bus = periph->path->bus;
1872			cdm->pos.generations[CAM_BUS_GENERATION]=
1873				xsoftc.bus_generation;
1874			cdm->pos.cookie.target = periph->path->target;
1875			cdm->pos.generations[CAM_TARGET_GENERATION] =
1876				periph->path->bus->generation;
1877			cdm->pos.cookie.device = periph->path->device;
1878			cdm->pos.generations[CAM_DEV_GENERATION] =
1879				periph->path->target->generation;
1880			cdm->pos.cookie.periph = periph;
1881			cdm->pos.generations[CAM_PERIPH_GENERATION] =
1882				periph->path->device->generation;
1883			cdm->status = CAM_DEV_MATCH_MORE;
1884			return(0);
1885		}
1886
1887		j = cdm->num_matches;
1888		cdm->num_matches++;
1889		cdm->matches[j].type = DEV_MATCH_PERIPH;
1890		cdm->matches[j].result.periph_result.path_id =
1891			periph->path->bus->path_id;
1892		cdm->matches[j].result.periph_result.target_id =
1893			periph->path->target->target_id;
1894		cdm->matches[j].result.periph_result.target_lun =
1895			periph->path->device->lun_id;
1896		cdm->matches[j].result.periph_result.unit_number =
1897			periph->unit_number;
1898		l = sizeof(cdm->matches[j].result.periph_result.periph_name);
1899		strlcpy(cdm->matches[j].result.periph_result.periph_name,
1900			periph->periph_name, l);
1901	}
1902
1903	return(1);
1904}
1905
1906static int
1907xptedtmatch(struct ccb_dev_match *cdm)
1908{
1909	struct cam_eb *bus;
1910	int ret;
1911
1912	cdm->num_matches = 0;
1913
1914	/*
1915	 * Check the bus list generation.  If it has changed, the user
1916	 * needs to reset everything and start over.
1917	 */
1918	xpt_lock_buses();
1919	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1920	 && (cdm->pos.cookie.bus != NULL)) {
1921		if (cdm->pos.generations[CAM_BUS_GENERATION] !=
1922		    xsoftc.bus_generation) {
1923			xpt_unlock_buses();
1924			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1925			return(0);
1926		}
1927		bus = (struct cam_eb *)cdm->pos.cookie.bus;
1928		bus->refcount++;
1929	} else
1930		bus = NULL;
1931	xpt_unlock_buses();
1932
1933	ret = xptbustraverse(bus, xptedtbusfunc, cdm);
1934
1935	/*
1936	 * If we get back 0, that means that we had to stop before fully
1937	 * traversing the EDT.  It also means that one of the subroutines
1938	 * has set the status field to the proper value.  If we get back 1,
1939	 * we've fully traversed the EDT and copied out any matching entries.
1940	 */
1941	if (ret == 1)
1942		cdm->status = CAM_DEV_MATCH_LAST;
1943
1944	return(ret);
1945}
1946
1947static int
1948xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
1949{
1950	struct cam_periph *periph;
1951	struct ccb_dev_match *cdm;
1952
1953	cdm = (struct ccb_dev_match *)arg;
1954
1955	xpt_lock_buses();
1956	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
1957	 && (cdm->pos.cookie.pdrv == pdrv)
1958	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1959	 && (cdm->pos.cookie.periph != NULL)) {
1960		if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1961		    (*pdrv)->generation) {
1962			xpt_unlock_buses();
1963			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1964			return(0);
1965		}
1966		periph = (struct cam_periph *)cdm->pos.cookie.periph;
1967		periph->refcount++;
1968	} else
1969		periph = NULL;
1970	xpt_unlock_buses();
1971
1972	return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg));
1973}
1974
1975static int
1976xptplistperiphfunc(struct cam_periph *periph, void *arg)
1977{
1978	struct ccb_dev_match *cdm;
1979	dev_match_ret retval;
1980
1981	cdm = (struct ccb_dev_match *)arg;
1982
1983	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1984
1985	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1986		cdm->status = CAM_DEV_MATCH_ERROR;
1987		return(0);
1988	}
1989
1990	/*
1991	 * If the copy flag is set, copy this peripheral out.
1992	 */
1993	if (retval & DM_RET_COPY) {
1994		int spaceleft, j;
1995		size_t l;
1996
1997		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1998			sizeof(struct dev_match_result));
1999
2000		/*
2001		 * If we don't have enough space to put in another
2002		 * match result, save our position and tell the
2003		 * user there are more devices to check.
2004		 */
2005		if (spaceleft < sizeof(struct dev_match_result)) {
2006			struct periph_driver **pdrv;
2007
2008			pdrv = NULL;
2009			bzero(&cdm->pos, sizeof(cdm->pos));
2010			cdm->pos.position_type =
2011				CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2012				CAM_DEV_POS_PERIPH;
2013
2014			/*
2015			 * This may look a bit nonsensical, but it is
2016			 * actually quite logical.  There are very few
2017			 * peripheral drivers, and bloating every peripheral
2018			 * structure with a pointer back to its parent
2019			 * peripheral driver linker set entry would cost
2020			 * more in the long run than doing this quick lookup.
2021			 */
2022			for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2023				if (strcmp((*pdrv)->driver_name,
2024				    periph->periph_name) == 0)
2025					break;
2026			}
2027
2028			if (*pdrv == NULL) {
2029				cdm->status = CAM_DEV_MATCH_ERROR;
2030				return(0);
2031			}
2032
2033			cdm->pos.cookie.pdrv = pdrv;
2034			/*
2035			 * The periph generation slot does double duty, as
2036			 * does the periph pointer slot.  They are used for
2037			 * both edt and pdrv lookups and positioning.
2038			 */
2039			cdm->pos.cookie.periph = periph;
2040			cdm->pos.generations[CAM_PERIPH_GENERATION] =
2041				(*pdrv)->generation;
2042			cdm->status = CAM_DEV_MATCH_MORE;
2043			return(0);
2044		}
2045
2046		j = cdm->num_matches;
2047		cdm->num_matches++;
2048		cdm->matches[j].type = DEV_MATCH_PERIPH;
2049		cdm->matches[j].result.periph_result.path_id =
2050			periph->path->bus->path_id;
2051
2052		/*
2053		 * The transport layer peripheral doesn't have a target or
2054		 * lun.
2055		 */
2056		if (periph->path->target)
2057			cdm->matches[j].result.periph_result.target_id =
2058				periph->path->target->target_id;
2059		else
2060			cdm->matches[j].result.periph_result.target_id =
2061				CAM_TARGET_WILDCARD;
2062
2063		if (periph->path->device)
2064			cdm->matches[j].result.periph_result.target_lun =
2065				periph->path->device->lun_id;
2066		else
2067			cdm->matches[j].result.periph_result.target_lun =
2068				CAM_LUN_WILDCARD;
2069
2070		cdm->matches[j].result.periph_result.unit_number =
2071			periph->unit_number;
2072		l = sizeof(cdm->matches[j].result.periph_result.periph_name);
2073		strlcpy(cdm->matches[j].result.periph_result.periph_name,
2074			periph->periph_name, l);
2075	}
2076
2077	return(1);
2078}
2079
2080static int
2081xptperiphlistmatch(struct ccb_dev_match *cdm)
2082{
2083	int ret;
2084
2085	cdm->num_matches = 0;
2086
2087	/*
2088	 * At this point the EDT traversal function (xptedtmatch) checks the
2089	 * bus list generation to make sure that no busses have been added or
2090	 * removed since the user last sent an XPT_DEV_MATCH ccb through.
2091	 * The peripheral driver list traversal, however, doesn't have to
2092	 * worry about new peripheral driver types coming or
2093	 * going; they're in a linker set, and therefore can't change
2094	 * without a recompile.
2095	 */
2096
2097	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2098	 && (cdm->pos.cookie.pdrv != NULL))
2099		ret = xptpdrvtraverse(
2100				(struct periph_driver **)cdm->pos.cookie.pdrv,
2101				xptplistpdrvfunc, cdm);
2102	else
2103		ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2104
2105	/*
2106	 * If we get back 0, that means that we had to stop before fully
2107	 * traversing the peripheral driver list.  It also means that one of
2108	 * the subroutines has set the status field to the proper value.  If
2109	 * we get back 1, we've fully traversed the peripheral driver list
2110	 * and copied out any matching entries.
2111	 */
2112	if (ret == 1)
2113		cdm->status = CAM_DEV_MATCH_LAST;
2114
2115	return(ret);
2116}
2117
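/*
 * The traversal helpers below share a reference-counting pattern: take a
 * reference on the current node, drop the list lock, run the callback, then
 * re-take the lock, reference the next node, and release the current one.
 * This lets the callback run without the list lock held while keeping both
 * the current and next nodes from being freed mid-traversal.
 */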
2118static int
2119xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2120{
2121	struct cam_eb *bus, *next_bus;
2122	int retval;
2123
2124	retval = 1;
2125	if (start_bus)
2126		bus = start_bus;
2127	else {
2128		xpt_lock_buses();
2129		bus = TAILQ_FIRST(&xsoftc.xpt_busses);
2130		if (bus == NULL) {
2131			xpt_unlock_buses();
2132			return (retval);
2133		}
2134		bus->refcount++;
2135		xpt_unlock_buses();
2136	}
2137	for (; bus != NULL; bus = next_bus) {
2138		retval = tr_func(bus, arg);
2139		if (retval == 0) {
2140			xpt_release_bus(bus);
2141			break;
2142		}
2143		xpt_lock_buses();
2144		next_bus = TAILQ_NEXT(bus, links);
2145		if (next_bus)
2146			next_bus->refcount++;
2147		xpt_unlock_buses();
2148		xpt_release_bus(bus);
2149	}
2150	return(retval);
2151}
2152
2153static int
2154xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2155		  xpt_targetfunc_t *tr_func, void *arg)
2156{
2157	struct cam_et *target, *next_target;
2158	int retval;
2159
2160	retval = 1;
2161	if (start_target)
2162		target = start_target;
2163	else {
2164		mtx_lock(&bus->eb_mtx);
2165		target = TAILQ_FIRST(&bus->et_entries);
2166		if (target == NULL) {
2167			mtx_unlock(&bus->eb_mtx);
2168			return (retval);
2169		}
2170		target->refcount++;
2171		mtx_unlock(&bus->eb_mtx);
2172	}
2173	for (; target != NULL; target = next_target) {
2174		retval = tr_func(target, arg);
2175		if (retval == 0) {
2176			xpt_release_target(target);
2177			break;
2178		}
2179		mtx_lock(&bus->eb_mtx);
2180		next_target = TAILQ_NEXT(target, links);
2181		if (next_target)
2182			next_target->refcount++;
2183		mtx_unlock(&bus->eb_mtx);
2184		xpt_release_target(target);
2185	}
2186	return(retval);
2187}
2188
2189static int
2190xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2191		  xpt_devicefunc_t *tr_func, void *arg)
2192{
2193	struct cam_eb *bus;
2194	struct cam_ed *device, *next_device;
2195	int retval;
2196
2197	retval = 1;
2198	bus = target->bus;
2199	if (start_device)
2200		device = start_device;
2201	else {
2202		mtx_lock(&bus->eb_mtx);
2203		device = TAILQ_FIRST(&target->ed_entries);
2204		if (device == NULL) {
2205			mtx_unlock(&bus->eb_mtx);
2206			return (retval);
2207		}
2208		device->refcount++;
2209		mtx_unlock(&bus->eb_mtx);
2210	}
2211	for (; device != NULL; device = next_device) {
2212		mtx_lock(&device->device_mtx);
2213		retval = tr_func(device, arg);
2214		mtx_unlock(&device->device_mtx);
2215		if (retval == 0) {
2216			xpt_release_device(device);
2217			break;
2218		}
2219		mtx_lock(&bus->eb_mtx);
2220		next_device = TAILQ_NEXT(device, links);
2221		if (next_device)
2222			next_device->refcount++;
2223		mtx_unlock(&bus->eb_mtx);
2224		xpt_release_device(device);
2225	}
2226	return(retval);
2227}
2228
2229static int
2230xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2231		  xpt_periphfunc_t *tr_func, void *arg)
2232{
2233	struct cam_eb *bus;
2234	struct cam_periph *periph, *next_periph;
2235	int retval;
2236
2237	retval = 1;
2238
2239	bus = device->target->bus;
2240	if (start_periph)
2241		periph = start_periph;
2242	else {
2243		xpt_lock_buses();
2244		mtx_lock(&bus->eb_mtx);
2245		periph = SLIST_FIRST(&device->periphs);
2246		while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2247			periph = SLIST_NEXT(periph, periph_links);
2248		if (periph == NULL) {
2249			mtx_unlock(&bus->eb_mtx);
2250			xpt_unlock_buses();
2251			return (retval);
2252		}
2253		periph->refcount++;
2254		mtx_unlock(&bus->eb_mtx);
2255		xpt_unlock_buses();
2256	}
2257	for (; periph != NULL; periph = next_periph) {
2258		retval = tr_func(periph, arg);
2259		if (retval == 0) {
2260			cam_periph_release_locked(periph);
2261			break;
2262		}
2263		xpt_lock_buses();
2264		mtx_lock(&bus->eb_mtx);
2265		next_periph = SLIST_NEXT(periph, periph_links);
2266		while (next_periph != NULL &&
2267		    (next_periph->flags & CAM_PERIPH_FREE) != 0)
2268			next_periph = SLIST_NEXT(next_periph, periph_links);
2269		if (next_periph)
2270			next_periph->refcount++;
2271		mtx_unlock(&bus->eb_mtx);
2272		xpt_unlock_buses();
2273		cam_periph_release_locked(periph);
2274	}
2275	return(retval);
2276}
2277
2278static int
2279xptpdrvtraverse(struct periph_driver **start_pdrv,
2280		xpt_pdrvfunc_t *tr_func, void *arg)
2281{
2282	struct periph_driver **pdrv;
2283	int retval;
2284
2285	retval = 1;
2286
2287	/*
2288	 * We don't traverse the peripheral driver list like we do the
2289	 * other lists, because it is a linker set, and therefore cannot be
2290	 * changed during runtime.  If the peripheral driver list is ever
2291	 * re-done to be something other than a linker set (i.e. it can
2292	 * change while the system is running), the list traversal should
2293	 * be modified to work like the other traversal functions.
2294	 */
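	/*
	 * (For orientation: the periph_drivers array is populated from the
	 * linker set that each peripheral driver joins via its
	 * PERIPHDRIVER_DECLARE() invocation.)
	 */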
2295	for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2296	     *pdrv != NULL; pdrv++) {
2297		retval = tr_func(pdrv, arg);
2298
2299		if (retval == 0)
2300			return(retval);
2301	}
2302
2303	return(retval);
2304}
2305
2306static int
2307xptpdperiphtraverse(struct periph_driver **pdrv,
2308		    struct cam_periph *start_periph,
2309		    xpt_periphfunc_t *tr_func, void *arg)
2310{
2311	struct cam_periph *periph, *next_periph;
2312	int retval;
2313
2314	retval = 1;
2315
2316	if (start_periph)
2317		periph = start_periph;
2318	else {
2319		xpt_lock_buses();
2320		periph = TAILQ_FIRST(&(*pdrv)->units);
2321		while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2322			periph = TAILQ_NEXT(periph, unit_links);
2323		if (periph == NULL) {
2324			xpt_unlock_buses();
2325			return (retval);
2326		}
2327		periph->refcount++;
2328		xpt_unlock_buses();
2329	}
2330	for (; periph != NULL; periph = next_periph) {
2331		cam_periph_lock(periph);
2332		retval = tr_func(periph, arg);
2333		cam_periph_unlock(periph);
2334		if (retval == 0) {
2335			cam_periph_release(periph);
2336			break;
2337		}
2338		xpt_lock_buses();
2339		next_periph = TAILQ_NEXT(periph, unit_links);
2340		while (next_periph != NULL &&
2341		    (next_periph->flags & CAM_PERIPH_FREE) != 0)
2342			next_periph = TAILQ_NEXT(next_periph, unit_links);
2343		if (next_periph)
2344			next_periph->refcount++;
2345		xpt_unlock_buses();
2346		cam_periph_release(periph);
2347	}
2348	return(retval);
2349}
2350
2351static int
2352xptdefbusfunc(struct cam_eb *bus, void *arg)
2353{
2354	struct xpt_traverse_config *tr_config;
2355
2356	tr_config = (struct xpt_traverse_config *)arg;
2357
2358	if (tr_config->depth == XPT_DEPTH_BUS) {
2359		xpt_busfunc_t *tr_func;
2360
2361		tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2362
2363		return(tr_func(bus, tr_config->tr_arg));
2364	} else
2365		return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2366}
2367
2368static int
2369xptdeftargetfunc(struct cam_et *target, void *arg)
2370{
2371	struct xpt_traverse_config *tr_config;
2372
2373	tr_config = (struct xpt_traverse_config *)arg;
2374
2375	if (tr_config->depth == XPT_DEPTH_TARGET) {
2376		xpt_targetfunc_t *tr_func;
2377
2378		tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2379
2380		return(tr_func(target, tr_config->tr_arg));
2381	} else
2382		return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2383}
2384
2385static int
2386xptdefdevicefunc(struct cam_ed *device, void *arg)
2387{
2388	struct xpt_traverse_config *tr_config;
2389
2390	tr_config = (struct xpt_traverse_config *)arg;
2391
2392	if (tr_config->depth == XPT_DEPTH_DEVICE) {
2393		xpt_devicefunc_t *tr_func;
2394
2395		tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2396
2397		return(tr_func(device, tr_config->tr_arg));
2398	} else
2399		return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2400}
2401
2402static int
2403xptdefperiphfunc(struct cam_periph *periph, void *arg)
2404{
2405	struct xpt_traverse_config *tr_config;
2406	xpt_periphfunc_t *tr_func;
2407
2408	tr_config = (struct xpt_traverse_config *)arg;
2409
2410	tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2411
2412	/*
2413	 * Unlike the other default functions, we don't check for depth
2414	 * here.  The peripheral driver level is the last level in the EDT,
2415	 * so if we're here, we should execute the function in question.
2416	 */
2417	return(tr_func(periph, tr_config->tr_arg));
2418}
2419
2420/*
2421 * Execute the given function for every bus in the EDT.
2422 */
2423static int
2424xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2425{
2426	struct xpt_traverse_config tr_config;
2427
2428	tr_config.depth = XPT_DEPTH_BUS;
2429	tr_config.tr_func = tr_func;
2430	tr_config.tr_arg = arg;
2431
2432	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2433}
2434
2435/*
2436 * Execute the given function for every device in the EDT.
2437 */
2438static int
2439xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2440{
2441	struct xpt_traverse_config tr_config;
2442
2443	tr_config.depth = XPT_DEPTH_DEVICE;
2444	tr_config.tr_func = tr_func;
2445	tr_config.tr_arg = arg;
2446
2447	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2448}
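/*
 * Illustrative usage sketch (hypothetical callback, not part of this file):
 * an xpt_devicefunc_t passed to xpt_for_all_devices() is invoked once per
 * device; returning 1 continues the traversal, returning 0 stops it early.
 *
 *	static int
 *	example_count_dev(struct cam_ed *device, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (1);
 *	}
 *
 *	int count = 0;
 *	xpt_for_all_devices(example_count_dev, &count);
 */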
2449
2450static int
2451xptsetasyncfunc(struct cam_ed *device, void *arg)
2452{
2453	struct cam_path path;
2454	struct ccb_getdev cgd;
2455	struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2456
2457	/*
2458	 * Don't report unconfigured devices (Wildcard devs,
2459	 * devices only for target mode, device instances
2460	 * that have been invalidated but are waiting for
2461	 * their last reference count to be released).
2462	 */
2463	if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2464		return (1);
2465
2466	xpt_compile_path(&path,
2467			 NULL,
2468			 device->target->bus->path_id,
2469			 device->target->target_id,
2470			 device->lun_id);
2471	xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL);
2472	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2473	xpt_action((union ccb *)&cgd);
2474	csa->callback(csa->callback_arg,
2475			    AC_FOUND_DEVICE,
2476			    &path, &cgd);
2477	xpt_release_path(&path);
2478
2479	return(1);
2480}
2481
2482static int
2483xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2484{
2485	struct cam_path path;
2486	struct ccb_pathinq cpi;
2487	struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2488
2489	xpt_compile_path(&path, /*periph*/NULL,
2490			 bus->path_id,
2491			 CAM_TARGET_WILDCARD,
2492			 CAM_LUN_WILDCARD);
2493	xpt_path_lock(&path);
2494	xpt_path_inq(&cpi, &path);
2495	csa->callback(csa->callback_arg,
2496			    AC_PATH_REGISTERED,
2497			    &path, &cpi);
2498	xpt_path_unlock(&path);
2499	xpt_release_path(&path);
2500
2501	return(1);
2502}
2503
2504void
2505xpt_action(union ccb *start_ccb)
2506{
2507
2508	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE,
2509	    ("xpt_action: func %#x %s\n", start_ccb->ccb_h.func_code,
2510		xpt_action_name(start_ccb->ccb_h.func_code)));
2511
2512	start_ccb->ccb_h.status = CAM_REQ_INPROG;
2513	(*(start_ccb->ccb_h.path->bus->xport->ops->action))(start_ccb);
2514}
2515
2516void
2517xpt_action_default(union ccb *start_ccb)
2518{
2519	struct cam_path *path;
2520	struct cam_sim *sim;
2521	struct mtx *mtx;
2522
2523	path = start_ccb->ccb_h.path;
2524	CAM_DEBUG(path, CAM_DEBUG_TRACE,
2525	    ("xpt_action_default: func %#x %s\n", start_ccb->ccb_h.func_code,
2526		xpt_action_name(start_ccb->ccb_h.func_code)));
2527
2528	switch (start_ccb->ccb_h.func_code) {
2529	case XPT_SCSI_IO:
2530	{
2531		struct cam_ed *device;
2532
2533		/*
2534		 * For the sake of compatibility with SCSI-1
2535		 * devices that may not understand the identify
2536		 * message, we include lun information in the
2537		 * second byte of all commands.  SCSI-1 specifies
2538		 * that luns are a 3 bit value and reserves only 3
2539		 * bits for lun information in the CDB.  Later
2540		 * revisions of the SCSI spec allow for more than 8
2541		 * luns, but have deprecated lun information in the
2542		 * CDB.  So, if the lun won't fit, we must omit it.
2543		 *
2544		 * Also be aware that during initial probing for devices,
2545		 * the inquiry information is unknown but initialized to 0.
2546		 * This means that this code will be exercised while probing
2547		 * devices with an ANSI revision greater than 2.
2548		 */
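		/*
		 * Worked example (illustrative): target_lun 2 on such a
		 * device ORs (2 << 5) == 0x40 into CDB byte 1 below.
		 */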
2549		device = path->device;
2550		if (device->protocol_version <= SCSI_REV_2
2551		 && start_ccb->ccb_h.target_lun < 8
2552		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2553
2554			start_ccb->csio.cdb_io.cdb_bytes[1] |=
2555			    start_ccb->ccb_h.target_lun << 5;
2556		}
2557		start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2558	}
2559	/* FALLTHROUGH */
2560	case XPT_TARGET_IO:
2561	case XPT_CONT_TARGET_IO:
2562		start_ccb->csio.sense_resid = 0;
2563		start_ccb->csio.resid = 0;
2564		/* FALLTHROUGH */
2565	case XPT_ATA_IO:
2566		if (start_ccb->ccb_h.func_code == XPT_ATA_IO)
2567			start_ccb->ataio.resid = 0;
2568		/* FALLTHROUGH */
2569	case XPT_NVME_IO:
2570		/* FALLTHROUGH */
2571	case XPT_NVME_ADMIN:
2572		/* FALLTHROUGH */
2573	case XPT_RESET_DEV:
2574	case XPT_ENG_EXEC:
2575	case XPT_SMP_IO:
2576	{
2577		struct cam_devq *devq;
2578
2579		devq = path->bus->sim->devq;
2580		mtx_lock(&devq->send_mtx);
2581		cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2582		if (xpt_schedule_devq(devq, path->device) != 0)
2583			xpt_run_devq(devq);
2584		mtx_unlock(&devq->send_mtx);
2585		break;
2586	}
2587	case XPT_CALC_GEOMETRY:
2588		/* Filter out garbage */
2589		if (start_ccb->ccg.block_size == 0
2590		 || start_ccb->ccg.volume_size == 0) {
2591			start_ccb->ccg.cylinders = 0;
2592			start_ccb->ccg.heads = 0;
2593			start_ccb->ccg.secs_per_track = 0;
2594			start_ccb->ccb_h.status = CAM_REQ_CMP;
2595			break;
2596		}
2597#if defined(PC98) || defined(__sparc64__)
2598		/*
2599		 * In a PC-98 system, geometry translation depends on
2600		 * the "real" device geometry obtained from mode page 4.
2601		 * SCSI geometry translation is performed in the
2602		 * initialization routine of the SCSI BIOS and the result
2603		 * stored in host memory.  If the translation is available
2604		 * in host memory, use it.  If not, rely on the default
2605		 * translation the device driver performs.
2606		 * For sparc64, we may need to adjust the geometry of large
2607		 * disks in order to fit the limitations of the 16-bit
2608		 * fields of the VTOC8 disk label.
2609		 */
2610		if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
2611			start_ccb->ccb_h.status = CAM_REQ_CMP;
2612			break;
2613		}
2614#endif
2615		goto call_sim;
2616	case XPT_ABORT:
2617	{
2618		union ccb* abort_ccb;
2619
2620		abort_ccb = start_ccb->cab.abort_ccb;
2621		if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2622			struct cam_ed *device;
2623			struct cam_devq *devq;
2624
2625			device = abort_ccb->ccb_h.path->device;
2626			devq = device->sim->devq;
2627
2628			mtx_lock(&devq->send_mtx);
2629			if (abort_ccb->ccb_h.pinfo.index > 0) {
2630				cam_ccbq_remove_ccb(&device->ccbq, abort_ccb);
2631				abort_ccb->ccb_h.status =
2632				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2633				xpt_freeze_devq_device(device, 1);
2634				mtx_unlock(&devq->send_mtx);
2635				xpt_done(abort_ccb);
2636				start_ccb->ccb_h.status = CAM_REQ_CMP;
2637				break;
2638			}
2639			mtx_unlock(&devq->send_mtx);
2640
2641			if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2642			 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2643				/*
2644				 * We've caught this ccb en route to
2645				 * the SIM.  Flag it for abort and the
2646				 * SIM will do so just before starting
2647				 * real work on the CCB.
2648				 */
2649				abort_ccb->ccb_h.status =
2650				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2651				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2652				start_ccb->ccb_h.status = CAM_REQ_CMP;
2653				break;
2654			}
2655		}
2656		if (XPT_FC_IS_QUEUED(abort_ccb)
2657		 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
2658			/*
2659			 * It's already completed but waiting
2660			 * for our SWI to get to it.
2661			 */
2662			start_ccb->ccb_h.status = CAM_UA_ABORT;
2663			break;
2664		}
2665		/*
2666		 * If we weren't able to take care of the abort request
2667		 * in the XPT, pass the request down to the SIM for processing.
2668		 */
2669	}
2670	/* FALLTHROUGH */
2671	case XPT_ACCEPT_TARGET_IO:
2672	case XPT_EN_LUN:
2673	case XPT_IMMED_NOTIFY:
2674	case XPT_NOTIFY_ACK:
2675	case XPT_RESET_BUS:
2676	case XPT_IMMEDIATE_NOTIFY:
2677	case XPT_NOTIFY_ACKNOWLEDGE:
2678	case XPT_GET_SIM_KNOB_OLD:
2679	case XPT_GET_SIM_KNOB:
2680	case XPT_SET_SIM_KNOB:
2681	case XPT_GET_TRAN_SETTINGS:
2682	case XPT_SET_TRAN_SETTINGS:
2683	case XPT_PATH_INQ:
2684call_sim:
2685		sim = path->bus->sim;
2686		mtx = sim->mtx;
2687		if (mtx && !mtx_owned(mtx))
2688			mtx_lock(mtx);
2689		else
2690			mtx = NULL;
2691		CAM_DEBUG(path, CAM_DEBUG_TRACE,
2692		    ("sim->sim_action: func=%#x\n", start_ccb->ccb_h.func_code));
2693		(*(sim->sim_action))(sim, start_ccb);
2694		CAM_DEBUG(path, CAM_DEBUG_TRACE,
2695		    ("sim->sim_action: status=%#x\n", start_ccb->ccb_h.status));
2696		if (mtx)
2697			mtx_unlock(mtx);
2698		break;
2699	case XPT_PATH_STATS:
2700		start_ccb->cpis.last_reset = path->bus->last_reset;
2701		start_ccb->ccb_h.status = CAM_REQ_CMP;
2702		break;
2703	case XPT_GDEV_TYPE:
2704	{
2705		struct cam_ed *dev;
2706
2707		dev = path->device;
2708		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2709			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2710		} else {
2711			struct ccb_getdev *cgd;
2712
2713			cgd = &start_ccb->cgd;
2714			cgd->protocol = dev->protocol;
2715			cgd->inq_data = dev->inq_data;
2716			cgd->ident_data = dev->ident_data;
2717			cgd->inq_flags = dev->inq_flags;
2718			cgd->ccb_h.status = CAM_REQ_CMP;
2719			cgd->serial_num_len = dev->serial_num_len;
2720			if ((dev->serial_num_len > 0)
2721			 && (dev->serial_num != NULL))
2722				bcopy(dev->serial_num, cgd->serial_num,
2723				      dev->serial_num_len);
2724		}
2725		break;
2726	}
2727	case XPT_GDEV_STATS:
2728	{
2729		struct ccb_getdevstats *cgds = &start_ccb->cgds;
2730		struct cam_ed *dev = path->device;
2731		struct cam_eb *bus = path->bus;
2732		struct cam_et *tar = path->target;
2733		struct cam_devq *devq = bus->sim->devq;
2734
2735		mtx_lock(&devq->send_mtx);
2736		cgds->dev_openings = dev->ccbq.dev_openings;
2737		cgds->dev_active = dev->ccbq.dev_active;
2738		cgds->allocated = dev->ccbq.allocated;
2739		cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq);
2740		cgds->held = cgds->allocated - cgds->dev_active - cgds->queued;
2741		cgds->last_reset = tar->last_reset;
2742		cgds->maxtags = dev->maxtags;
2743		cgds->mintags = dev->mintags;
2744		if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
2745			cgds->last_reset = bus->last_reset;
2746		mtx_unlock(&devq->send_mtx);
2747		cgds->ccb_h.status = CAM_REQ_CMP;
2748		break;
2749	}
2750	case XPT_GDEVLIST:
2751	{
2752		struct cam_periph	*nperiph;
2753		struct periph_list	*periph_head;
2754		struct ccb_getdevlist	*cgdl;
2755		u_int			i;
2756		struct cam_ed		*device;
2757		int			found;
2758
2759
2760		found = 0;
2761
2762		/*
2763		 * Don't want anyone mucking with our data.
2764		 */
2765		device = path->device;
2766		periph_head = &device->periphs;
2767		cgdl = &start_ccb->cgdl;
2768
2769		/*
2770		 * Check and see if the list has changed since the user
2771		 * last requested a list member.  If so, tell them that the
2772		 * list has changed, and therefore they need to start over
2773		 * from the beginning.
2774		 */
2775		if ((cgdl->index != 0) &&
2776		    (cgdl->generation != device->generation)) {
2777			cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
2778			break;
2779		}
2780
2781		/*
2782		 * Traverse the list of peripherals and attempt to find
2783		 * the requested peripheral.
2784		 */
2785		for (nperiph = SLIST_FIRST(periph_head), i = 0;
2786		     (nperiph != NULL) && (i <= cgdl->index);
2787		     nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
2788			if (i == cgdl->index) {
2789				strlcpy(cgdl->periph_name,
2790					nperiph->periph_name,
2791					sizeof(cgdl->periph_name));
2792				cgdl->unit_number = nperiph->unit_number;
2793				found = 1;
2794			}
2795		}
2796		if (found == 0) {
2797			cgdl->status = CAM_GDEVLIST_ERROR;
2798			break;
2799		}
2800
2801		if (nperiph == NULL)
2802			cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
2803		else
2804			cgdl->status = CAM_GDEVLIST_MORE_DEVS;
2805
2806		cgdl->index++;
2807		cgdl->generation = device->generation;
2808
2809		cgdl->ccb_h.status = CAM_REQ_CMP;
2810		break;
2811	}
2812	case XPT_DEV_MATCH:
2813	{
2814		dev_pos_type position_type;
2815		struct ccb_dev_match *cdm;
2816
2817		cdm = &start_ccb->cdm;
2818
2819		/*
2820		 * There are two ways of getting at information in the EDT.
2821		 * The first way is via the primary EDT tree.  It starts
2822		 * with a list of busses, then a list of targets on a bus,
2823		 * then devices/luns on a target, and then peripherals on a
2824		 * device/lun.  The "other" way is by the peripheral driver
2825		 * lists.  The peripheral driver lists are organized by
2826		 * peripheral driver (obviously), so it makes sense to
2827		 * use the peripheral driver list if the user is looking
2828		 * for something like "da1", or all "da" devices.  If the
2829		 * user is looking for something on a particular bus/target
2830		 * or lun, it's generally better to go through the EDT tree.
2831		 */
2832
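		/*
		 * For example (illustrative), a lone DEV_MATCH_PERIPH pattern
		 * for all "da" peripherals is best served by the peripheral
		 * driver lists, while a DEV_MATCH_BUS or DEV_MATCH_DEVICE
		 * pattern (or no patterns at all) selects the EDT, which is
		 * what the position_type logic below decides.
		 */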
2833		if (cdm->pos.position_type != CAM_DEV_POS_NONE)
2834			position_type = cdm->pos.position_type;
2835		else {
2836			u_int i;
2837
2838			position_type = CAM_DEV_POS_NONE;
2839
2840			for (i = 0; i < cdm->num_patterns; i++) {
2841				if ((cdm->patterns[i].type == DEV_MATCH_BUS)
2842				 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
2843					position_type = CAM_DEV_POS_EDT;
2844					break;
2845				}
2846			}
2847
2848			if (cdm->num_patterns == 0)
2849				position_type = CAM_DEV_POS_EDT;
2850			else if (position_type == CAM_DEV_POS_NONE)
2851				position_type = CAM_DEV_POS_PDRV;
2852		}
2853
2854		switch(position_type & CAM_DEV_POS_TYPEMASK) {
2855		case CAM_DEV_POS_EDT:
2856			xptedtmatch(cdm);
2857			break;
2858		case CAM_DEV_POS_PDRV:
2859			xptperiphlistmatch(cdm);
2860			break;
2861		default:
2862			cdm->status = CAM_DEV_MATCH_ERROR;
2863			break;
2864		}
2865
2866		if (cdm->status == CAM_DEV_MATCH_ERROR)
2867			start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2868		else
2869			start_ccb->ccb_h.status = CAM_REQ_CMP;
2870
2871		break;
2872	}
2873	case XPT_SASYNC_CB:
2874	{
2875		struct ccb_setasync *csa;
2876		struct async_node *cur_entry;
2877		struct async_list *async_head;
2878		u_int32_t added;
2879
2880		csa = &start_ccb->csa;
2881		added = csa->event_enable;
2882		async_head = &path->device->asyncs;
2883
2884		/*
2885		 * If there is already an entry for us, simply
2886		 * update it.
2887		 */
2888		cur_entry = SLIST_FIRST(async_head);
2889		while (cur_entry != NULL) {
2890			if ((cur_entry->callback_arg == csa->callback_arg)
2891			 && (cur_entry->callback == csa->callback))
2892				break;
2893			cur_entry = SLIST_NEXT(cur_entry, links);
2894		}
2895
2896		if (cur_entry != NULL) {
2897		 	/*
2898			 * If the request has no flags set,
2899			 * remove the entry.
2900			 */
2901			added &= ~cur_entry->event_enable;
2902			if (csa->event_enable == 0) {
2903				SLIST_REMOVE(async_head, cur_entry,
2904					     async_node, links);
2905				xpt_release_device(path->device);
2906				free(cur_entry, M_CAMXPT);
2907			} else {
2908				cur_entry->event_enable = csa->event_enable;
2909			}
2910			csa->event_enable = added;
2911		} else {
2912			cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
2913					   M_NOWAIT);
2914			if (cur_entry == NULL) {
2915				csa->ccb_h.status = CAM_RESRC_UNAVAIL;
2916				break;
2917			}
2918			cur_entry->event_enable = csa->event_enable;
2919			cur_entry->event_lock = (path->bus->sim->mtx &&
2920			    mtx_owned(path->bus->sim->mtx)) ? 1 : 0;
2921			cur_entry->callback_arg = csa->callback_arg;
2922			cur_entry->callback = csa->callback;
2923			SLIST_INSERT_HEAD(async_head, cur_entry, links);
2924			xpt_acquire_device(path->device);
2925		}
2926		start_ccb->ccb_h.status = CAM_REQ_CMP;
2927		break;
2928	}
2929	case XPT_REL_SIMQ:
2930	{
2931		struct ccb_relsim *crs;
2932		struct cam_ed *dev;
2933
2934		crs = &start_ccb->crs;
2935		dev = path->device;
2936		if (dev == NULL) {
2937
2938			crs->ccb_h.status = CAM_DEV_NOT_THERE;
2939			break;
2940		}
2941
2942		if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
2943
2944			/* Don't ever go below one opening */
2945			if (crs->openings > 0) {
2946				xpt_dev_ccbq_resize(path, crs->openings);
2947				if (bootverbose) {
2948					xpt_print(path,
2949					    "number of openings is now %d\n",
2950					    crs->openings);
2951				}
2952			}
2953		}
2954
2955		mtx_lock(&dev->sim->devq->send_mtx);
2956		if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
2957
2958			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
2959
2960				/*
2961				 * Just extend the old timeout and decrement
2962				 * the freeze count so that a single timeout
2963				 * is sufficient for releasing the queue.
2964				 */
2965				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2966				callout_stop(&dev->callout);
2967			} else {
2968
2969				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2970			}
2971
2972			callout_reset_sbt(&dev->callout,
2973			    SBT_1MS * crs->release_timeout, 0,
2974			    xpt_release_devq_timeout, dev, 0);
2975
2976			dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
2977
2978		}
2979
2980		if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
2981
2982			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
2983				/*
2984				 * Decrement the freeze count so that a single
2985				 * completion is still sufficient to unfreeze
2986				 * the queue.
2987				 */
2988				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2989			} else {
2990
2991				dev->flags |= CAM_DEV_REL_ON_COMPLETE;
2992				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2993			}
2994		}
2995
2996		if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
2997
2998			if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
2999			 || (dev->ccbq.dev_active == 0)) {
3000
3001				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3002			} else {
3003
3004				dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3005				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3006			}
3007		}
3008		mtx_unlock(&dev->sim->devq->send_mtx);
3009
3010		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0)
3011			xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
3012		start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt;
3013		start_ccb->ccb_h.status = CAM_REQ_CMP;
3014		break;
3015	}
3016	case XPT_DEBUG: {
3017		struct cam_path *oldpath;
3018
3019		/* Check that all request bits are supported. */
3020		if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) {
3021			start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3022			break;
3023		}
3024
3025		cam_dflags = CAM_DEBUG_NONE;
3026		if (cam_dpath != NULL) {
3027			oldpath = cam_dpath;
3028			cam_dpath = NULL;
3029			xpt_free_path(oldpath);
3030		}
3031		if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) {
3032			if (xpt_create_path(&cam_dpath, NULL,
3033					    start_ccb->ccb_h.path_id,
3034					    start_ccb->ccb_h.target_id,
3035					    start_ccb->ccb_h.target_lun) !=
3036					    CAM_REQ_CMP) {
3037				start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3038			} else {
3039				cam_dflags = start_ccb->cdbg.flags;
3040				start_ccb->ccb_h.status = CAM_REQ_CMP;
3041				xpt_print(cam_dpath, "debugging flags now %x\n",
3042				    cam_dflags);
3043			}
3044		} else
3045			start_ccb->ccb_h.status = CAM_REQ_CMP;
3046		break;
3047	}
3048	case XPT_NOOP:
3049		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3050			xpt_freeze_devq(path, 1);
3051		start_ccb->ccb_h.status = CAM_REQ_CMP;
3052		break;
3053	case XPT_REPROBE_LUN:
3054		xpt_async(AC_INQ_CHANGED, path, NULL);
3055		start_ccb->ccb_h.status = CAM_REQ_CMP;
3056		xpt_done(start_ccb);
3057		break;
3058	default:
3059	case XPT_SDEV_TYPE:
3060	case XPT_TERM_IO:
3061	case XPT_ENG_INQ:
3062		/* XXX Implement */
3063		xpt_print_path(start_ccb->ccb_h.path);
3064		printf("%s: CCB type %#x %s not supported\n", __func__,
3065		    start_ccb->ccb_h.func_code,
3066		    xpt_action_name(start_ccb->ccb_h.func_code));
3067		start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3068		if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) {
3069			xpt_done(start_ccb);
3070		}
3071		break;
3072	}
3073	CAM_DEBUG(path, CAM_DEBUG_TRACE,
3074	    ("xpt_action_default: func= %#x %s status %#x\n",
3075		start_ccb->ccb_h.func_code,
3076 		xpt_action_name(start_ccb->ccb_h.func_code),
3077		start_ccb->ccb_h.status));
3078}
3079
3080void
3081xpt_polled_action(union ccb *start_ccb)
3082{
3083	u_int32_t timeout;
3084	struct	  cam_sim *sim;
3085	struct	  cam_devq *devq;
3086	struct	  cam_ed *dev;
3087	struct mtx *mtx;
3088
3089	timeout = start_ccb->ccb_h.timeout * 10;
3090	sim = start_ccb->ccb_h.path->bus->sim;
3091	devq = sim->devq;
3092	mtx = sim->mtx;
3093	dev = start_ccb->ccb_h.path->device;
3094
3095	mtx_unlock(&dev->device_mtx);
3096
3097	/*
3098	 * Steal an opening so that no other queued requests
3099	 * can get it before us while we simulate interrupts.
3100	 */
3101	mtx_lock(&devq->send_mtx);
3102	dev->ccbq.dev_openings--;
3103	while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) &&
3104	    (--timeout > 0)) {
3105		mtx_unlock(&devq->send_mtx);
3106		DELAY(100);
3107		if (mtx)
3108			mtx_lock(mtx);
3109		(*(sim->sim_poll))(sim);
3110		if (mtx)
3111			mtx_unlock(mtx);
3112		camisr_runqueue();
3113		mtx_lock(&devq->send_mtx);
3114	}
3115	dev->ccbq.dev_openings++;
3116	mtx_unlock(&devq->send_mtx);
3117
3118	if (timeout != 0) {
3119		xpt_action(start_ccb);
3120		while(--timeout > 0) {
3121			if (mtx)
3122				mtx_lock(mtx);
3123			(*(sim->sim_poll))(sim);
3124			if (mtx)
3125				mtx_unlock(mtx);
3126			camisr_runqueue();
3127			if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
3128			    != CAM_REQ_INPROG)
3129				break;
3130			DELAY(100);
3131		}
3132		if (timeout == 0) {
3133			/*
3134			 * XXX Is it worth adding a sim_timeout entry
3135			 * point so we can attempt recovery?  If
3136			 * this is only used for dumps, I don't think
3137			 * it is.
3138			 */
3139			start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3140		}
3141	} else {
3142		start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3143	}
3144
3145	mtx_lock(&dev->device_mtx);
3146}
3147
3148/*
3149 * Schedule a peripheral driver to receive a ccb when its
3150 * target device has space for more transactions.
3151 */
3152void
3153xpt_schedule(struct cam_periph *periph, u_int32_t new_priority)
3154{
3155
3156	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3157	cam_periph_assert(periph, MA_OWNED);
3158	if (new_priority < periph->scheduled_priority) {
3159		periph->scheduled_priority = new_priority;
3160		xpt_run_allocq(periph, 0);
3161	}
3162}
3163
3164
3165/*
3166 * Schedule a device to run on a given queue.
3167 * If the device was inserted as a new entry on the queue,
3168 * return 1 meaning the device queue should be run. If we
3169 * were already queued, implying someone else has already
3170 * started the queue, return 0 so the caller doesn't attempt
3171 * to run the queue.
3172 */
3173static int
3174xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3175		 u_int32_t new_priority)
3176{
3177	int retval;
3178	u_int32_t old_priority;
3179
3180	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3181
3182	old_priority = pinfo->priority;
3183
3184	/*
3185	 * Are we already queued?
3186	 */
3187	if (pinfo->index != CAM_UNQUEUED_INDEX) {
3188		/* Simply reorder based on new priority */
3189		if (new_priority < old_priority) {
3190			camq_change_priority(queue, pinfo->index,
3191					     new_priority);
3192			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3193					("changed priority to %d\n",
3194					 new_priority));
3195			retval = 1;
3196		} else
3197			retval = 0;
3198	} else {
3199		/* New entry on the queue */
3200		if (new_priority < old_priority)
3201			pinfo->priority = new_priority;
3202
3203		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3204				("Inserting onto queue\n"));
3205		pinfo->generation = ++queue->generation;
3206		camq_insert(queue, pinfo);
3207		retval = 1;
3208	}
3209	return (retval);
3210}
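/*
 * For example (illustrative): a device already queued at priority 5 that is
 * rescheduled at priority 2 is simply reordered and 1 is returned; the same
 * request at priority 7 changes nothing and 0 is returned, since the queue
 * is already due to be run at the better (numerically lower) priority.
 */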
3211
3212static void
3213xpt_run_allocq_task(void *context, int pending)
3214{
3215	struct cam_periph *periph = context;
3216
3217	cam_periph_lock(periph);
3218	periph->flags &= ~CAM_PERIPH_RUN_TASK;
3219	xpt_run_allocq(periph, 1);
3220	cam_periph_unlock(periph);
3221	cam_periph_release(periph);
3222}
3223
3224static void
3225xpt_run_allocq(struct cam_periph *periph, int sleep)
3226{
3227	struct cam_ed	*device;
3228	union ccb	*ccb;
3229	uint32_t	 prio;
3230
3231	cam_periph_assert(periph, MA_OWNED);
3232	if (periph->periph_allocating)
3233		return;
3234	periph->periph_allocating = 1;
3235	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph));
3236	device = periph->path->device;
3237	ccb = NULL;
3238restart:
3239	while ((prio = min(periph->scheduled_priority,
3240	    periph->immediate_priority)) != CAM_PRIORITY_NONE &&
3241	    (periph->periph_allocated - (ccb != NULL ? 1 : 0) <
3242	     device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) {
3243
3244		if (ccb == NULL &&
3245		    (ccb = xpt_get_ccb_nowait(periph)) == NULL) {
3246			if (sleep) {
3247				ccb = xpt_get_ccb(periph);
3248				goto restart;
3249			}
3250			if (periph->flags & CAM_PERIPH_RUN_TASK)
3251				break;
3252			cam_periph_doacquire(periph);
3253			periph->flags |= CAM_PERIPH_RUN_TASK;
3254			taskqueue_enqueue(xsoftc.xpt_taskq,
3255			    &periph->periph_run_task);
3256			break;
3257		}
3258		xpt_setup_ccb(&ccb->ccb_h, periph->path, prio);
3259		if (prio == periph->immediate_priority) {
3260			periph->immediate_priority = CAM_PRIORITY_NONE;
3261			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3262					("waking cam_periph_getccb()\n"));
3263			SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h,
3264					  periph_links.sle);
3265			wakeup(&periph->ccb_list);
3266		} else {
3267			periph->scheduled_priority = CAM_PRIORITY_NONE;
3268			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3269					("calling periph_start()\n"));
3270			periph->periph_start(periph, ccb);
3271		}
3272		ccb = NULL;
3273	}
3274	if (ccb != NULL)
3275		xpt_release_ccb(ccb);
3276	periph->periph_allocating = 0;
3277}
3278
3279static void
3280xpt_run_devq(struct cam_devq *devq)
3281{
3282	struct mtx *mtx;
3283
3284	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n"));
3285
3286	devq->send_queue.qfrozen_cnt++;
3287	while ((devq->send_queue.entries > 0)
3288	    && (devq->send_openings > 0)
3289	    && (devq->send_queue.qfrozen_cnt <= 1)) {
3290		struct	cam_ed *device;
3291		union ccb *work_ccb;
3292		struct	cam_sim *sim;
3293		struct xpt_proto *proto;
3294
3295		device = (struct cam_ed *)camq_remove(&devq->send_queue,
3296							   CAMQ_HEAD);
3297		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3298				("running device %p\n", device));
3299
3300		work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3301		if (work_ccb == NULL) {
3302			printf("device on run queue with no ccbs???\n");
3303			continue;
3304		}
3305
3306		if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3307
3308			mtx_lock(&xsoftc.xpt_highpower_lock);
3309		 	if (xsoftc.num_highpower <= 0) {
3310				/*
3311				 * We got a high power command, but we
3312				 * don't have any available slots.  Freeze
3313				 * the device queue until we have a slot
3314				 * available.
3315				 */
3316				xpt_freeze_devq_device(device, 1);
3317				STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device,
3318						   highpowerq_entry);
3319
3320				mtx_unlock(&xsoftc.xpt_highpower_lock);
3321				continue;
3322			} else {
3323				/*
3324				 * Consume a high power slot while
3325				 * this ccb runs.
3326				 */
3327				xsoftc.num_highpower--;
3328			}
3329			mtx_unlock(&xsoftc.xpt_highpower_lock);
3330		}
3331		cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3332		cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3333		devq->send_openings--;
3334		devq->send_active++;
3335		xpt_schedule_devq(devq, device);
3336		mtx_unlock(&devq->send_mtx);
3337
3338		if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
3339			/*
3340			 * The client wants to freeze the queue
3341			 * after this CCB is sent.
3342			 */
3343			xpt_freeze_devq(work_ccb->ccb_h.path, 1);
3344		}
3345
3346		/* In Target mode, the peripheral driver knows best... */
3347		if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3348			if ((device->inq_flags & SID_CmdQue) != 0
3349			 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3350				work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3351			else
3352				/*
3353				 * Clear this in case of a retried CCB that
3354				 * failed due to a rejected tag.
3355				 */
3356				work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3357		}
3358
3359		KASSERT(device == work_ccb->ccb_h.path->device,
3360		    ("device (%p) / path->device (%p) mismatch",
3361			device, work_ccb->ccb_h.path->device));
3362		proto = xpt_proto_find(device->protocol);
3363		if (proto && proto->ops->debug_out)
3364			proto->ops->debug_out(work_ccb);
3365
3366		/*
3367		 * Device queues can be shared among multiple SIM instances
3368		 * that reside on different busses.  Use the SIM from the
3369		 * queued device, rather than the one from the calling bus.
3370		 */
3371		sim = device->sim;
3372		mtx = sim->mtx;
3373		if (mtx && !mtx_owned(mtx))
3374			mtx_lock(mtx);
3375		else
3376			mtx = NULL;
3377		work_ccb->ccb_h.qos.sim_data = sbinuptime(); /* XXX uintptr_t too small on 32-bit platforms */
3378		(*(sim->sim_action))(sim, work_ccb);
3379		if (mtx)
3380			mtx_unlock(mtx);
3381		mtx_lock(&devq->send_mtx);
3382	}
3383	devq->send_queue.qfrozen_cnt--;
3384}
3385
3386/*
3387 * This function merges fields from the slave ccb into the master ccb, while
3388 * keeping important fields in the master ccb constant.
3389 */
3390void
3391xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3392{
3393
3394	/*
3395	 * Pull fields that are valid for peripheral drivers to set
3396	 * into the master CCB along with the CCB "payload".
3397	 */
3398	master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3399	master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3400	master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3401	master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
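	/*
	 * Everything else in the master header (path, pinfo, status, ...)
	 * is deliberately left alone; only the payload that follows the
	 * ccb_hdr is copied wholesale below.
	 */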
3402	bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3403	      sizeof(union ccb) - sizeof(struct ccb_hdr));
3404}
3405
3406void
3407xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path,
3408		    u_int32_t priority, u_int32_t flags)
3409{
3410
3411	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3412	ccb_h->pinfo.priority = priority;
3413	ccb_h->path = path;
3414	ccb_h->path_id = path->bus->path_id;
3415	if (path->target)
3416		ccb_h->target_id = path->target->target_id;
3417	else
3418		ccb_h->target_id = CAM_TARGET_WILDCARD;
3419	if (path->device) {
3420		ccb_h->target_lun = path->device->lun_id;
3421		ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3422	} else {
3423		ccb_h->target_lun = CAM_LUN_WILDCARD;
3424	}
3425	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3426	ccb_h->flags = flags;
3427	ccb_h->xflags = 0;
3428}
3429
3430void
3431xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3432{
3433	xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0);
3434}
3435
3436/* Path manipulation functions */
3437cam_status
3438xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3439		path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3440{
3441	struct	   cam_path *path;
3442	cam_status status;
3443
3444	path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3445
3446	if (path == NULL) {
3447		status = CAM_RESRC_UNAVAIL;
3448		return(status);
3449	}
3450	status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3451	if (status != CAM_REQ_CMP) {
3452		free(path, M_CAMPATH);
3453		path = NULL;
3454	}
3455	*new_path_ptr = path;
3456	return (status);
3457}
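/*
 * Illustrative usage sketch: callers typically pair xpt_create_path() with
 * xpt_free_path() once they are done with the path, e.g.:
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, NULL, path_id, target_id, lun_id) !=
 *	    CAM_REQ_CMP)
 *		return (ENOMEM);
 *	(use the path)
 *	xpt_free_path(path);
 */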
3458
3459cam_status
3460xpt_create_path_unlocked(struct cam_path **new_path_ptr,
3461			 struct cam_periph *periph, path_id_t path_id,
3462			 target_id_t target_id, lun_id_t lun_id)
3463{
3464
3465	return (xpt_create_path(new_path_ptr, periph, path_id, target_id,
3466	    lun_id));
3467}
3468
3469cam_status
3470xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3471		 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3472{
3473	struct	     cam_eb *bus;
3474	struct	     cam_et *target;
3475	struct	     cam_ed *device;
3476	cam_status   status;
3477
3478	status = CAM_REQ_CMP;	/* Completed without error */
3479	target = NULL;		/* Wildcarded */
3480	device = NULL;		/* Wildcarded */
3481
3482	/*
3483	 * We will potentially modify the EDT, so take the bus list and
3484	 * per-bus locks to keep anyone else from creating cam paths under us.
3485	 */
3486	bus = xpt_find_bus(path_id);
3487	if (bus == NULL) {
3488		status = CAM_PATH_INVALID;
3489	} else {
3490		xpt_lock_buses();
3491		mtx_lock(&bus->eb_mtx);
3492		target = xpt_find_target(bus, target_id);
3493		if (target == NULL) {
3494			/* Create one */
3495			struct cam_et *new_target;
3496
3497			new_target = xpt_alloc_target(bus, target_id);
3498			if (new_target == NULL) {
3499				status = CAM_RESRC_UNAVAIL;
3500			} else {
3501				target = new_target;
3502			}
3503		}
3504		xpt_unlock_buses();
3505		if (target != NULL) {
3506			device = xpt_find_device(target, lun_id);
3507			if (device == NULL) {
3508				/* Create one */
3509				struct cam_ed *new_device;
3510
3511				new_device =
3512				    (*(bus->xport->ops->alloc_device))(bus,
3513								       target,
3514								       lun_id);
3515				if (new_device == NULL) {
3516					status = CAM_RESRC_UNAVAIL;
3517				} else {
3518					device = new_device;
3519				}
3520			}
3521		}
3522		mtx_unlock(&bus->eb_mtx);
3523	}
3524
3525	/*
3526	 * Only touch the user's data if we are successful.
3527	 */
3528	if (status == CAM_REQ_CMP) {
3529		new_path->periph = perph;
3530		new_path->bus = bus;
3531		new_path->target = target;
3532		new_path->device = device;
3533		CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3534	} else {
3535		if (device != NULL)
3536			xpt_release_device(device);
3537		if (target != NULL)
3538			xpt_release_target(target);
3539		if (bus != NULL)
3540			xpt_release_bus(bus);
3541	}
3542	return (status);
3543}
3544
3545cam_status
3546xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path)
3547{
3548	struct	   cam_path *new_path;
3549
3550	new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3551	if (new_path == NULL)
3552		return(CAM_RESRC_UNAVAIL);
3553	xpt_copy_path(new_path, path);
3554	*new_path_ptr = new_path;
3555	return (CAM_REQ_CMP);
3556}
3557
3558void
3559xpt_copy_path(struct cam_path *new_path, struct cam_path *path)
3560{
3561
3562	*new_path = *path;
3563	if (path->bus != NULL)
3564		xpt_acquire_bus(path->bus);
3565	if (path->target != NULL)
3566		xpt_acquire_target(path->target);
3567	if (path->device != NULL)
3568		xpt_acquire_device(path->device);
3569}
3570
3571void
3572xpt_release_path(struct cam_path *path)
3573{
3574	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3575	if (path->device != NULL) {
3576		xpt_release_device(path->device);
3577		path->device = NULL;
3578	}
3579	if (path->target != NULL) {
3580		xpt_release_target(path->target);
3581		path->target = NULL;
3582	}
3583	if (path->bus != NULL) {
3584		xpt_release_bus(path->bus);
3585		path->bus = NULL;
3586	}
3587}
3588
3589void
3590xpt_free_path(struct cam_path *path)
3591{
3592
3593	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3594	xpt_release_path(path);
3595	free(path, M_CAMPATH);
3596}
3597
3598void
3599xpt_path_counts(struct cam_path *path, uint32_t *bus_ref,
3600    uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref)
3601{
3602
3603	xpt_lock_buses();
3604	if (bus_ref) {
3605		if (path->bus)
3606			*bus_ref = path->bus->refcount;
3607		else
3608			*bus_ref = 0;
3609	}
3610	if (periph_ref) {
3611		if (path->periph)
3612			*periph_ref = path->periph->refcount;
3613		else
3614			*periph_ref = 0;
3615	}
3616	xpt_unlock_buses();
3617	if (target_ref) {
3618		if (path->target)
3619			*target_ref = path->target->refcount;
3620		else
3621			*target_ref = 0;
3622	}
3623	if (device_ref) {
3624		if (path->device)
3625			*device_ref = path->device->refcount;
3626		else
3627			*device_ref = 0;
3628	}
3629}
3630
3631/*
3632 * Return -1 for failure, 0 for exact match, 1 for match with wildcards
3633 * in path1, 2 for match with wildcards in path2.
3634 */
3635int
3636xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3637{
3638	int retval = 0;
3639
3640	if (path1->bus != path2->bus) {
3641		if (path1->bus->path_id == CAM_BUS_WILDCARD)
3642			retval = 1;
3643		else if (path2->bus->path_id == CAM_BUS_WILDCARD)
3644			retval = 2;
3645		else
3646			return (-1);
3647	}
3648	if (path1->target != path2->target) {
3649		if (path1->target->target_id == CAM_TARGET_WILDCARD) {
3650			if (retval == 0)
3651				retval = 1;
3652		} else if (path2->target->target_id == CAM_TARGET_WILDCARD)
3653			retval = 2;
3654		else
3655			return (-1);
3656	}
3657	if (path1->device != path2->device) {
3658		if (path1->device->lun_id == CAM_LUN_WILDCARD) {
3659			if (retval == 0)
3660				retval = 1;
3661		} else if (path2->device->lun_id == CAM_LUN_WILDCARD)
3662			retval = 2;
3663		else
3664			return (-1);
3665	}
3666	return (retval);
3667}
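/*
 * For example (illustrative): comparing a wildcard path (CAM_TARGET_WILDCARD,
 * CAM_LUN_WILDCARD) on a bus against a fully specified path on the same bus
 * returns 1; with the arguments swapped it returns 2; two concrete paths on
 * different busses return -1.
 */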
3668
3669int
3670xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev)
3671{
3672	int retval = 0;
3673
3674	if (path->bus != dev->target->bus) {
3675		if (path->bus->path_id == CAM_BUS_WILDCARD)
3676			retval = 1;
3677		else if (dev->target->bus->path_id == CAM_BUS_WILDCARD)
3678			retval = 2;
3679		else
3680			return (-1);
3681	}
3682	if (path->target != dev->target) {
3683		if (path->target->target_id == CAM_TARGET_WILDCARD) {
3684			if (retval == 0)
3685				retval = 1;
3686		} else if (dev->target->target_id == CAM_TARGET_WILDCARD)
3687			retval = 2;
3688		else
3689			return (-1);
3690	}
3691	if (path->device != dev) {
3692		if (path->device->lun_id == CAM_LUN_WILDCARD) {
3693			if (retval == 0)
3694				retval = 1;
3695		} else if (dev->lun_id == CAM_LUN_WILDCARD)
3696			retval = 2;
3697		else
3698			return (-1);
3699	}
3700	return (retval);
3701}
3702
3703void
3704xpt_print_path(struct cam_path *path)
3705{
3706
3707	if (path == NULL)
3708		printf("(nopath): ");
3709	else {
3710		if (path->periph != NULL)
3711			printf("(%s%d:", path->periph->periph_name,
3712			       path->periph->unit_number);
3713		else
3714			printf("(noperiph:");
3715
3716		if (path->bus != NULL)
3717			printf("%s%d:%d:", path->bus->sim->sim_name,
3718			       path->bus->sim->unit_number,
3719			       path->bus->sim->bus_id);
3720		else
3721			printf("nobus:");
3722
3723		if (path->target != NULL)
3724			printf("%d:", path->target->target_id);
3725		else
3726			printf("X:");
3727
3728		if (path->device != NULL)
3729			printf("%jx): ", (uintmax_t)path->device->lun_id);
3730		else
3731			printf("X): ");
3732	}
3733}
3734
3735void
3736xpt_print_device(struct cam_ed *device)
3737{
3738
3739	if (device == NULL)
3740		printf("(nopath): ");
3741	else {
3742		printf("(noperiph:%s%d:%d:%d:%jx): ", device->sim->sim_name,
3743		       device->sim->unit_number,
3744		       device->sim->bus_id,
3745		       device->target->target_id,
3746		       (uintmax_t)device->lun_id);
3747	}
3748}
3749
3750void
3751xpt_print(struct cam_path *path, const char *fmt, ...)
3752{
3753	va_list ap;
3754	xpt_print_path(path);
3755	va_start(ap, fmt);
3756	vprintf(fmt, ap);
3757	va_end(ap);
3758}
3759
3760int
3761xpt_path_string(struct cam_path *path, char *str, size_t str_len)
3762{
3763	struct sbuf sb;
3764
3765	sbuf_new(&sb, str, str_len, 0);
3766
3767	if (path == NULL)
3768		sbuf_printf(&sb, "(nopath): ");
3769	else {
3770		if (path->periph != NULL)
3771			sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
3772				    path->periph->unit_number);
3773		else
3774			sbuf_printf(&sb, "(noperiph:");
3775
3776		if (path->bus != NULL)
3777			sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
3778				    path->bus->sim->unit_number,
3779				    path->bus->sim->bus_id);
3780		else
3781			sbuf_printf(&sb, "nobus:");
3782
3783		if (path->target != NULL)
3784			sbuf_printf(&sb, "%d:", path->target->target_id);
3785		else
3786			sbuf_printf(&sb, "X:");
3787
3788		if (path->device != NULL)
3789			sbuf_printf(&sb, "%jx): ",
3790			    (uintmax_t)path->device->lun_id);
3791		else
3792			sbuf_printf(&sb, "X): ");
3793	}
3794	sbuf_finish(&sb);
3795
3796	return(sbuf_len(&sb));
3797}
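
/*
 * Example (illustrative sketch only): format a path into a caller-supplied
 * buffer for logging.  "buf" is a hypothetical local; the resulting string
 * uses the same "(periph:bus:target:lun): " layout as xpt_print_path().
 *
 *	char buf[64];
 *
 *	xpt_path_string(periph->path, buf, sizeof(buf));
 *	printf("%sdevice attached\n", buf);
 */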
3798
3799path_id_t
3800xpt_path_path_id(struct cam_path *path)
3801{
3802	return(path->bus->path_id);
3803}
3804
3805target_id_t
3806xpt_path_target_id(struct cam_path *path)
3807{
3808	if (path->target != NULL)
3809		return (path->target->target_id);
3810	else
3811		return (CAM_TARGET_WILDCARD);
3812}
3813
3814lun_id_t
3815xpt_path_lun_id(struct cam_path *path)
3816{
3817	if (path->device != NULL)
3818		return (path->device->lun_id);
3819	else
3820		return (CAM_LUN_WILDCARD);
3821}
3822
3823struct cam_sim *
3824xpt_path_sim(struct cam_path *path)
3825{
3826
3827	return (path->bus->sim);
3828}
3829
3830struct cam_periph*
3831xpt_path_periph(struct cam_path *path)
3832{
3833
3834	return (path->periph);
3835}
3836
3837/*
3838 * Release a CAM control block for the caller.  Remit the cost of the structure
3839 * to the device referenced by the path.  If this device had no 'credits'
3840 * and peripheral drivers have registered async callbacks for this notification,
3841 * call them now.
3842 */
3843void
3844xpt_release_ccb(union ccb *free_ccb)
3845{
3846	struct	 cam_ed *device;
3847	struct	 cam_periph *periph;
3848
3849	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
3850	xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED);
3851	device = free_ccb->ccb_h.path->device;
3852	periph = free_ccb->ccb_h.path->periph;
3853
3854	xpt_free_ccb(free_ccb);
3855	periph->periph_allocated--;
3856	cam_ccbq_release_opening(&device->ccbq);
3857	xpt_run_allocq(periph, 0);
3858}
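
/*
 * Illustrative caller (an assumed typical peripheral-driver sequence, not
 * code from this file): the CCB is acquired and later returned while the
 * path (periph) lock is held, which satisfies the xpt_path_assert() above.
 *
 *	cam_periph_lock(periph);
 *	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 *	... fill in and run the CCB ...
 *	xpt_release_ccb(ccb);
 *	cam_periph_unlock(periph);
 */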
3859
3860/* Functions accessed by SIM drivers */
3861
3862static struct xpt_xport_ops xport_default_ops = {
3863	.alloc_device = xpt_alloc_device_default,
3864	.action = xpt_action_default,
3865	.async = xpt_dev_async_default,
3866};
3867static struct xpt_xport xport_default = {
3868	.xport = XPORT_UNKNOWN,
3869	.name = "unknown",
3870	.ops = &xport_default_ops,
3871};
3872
3873CAM_XPT_XPORT(xport_default);
3874
3875/*
3876 * A sim structure, listing the SIM entry points and instance
3877 * identification info, is passed to xpt_bus_register to hook the SIM
3878 * into the CAM framework.  xpt_bus_register creates a cam_eb entry
3879 * for this new bus, places it in the list of buses, and assigns
3880 * it a path_id.  The path_id may be influenced by "hard wiring"
3881 * information specified by the user.  Once interrupt services are
3882 * available, the bus will be probed.
3883 */
3884int32_t
3885xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus)
3886{
3887	struct cam_eb *new_bus;
3888	struct cam_eb *old_bus;
3889	struct ccb_pathinq cpi;
3890	struct cam_path *path;
3891	cam_status status;
3892
3893	sim->bus_id = bus;
3894	new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
3895					  M_CAMXPT, M_NOWAIT|M_ZERO);
3896	if (new_bus == NULL) {
3897		/* Couldn't satisfy request */
3898		return (CAM_RESRC_UNAVAIL);
3899	}
3900
3901	mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF);
3902	TAILQ_INIT(&new_bus->et_entries);
3903	cam_sim_hold(sim);
3904	new_bus->sim = sim;
3905	timevalclear(&new_bus->last_reset);
3906	new_bus->flags = 0;
3907	new_bus->refcount = 1;	/* Held until a bus_deregister event */
3908	new_bus->generation = 0;
3909
3910	xpt_lock_buses();
3911	sim->path_id = new_bus->path_id =
3912	    xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
3913	old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
3914	while (old_bus != NULL
3915	    && old_bus->path_id < new_bus->path_id)
3916		old_bus = TAILQ_NEXT(old_bus, links);
3917	if (old_bus != NULL)
3918		TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
3919	else
3920		TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
3921	xsoftc.bus_generation++;
3922	xpt_unlock_buses();
3923
3924	/*
3925	 * Set a default transport so that a PATH_INQ can be issued to
3926	 * the SIM.  This will then allow for probing and attaching of
3927	 * a more appropriate transport.
3928	 */
3929	new_bus->xport = &xport_default;
3930
3931	status = xpt_create_path(&path, /*periph*/NULL, sim->path_id,
3932				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3933	if (status != CAM_REQ_CMP) {
3934		xpt_release_bus(new_bus);
3935		return (CAM_RESRC_UNAVAIL);
3936	}
3937
3938	xpt_path_inq(&cpi, path);
3939
3940	if (cpi.ccb_h.status == CAM_REQ_CMP) {
3941		struct xpt_xport **xpt;
3942
3943		SET_FOREACH(xpt, cam_xpt_xport_set) {
3944			if ((*xpt)->xport == cpi.transport) {
3945				new_bus->xport = *xpt;
3946				break;
3947			}
3948		}
3949		if (new_bus->xport == NULL) {
3950			xpt_print_path(path);
3951			printf("No transport found for %d\n", cpi.transport);
3952			xpt_release_bus(new_bus);
3953			free(path, M_CAMXPT);
3954			return (CAM_RESRC_UNAVAIL);
3955		}
3956	}
3957
3958	/* Notify interested parties */
3959	if (sim->path_id != CAM_XPT_PATH_ID) {
3960
3961		xpt_async(AC_PATH_REGISTERED, path, &cpi);
3962		if ((cpi.hba_misc & PIM_NOSCAN) == 0) {
3963			union	ccb *scan_ccb;
3964
3965			/* Initiate bus rescan. */
3966			scan_ccb = xpt_alloc_ccb_nowait();
3967			if (scan_ccb != NULL) {
3968				scan_ccb->ccb_h.path = path;
3969				scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
3970				scan_ccb->crcn.flags = 0;
3971				xpt_rescan(scan_ccb);
3972			} else {
3973				xpt_print(path,
3974					  "Can't allocate CCB to scan bus\n");
3975				xpt_free_path(path);
3976			}
3977		} else
3978			xpt_free_path(path);
3979	} else
3980		xpt_free_path(path);
3981	return (CAM_SUCCESS);
3982}
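
/*
 * Example registration sequence (a hedged sketch of what a SIM driver's
 * attach routine typically looks like; "mydrv_action", "mydrv_poll", "softc"
 * and "MAX_TRANSACTIONS" are hypothetical names, and error unwinding is
 * omitted).  Note that xpt_bus_register() is called with the SIM lock held.
 *
 *	devq = cam_simq_alloc(MAX_TRANSACTIONS);
 *	sim = cam_sim_alloc(mydrv_action, mydrv_poll, "mydrv", softc,
 *	    device_get_unit(dev), &softc->mtx, 1, MAX_TRANSACTIONS, devq);
 *	mtx_lock(&softc->mtx);
 *	if (xpt_bus_register(sim, dev, 0) != CAM_SUCCESS)
 *		... fail ...
 *	mtx_unlock(&softc->mtx);
 */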
3983
3984int32_t
3985xpt_bus_deregister(path_id_t pathid)
3986{
3987	struct cam_path bus_path;
3988	cam_status status;
3989
3990	status = xpt_compile_path(&bus_path, NULL, pathid,
3991				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3992	if (status != CAM_REQ_CMP)
3993		return (status);
3994
3995	xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
3996	xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
3997
3998	/* Release the reference count held while registered. */
3999	xpt_release_bus(bus_path.bus);
4000	xpt_release_path(&bus_path);
4001
4002	return (CAM_REQ_CMP);
4003}
4004
4005static path_id_t
4006xptnextfreepathid(void)
4007{
4008	struct cam_eb *bus;
4009	path_id_t pathid;
4010	const char *strval;
4011
4012	mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
4013	pathid = 0;
4014	bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4015retry:
4016	/* Find an unoccupied pathid */
4017	while (bus != NULL && bus->path_id <= pathid) {
4018		if (bus->path_id == pathid)
4019			pathid++;
4020		bus = TAILQ_NEXT(bus, links);
4021	}
4022
4023	/*
4024	 * Ensure that this pathid is not reserved for
4025	 * a bus that may be registered in the future.
4026	 */
4027	if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4028		++pathid;
4029		/* Start the search over */
4030		goto retry;
4031	}
4032	return (pathid);
4033}
4034
4035static path_id_t
4036xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4037{
4038	path_id_t pathid;
4039	int i, dunit, val;
4040	char buf[32];
4041	const char *dname;
4042
4043	pathid = CAM_XPT_PATH_ID;
4044	snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4045	if (strcmp(buf, "xpt0") == 0 && sim_bus == 0)
4046		return (pathid);
4047	i = 0;
4048	while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
4049		if (strcmp(dname, "scbus")) {
4050			/* Avoid a bit of foot shooting. */
4051			continue;
4052		}
4053		if (dunit < 0)		/* unwired?! */
4054			continue;
4055		if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4056			if (sim_bus == val) {
4057				pathid = dunit;
4058				break;
4059			}
4060		} else if (sim_bus == 0) {
4061			/* Unspecified matches bus 0 */
4062			pathid = dunit;
4063			break;
4064		} else {
4065			printf("Ambiguous scbus configuration for %s%d "
4066			       "bus %d, cannot wire down.  The kernel "
4067			       "config entry for scbus%d should "
4068			       "specify a controller bus.\n"
4069			       "Scbus will be assigned dynamically.\n",
4070			       sim_name, sim_unit, sim_bus, dunit);
4071			break;
4072		}
4073	}
4074
4075	if (pathid == CAM_XPT_PATH_ID)
4076		pathid = xptnextfreepathid();
4077	return (pathid);
4078}
4079
4080static const char *
4081xpt_async_string(u_int32_t async_code)
4082{
4083
4084	switch (async_code) {
4085	case AC_BUS_RESET: return ("AC_BUS_RESET");
4086	case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL");
4087	case AC_SCSI_AEN: return ("AC_SCSI_AEN");
4088	case AC_SENT_BDR: return ("AC_SENT_BDR");
4089	case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED");
4090	case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED");
4091	case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE");
4092	case AC_LOST_DEVICE: return ("AC_LOST_DEVICE");
4093	case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG");
4094	case AC_INQ_CHANGED: return ("AC_INQ_CHANGED");
4095	case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED");
4096	case AC_CONTRACT: return ("AC_CONTRACT");
4097	case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED");
4098	case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION");
4099	}
4100	return ("AC_UNKNOWN");
4101}
4102
4103static int
4104xpt_async_size(u_int32_t async_code)
4105{
4106
4107	switch (async_code) {
4108	case AC_BUS_RESET: return (0);
4109	case AC_UNSOL_RESEL: return (0);
4110	case AC_SCSI_AEN: return (0);
4111	case AC_SENT_BDR: return (0);
4112	case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq));
4113	case AC_PATH_DEREGISTERED: return (0);
4114	case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev));
4115	case AC_LOST_DEVICE: return (0);
4116	case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings));
4117	case AC_INQ_CHANGED: return (0);
4118	case AC_GETDEV_CHANGED: return (0);
4119	case AC_CONTRACT: return (sizeof(struct ac_contract));
4120	case AC_ADVINFO_CHANGED: return (-1);
4121	case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio));
4122	}
4123	return (0);
4124}
4125
4126static int
4127xpt_async_process_dev(struct cam_ed *device, void *arg)
4128{
4129	union ccb *ccb = arg;
4130	struct cam_path *path = ccb->ccb_h.path;
4131	void *async_arg = ccb->casync.async_arg_ptr;
4132	u_int32_t async_code = ccb->casync.async_code;
4133	int relock;
4134
4135	if (path->device != device
4136	 && path->device->lun_id != CAM_LUN_WILDCARD
4137	 && device->lun_id != CAM_LUN_WILDCARD)
4138		return (1);
4139
4140	/*
4141	 * The async callback could free the device.
4142	 * If it is a broadcast async, it doesn't hold
4143	 * a device reference, so take our own reference.
4144	 */
4145	xpt_acquire_device(device);
4146
4147	/*
4148	 * If an async for a specific device is to be delivered to
4149	 * the wildcard client, take the specific device's lock.
4150	 * XXX: We may need a way for the client to specify this.
4151	 */
4152	if ((device->lun_id == CAM_LUN_WILDCARD &&
4153	     path->device->lun_id != CAM_LUN_WILDCARD) ||
4154	    (device->target->target_id == CAM_TARGET_WILDCARD &&
4155	     path->target->target_id != CAM_TARGET_WILDCARD) ||
4156	    (device->target->bus->path_id == CAM_BUS_WILDCARD &&
4157	     path->target->bus->path_id != CAM_BUS_WILDCARD)) {
4158		mtx_unlock(&device->device_mtx);
4159		xpt_path_lock(path);
4160		relock = 1;
4161	} else
4162		relock = 0;
4163
4164	(*(device->target->bus->xport->ops->async))(async_code,
4165	    device->target->bus, device->target, device, async_arg);
4166	xpt_async_bcast(&device->asyncs, async_code, path, async_arg);
4167
4168	if (relock) {
4169		xpt_path_unlock(path);
4170		mtx_lock(&device->device_mtx);
4171	}
4172	xpt_release_device(device);
4173	return (1);
4174}
4175
4176static int
4177xpt_async_process_tgt(struct cam_et *target, void *arg)
4178{
4179	union ccb *ccb = arg;
4180	struct cam_path *path = ccb->ccb_h.path;
4181
4182	if (path->target != target
4183	 && path->target->target_id != CAM_TARGET_WILDCARD
4184	 && target->target_id != CAM_TARGET_WILDCARD)
4185		return (1);
4186
4187	if (ccb->casync.async_code == AC_SENT_BDR) {
4188		/* Update our notion of when the last reset occurred */
4189		microtime(&target->last_reset);
4190	}
4191
4192	return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb));
4193}
4194
4195static void
4196xpt_async_process(struct cam_periph *periph, union ccb *ccb)
4197{
4198	struct cam_eb *bus;
4199	struct cam_path *path;
4200	void *async_arg;
4201	u_int32_t async_code;
4202
4203	path = ccb->ccb_h.path;
4204	async_code = ccb->casync.async_code;
4205	async_arg = ccb->casync.async_arg_ptr;
4206	CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO,
4207	    ("xpt_async(%s)\n", xpt_async_string(async_code)));
4208	bus = path->bus;
4209
4210	if (async_code == AC_BUS_RESET) {
4211		/* Update our notion of when the last reset occurred */
4212		microtime(&bus->last_reset);
4213	}
4214
4215	xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb);
4216
4217	/*
4218	 * If this wasn't a fully wildcarded async, tell all
4219	 * clients that want all async events.
4220	 */
4221	if (bus != xpt_periph->path->bus) {
4222		xpt_path_lock(xpt_periph->path);
4223		xpt_async_process_dev(xpt_periph->path->device, ccb);
4224		xpt_path_unlock(xpt_periph->path);
4225	}
4226
4227	if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4228		xpt_release_devq(path, 1, TRUE);
4229	else
4230		xpt_release_simq(path->bus->sim, TRUE);
4231	if (ccb->casync.async_arg_size > 0)
4232		free(async_arg, M_CAMXPT);
4233	xpt_free_path(path);
4234	xpt_free_ccb(ccb);
4235}
4236
4237static void
4238xpt_async_bcast(struct async_list *async_head,
4239		u_int32_t async_code,
4240		struct cam_path *path, void *async_arg)
4241{
4242	struct async_node *cur_entry;
4243	struct mtx *mtx;
4244
4245	cur_entry = SLIST_FIRST(async_head);
4246	while (cur_entry != NULL) {
4247		struct async_node *next_entry;
4248		/*
4249		 * Grab the next list entry before we call the current
4250		 * entry's callback.  This is because the callback function
4251		 * can delete its async callback entry.
4252		 */
4253		next_entry = SLIST_NEXT(cur_entry, links);
4254		if ((cur_entry->event_enable & async_code) != 0) {
4255			mtx = cur_entry->event_lock ?
4256			    path->device->sim->mtx : NULL;
4257			if (mtx)
4258				mtx_lock(mtx);
4259			cur_entry->callback(cur_entry->callback_arg,
4260					    async_code, path,
4261					    async_arg);
4262			if (mtx)
4263				mtx_unlock(mtx);
4264		}
4265		cur_entry = next_entry;
4266	}
4267}
4268
4269void
4270xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4271{
4272	union ccb *ccb;
4273	int size;
4274
4275	ccb = xpt_alloc_ccb_nowait();
4276	if (ccb == NULL) {
4277		xpt_print(path, "Can't allocate CCB to send %s\n",
4278		    xpt_async_string(async_code));
4279		return;
4280	}
4281
4282	if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) {
4283		xpt_print(path, "Can't allocate path to send %s\n",
4284		    xpt_async_string(async_code));
4285		xpt_free_ccb(ccb);
4286		return;
4287	}
4288	ccb->ccb_h.path->periph = NULL;
4289	ccb->ccb_h.func_code = XPT_ASYNC;
4290	ccb->ccb_h.cbfcnp = xpt_async_process;
4291	ccb->ccb_h.flags |= CAM_UNLOCKED;
4292	ccb->casync.async_code = async_code;
4293	ccb->casync.async_arg_size = 0;
4294	size = xpt_async_size(async_code);
4295	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
4296	    ("xpt_async: func %#x %s aync_code %d %s\n",
4297		ccb->ccb_h.func_code,
4298		xpt_action_name(ccb->ccb_h.func_code),
4299		async_code,
4300		xpt_async_string(async_code)));
4301	if (size > 0 && async_arg != NULL) {
4302		ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT);
4303		if (ccb->casync.async_arg_ptr == NULL) {
4304			xpt_print(path, "Can't allocate argument to send %s\n",
4305			    xpt_async_string(async_code));
4306			xpt_free_path(ccb->ccb_h.path);
4307			xpt_free_ccb(ccb);
4308			return;
4309		}
4310		memcpy(ccb->casync.async_arg_ptr, async_arg, size);
4311		ccb->casync.async_arg_size = size;
4312	} else if (size < 0) {
4313		ccb->casync.async_arg_ptr = async_arg;
4314		ccb->casync.async_arg_size = size;
4315	}
4316	if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4317		xpt_freeze_devq(path, 1);
4318	else
4319		xpt_freeze_simq(path->bus->sim, 1);
4320	xpt_done(ccb);
4321}
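
/*
 * Example (sketch under typical SIM assumptions, not mandated here): a SIM
 * usually posts a bus reset notification from its interrupt handler using a
 * wildcard path created at attach time ("softc->path" is a hypothetical
 * field):
 *
 *	xpt_async(AC_BUS_RESET, softc->path, NULL);
 *
 * For events with a fixed, positive argument size the argument is copied
 * above, so "async_arg" only needs to remain valid for the duration of the
 * call.
 */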
4322
4323static void
4324xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus,
4325		      struct cam_et *target, struct cam_ed *device,
4326		      void *async_arg)
4327{
4328
4329	/*
4330	 * We only need to handle events for real devices.
4331	 */
4332	if (target->target_id == CAM_TARGET_WILDCARD
4333	 || device->lun_id == CAM_LUN_WILDCARD)
4334		return;
4335
4336	printf("%s called\n", __func__);
4337}
4338
4339static uint32_t
4340xpt_freeze_devq_device(struct cam_ed *dev, u_int count)
4341{
4342	struct cam_devq	*devq;
4343	uint32_t freeze;
4344
4345	devq = dev->sim->devq;
4346	mtx_assert(&devq->send_mtx, MA_OWNED);
4347	CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4348	    ("xpt_freeze_devq_device(%d) %u->%u\n", count,
4349	    dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count));
4350	freeze = (dev->ccbq.queue.qfrozen_cnt += count);
4351	/* Remove frozen device from sendq. */
4352	if (device_is_queued(dev))
4353		camq_remove(&devq->send_queue, dev->devq_entry.index);
4354	return (freeze);
4355}
4356
4357u_int32_t
4358xpt_freeze_devq(struct cam_path *path, u_int count)
4359{
4360	struct cam_ed	*dev = path->device;
4361	struct cam_devq	*devq;
4362	uint32_t	 freeze;
4363
4364	devq = dev->sim->devq;
4365	mtx_lock(&devq->send_mtx);
4366	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count));
4367	freeze = xpt_freeze_devq_device(dev, count);
4368	mtx_unlock(&devq->send_mtx);
4369	return (freeze);
4370}
4371
4372u_int32_t
4373xpt_freeze_simq(struct cam_sim *sim, u_int count)
4374{
4375	struct cam_devq	*devq;
4376	uint32_t	 freeze;
4377
4378	devq = sim->devq;
4379	mtx_lock(&devq->send_mtx);
4380	freeze = (devq->send_queue.qfrozen_cnt += count);
4381	mtx_unlock(&devq->send_mtx);
4382	return (freeze);
4383}
4384
4385static void
4386xpt_release_devq_timeout(void *arg)
4387{
4388	struct cam_ed *dev;
4389	struct cam_devq *devq;
4390
4391	dev = (struct cam_ed *)arg;
4392	CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n"));
4393	devq = dev->sim->devq;
4394	mtx_assert(&devq->send_mtx, MA_OWNED);
4395	if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE))
4396		xpt_run_devq(devq);
4397}
4398
4399void
4400xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4401{
4402	struct cam_ed *dev;
4403	struct cam_devq *devq;
4404
4405	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n",
4406	    count, run_queue));
4407	dev = path->device;
4408	devq = dev->sim->devq;
4409	mtx_lock(&devq->send_mtx);
4410	if (xpt_release_devq_device(dev, count, run_queue))
4411		xpt_run_devq(dev->sim->devq);
4412	mtx_unlock(&devq->send_mtx);
4413}
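
/*
 * Illustrative freeze/release pairing (an assumed error-recovery pattern,
 * not required by this file): a peripheral freezes the device queue,
 * performs recovery, then releases it so queued CCBs are dispatched again.
 *
 *	xpt_freeze_devq(periph->path, 1);
 *	... run recovery CCBs ...
 *	xpt_release_devq(periph->path, 1, TRUE);
 */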
4414
4415static int
4416xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4417{
4418
4419	mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED);
4420	CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4421	    ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue,
4422	    dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count));
4423	if (count > dev->ccbq.queue.qfrozen_cnt) {
4424#ifdef INVARIANTS
4425		printf("xpt_release_devq(): requested %u > present %u\n",
4426		    count, dev->ccbq.queue.qfrozen_cnt);
4427#endif
4428		count = dev->ccbq.queue.qfrozen_cnt;
4429	}
4430	dev->ccbq.queue.qfrozen_cnt -= count;
4431	if (dev->ccbq.queue.qfrozen_cnt == 0) {
4432		/*
4433		 * No longer need to wait for a successful
4434		 * command completion.
4435		 */
4436		dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4437		/*
4438		 * Remove any timeouts that might be scheduled
4439		 * to release this queue.
4440		 */
4441		if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4442			callout_stop(&dev->callout);
4443			dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4444		}
4445		/*
4446		 * Now that we are unfrozen, schedule the
4447		 * device so any pending transactions are
4448		 * run.
4449		 */
4450		xpt_schedule_devq(dev->sim->devq, dev);
4451	} else
4452		run_queue = 0;
4453	return (run_queue);
4454}
4455
4456void
4457xpt_release_simq(struct cam_sim *sim, int run_queue)
4458{
4459	struct cam_devq	*devq;
4460
4461	devq = sim->devq;
4462	mtx_lock(&devq->send_mtx);
4463	if (devq->send_queue.qfrozen_cnt <= 0) {
4464#ifdef INVARIANTS
4465		printf("xpt_release_simq: requested 1 > present %u\n",
4466		    devq->send_queue.qfrozen_cnt);
4467#endif
4468	} else
4469		devq->send_queue.qfrozen_cnt--;
4470	if (devq->send_queue.qfrozen_cnt == 0) {
4471		/*
4472		 * If there is a timeout scheduled to release this
4473		 * sim queue, remove it.  The queue frozen count is
4474		 * already at 0.
4475		 */
4476		if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0) {
4477			callout_stop(&sim->callout);
4478			sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4479		}
4480		if (run_queue) {
4481			/*
4482			 * Now that we are unfrozen, run the send queue.
4483			 */
4484			xpt_run_devq(sim->devq);
4485		}
4486	}
4487	mtx_unlock(&devq->send_mtx);
4488}
4489
4490/*
4491 * XXX Appears to be unused.
4492 */
4493static void
4494xpt_release_simq_timeout(void *arg)
4495{
4496	struct cam_sim *sim;
4497
4498	sim = (struct cam_sim *)arg;
4499	xpt_release_simq(sim, /* run_queue */ TRUE);
4500}
4501
4502void
4503xpt_done(union ccb *done_ccb)
4504{
4505	struct cam_doneq *queue;
4506	int	run, hash;
4507
4508	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
4509	    ("xpt_done: func= %#x %s status %#x\n",
4510		done_ccb->ccb_h.func_code,
4511		xpt_action_name(done_ccb->ccb_h.func_code),
4512		done_ccb->ccb_h.status));
4513	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
4514		return;
4515
4516	/* Store the time the ccb was in the sim */
4517	done_ccb->ccb_h.qos.sim_data = sbinuptime() - done_ccb->ccb_h.qos.sim_data;
4518	hash = (done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id +
4519	    done_ccb->ccb_h.target_lun) % cam_num_doneqs;
4520	queue = &cam_doneqs[hash];
4521	mtx_lock(&queue->cam_doneq_mtx);
4522	run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq));
4523	STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe);
4524	done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4525	mtx_unlock(&queue->cam_doneq_mtx);
4526	if (run)
4527		wakeup(&queue->cam_doneq);
4528}
4529
4530void
4531xpt_done_direct(union ccb *done_ccb)
4532{
4533
4534	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
4535	    ("xpt_done_direct: status %#x\n", done_ccb->ccb_h.status));
4536	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
4537		return;
4538
4539	/* Store the time the ccb was in the sim */
4540	done_ccb->ccb_h.qos.sim_data = sbinuptime() - done_ccb->ccb_h.qos.sim_data;
4541	xpt_done_process(&done_ccb->ccb_h);
4542}
4543
4544union ccb *
4545xpt_alloc_ccb(void)
4546{
4547	union ccb *new_ccb;
4548
4549	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
4550	return (new_ccb);
4551}
4552
4553union ccb *
4554xpt_alloc_ccb_nowait(void)
4555{
4556	union ccb *new_ccb;
4557
4558	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4559	return (new_ccb);
4560}
4561
4562void
4563xpt_free_ccb(union ccb *free_ccb)
4564{
4565	free(free_ccb, M_CAMCCB);
4566}
4567
4570/* Private XPT functions */
4571
4572/*
4573 * Get a CAM control block for the caller. Charge the structure to the device
4574 * referenced by the path.  If we don't have sufficient resources to allocate
4575 * more ccbs, we return NULL.
4576 */
4577static union ccb *
4578xpt_get_ccb_nowait(struct cam_periph *periph)
4579{
4580	union ccb *new_ccb;
4581
4582	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4583	if (new_ccb == NULL)
4584		return (NULL);
4585	periph->periph_allocated++;
4586	cam_ccbq_take_opening(&periph->path->device->ccbq);
4587	return (new_ccb);
4588}
4589
4590static union ccb *
4591xpt_get_ccb(struct cam_periph *periph)
4592{
4593	union ccb *new_ccb;
4594
4595	cam_periph_unlock(periph);
4596	new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
4597	cam_periph_lock(periph);
4598	periph->periph_allocated++;
4599	cam_ccbq_take_opening(&periph->path->device->ccbq);
4600	return (new_ccb);
4601}
4602
4603union ccb *
4604cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
4605{
4606	struct ccb_hdr *ccb_h;
4607
4608	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n"));
4609	cam_periph_assert(periph, MA_OWNED);
4610	while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL ||
4611	    ccb_h->pinfo.priority != priority) {
4612		if (priority < periph->immediate_priority) {
4613			periph->immediate_priority = priority;
4614			xpt_run_allocq(periph, 0);
4615		} else
4616			cam_periph_sleep(periph, &periph->ccb_list, PRIBIO,
4617			    "cgticb", 0);
4618	}
4619	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
4620	return ((union ccb *)ccb_h);
4621}
4622
4623static void
4624xpt_acquire_bus(struct cam_eb *bus)
4625{
4626
4627	xpt_lock_buses();
4628	bus->refcount++;
4629	xpt_unlock_buses();
4630}
4631
4632static void
4633xpt_release_bus(struct cam_eb *bus)
4634{
4635
4636	xpt_lock_buses();
4637	KASSERT(bus->refcount >= 1, ("bus->refcount >= 1"));
4638	if (--bus->refcount > 0) {
4639		xpt_unlock_buses();
4640		return;
4641	}
4642	TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
4643	xsoftc.bus_generation++;
4644	xpt_unlock_buses();
4645	KASSERT(TAILQ_EMPTY(&bus->et_entries),
4646	    ("destroying bus, but target list is not empty"));
4647	cam_sim_release(bus->sim);
4648	mtx_destroy(&bus->eb_mtx);
4649	free(bus, M_CAMXPT);
4650}
4651
4652static struct cam_et *
4653xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4654{
4655	struct cam_et *cur_target, *target;
4656
4657	mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
4658	mtx_assert(&bus->eb_mtx, MA_OWNED);
4659	target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT,
4660					 M_NOWAIT|M_ZERO);
4661	if (target == NULL)
4662		return (NULL);
4663
4664	TAILQ_INIT(&target->ed_entries);
4665	target->bus = bus;
4666	target->target_id = target_id;
4667	target->refcount = 1;
4668	target->generation = 0;
4669	target->luns = NULL;
4670	mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF);
4671	timevalclear(&target->last_reset);
4672	/*
4673	 * Hold a reference to our parent bus so it
4674	 * will not go away before we do.
4675	 */
4676	bus->refcount++;
4677
4678	/* Insertion sort into our bus's target list */
4679	cur_target = TAILQ_FIRST(&bus->et_entries);
4680	while (cur_target != NULL && cur_target->target_id < target_id)
4681		cur_target = TAILQ_NEXT(cur_target, links);
4682	if (cur_target != NULL) {
4683		TAILQ_INSERT_BEFORE(cur_target, target, links);
4684	} else {
4685		TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4686	}
4687	bus->generation++;
4688	return (target);
4689}
4690
4691static void
4692xpt_acquire_target(struct cam_et *target)
4693{
4694	struct cam_eb *bus = target->bus;
4695
4696	mtx_lock(&bus->eb_mtx);
4697	target->refcount++;
4698	mtx_unlock(&bus->eb_mtx);
4699}
4700
4701static void
4702xpt_release_target(struct cam_et *target)
4703{
4704	struct cam_eb *bus = target->bus;
4705
4706	mtx_lock(&bus->eb_mtx);
4707	if (--target->refcount > 0) {
4708		mtx_unlock(&bus->eb_mtx);
4709		return;
4710	}
4711	TAILQ_REMOVE(&bus->et_entries, target, links);
4712	bus->generation++;
4713	mtx_unlock(&bus->eb_mtx);
4714	KASSERT(TAILQ_EMPTY(&target->ed_entries),
4715	    ("destroying target, but device list is not empty"));
4716	xpt_release_bus(bus);
4717	mtx_destroy(&target->luns_mtx);
4718	if (target->luns)
4719		free(target->luns, M_CAMXPT);
4720	free(target, M_CAMXPT);
4721}
4722
4723static struct cam_ed *
4724xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
4725			 lun_id_t lun_id)
4726{
4727	struct cam_ed *device;
4728
4729	device = xpt_alloc_device(bus, target, lun_id);
4730	if (device == NULL)
4731		return (NULL);
4732
4733	device->mintags = 1;
4734	device->maxtags = 1;
4735	return (device);
4736}
4737
4738static void
4739xpt_destroy_device(void *context, int pending)
4740{
4741	struct cam_ed	*device = context;
4742
4743	mtx_lock(&device->device_mtx);
4744	mtx_destroy(&device->device_mtx);
4745	free(device, M_CAMDEV);
4746}
4747
4748struct cam_ed *
4749xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4750{
4751	struct cam_ed	*cur_device, *device;
4752	struct cam_devq	*devq;
4753	cam_status status;
4754
4755	mtx_assert(&bus->eb_mtx, MA_OWNED);
4756	/* Make space for us in the device queue on our bus */
4757	devq = bus->sim->devq;
4758	mtx_lock(&devq->send_mtx);
4759	status = cam_devq_resize(devq, devq->send_queue.array_size + 1);
4760	mtx_unlock(&devq->send_mtx);
4761	if (status != CAM_REQ_CMP)
4762		return (NULL);
4763
4764	device = (struct cam_ed *)malloc(sizeof(*device),
4765					 M_CAMDEV, M_NOWAIT|M_ZERO);
4766	if (device == NULL)
4767		return (NULL);
4768
4769	cam_init_pinfo(&device->devq_entry);
4770	device->target = target;
4771	device->lun_id = lun_id;
4772	device->sim = bus->sim;
4773	if (cam_ccbq_init(&device->ccbq,
4774			  bus->sim->max_dev_openings) != 0) {
4775		free(device, M_CAMDEV);
4776		return (NULL);
4777	}
4778	SLIST_INIT(&device->asyncs);
4779	SLIST_INIT(&device->periphs);
4780	device->generation = 0;
4781	device->flags = CAM_DEV_UNCONFIGURED;
4782	device->tag_delay_count = 0;
4783	device->tag_saved_openings = 0;
4784	device->refcount = 1;
4785	mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF);
4786	callout_init_mtx(&device->callout, &devq->send_mtx, 0);
4787	TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device);
4788	/*
4789	 * Hold a reference to our parent target so it
4790	 * will not go away before we do.
4791	 */
4792	target->refcount++;
4793
4794	cur_device = TAILQ_FIRST(&target->ed_entries);
4795	while (cur_device != NULL && cur_device->lun_id < lun_id)
4796		cur_device = TAILQ_NEXT(cur_device, links);
4797	if (cur_device != NULL)
4798		TAILQ_INSERT_BEFORE(cur_device, device, links);
4799	else
4800		TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4801	target->generation++;
4802	return (device);
4803}
4804
4805void
4806xpt_acquire_device(struct cam_ed *device)
4807{
4808	struct cam_eb *bus = device->target->bus;
4809
4810	mtx_lock(&bus->eb_mtx);
4811	device->refcount++;
4812	mtx_unlock(&bus->eb_mtx);
4813}
4814
4815void
4816xpt_release_device(struct cam_ed *device)
4817{
4818	struct cam_eb *bus = device->target->bus;
4819	struct cam_devq *devq;
4820
4821	mtx_lock(&bus->eb_mtx);
4822	if (--device->refcount > 0) {
4823		mtx_unlock(&bus->eb_mtx);
4824		return;
4825	}
4826
4827	TAILQ_REMOVE(&device->target->ed_entries, device, links);
4828	device->target->generation++;
4829	mtx_unlock(&bus->eb_mtx);
4830
4831	/* Release our slot in the devq */
4832	devq = bus->sim->devq;
4833	mtx_lock(&devq->send_mtx);
4834	cam_devq_resize(devq, devq->send_queue.array_size - 1);
4835	mtx_unlock(&devq->send_mtx);
4836
4837	KASSERT(SLIST_EMPTY(&device->periphs),
4838	    ("destroying device, but periphs list is not empty"));
4839	KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX,
4840	    ("destroying device while still queued for ccbs"));
4841
4842	if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
4843		callout_stop(&device->callout);
4844
4845	xpt_release_target(device->target);
4846
4847	cam_ccbq_fini(&device->ccbq);
4848	/*
4849	 * Free allocated memory.  free(9) does nothing if the
4850	 * supplied pointer is NULL, so it is safe to call without
4851	 * checking.
4852	 */
4853	free(device->supported_vpds, M_CAMXPT);
4854	free(device->device_id, M_CAMXPT);
4855	free(device->ext_inq, M_CAMXPT);
4856	free(device->physpath, M_CAMXPT);
4857	free(device->rcap_buf, M_CAMXPT);
4858	free(device->serial_num, M_CAMXPT);
4859	taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task);
4860}
4861
4862u_int32_t
4863xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
4864{
4865	int	result;
4866	struct	cam_ed *dev;
4867
4868	dev = path->device;
4869	mtx_lock(&dev->sim->devq->send_mtx);
4870	result = cam_ccbq_resize(&dev->ccbq, newopenings);
4871	mtx_unlock(&dev->sim->devq->send_mtx);
4872	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
4873	 || (dev->inq_flags & SID_CmdQue) != 0)
4874		dev->tag_saved_openings = newopenings;
4875	return (result);
4876}
4877
4878static struct cam_eb *
4879xpt_find_bus(path_id_t path_id)
4880{
4881	struct cam_eb *bus;
4882
4883	xpt_lock_buses();
4884	for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4885	     bus != NULL;
4886	     bus = TAILQ_NEXT(bus, links)) {
4887		if (bus->path_id == path_id) {
4888			bus->refcount++;
4889			break;
4890		}
4891	}
4892	xpt_unlock_buses();
4893	return (bus);
4894}
4895
4896static struct cam_et *
4897xpt_find_target(struct cam_eb *bus, target_id_t	target_id)
4898{
4899	struct cam_et *target;
4900
4901	mtx_assert(&bus->eb_mtx, MA_OWNED);
4902	for (target = TAILQ_FIRST(&bus->et_entries);
4903	     target != NULL;
4904	     target = TAILQ_NEXT(target, links)) {
4905		if (target->target_id == target_id) {
4906			target->refcount++;
4907			break;
4908		}
4909	}
4910	return (target);
4911}
4912
4913static struct cam_ed *
4914xpt_find_device(struct cam_et *target, lun_id_t lun_id)
4915{
4916	struct cam_ed *device;
4917
4918	mtx_assert(&target->bus->eb_mtx, MA_OWNED);
4919	for (device = TAILQ_FIRST(&target->ed_entries);
4920	     device != NULL;
4921	     device = TAILQ_NEXT(device, links)) {
4922		if (device->lun_id == lun_id) {
4923			device->refcount++;
4924			break;
4925		}
4926	}
4927	return (device);
4928}
4929
4930void
4931xpt_start_tags(struct cam_path *path)
4932{
4933	struct ccb_relsim crs;
4934	struct cam_ed *device;
4935	struct cam_sim *sim;
4936	int    newopenings;
4937
4938	device = path->device;
4939	sim = path->bus->sim;
4940	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4941	xpt_freeze_devq(path, /*count*/1);
4942	device->inq_flags |= SID_CmdQue;
4943	if (device->tag_saved_openings != 0)
4944		newopenings = device->tag_saved_openings;
4945	else
4946		newopenings = min(device->maxtags,
4947				  sim->max_tagged_dev_openings);
4948	xpt_dev_ccbq_resize(path, newopenings);
4949	xpt_async(AC_GETDEV_CHANGED, path, NULL);
4950	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4951	crs.ccb_h.func_code = XPT_REL_SIMQ;
4952	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4953	crs.openings
4954	    = crs.release_timeout
4955	    = crs.qfrozen_cnt
4956	    = 0;
4957	xpt_action((union ccb *)&crs);
4958}
4959
4960void
4961xpt_stop_tags(struct cam_path *path)
4962{
4963	struct ccb_relsim crs;
4964	struct cam_ed *device;
4965	struct cam_sim *sim;
4966
4967	device = path->device;
4968	sim = path->bus->sim;
4969	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4970	device->tag_delay_count = 0;
4971	xpt_freeze_devq(path, /*count*/1);
4972	device->inq_flags &= ~SID_CmdQue;
4973	xpt_dev_ccbq_resize(path, sim->max_dev_openings);
4974	xpt_async(AC_GETDEV_CHANGED, path, NULL);
4975	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4976	crs.ccb_h.func_code = XPT_REL_SIMQ;
4977	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4978	crs.openings
4979	    = crs.release_timeout
4980	    = crs.qfrozen_cnt
4981	    = 0;
4982	xpt_action((union ccb *)&crs);
4983}
4984
4985static void
4986xpt_boot_delay(void *arg)
4987{
4988
4989	xpt_release_boot();
4990}
4991
4992static void
4993xpt_config(void *arg)
4994{
4995	/*
4996	 * Now that interrupts are enabled, go find our devices
4997	 */
4998	if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq"))
4999		printf("xpt_config: failed to create taskqueue thread.\n");
5000
5001	/* Set up the debugging path */
5002	if (cam_dflags != CAM_DEBUG_NONE) {
5003		if (xpt_create_path(&cam_dpath, NULL,
5004				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
5005				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
5006			printf("xpt_config: xpt_create_path() failed for debug"
5007			       " target %d:%d:%d, debugging disabled\n",
5008			       CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
5009			cam_dflags = CAM_DEBUG_NONE;
5010		}
5011	} else
5012		cam_dpath = NULL;
5013
5014	periphdriver_init(1);
5015	xpt_hold_boot();
5016	callout_init(&xsoftc.boot_callout, 1);
5017	callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay, 0,
5018	    xpt_boot_delay, NULL, 0);
5019	/* Fire up rescan thread. */
5020	if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0,
5021	    "cam", "scanner")) {
5022		printf("xpt_config: failed to create rescan thread.\n");
5023	}
5024}
5025
5026void
5027xpt_hold_boot(void)
5028{
5029	xpt_lock_buses();
5030	xsoftc.buses_to_config++;
5031	xpt_unlock_buses();
5032}
5033
5034void
5035xpt_release_boot(void)
5036{
5037	xpt_lock_buses();
5038	xsoftc.buses_to_config--;
5039	if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) {
5040		struct	xpt_task *task;
5041
5042		xsoftc.buses_config_done = 1;
5043		xpt_unlock_buses();
5044		/* Call manually because we don't have any busses */
5045		task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT);
5046		if (task != NULL) {
5047			TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
5048			taskqueue_enqueue(taskqueue_thread, &task->task);
5049		}
5050	} else
5051		xpt_unlock_buses();
5052}
5053
5054/*
5055 * If the given device only has one peripheral attached to it, and if that
5056 * peripheral is the passthrough driver, announce it.  This ensures that the
5057 * user sees some sort of announcement for every peripheral in their system.
5058 */
5059static int
5060xptpassannouncefunc(struct cam_ed *device, void *arg)
5061{
5062	struct cam_periph *periph;
5063	int i;
5064
5065	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
5066	     periph = SLIST_NEXT(periph, periph_links), i++);
5067
5068	periph = SLIST_FIRST(&device->periphs);
5069	if ((i == 1)
5070	 && (strncmp(periph->periph_name, "pass", 4) == 0))
5071		xpt_announce_periph(periph, NULL);
5072
5073	return(1);
5074}
5075
5076static void
5077xpt_finishconfig_task(void *context, int pending)
5078{
5079
5080	periphdriver_init(2);
5081	/*
5082	 * Check for devices with no "standard" peripheral driver
5083	 * attached.  For any devices like that, announce the
5084	 * passthrough driver so the user will see something.
5085	 */
5086	if (!bootverbose)
5087		xpt_for_all_devices(xptpassannouncefunc, NULL);
5088
5089	/* Release our hook so that the boot can continue. */
5090	config_intrhook_disestablish(xsoftc.xpt_config_hook);
5091	free(xsoftc.xpt_config_hook, M_CAMXPT);
5092	xsoftc.xpt_config_hook = NULL;
5093
5094	free(context, M_CAMXPT);
5095}
5096
5097cam_status
5098xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
5099		   struct cam_path *path)
5100{
5101	struct ccb_setasync csa;
5102	cam_status status;
5103	int xptpath = 0;
5104
5105	if (path == NULL) {
5106		status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
5107					 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
5108		if (status != CAM_REQ_CMP)
5109			return (status);
5110		xpt_path_lock(path);
5111		xptpath = 1;
5112	}
5113
5114	xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
5115	csa.ccb_h.func_code = XPT_SASYNC_CB;
5116	csa.event_enable = event;
5117	csa.callback = cbfunc;
5118	csa.callback_arg = cbarg;
5119	xpt_action((union ccb *)&csa);
5120	status = csa.ccb_h.status;
5121
5122	CAM_DEBUG(csa.ccb_h.path, CAM_DEBUG_TRACE,
5123	    ("xpt_register_async: func %p\n", cbfunc));
5124
5125	if (xptpath) {
5126		xpt_path_unlock(path);
5127		xpt_free_path(path);
5128	}
5129
5130	if ((status == CAM_REQ_CMP) &&
5131	    (csa.event_enable & AC_FOUND_DEVICE)) {
5132		/*
5133		 * Get this peripheral up to date with all
5134		 * the currently existing devices.
5135		 */
5136		xpt_for_all_devices(xptsetasyncfunc, &csa);
5137	}
5138	if ((status == CAM_REQ_CMP) &&
5139	    (csa.event_enable & AC_PATH_REGISTERED)) {
5140		/*
5141		 * Get this peripheral up to date with all
5142		 * the currently existing busses.
5143		 */
5144		xpt_for_all_busses(xptsetasyncbusfunc, &csa);
5145	}
5146
5147	return (status);
5148}
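
/*
 * Example (hedged sketch): a peripheral driver's init routine commonly
 * registers for device arrival events; "fooasync" is a hypothetical
 * callback.  Passing a NULL path registers against the XPT wildcard path,
 * and AC_FOUND_DEVICE replays already-existing devices through the
 * xpt_for_all_devices() call above.
 *
 *	status = xpt_register_async(AC_FOUND_DEVICE, fooasync, NULL, NULL);
 *	if (status != CAM_REQ_CMP)
 *		printf("foo: Failed to attach master async callback\n");
 */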
5149
5150static void
5151xptaction(struct cam_sim *sim, union ccb *work_ccb)
5152{
5153	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
5154
5155	switch (work_ccb->ccb_h.func_code) {
5156	/* Common cases first */
5157	case XPT_PATH_INQ:		/* Path routing inquiry */
5158	{
5159		struct ccb_pathinq *cpi;
5160
5161		cpi = &work_ccb->cpi;
5162		cpi->version_num = 1; /* XXX??? */
5163		cpi->hba_inquiry = 0;
5164		cpi->target_sprt = 0;
5165		cpi->hba_misc = 0;
5166		cpi->hba_eng_cnt = 0;
5167		cpi->max_target = 0;
5168		cpi->max_lun = 0;
5169		cpi->initiator_id = 0;
5170		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
5171		strlcpy(cpi->hba_vid, "", HBA_IDLEN);
5172		strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
5173		cpi->unit_number = sim->unit_number;
5174		cpi->bus_id = sim->bus_id;
5175		cpi->base_transfer_speed = 0;
5176		cpi->protocol = PROTO_UNSPECIFIED;
5177		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
5178		cpi->transport = XPORT_UNSPECIFIED;
5179		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
5180		cpi->ccb_h.status = CAM_REQ_CMP;
5181		xpt_done(work_ccb);
5182		break;
5183	}
5184	default:
5185		work_ccb->ccb_h.status = CAM_REQ_INVALID;
5186		xpt_done(work_ccb);
5187		break;
5188	}
5189}
5190
5191/*
5192 * The xpt as a "controller" has no interrupt sources, so polling
5193 * is a no-op.
5194 */
5195static void
5196xptpoll(struct cam_sim *sim)
5197{
5198}
5199
5200void
5201xpt_lock_buses(void)
5202{
5203	mtx_lock(&xsoftc.xpt_topo_lock);
5204}
5205
5206void
5207xpt_unlock_buses(void)
5208{
5209	mtx_unlock(&xsoftc.xpt_topo_lock);
5210}
5211
5212struct mtx *
5213xpt_path_mtx(struct cam_path *path)
5214{
5215
5216	return (&path->device->device_mtx);
5217}
5218
5219static void
5220xpt_done_process(struct ccb_hdr *ccb_h)
5221{
5222	struct cam_sim *sim;
5223	struct cam_devq *devq;
5224	struct mtx *mtx = NULL;
5225
5226	if (ccb_h->flags & CAM_HIGH_POWER) {
5227		struct highpowerlist	*hphead;
5228		struct cam_ed		*device;
5229
5230		mtx_lock(&xsoftc.xpt_highpower_lock);
5231		hphead = &xsoftc.highpowerq;
5232
5233		device = STAILQ_FIRST(hphead);
5234
5235		/*
5236		 * Increment the count since this command is done.
5237		 */
5238		xsoftc.num_highpower++;
5239
5240		/*
5241		 * Any high powered commands queued up?
5242		 */
5243		if (device != NULL) {
5244
5245			STAILQ_REMOVE_HEAD(hphead, highpowerq_entry);
5246			mtx_unlock(&xsoftc.xpt_highpower_lock);
5247
5248			mtx_lock(&device->sim->devq->send_mtx);
5249			xpt_release_devq_device(device,
5250					 /*count*/1, /*runqueue*/TRUE);
5251			mtx_unlock(&device->sim->devq->send_mtx);
5252		} else
5253			mtx_unlock(&xsoftc.xpt_highpower_lock);
5254	}
5255
5256	sim = ccb_h->path->bus->sim;
5257
5258	if (ccb_h->status & CAM_RELEASE_SIMQ) {
5259		xpt_release_simq(sim, /*run_queue*/FALSE);
5260		ccb_h->status &= ~CAM_RELEASE_SIMQ;
5261	}
5262
5263	if ((ccb_h->flags & CAM_DEV_QFRZDIS)
5264	 && (ccb_h->status & CAM_DEV_QFRZN)) {
5265		xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE);
5266		ccb_h->status &= ~CAM_DEV_QFRZN;
5267	}
5268
5269	devq = sim->devq;
5270	if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
5271		struct cam_ed *dev = ccb_h->path->device;
5272
5273		mtx_lock(&devq->send_mtx);
5274		devq->send_active--;
5275		devq->send_openings++;
5276		cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
5277
5278		if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
5279		  && (dev->ccbq.dev_active == 0))) {
5280			dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY;
5281			xpt_release_devq_device(dev, /*count*/1,
5282					 /*run_queue*/FALSE);
5283		}
5284
5285		if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
5286		  && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) {
5287			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
5288			xpt_release_devq_device(dev, /*count*/1,
5289					 /*run_queue*/FALSE);
5290		}
5291
5292		if (!device_is_queued(dev))
5293			(void)xpt_schedule_devq(devq, dev);
5294		xpt_run_devq(devq);
5295		mtx_unlock(&devq->send_mtx);
5296
5297		if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) {
5298			mtx = xpt_path_mtx(ccb_h->path);
5299			mtx_lock(mtx);
5300
5301			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5302			 && (--dev->tag_delay_count == 0))
5303				xpt_start_tags(ccb_h->path);
5304		}
5305	}
5306
5307	if ((ccb_h->flags & CAM_UNLOCKED) == 0) {
5308		if (mtx == NULL) {
5309			mtx = xpt_path_mtx(ccb_h->path);
5310			mtx_lock(mtx);
5311		}
5312	} else {
5313		if (mtx != NULL) {
5314			mtx_unlock(mtx);
5315			mtx = NULL;
5316		}
5317	}
5318
5319	/* Call the peripheral driver's callback */
5320	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
5321	(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
5322	if (mtx != NULL)
5323		mtx_unlock(mtx);
5324}
5325
5326void
5327xpt_done_td(void *arg)
5328{
5329	struct cam_doneq *queue = arg;
5330	struct ccb_hdr *ccb_h;
5331	STAILQ_HEAD(, ccb_hdr)	doneq;
5332
5333	STAILQ_INIT(&doneq);
5334	mtx_lock(&queue->cam_doneq_mtx);
5335	while (1) {
5336		while (STAILQ_EMPTY(&queue->cam_doneq)) {
5337			queue->cam_doneq_sleep = 1;
5338			msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
5339			    PRIBIO, "-", 0);
5340			queue->cam_doneq_sleep = 0;
5341		}
5342		STAILQ_CONCAT(&doneq, &queue->cam_doneq);
5343		mtx_unlock(&queue->cam_doneq_mtx);
5344
5345		THREAD_NO_SLEEPING();
5346		while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
5347			STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
5348			xpt_done_process(ccb_h);
5349		}
5350		THREAD_SLEEPING_OK();
5351
5352		mtx_lock(&queue->cam_doneq_mtx);
5353	}
5354}
5355
5356static void
5357camisr_runqueue(void)
5358{
5359	struct	ccb_hdr *ccb_h;
5360	struct cam_doneq *queue;
5361	int i;
5362
5363	/* Process global queues. */
5364	for (i = 0; i < cam_num_doneqs; i++) {
5365		queue = &cam_doneqs[i];
5366		mtx_lock(&queue->cam_doneq_mtx);
5367		while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) {
5368			STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe);
5369			mtx_unlock(&queue->cam_doneq_mtx);
5370			xpt_done_process(ccb_h);
5371			mtx_lock(&queue->cam_doneq_mtx);
5372		}
5373		mtx_unlock(&queue->cam_doneq_mtx);
5374	}
5375}
5376
5377struct kv
5378{
5379	uint32_t v;
5380	const char *name;
5381};
5382
5383static struct kv map[] = {
5384	{ XPT_NOOP, "XPT_NOOP" },
5385	{ XPT_SCSI_IO, "XPT_SCSI_IO" },
5386	{ XPT_GDEV_TYPE, "XPT_GDEV_TYPE" },
5387	{ XPT_GDEVLIST, "XPT_GDEVLIST" },
5388	{ XPT_PATH_INQ, "XPT_PATH_INQ" },
5389	{ XPT_REL_SIMQ, "XPT_REL_SIMQ" },
5390	{ XPT_SASYNC_CB, "XPT_SASYNC_CB" },
5391	{ XPT_SDEV_TYPE, "XPT_SDEV_TYPE" },
5392	{ XPT_SCAN_BUS, "XPT_SCAN_BUS" },
5393	{ XPT_DEV_MATCH, "XPT_DEV_MATCH" },
5394	{ XPT_DEBUG, "XPT_DEBUG" },
5395	{ XPT_PATH_STATS, "XPT_PATH_STATS" },
5396	{ XPT_GDEV_STATS, "XPT_GDEV_STATS" },
5397	{ XPT_DEV_ADVINFO, "XPT_DEV_ADVINFO" },
5398	{ XPT_ASYNC, "XPT_ASYNC" },
5399	{ XPT_ABORT, "XPT_ABORT" },
5400	{ XPT_RESET_BUS, "XPT_RESET_BUS" },
5401	{ XPT_RESET_DEV, "XPT_RESET_DEV" },
5402	{ XPT_TERM_IO, "XPT_TERM_IO" },
5403	{ XPT_SCAN_LUN, "XPT_SCAN_LUN" },
5404	{ XPT_GET_TRAN_SETTINGS, "XPT_GET_TRAN_SETTINGS" },
5405	{ XPT_SET_TRAN_SETTINGS, "XPT_SET_TRAN_SETTINGS" },
5406	{ XPT_CALC_GEOMETRY, "XPT_CALC_GEOMETRY" },
5407	{ XPT_ATA_IO, "XPT_ATA_IO" },
5408	{ XPT_GET_SIM_KNOB, "XPT_GET_SIM_KNOB" },
5409	{ XPT_SET_SIM_KNOB, "XPT_SET_SIM_KNOB" },
5410	{ XPT_NVME_IO, "XPT_NVME_IO" },
5411	{ XPT_MMCSD_IO, "XPT_MMCSD_IO" },
5412	{ XPT_SMP_IO, "XPT_SMP_IO" },
5413	{ XPT_SCAN_TGT, "XPT_SCAN_TGT" },
5414	{ XPT_NVME_ADMIN, "XPT_NVME_ADMIN" },
5415	{ XPT_ENG_INQ, "XPT_ENG_INQ" },
5416	{ XPT_ENG_EXEC, "XPT_ENG_EXEC" },
5417	{ XPT_EN_LUN, "XPT_EN_LUN" },
5418	{ XPT_TARGET_IO, "XPT_TARGET_IO" },
5419	{ XPT_ACCEPT_TARGET_IO, "XPT_ACCEPT_TARGET_IO" },
5420	{ XPT_CONT_TARGET_IO, "XPT_CONT_TARGET_IO" },
5421	{ XPT_IMMED_NOTIFY, "XPT_IMMED_NOTIFY" },
5422	{ XPT_NOTIFY_ACK, "XPT_NOTIFY_ACK" },
5423	{ XPT_IMMEDIATE_NOTIFY, "XPT_IMMEDIATE_NOTIFY" },
5424	{ XPT_NOTIFY_ACKNOWLEDGE, "XPT_NOTIFY_ACKNOWLEDGE" },
5425	{ 0, 0 }
5426};
5427
5428static const char *
5429xpt_action_name(uint32_t action)
5430{
5431	static char buffer[32];	/* Only for unknown messages -- racy */
5432	struct kv *walker = map;
5433
5434	while (walker->name != NULL) {
5435		if (walker->v == action)
5436			return (walker->name);
5437		walker++;
5438	}
5439
5440	snprintf(buffer, sizeof(buffer), "%#x", action);
5441	return (buffer);
5442}
5443