/*-
 * Common functions for CAM "type" (peripheral) drivers.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/cam/cam_periph.c 367145 2020-10-29 22:00:15Z brooks $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/devicestat.h>
#include <sys/bus.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_queue.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

static	u_int		camperiphnextunit(struct periph_driver *p_drv,
					  u_int newunit, int wired,
					  path_id_t pathid, target_id_t target,
					  lun_id_t lun);
static	u_int		camperiphunit(struct periph_driver *p_drv,
				      path_id_t pathid, target_id_t target,
				      lun_id_t lun);
static	void		camperiphdone(struct cam_periph *periph,
					union ccb *done_ccb);
static  void		camperiphfree(struct cam_periph *periph);
static int		camperiphscsistatuserror(union ccb *ccb,
					        union ccb **orig_ccb,
					        cam_flags camflags,
					        u_int32_t sense_flags,
					        int *openings,
					        u_int32_t *relsim_flags,
					        u_int32_t *timeout,
					        u_int32_t *action,
					        const char **action_string);
static	int		camperiphscsisenseerror(union ccb *ccb,
					        union ccb **orig_ccb,
					        cam_flags camflags,
					        u_int32_t sense_flags,
					        int *openings,
					        u_int32_t *relsim_flags,
					        u_int32_t *timeout,
					        u_int32_t *action,
					        const char **action_string);
static void		cam_periph_devctl_notify(union ccb *ccb);

static int nperiph_drivers;
static int initialized = 0;
struct periph_driver **periph_drivers;

static MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");

static int periph_selto_delay = 1000;
TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
static int periph_noresrc_delay = 500;
TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
static int periph_busy_delay = 500;
TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);

static u_int periph_mapmem_thresh = 65536;
SYSCTL_UINT(_kern_cam, OID_AUTO, mapmem_thresh, CTLFLAG_RWTUN,
    &periph_mapmem_thresh, 0, "Threshold for user-space buffer mapping");

void
periphdriver_register(void *data)
{
	struct periph_driver *drv = (struct periph_driver *)data;
	struct periph_driver **newdrivers, **old;
	int ndrivers;

again:
	ndrivers = nperiph_drivers + 2;
	newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
			    M_WAITOK);
	xpt_lock_buses();
	if (ndrivers != nperiph_drivers + 2) {
		/*
		 * Lost race against itself; go around.
		 */
		xpt_unlock_buses();
		free(newdrivers, M_CAMPERIPH);
		goto again;
	}
	if (periph_drivers)
		bcopy(periph_drivers, newdrivers,
		      sizeof(*newdrivers) * nperiph_drivers);
	newdrivers[nperiph_drivers] = drv;
	newdrivers[nperiph_drivers + 1] = NULL;
	old = periph_drivers;
	periph_drivers = newdrivers;
	nperiph_drivers++;
	xpt_unlock_buses();
	if (old)
		free(old, M_CAMPERIPH);
	/* If driver marked as early or it is late now, initialize it. */
	if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
	    initialized > 1)
		(*drv->init)();
}
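
/*
 * Illustrative sketch, not part of this file: peripheral drivers do not
 * normally call periphdriver_register() directly.  They declare a
 * struct periph_driver and let the PERIPHDRIVER_DECLARE() macro from
 * cam_periph.h register it at module load time via SYSINIT.  The "xx"
 * driver name and xxinit routine below are hypothetical:
 *
 *	static struct periph_driver xxdriver = {
 *		xxinit, "xx",
 *		TAILQ_HEAD_INITIALIZER(xxdriver.units), 0
 *	};
 *	PERIPHDRIVER_DECLARE(xx, xxdriver);
 */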

int
periphdriver_unregister(void *data)
{
	struct periph_driver *drv = (struct periph_driver *)data;
	int error, n;

	/* If driver marked as early or it is late now, deinitialize it. */
	if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
	    initialized > 1) {
		if (drv->deinit == NULL) {
			printf("CAM periph driver '%s' doesn't have deinit.\n",
			    drv->driver_name);
			return (EOPNOTSUPP);
		}
		error = drv->deinit();
		if (error != 0)
			return (error);
	}

	xpt_lock_buses();
	for (n = 0; n < nperiph_drivers && periph_drivers[n] != drv; n++)
		;
	KASSERT(n < nperiph_drivers,
	    ("Periph driver '%s' was not registered", drv->driver_name));
	for (; n + 1 < nperiph_drivers; n++)
		periph_drivers[n] = periph_drivers[n + 1];
	/* After the shift, slot n holds a stale duplicate; clear it so the
	 * NULL-terminated array stays properly terminated. */
	periph_drivers[n] = NULL;
	nperiph_drivers--;
	xpt_unlock_buses();
	return (0);
}

void
periphdriver_init(int level)
{
	int	i, early;

	initialized = max(initialized, level);
	for (i = 0; periph_drivers[i] != NULL; i++) {
		early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2;
		if (early == initialized)
			(*periph_drivers[i]->init)();
	}
}

cam_status
cam_periph_alloc(periph_ctor_t *periph_ctor,
		 periph_oninv_t *periph_oninvalidate,
		 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
		 char *name, cam_periph_type type, struct cam_path *path,
		 ac_callback_t *ac_callback, ac_code code, void *arg)
{
	struct		periph_driver **p_drv;
	struct		cam_sim *sim;
	struct		cam_periph *periph;
	struct		cam_periph *cur_periph;
	path_id_t	path_id;
	target_id_t	target_id;
	lun_id_t	lun_id;
	cam_status	status;
	u_int		init_level;

	init_level = 0;
	/*
	 * Handle Hot-Plug scenarios.  If there is already a peripheral
	 * of our type assigned to this path, we are likely waiting for
	 * final close on an old, invalidated, peripheral.  If this is
	 * the case, queue up a deferred call to the peripheral's async
	 * handler.  If it looks like a mistaken re-allocation, complain.
	 */
	if ((periph = cam_periph_find(path, name)) != NULL) {

		if ((periph->flags & CAM_PERIPH_INVALID) != 0
		 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
			periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
			periph->deferred_callback = ac_callback;
			periph->deferred_ac = code;
			return (CAM_REQ_INPROG);
		} else {
			printf("cam_periph_alloc: attempt to re-allocate "
			       "valid device %s%d rejected flags %#x "
			       "refcount %d\n", periph->periph_name,
			       periph->unit_number, periph->flags,
			       periph->refcount);
		}
		return (CAM_REQ_INVALID);
	}

	periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
					     M_NOWAIT|M_ZERO);

	if (periph == NULL)
		return (CAM_RESRC_UNAVAIL);

	init_level++;

	sim = xpt_path_sim(path);
	path_id = xpt_path_path_id(path);
	target_id = xpt_path_target_id(path);
	lun_id = xpt_path_lun_id(path);
	periph->periph_start = periph_start;
	periph->periph_dtor = periph_dtor;
	periph->periph_oninval = periph_oninvalidate;
	periph->type = type;
	periph->periph_name = name;
	periph->scheduled_priority = CAM_PRIORITY_NONE;
	periph->immediate_priority = CAM_PRIORITY_NONE;
	periph->refcount = 1;		/* Dropped by invalidation. */
	periph->sim = sim;
	SLIST_INIT(&periph->ccb_list);
	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
	if (status != CAM_REQ_CMP)
		goto failure;
	periph->path = path;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("cam_periph_alloc: invalid periph name '%s'\n", name);
		xpt_unlock_buses();
		xpt_free_path(periph->path);
		free(periph, M_CAMPERIPH);
		return (CAM_REQ_INVALID);
	}
	periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
	cur_periph = TAILQ_FIRST(&(*p_drv)->units);
	while (cur_periph != NULL
	    && cur_periph->unit_number < periph->unit_number)
		cur_periph = TAILQ_NEXT(cur_periph, unit_links);
	if (cur_periph != NULL) {
		KASSERT(cur_periph->unit_number != periph->unit_number, ("duplicate units on periph list"));
		TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
	} else {
		TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
		(*p_drv)->generation++;
	}
	xpt_unlock_buses();

	init_level++;

	status = xpt_add_periph(periph);
	if (status != CAM_REQ_CMP)
		goto failure;

	init_level++;
	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph created\n"));

	status = periph_ctor(periph, arg);

	if (status == CAM_REQ_CMP)
		init_level++;

failure:
	switch (init_level) {
	case 4:
		/* Initialized successfully */
		break;
	case 3:
		CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
		xpt_remove_periph(periph);
		/* FALLTHROUGH */
	case 2:
		xpt_lock_buses();
		TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
		xpt_unlock_buses();
		xpt_free_path(periph->path);
		/* FALLTHROUGH */
	case 1:
		free(periph, M_CAMPERIPH);
		/* FALLTHROUGH */
	case 0:
		/* No cleanup to perform. */
		break;
	default:
		panic("%s: Unknown init level", __func__);
	}
	return(status);
}
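
/*
 * Usage sketch (hedged): drivers typically call cam_periph_alloc() from
 * their async callback once an AC_FOUND_DEVICE event delivers a new
 * path.  The xx* routines and "xx" name are hypothetical stand-ins for
 * a real driver's constructor, invalidation, destructor, start and
 * async handlers:
 *
 *	status = cam_periph_alloc(xxregister, xxoninvalidate, xxcleanup,
 *	    xxstart, "xx", CAM_PERIPH_BIO, path, xxasync,
 *	    AC_FOUND_DEVICE, cgd);
 *	if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG)
 *		printf("xxasync: Unable to attach to new device "
 *		    "due to status %#x\n", status);
 */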

/*
 * Find a peripheral structure with the specified path, target, lun,
 * and (optionally) driver name.  If the name is NULL, this function
 * will return the first peripheral driver that matches the specified
 * path.
 */
struct cam_periph *
cam_periph_find(struct cam_path *path, char *name)
{
	struct periph_driver **p_drv;
	struct cam_periph *periph;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {

		if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
			continue;

		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) == 0) {
				xpt_unlock_buses();
				cam_periph_assert(periph, MA_OWNED);
				return(periph);
			}
		}
		if (name != NULL) {
			xpt_unlock_buses();
			return(NULL);
		}
	}
	xpt_unlock_buses();
	return(NULL);
}

/*
 * Find peripheral driver instances attached to the specified path.
 */
int
cam_periph_list(struct cam_path *path, struct sbuf *sb)
{
	struct sbuf local_sb;
	struct periph_driver **p_drv;
	struct cam_periph *periph;
	int count;
	int sbuf_alloc_len;

	sbuf_alloc_len = 16;
retry:
	sbuf_new(&local_sb, NULL, sbuf_alloc_len, SBUF_FIXEDLEN);
	count = 0;
	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {

		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) != 0)
				continue;

			if (sbuf_len(&local_sb) != 0)
				sbuf_cat(&local_sb, ",");

			sbuf_printf(&local_sb, "%s%d", periph->periph_name,
				    periph->unit_number);

			if (sbuf_error(&local_sb) == ENOMEM) {
				sbuf_alloc_len *= 2;
				xpt_unlock_buses();
				sbuf_delete(&local_sb);
				goto retry;
			}
			count++;
		}
	}
	xpt_unlock_buses();
	sbuf_finish(&local_sb);
	if (sbuf_len(sb) != 0)
		sbuf_cat(sb, ",");
	sbuf_cat(sb, sbuf_data(&local_sb));
	sbuf_delete(&local_sb);
	return (count);
}

cam_status
cam_periph_acquire(struct cam_periph *periph)
{
	cam_status status;

	status = CAM_REQ_CMP_ERR;
	if (periph == NULL)
		return (status);

	xpt_lock_buses();
	if ((periph->flags & CAM_PERIPH_INVALID) == 0) {
		periph->refcount++;
		status = CAM_REQ_CMP;
	}
	xpt_unlock_buses();

	return (status);
}
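
/*
 * Minimal pairing sketch: each successful cam_periph_acquire() must be
 * balanced by a cam_periph_release() (or one of the locked variants)
 * once the caller is done with the reference, e.g.:
 *
 *	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
 *		return (ENXIO);
 *	... use periph from a callback or sleepable context ...
 *	cam_periph_release(periph);
 */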

void
cam_periph_doacquire(struct cam_periph *periph)
{

	xpt_lock_buses();
	KASSERT(periph->refcount >= 1,
	    ("cam_periph_doacquire() with refcount == %d", periph->refcount));
	periph->refcount++;
	xpt_unlock_buses();
}

void
cam_periph_release_locked_buses(struct cam_periph *periph)
{

	cam_periph_assert(periph, MA_OWNED);
	KASSERT(periph->refcount >= 1, ("periph->refcount >= 1"));
	if (--periph->refcount == 0)
		camperiphfree(periph);
}

void
cam_periph_release_locked(struct cam_periph *periph)
{

	if (periph == NULL)
		return;

	xpt_lock_buses();
	cam_periph_release_locked_buses(periph);
	xpt_unlock_buses();
}

void
cam_periph_release(struct cam_periph *periph)
{
	struct mtx *mtx;

	if (periph == NULL)
		return;

	cam_periph_assert(periph, MA_NOTOWNED);
	mtx = cam_periph_mtx(periph);
	mtx_lock(mtx);
	cam_periph_release_locked(periph);
	mtx_unlock(mtx);
}

int
cam_periph_hold(struct cam_periph *periph, int priority)
{
	int error;

	/*
	 * Increment the reference count on the peripheral
	 * while we wait for our lock attempt to succeed
	 * to ensure the peripheral doesn't disappear out
	 * from under us while we sleep.
	 */

	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return (ENXIO);

	cam_periph_assert(periph, MA_OWNED);
	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
		periph->flags |= CAM_PERIPH_LOCK_WANTED;
		if ((error = cam_periph_sleep(periph, periph, priority,
		    "caplck", 0)) != 0) {
			cam_periph_release_locked(periph);
			return (error);
		}
		if (periph->flags & CAM_PERIPH_INVALID) {
			cam_periph_release_locked(periph);
			return (ENXIO);
		}
	}

	periph->flags |= CAM_PERIPH_LOCKED;
	return (0);
}
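
/*
 * Sketch of the usual open-path idiom (the surrounding xxopen routine
 * and softc details are hypothetical): hold the peripheral across the
 * sleepable portion of open, then unhold; cam_periph_unhold() also
 * drops the reference that cam_periph_hold() took:
 *
 *	cam_periph_lock(periph);
 *	if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) {
 *		cam_periph_unlock(periph);
 *		return (error);
 *	}
 *	... probe media, set up softc state ...
 *	cam_periph_unhold(periph);
 *	cam_periph_unlock(periph);
 */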

void
cam_periph_unhold(struct cam_periph *periph)
{

	cam_periph_assert(periph, MA_OWNED);

	periph->flags &= ~CAM_PERIPH_LOCKED;
	if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
		periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
		wakeup(periph);
	}

	cam_periph_release_locked(periph);
}

/*
 * Look for the next unit number that is not currently in use for this
 * peripheral type starting at "newunit".  Also exclude unit numbers that
 * are reserved for future "hardwiring" unless we already know that this
 * is a potential wired device.  Only assume that the device is "wired" the
 * first time through the loop since after that we'll be looking at unit
 * numbers that did not match a wiring entry.
 */
static u_int
camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
		  path_id_t pathid, target_id_t target, lun_id_t lun)
{
	struct	cam_periph *periph;
	char	*periph_name;
	int	i, val, dunit, r;
	const char *dname, *strval;

	periph_name = p_drv->driver_name;
	for (;;newunit++) {

		for (periph = TAILQ_FIRST(&p_drv->units);
		     periph != NULL && periph->unit_number != newunit;
		     periph = TAILQ_NEXT(periph, unit_links))
			;

		if (periph != NULL && periph->unit_number == newunit) {
			if (wired != 0) {
				xpt_print(periph->path, "Duplicate Wired "
				    "Device entry!\n");
				xpt_print(periph->path, "Second device (%s "
				    "device at scbus%d target %d lun %jx) "
				    "will not be wired\n", periph_name, pathid,
				    target, (uintmax_t)lun);
				wired = 0;
			}
			continue;
		}
		if (wired)
			break;

		/*
		 * Don't match entries like "da 4" as a wired down
		 * device, but do match entries like "da 4 target 5"
		 * or even "da 4 scbus 1".
		 */
		i = 0;
		dname = periph_name;
		for (;;) {
			r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
			if (r != 0)
				break;
			/* if no "target" and no specific scbus, skip */
			if (resource_int_value(dname, dunit, "target", &val) &&
			    (resource_string_value(dname, dunit, "at", &strval) ||
			     strcmp(strval, "scbus") == 0))
				continue;
			if (newunit == dunit)
				break;
		}
		if (r != 0)
			break;
	}
	return (newunit);
}

static u_int
camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
	      target_id_t target, lun_id_t lun)
{
	u_int	unit;
	int	wired, i, val, dunit;
	const char *dname, *strval;
	char	pathbuf[32], *periph_name;

	periph_name = p_drv->driver_name;
	snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
	unit = 0;
	i = 0;
	dname = periph_name;
	for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
	     wired = 0) {
		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
			if (strcmp(strval, pathbuf) != 0)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "target", &val) == 0) {
			if (val != target)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "lun", &val) == 0) {
			if (val != lun)
				continue;
			wired++;
		}
		if (wired != 0) {
			unit = dunit;
			break;
		}
	}

	/*
	 * Either start from 0 looking for the next unit or from
	 * the unit number given in the resource config.  This way,
	 * if we have wildcard matches, we don't return the same
	 * unit number twice.
	 */
	unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);

	return (unit);
}
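
/*
 * Wiring illustration (a hedged sketch of the hint syntax, not code from
 * this file): unit numbers can be reserved in /boot/device.hints or
 * loader.conf(5) so that a device at a fixed location always gets the
 * same unit.  For a hypothetical "xx" periph wired to unit 4:
 *
 *	hint.xx.4.at="scbus1"
 *	hint.xx.4.target="5"
 *	hint.xx.4.lun="0"
 *
 * camperiphunit() matches such entries through resource_*_value() and
 * returns the wired unit; camperiphnextunit() then skips reserved unit
 * numbers when assigning units to non-wired devices.
 */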

void
cam_periph_invalidate(struct cam_periph *periph)
{

	cam_periph_assert(periph, MA_OWNED);
	/*
	 * We only call this routine the first time a peripheral is
	 * invalidated.
	 */
	if ((periph->flags & CAM_PERIPH_INVALID) != 0)
		return;

	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n"));
	if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
		xpt_denounce_periph(periph);
	periph->flags |= CAM_PERIPH_INVALID;
	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
	if (periph->periph_oninval != NULL)
		periph->periph_oninval(periph);
	cam_periph_release_locked(periph);
}

static void
camperiphfree(struct cam_periph *periph)
{
	struct periph_driver **p_drv;
	struct periph_driver *drv;

	cam_periph_assert(periph, MA_OWNED);
	KASSERT(periph->periph_allocating == 0, ("%s%d: freed while allocating",
	    periph->periph_name, periph->unit_number));
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("camperiphfree: attempt to free non-existent periph\n");
		return;
	}
	/*
	 * Cache a pointer to the periph_driver structure.  If a
	 * periph_driver is added or removed from the array (see
	 * periphdriver_register()) while we drop the topology lock
	 * below, p_drv may change.  This doesn't protect against this
	 * particular periph_driver going away.  That will require full
	 * reference counting in the periph_driver infrastructure.
	 */
	drv = *p_drv;

	/*
	 * We need to set this flag before dropping the topology lock, to
	 * let anyone who is traversing the list know that this peripheral
	 * is about to be freed, and that there will be no more reference
	 * count checks.
	 */
	periph->flags |= CAM_PERIPH_FREE;

	/*
	 * The peripheral destructor semantics dictate calling with only the
	 * SIM mutex held.  Since it might sleep, it should not be called
	 * with the topology lock held.
	 */
	xpt_unlock_buses();

	/*
	 * We need to call the peripheral destructor prior to removing the
	 * peripheral from the list.  Otherwise, we risk running into a
	 * scenario where the peripheral unit number may get reused
	 * (because it has been removed from the list), but some resources
	 * used by the peripheral are still hanging around.  In particular,
	 * the devfs nodes used by some peripherals like the pass(4) driver
	 * aren't fully cleaned up until the destructor is run.  If the
	 * unit number is reused before the devfs instance is fully gone,
	 * devfs will panic.
	 */
	if (periph->periph_dtor != NULL)
		periph->periph_dtor(periph);

	/*
	 * The peripheral list is protected by the topology lock.
	 */
	xpt_lock_buses();

	TAILQ_REMOVE(&drv->units, periph, unit_links);
	drv->generation++;

	xpt_remove_periph(periph);

	xpt_unlock_buses();
	if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
		xpt_print(periph->path, "Periph destroyed\n");
	else
		CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));

	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
		union ccb ccb;
		void *arg;

		switch (periph->deferred_ac) {
		case AC_FOUND_DEVICE:
			ccb.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		case AC_PATH_REGISTERED:
			xpt_path_inq(&ccb.cpi, periph->path);
			arg = &ccb;
			break;
		default:
			arg = NULL;
			break;
		}
		periph->deferred_callback(NULL, periph->deferred_ac,
					  periph->path, arg);
	}
	xpt_free_path(periph->path);
	free(periph, M_CAMPERIPH);
	xpt_lock_buses();
}

/*
 * Map user virtual pointers into kernel virtual address space, so we can
 * access the memory.  This is now a generic function that centralizes most
 * of the sanity checks on the data flags, if any.
 * This also only works for up to MAXPHYS memory.  Since we use
 * buffers to map stuff in and out, we're limited to the buffer size.
 */
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo,
    u_int maxmap)
{
	int numbufs, i;
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
	u_int32_t dirs[CAM_PERIPH_MAXMAPS];
	bool misaligned[CAM_PERIPH_MAXMAPS];

	bzero(mapinfo, sizeof(*mapinfo));
	if (maxmap == 0)
		maxmap = DFLTPHYS;	/* traditional default */
	else if (maxmap > MAXPHYS)
		maxmap = MAXPHYS;	/* for safety */
	switch(ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.match_buf_len == 0) {
			printf("cam_periph_mapmem: invalid match buffer "
			       "length 0\n");
			return(EINVAL);
		}
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		/*
		 * This request will not go to the hardware, no reason
		 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
		 */
		maxmap = MAXPHYS;
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);
		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
			return (EINVAL);
		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_ATA_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);
		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
			return (EINVAL);
		data_ptrs[0] = &ccb->ataio.data_ptr;
		lengths[0] = ccb->ataio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_SMP_IO:
		data_ptrs[0] = &ccb->smpio.smp_request;
		lengths[0] = ccb->smpio.smp_request_len;
		dirs[0] = CAM_DIR_OUT;
		data_ptrs[1] = &ccb->smpio.smp_response;
		lengths[1] = ccb->smpio.smp_response_len;
		dirs[1] = CAM_DIR_IN;
		numbufs = 2;
		break;
	case XPT_NVME_IO:
	case XPT_NVME_ADMIN:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return (0);
		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
			return (EINVAL);
		data_ptrs[0] = &ccb->nvmeio.data_ptr;
		lengths[0] = ccb->nvmeio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_DEV_ADVINFO:
		if (ccb->cdai.bufsiz == 0)
			return (0);

		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
		lengths[0] = ccb->cdai.bufsiz;
		dirs[0] = CAM_DIR_IN;
		numbufs = 1;

		/*
		 * This request will not go to the hardware, no reason
		 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
		 */
		maxmap = MAXPHYS;
		break;
	default:
		return(EINVAL);
		break; /* NOTREACHED */
	}

	/*
	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	 */
	for (i = 0; i < numbufs; i++) {
		if (lengths[i] > maxmap) {
			printf("cam_periph_mapmem: attempt to map %lu bytes, "
			       "which is greater than %lu\n",
			       (long)(lengths[i]), (u_long)maxmap);
			return (E2BIG);
		}

		/*
		 * The userland data pointer passed in may not be page
		 * aligned.  vmapbuf() truncates the address to a page
		 * boundary, so if the address isn't page aligned, we'll
		 * need enough space for the given transfer length, plus
		 * whatever extra space is necessary to make it to the page
		 * boundary.
		 */
		misaligned[i] = (lengths[i] +
		    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK) > MAXPHYS);
	}

	/*
	 * This keeps the kernel stack of current thread from getting
	 * swapped.  In low-memory situations where the kernel stack might
	 * otherwise get swapped out, this holds it and allows the thread
	 * to make progress and release the kernel mapped pages sooner.
	 *
	 * XXX KDM should I use P_NOSWAP instead?
	 */
	PHOLD(curproc);

	for (i = 0; i < numbufs; i++) {

		/* Save the user's data address. */
		mapinfo->orig[i] = *data_ptrs[i];

		/*
		 * For small buffers use malloc+copyin/copyout instead of
		 * mapping to KVA to avoid expensive TLB shootdowns.  For
		 * small allocations malloc is backed by UMA, and so much
		 * cheaper on SMP systems.
		 */
		if (lengths[i] <= periph_mapmem_thresh || misaligned[i]) {
			*data_ptrs[i] = malloc(lengths[i], M_CAMPERIPH,
			    M_WAITOK);
			if (dirs[i] != CAM_DIR_IN) {
				if (copyin(mapinfo->orig[i], *data_ptrs[i],
				    lengths[i]) != 0) {
					free(*data_ptrs[i], M_CAMPERIPH);
					*data_ptrs[i] = mapinfo->orig[i];
					goto fail;
				}
			} else
				bzero(*data_ptrs[i], lengths[i]);
			continue;
		}

		/*
		 * Get the buffer.
		 */
		mapinfo->bp[i] = getpbuf(NULL);

		/* set the direction */
		mapinfo->bp[i]->b_iocmd = (dirs[i] == CAM_DIR_OUT) ?
		    BIO_WRITE : BIO_READ;

		/*
		 * Map the buffer into kernel memory.
		 *
		 * Note that useracc() alone is not a sufficient test.
		 * vmapbuf() can still fail due to a smaller file mapped
		 * into a larger area of VM, or if userland races against
		 * vmapbuf() after the useracc() check.
		 */
		if (vmapbuf(mapinfo->bp[i], *data_ptrs[i], lengths[i], 1) < 0) {
			relpbuf(mapinfo->bp[i], NULL);
			goto fail;
		}

		/* set our pointer to the new mapped area */
		*data_ptrs[i] = mapinfo->bp[i]->b_data;
	}

	/*
	 * Now that we've gotten this far, change ownership to the kernel
	 * of the buffers so that we don't run afoul of returning to user
	 * space with locks (on the buffer) held.
	 */
	for (i = 0; i < numbufs; i++) {
		if (mapinfo->bp[i])
			BUF_KERNPROC(mapinfo->bp[i]);
	}

	mapinfo->num_bufs_used = numbufs;
	return(0);

fail:
	for (i--; i >= 0; i--) {
		if (mapinfo->bp[i]) {
			vunmapbuf(mapinfo->bp[i]);
			relpbuf(mapinfo->bp[i], NULL);
		} else
			free(*data_ptrs[i], M_CAMPERIPH);
		*data_ptrs[i] = mapinfo->orig[i];
	}
	PRELE(curproc);
	return(EACCES);
}
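
/*
 * Hedged usage sketch: cam_periph_mapmem() and cam_periph_unmapmem()
 * bracket the execution of a user-originated CCB, which is essentially
 * what the pass(4) ioctl path does.  The xxerror routine and softc are
 * hypothetical, and most error handling is elided:
 *
 *	error = cam_periph_mapmem(ccb, &mapinfo, maxmap);
 *	if (error == 0) {
 *		error = cam_periph_runccb(ccb, xxerror, CAM_RETRY_SELTO,
 *		    SF_RETRY_UA, softc->device_stats);
 *		cam_periph_unmapmem(ccb, &mapinfo);
 *	}
 */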

/*
 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().
 */
void
cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i;
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
	u_int32_t dirs[CAM_PERIPH_MAXMAPS];

	if (mapinfo->num_bufs_used <= 0) {
		/* nothing to free and the process wasn't held. */
		return;
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_ATA_IO:
		data_ptrs[0] = &ccb->ataio.data_ptr;
		lengths[0] = ccb->ataio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_SMP_IO:
		data_ptrs[0] = &ccb->smpio.smp_request;
		lengths[0] = ccb->smpio.smp_request_len;
		dirs[0] = CAM_DIR_OUT;
		data_ptrs[1] = &ccb->smpio.smp_response;
		lengths[1] = ccb->smpio.smp_response_len;
		dirs[1] = CAM_DIR_IN;
		numbufs = 2;
		break;
	case XPT_NVME_IO:
	case XPT_NVME_ADMIN:
		data_ptrs[0] = &ccb->nvmeio.data_ptr;
		lengths[0] = ccb->nvmeio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_DEV_ADVINFO:
		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
		lengths[0] = ccb->cdai.bufsiz;
		dirs[0] = CAM_DIR_IN;
		numbufs = 1;
		break;
	default:
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
		break; /* NOTREACHED */
	}

	for (i = 0; i < numbufs; i++) {
		if (mapinfo->bp[i]) {
			/* unmap the buffer */
			vunmapbuf(mapinfo->bp[i]);

			/* release the buffer */
			relpbuf(mapinfo->bp[i], NULL);
		} else {
			if (dirs[i] != CAM_DIR_OUT) {
				copyout(*data_ptrs[i], mapinfo->orig[i],
				    lengths[i]);
			}
			free(*data_ptrs[i], M_CAMPERIPH);
		}

		/* Set the user's pointer back to the original value */
		*data_ptrs[i] = mapinfo->orig[i];
	}

	/* allow ourselves to be swapped once again */
	PRELE(curproc);
}

int
cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
		 int (*error_routine)(union ccb *ccb,
				      cam_flags camflags,
				      u_int32_t sense_flags))
{
	union ccb 	     *ccb;
	int 		     error;
	int		     found;

	error = found = 0;

	switch(cmd){
	case CAMGETPASSTHRU:
		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
		xpt_setup_ccb(&ccb->ccb_h,
			      ccb->ccb_h.path,
			      CAM_PRIORITY_NORMAL);
		ccb->ccb_h.func_code = XPT_GDEVLIST;

		/*
		 * Basically, the point of this is that we go through
		 * the device list until we find a passthrough
		 * device.  In the current version of the CAM code, the
		 * only way to determine what type of device we're dealing
		 * with is by its name.
		 */
		while (found == 0) {
			ccb->cgdl.index = 0;
			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {

				/* we want the next device in the list */
				xpt_action(ccb);
				if (strncmp(ccb->cgdl.periph_name,
				    "pass", 4) == 0){
					found = 1;
					break;
				}
			}
			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
			    (found == 0)) {
				ccb->cgdl.periph_name[0] = '\0';
				ccb->cgdl.unit_number = 0;
				break;
			}
		}

		/* copy the result back out */
		bcopy(ccb, addr, sizeof(union ccb));

		/* and release the ccb */
		xpt_release_ccb(ccb);

		break;
	default:
		error = ENOTTY;
		break;
	}
	return(error);
}

static void
cam_periph_done_panic(struct cam_periph *periph, union ccb *done_ccb)
{

	panic("%s: already done with ccb %p", __func__, done_ccb);
}

static void
cam_periph_done(struct cam_periph *periph, union ccb *done_ccb)
{

	/* Caller will release the CCB */
	xpt_path_assert(done_ccb->ccb_h.path, MA_OWNED);
	done_ccb->ccb_h.cbfcnp = cam_periph_done_panic;
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

static void
cam_periph_ccbwait(union ccb *ccb)
{

	if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
		while (ccb->ccb_h.cbfcnp != cam_periph_done_panic)
			xpt_path_sleep(ccb->ccb_h.path, &ccb->ccb_h.cbfcnp,
			    PRIBIO, "cbwait", 0);
	}
	KASSERT(ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX &&
	    (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG,
	    ("%s: proceeding with incomplete ccb: ccb=%p, func_code=%#x, "
	     "status=%#x, index=%d", __func__, ccb, ccb->ccb_h.func_code,
	     ccb->ccb_h.status, ccb->ccb_h.pinfo.index));
}

int
cam_periph_runccb(union ccb *ccb,
		  int (*error_routine)(union ccb *ccb,
				       cam_flags camflags,
				       u_int32_t sense_flags),
		  cam_flags camflags, u_int32_t sense_flags,
		  struct devstat *ds)
{
	struct bintime *starttime;
	struct bintime ltime;
	int error;

	starttime = NULL;
	xpt_path_assert(ccb->ccb_h.path, MA_OWNED);
	KASSERT((ccb->ccb_h.flags & CAM_UNLOCKED) == 0,
	    ("%s: ccb=%p, func_code=%#x, flags=%#x", __func__, ccb,
	     ccb->ccb_h.func_code, ccb->ccb_h.flags));

	/*
	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	 */
	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO ||
	    ccb->ccb_h.func_code == XPT_ATA_IO)) {
		starttime = &ltime;
		binuptime(starttime);
		devstat_start_transaction(ds, starttime);
	}

	ccb->ccb_h.cbfcnp = cam_periph_done;
	xpt_action(ccb);

	do {
		cam_periph_ccbwait(ccb);
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
			error = 0;
		else if (error_routine != NULL) {
			ccb->ccb_h.cbfcnp = cam_periph_done;
			error = (*error_routine)(ccb, camflags, sense_flags);
		} else
			error = 0;

	} while (error == ERESTART);

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
		cam_release_devq(ccb->ccb_h.path,
				 /* relsim_flags */0,
				 /* openings */0,
				 /* timeout */0,
				 /* getcount_only */ FALSE);
		ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
	}

	if (ds != NULL) {
		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			devstat_end_transaction(ds,
					ccb->csio.dxfer_len - ccb->csio.resid,
					ccb->csio.tag_action & 0x3,
					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
					CAM_DIR_NONE) ?  DEVSTAT_NO_DATA :
					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
					DEVSTAT_WRITE :
					DEVSTAT_READ, NULL, starttime);
		} else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
			devstat_end_transaction(ds,
					ccb->ataio.dxfer_len - ccb->ataio.resid,
					0, /* Not used in ATA */
					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
					CAM_DIR_NONE) ?  DEVSTAT_NO_DATA :
					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
					DEVSTAT_WRITE :
					DEVSTAT_READ, NULL, starttime);
		}
	}

	return(error);
}
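
/*
 * Synchronous command sketch (hedged; the softc and xxerror routine are
 * hypothetical, and the NULL callback is safe because
 * cam_periph_runccb() installs its own): allocate a CCB, fill it in,
 * run it to completion with the periph lock held, and release it:
 *
 *	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 *	scsi_test_unit_ready(&ccb->csio,
 *			     5,
 *			     NULL,
 *			     MSG_SIMPLE_Q_TAG,
 *			     SSD_FULL_SIZE,
 *			     5000);
 *	error = cam_periph_runccb(ccb, xxerror, 0, SF_RETRY_UA,
 *	    softc->device_stats);
 *	xpt_release_ccb(ccb);
 */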

void
cam_freeze_devq(struct cam_path *path)
{
	struct ccb_hdr ccb_h;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_freeze_devq\n"));
	xpt_setup_ccb(&ccb_h, path, /*priority*/1);
	ccb_h.func_code = XPT_NOOP;
	ccb_h.flags = CAM_DEV_QFREEZE;
	xpt_action((union ccb *)&ccb_h);
}

u_int32_t
cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
		 u_int32_t openings, u_int32_t arg,
		 int getcount_only)
{
	struct ccb_relsim crs;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_release_devq(%u, %u, %u, %d)\n",
	    relsim_flags, openings, arg, getcount_only));
	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
	crs.release_flags = relsim_flags;
	crs.openings = openings;
	crs.release_timeout = arg;
	xpt_action((union ccb *)&crs);
	return (crs.qfrozen_cnt);
}

#define saved_ccb_ptr ppriv_ptr0
static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
	union ccb      *saved_ccb;
	cam_status	status;
	struct scsi_start_stop_unit *scsi_cmd;
	int		error = 0, error_code, sense_key, asc, ascq;

	scsi_cmd = (struct scsi_start_stop_unit *)
	    &done_ccb->csio.cdb_io.cdb_bytes;
	status = done_ccb->ccb_h.status;

	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		if (scsi_extract_sense_ccb(done_ccb,
		    &error_code, &sense_key, &asc, &ascq)) {
			/*
			 * If the error is "invalid field in CDB",
			 * and the load/eject flag is set, turn the
			 * flag off and try again.  This is just in
			 * case the drive in question barfs on the
			 * load eject flag.  The CAM code should set
			 * the load/eject flag by default for
			 * removable media.
			 */
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			     (asc == 0x24) && (ascq == 0x00)) {
				scsi_cmd->how &= ~SSS_LOEJ;
				if (status & CAM_DEV_QFRZN) {
					cam_release_devq(done_ccb->ccb_h.path,
					    0, 0, 0, 0);
					done_ccb->ccb_h.status &=
					    ~CAM_DEV_QFRZN;
				}
				xpt_action(done_ccb);
				goto out;
			}
		}
		error = cam_periph_error(done_ccb, 0,
		    SF_RETRY_UA | SF_NO_PRINT, NULL);
		if (error == ERESTART)
			goto out;
		if (done_ccb->ccb_h.status & CAM_DEV_QFRZN) {
			cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
			done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		}
	} else {
		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get
		 * the inquiry information.  Many devices (mostly disks)
		 * don't properly report their inquiry information unless
		 * they are spun up.
		 */
		if (scsi_cmd->opcode == START_STOP_UNIT)
			xpt_async(AC_INQ_CHANGED, done_ccb->ccb_h.path, NULL);
	}

	/* If we tried long wait and still failed, remember that. */
	if ((periph->flags & CAM_PERIPH_RECOVERY_WAIT) &&
	    (done_ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY)) {
		periph->flags &= ~CAM_PERIPH_RECOVERY_WAIT;
		if (error != 0 && done_ccb->ccb_h.retry_count == 0)
			periph->flags |= CAM_PERIPH_RECOVERY_WAIT_FAILED;
	}

	/*
	 * After the recovery action(s) have completed, return to the
	 * original CCB.  If the recovery CCB has failed, despite its own
	 * possible retries and recovery, assume we are back in the state
	 * we started from, but with no recovery hopes left.  In that case,
	 * after the final attempt below, we cancel any further retries,
	 * which also blocks any new recovery attempts for this CCB, and
	 * the result will be the final one returned to the CCB owner.
	 */
	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
	bcopy(saved_ccb, done_ccb, sizeof(*done_ccb));
	xpt_free_ccb(saved_ccb);
	if (done_ccb->ccb_h.cbfcnp != camperiphdone)
		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
	if (error != 0)
		done_ccb->ccb_h.retry_count = 0;
	xpt_action(done_ccb);

out:
	/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
	cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
}

/*
 * Generic Async Event handler.  Peripheral drivers usually
 * filter out the events that require personal attention,
 * and leave the rest to this function.
 */
void
cam_periph_async(struct cam_periph *periph, u_int32_t code,
		 struct cam_path *path, void *arg)
{
	switch (code) {
	case AC_LOST_DEVICE:
		cam_periph_invalidate(periph);
		break;
	default:
		break;
	}
}

void
cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
{
	struct ccb_getdevstats cgds;

	xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	cgds.ccb_h.func_code = XPT_GDEV_STATS;
	xpt_action((union ccb *)&cgds);
	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
}

void
cam_periph_freeze_after_event(struct cam_periph *periph,
			      struct timeval* event_time, u_int duration_ms)
{
	struct timeval delta;
	struct timeval duration_tv;

	if (!timevalisset(event_time))
		return;

	microtime(&delta);
	timevalsub(&delta, event_time);
	duration_tv.tv_sec = duration_ms / 1000;
	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
	if (timevalcmp(&delta, &duration_tv, <)) {
		timevalsub(&duration_tv, &delta);

		duration_ms = duration_tv.tv_sec * 1000;
		duration_ms += duration_tv.tv_usec / 1000;
		cam_freeze_devq(periph->path);
		cam_release_devq(periph->path,
				RELSIM_RELEASE_AFTER_TIMEOUT,
				/*reduction*/0,
				/*timeout*/duration_ms,
				/*getcount_only*/0);
	}

}

static int
camperiphscsistatuserror(union ccb *ccb, union ccb **orig_ccb,
    cam_flags camflags, u_int32_t sense_flags,
    int *openings, u_int32_t *relsim_flags,
    u_int32_t *timeout, u_int32_t *action, const char **action_string)
{
	struct cam_periph *periph;
	int error;

	switch (ccb->csio.scsi_status) {
	case SCSI_STATUS_OK:
	case SCSI_STATUS_COND_MET:
	case SCSI_STATUS_INTERMED:
	case SCSI_STATUS_INTERMED_COND_MET:
		error = 0;
		break;
	case SCSI_STATUS_CMD_TERMINATED:
	case SCSI_STATUS_CHECK_COND:
		error = camperiphscsisenseerror(ccb, orig_ccb,
					        camflags,
					        sense_flags,
					        openings,
					        relsim_flags,
					        timeout,
					        action,
					        action_string);
		break;
	case SCSI_STATUS_QUEUE_FULL:
	{
		/* no decrement */
		struct ccb_getdevstats cgds;

		/*
		 * First off, find out what the current
		 * transaction counts are.
		 */
		xpt_setup_ccb(&cgds.ccb_h,
			      ccb->ccb_h.path,
			      CAM_PRIORITY_NORMAL);
		cgds.ccb_h.func_code = XPT_GDEV_STATS;
		xpt_action((union ccb *)&cgds);

		/*
		 * If we were the only transaction active, treat
		 * the QUEUE FULL as if it were a BUSY condition.
		 */
		if (cgds.dev_active != 0) {
			int total_openings;

			/*
			 * Reduce the number of openings to
			 * one less than the number it took
			 * to get a queue full, bounded by the
			 * minimum allowed tag count for this
			 * device.
			 */
			total_openings = cgds.dev_active + cgds.dev_openings;
			*openings = cgds.dev_active;
			if (*openings < cgds.mintags)
				*openings = cgds.mintags;
			if (*openings < total_openings)
				*relsim_flags = RELSIM_ADJUST_OPENINGS;
			else {
				/*
				 * Some devices report queue full for
				 * temporary resource shortages.  For
				 * this reason, we allow a minimum
				 * tag count to be entered via a
				 * quirk entry to prevent the queue
				 * count on these devices from falling
				 * to a pessimistically low value.  We
				 * still wait for the next successful
				 * completion, however, before queueing
				 * more transactions to the device.
				 */
				*relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
			}
			*timeout = 0;
			error = ERESTART;
			*action &= ~SSQ_PRINT_SENSE;
			break;
		}
		/* FALLTHROUGH */
	}
	case SCSI_STATUS_BUSY:
		/*
		 * Restart the queue after either another
		 * command completes or a 1 second timeout.
		 */
		periph = xpt_path_periph(ccb->ccb_h.path);
		if (periph->flags & CAM_PERIPH_INVALID) {
			error = EIO;
			*action_string = "Periph was invalidated";
		} else if ((sense_flags & SF_RETRY_BUSY) != 0 ||
		    ccb->ccb_h.retry_count > 0) {
			if ((sense_flags & SF_RETRY_BUSY) == 0)
				ccb->ccb_h.retry_count--;
			error = ERESTART;
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
				      | RELSIM_RELEASE_AFTER_CMDCMPLT;
			*timeout = 1000;
		} else {
			error = EIO;
			*action_string = "Retries exhausted";
		}
		break;
	case SCSI_STATUS_RESERV_CONFLICT:
	default:
		error = EIO;
		break;
	}
	return (error);
}

static int
camperiphscsisenseerror(union ccb *ccb, union ccb **orig,
    cam_flags camflags, u_int32_t sense_flags,
    int *openings, u_int32_t *relsim_flags,
    u_int32_t *timeout, u_int32_t *action, const char **action_string)
{
	struct cam_periph *periph;
	union ccb *orig_ccb = ccb;
	int error, recoveryccb;

	periph = xpt_path_periph(ccb->ccb_h.path);
	recoveryccb = (ccb->ccb_h.cbfcnp == camperiphdone);
	if ((periph->flags & CAM_PERIPH_RECOVERY_INPROG) && !recoveryccb) {
		/*
		 * If error recovery is already in progress, don't attempt
		 * to process this error, but requeue it unconditionally
		 * and attempt to process it once error recovery has
		 * completed.  This failed command is probably related to
		 * the error that caused the currently active error recovery
		 * action, so our current recovery efforts should also
		 * address this command.  Be aware that the error recovery
		 * code assumes that only one recovery action is in progress
		 * on a particular peripheral instance at any given time
		 * (e.g. only one saved CCB for error recovery) so it is
		 * imperative that we don't violate this assumption.
		 */
		error = ERESTART;
		*action &= ~SSQ_PRINT_SENSE;
	} else {
		scsi_sense_action err_action;
		struct ccb_getdev cgd;

		/*
		 * Grab the inquiry data for this device.
		 */
		xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL);
		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)&cgd);

		err_action = scsi_error_action(&ccb->csio, &cgd.inq_data,
		    sense_flags);
		error = err_action & SS_ERRMASK;

		/*
		 * Do not autostart sequential access devices
		 * to avoid unexpected tape loading.
		 */
		if ((err_action & SS_MASK) == SS_START &&
		    SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) {
			*action_string = "Will not autostart a "
			    "sequential access device";
			goto sense_error_done;
		}

		/*
		 * Avoid recovery recursion if recovery action is the same.
		 */
		if ((err_action & SS_MASK) >= SS_START && recoveryccb) {
			if (((err_action & SS_MASK) == SS_START &&
			     ccb->csio.cdb_io.cdb_bytes[0] == START_STOP_UNIT) ||
			    ((err_action & SS_MASK) == SS_TUR &&
			     (ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY))) {
				err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;
				*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				*timeout = 500;
			}
		}

		/*
		 * If the recovery action will consume a retry,
		 * make sure we actually have retries available.
		 */
		if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
		 	if (ccb->ccb_h.retry_count > 0 &&
			    (periph->flags & CAM_PERIPH_INVALID) == 0)
		 		ccb->ccb_h.retry_count--;
			else {
				*action_string = "Retries exhausted";
				goto sense_error_done;
			}
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Do common portions of commands that
			 * use recovery CCBs.
			 */
			orig_ccb = xpt_alloc_ccb_nowait();
			if (orig_ccb == NULL) {
				*action_string = "Can't allocate recovery CCB";
				goto sense_error_done;
			}
			/*
			 * Clear freeze flag for original request here, as
			 * this freeze will be dropped as part of ERESTART.
			 */
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
			bcopy(ccb, orig_ccb, sizeof(*orig_ccb));
		}

		switch (err_action & SS_MASK) {
		case SS_NOP:
			*action_string = "No recovery action needed";
			error = 0;
			break;
		case SS_RETRY:
			*action_string = "Retrying command (per sense data)";
			error = ERESTART;
			break;
		case SS_FAIL:
			*action_string = "Unretryable error";
			break;
		case SS_START:
		{
			int le;

			/*
			 * Send a start unit command to the device, and
			 * then retry the command.
			 */
			*action_string = "Attempting to start unit";
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;

			/*
			 * Check for removable media and set
			 * load/eject flag appropriately.
			 */
			if (SID_IS_REMOVABLE(&cgd.inq_data))
				le = TRUE;
			else
				le = FALSE;

			scsi_start_stop(&ccb->csio,
					/*retries*/1,
					camperiphdone,
					MSG_SIMPLE_Q_TAG,
					/*start*/TRUE,
					/*load/eject*/le,
					/*immediate*/FALSE,
					SSD_FULL_SIZE,
					/*timeout*/50000);
			break;
		}
		case SS_TUR:
		{
			/*
			 * Send a Test Unit Ready to the device.
			 * If the 'many' flag is set, we send 120
			 * test unit ready commands, one every half
			 * second.  Otherwise, we just send one TUR.
			 * We only want to do this if the retry
			 * count has not been exhausted.
			 */
			int retries;

			if ((err_action & SSQ_MANY) != 0 && (periph->flags &
			     CAM_PERIPH_RECOVERY_WAIT_FAILED) == 0) {
				periph->flags |= CAM_PERIPH_RECOVERY_WAIT;
				*action_string = "Polling device for readiness";
				retries = 120;
			} else {
				*action_string = "Testing device for readiness";
				retries = 1;
			}
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
			scsi_test_unit_ready(&ccb->csio,
					     retries,
					     camperiphdone,
					     MSG_SIMPLE_Q_TAG,
					     SSD_FULL_SIZE,
					     /*timeout*/5000);

			/*
			 * Accomplish our 500ms delay by deferring
			 * the release of our device queue appropriately.
			 */
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
			*timeout = 500;
			break;
		}
		default:
			panic("Unhandled error action %x", err_action);
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Drop the priority, so that the recovery
			 * CCB is the first to execute.  Freeze the queue
			 * after this command is sent so that we can
			 * restore the old csio and have it queued in
			 * the proper order before we release normal
			 * transactions to the device.
			 */
			ccb->ccb_h.pinfo.priority--;
			ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			ccb->ccb_h.saved_ccb_ptr = orig_ccb;
			error = ERESTART;
			*orig = orig_ccb;
		}

sense_error_done:
		*action = err_action;
	}
	return (error);
}

/*
 * Generic error handler.  Peripheral drivers usually filter
 * out the errors that they handle in a unique manner, then
 * call this function.
 */
int
cam_periph_error(union ccb *ccb, cam_flags camflags,
		 u_int32_t sense_flags, union ccb *save_ccb)
{
	struct cam_path *newpath;
	union ccb  *orig_ccb, *scan_ccb;
	struct cam_periph *periph;
	const char *action_string;
	cam_status  status;
	int	    frozen, error, openings, devctl_err;
	u_int32_t   action, relsim_flags, timeout;

	action = SSQ_PRINT_SENSE;
	periph = xpt_path_periph(ccb->ccb_h.path);
	action_string = NULL;
	status = ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	status &= CAM_STATUS_MASK;
	devctl_err = openings = relsim_flags = timeout = 0;
	orig_ccb = ccb;

	/* Filter the errors that should be reported via devctl */
	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
	case CAM_CMD_TIMEOUT:
	case CAM_REQ_ABORTED:
	case CAM_REQ_CMP_ERR:
	case CAM_REQ_TERMIO:
	case CAM_UNREC_HBA_ERROR:
	case CAM_DATA_RUN_ERR:
	case CAM_SCSI_STATUS_ERROR:
	case CAM_ATA_STATUS_ERROR:
	case CAM_SMP_STATUS_ERROR:
		devctl_err++;
		break;
	default:
		break;
	}

	switch (status) {
	case CAM_REQ_CMP:
		error = 0;
		action &= ~SSQ_PRINT_SENSE;
		break;
	case CAM_SCSI_STATUS_ERROR:
		error = camperiphscsistatuserror(ccb, &orig_ccb,
		    camflags, sense_flags, &openings, &relsim_flags,
		    &timeout, &action, &action_string);
		break;
	case CAM_AUTOSENSE_FAIL:
		error = EIO;	/* we have to kill the command */
		break;
	case CAM_UA_ABORT:
	case CAM_UA_TERMIO:
	case CAM_MSG_REJECT_REC:
		/* XXX Don't know that these are correct */
		error = EIO;
		break;
	case CAM_SEL_TIMEOUT:
		if ((camflags & CAM_RETRY_SELTO) != 0) {
			if (ccb->ccb_h.retry_count > 0 &&
			    (periph->flags & CAM_PERIPH_INVALID) == 0) {
				ccb->ccb_h.retry_count--;
				error = ERESTART;

				/*
				 * Wait a bit to give the device
				 * time to recover before we try again.
				 */
				relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = periph_selto_delay;
				break;
			}
			action_string = "Retries exhausted";
		}
		/* FALLTHROUGH */
	case CAM_DEV_NOT_THERE:
		error = ENXIO;
		action = SSQ_LOST;
		break;
	case CAM_REQ_INVALID:
	case CAM_PATH_INVALID:
	case CAM_NO_HBA:
	case CAM_PROVIDE_FAIL:
	case CAM_REQ_TOO_BIG:
	case CAM_LUN_INVALID:
	case CAM_TID_INVALID:
	case CAM_FUNC_NOTAVAIL:
		error = EINVAL;
		break;
	case CAM_SCSI_BUS_RESET:
	case CAM_BDR_SENT:
		/*
		 * Commands that repeatedly timeout and cause these
		 * kinds of error recovery actions, should return
		 * CAM_CMD_TIMEOUT, which allows us to safely assume
		 * that this command was an innocent bystander to
		 * these events and should be unconditionally
		 * retried.
		 */
	case CAM_REQUEUE_REQ:
		/* Unconditional requeue if device is still there */
		if (periph->flags & CAM_PERIPH_INVALID) {
			action_string = "Periph was invalidated";
			error = EIO;
		} else if (sense_flags & SF_NO_RETRY) {
			error = EIO;
			action_string = "Retry was blocked";
		} else {
			error = ERESTART;
			action &= ~SSQ_PRINT_SENSE;
		}
		break;
	case CAM_RESRC_UNAVAIL:
		/* Wait a bit for the resource shortage to abate. */
		timeout = periph_noresrc_delay;
		/* FALLTHROUGH */
	case CAM_BUSY:
		if (timeout == 0) {
			/* Wait a bit for the busy condition to abate. */
			timeout = periph_busy_delay;
		}
		relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
		/* FALLTHROUGH */
	case CAM_ATA_STATUS_ERROR:
	case CAM_REQ_CMP_ERR:
	case CAM_CMD_TIMEOUT:
	case CAM_UNEXP_BUSFREE:
	case CAM_UNCOR_PARITY:
	case CAM_DATA_RUN_ERR:
	default:
		if (periph->flags & CAM_PERIPH_INVALID) {
			error = EIO;
			action_string = "Periph was invalidated";
		} else if (ccb->ccb_h.retry_count == 0) {
			error = EIO;
			action_string = "Retries exhausted";
		} else if (sense_flags & SF_NO_RETRY) {
			error = EIO;
			action_string = "Retry was blocked";
		} else {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
		}
		break;
	}

	if ((sense_flags & SF_PRINT_ALWAYS) ||
	    CAM_DEBUGGED(ccb->ccb_h.path, CAM_DEBUG_INFO))
		action |= SSQ_PRINT_SENSE;
	else if (sense_flags & SF_NO_PRINT)
		action &= ~SSQ_PRINT_SENSE;
	if ((action & SSQ_PRINT_SENSE) != 0)
		cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
	if (error != 0 && (action & SSQ_PRINT_SENSE) != 0) {
		if (error != ERESTART) {
			if (action_string == NULL)
				action_string = "Unretryable error";
			xpt_print(ccb->ccb_h.path, "Error %d, %s\n",
			    error, action_string);
		} else if (action_string != NULL)
			xpt_print(ccb->ccb_h.path, "%s\n", action_string);
		else
			xpt_print(ccb->ccb_h.path, "Retrying command\n");
	}

	if (devctl_err && (error != 0 || (action & SSQ_PRINT_SENSE) != 0))
		cam_periph_devctl_notify(orig_ccb);

	if ((action & SSQ_LOST) != 0) {
		lun_id_t lun_id;

		/*
		 * For a selection timeout, we consider all of the LUNs on
		 * the target to be gone.  If the status is CAM_DEV_NOT_THERE,
		 * then we only get rid of the device(s) specified by the
		 * path in the original CCB.
		 */
		if (status == CAM_SEL_TIMEOUT)
			lun_id = CAM_LUN_WILDCARD;
		else
			lun_id = xpt_path_lun_id(ccb->ccb_h.path);

		/* Should we do more if we can't create the path?? */
		if (xpt_create_path(&newpath, periph,
				    xpt_path_path_id(ccb->ccb_h.path),
				    xpt_path_target_id(ccb->ccb_h.path),
				    lun_id) == CAM_REQ_CMP) {

			/*
			 * Let peripheral drivers know that this
			 * device has gone away.
			 */
			xpt_async(AC_LOST_DEVICE, newpath, NULL);
			xpt_free_path(newpath);
		}
	}

	/* Broadcast UNIT ATTENTIONs to all periphs. */
	if ((action & SSQ_UA) != 0)
		xpt_async(AC_UNIT_ATTENTION, orig_ccb->ccb_h.path, orig_ccb);

	/* Rescan target on "Reported LUNs data has changed" */
	if ((action & SSQ_RESCAN) != 0) {
		if (xpt_create_path(&newpath, NULL,
				    xpt_path_path_id(ccb->ccb_h.path),
				    xpt_path_target_id(ccb->ccb_h.path),
				    CAM_LUN_WILDCARD) == CAM_REQ_CMP) {

			scan_ccb = xpt_alloc_ccb_nowait();
			if (scan_ccb != NULL) {
				scan_ccb->ccb_h.path = newpath;
				scan_ccb->ccb_h.func_code = XPT_SCAN_TGT;
				scan_ccb->crcn.flags = 0;
				xpt_rescan(scan_ccb);
			} else {
				xpt_print(newpath,
				    "Can't allocate CCB to rescan target\n");
				xpt_free_path(newpath);
			}
		}
	}

	/* Attempt a retry */
	if (error == ERESTART || error == 0) {
		if (frozen != 0)
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		if (error == ERESTART)
			xpt_action(ccb);
		if (frozen != 0)
			cam_release_devq(ccb->ccb_h.path,
					 relsim_flags,
					 openings,
					 timeout,
					 /*getcount_only*/0);
	}

	return (error);
}
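
/*
 * Typical caller sketch (hedged): a driver's completion routine funnels
 * the errors it does not handle itself into cam_periph_error() and
 * requeues on ERESTART, much as camperiphdone() above does:
 *
 *	error = cam_periph_error(done_ccb, 0, SF_RETRY_UA | SF_NO_PRINT,
 *	    NULL);
 *	if (error == ERESTART)
 *		return;
 *	if (done_ccb->ccb_h.status & CAM_DEV_QFRZN) {
 *		cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
 *		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
 *	}
 */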

#define CAM_PERIPH_DEVD_MSG_SIZE	256

static void
cam_periph_devctl_notify(union ccb *ccb)
{
	struct cam_periph *periph;
	struct ccb_getdev *cgd;
	struct sbuf sb;
	int serr, sk, asc, ascq;
	char *sbmsg, *type;

	sbmsg = malloc(CAM_PERIPH_DEVD_MSG_SIZE, M_CAMPERIPH, M_NOWAIT);
	if (sbmsg == NULL)
		return;

	sbuf_new(&sb, sbmsg, CAM_PERIPH_DEVD_MSG_SIZE, SBUF_FIXEDLEN);

	periph = xpt_path_periph(ccb->ccb_h.path);
	sbuf_printf(&sb, "device=%s%d ", periph->periph_name,
	    periph->unit_number);

	sbuf_printf(&sb, "serial=\"");
	if ((cgd = (struct ccb_getdev *)xpt_alloc_ccb_nowait()) != NULL) {
		xpt_setup_ccb(&cgd->ccb_h, ccb->ccb_h.path,
		    CAM_PRIORITY_NORMAL);
		cgd->ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)cgd);

		if (cgd->ccb_h.status == CAM_REQ_CMP)
			sbuf_bcat(&sb, cgd->serial_num, cgd->serial_num_len);
		xpt_free_ccb((union ccb *)cgd);
	}
	sbuf_printf(&sb, "\" ");
	sbuf_printf(&sb, "cam_status=\"0x%x\" ", ccb->ccb_h.status);

	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
	case CAM_CMD_TIMEOUT:
		sbuf_printf(&sb, "timeout=%d ", ccb->ccb_h.timeout);
		type = "timeout";
		break;
	case CAM_SCSI_STATUS_ERROR:
		sbuf_printf(&sb, "scsi_status=%d ", ccb->csio.scsi_status);
		if (scsi_extract_sense_ccb(ccb, &serr, &sk, &asc, &ascq))
			sbuf_printf(&sb, "scsi_sense=\"%02x %02x %02x %02x\" ",
			    serr, sk, asc, ascq);
		type = "error";
		break;
	case CAM_ATA_STATUS_ERROR:
		sbuf_printf(&sb, "RES=\"");
		ata_res_sbuf(&ccb->ataio.res, &sb);
		sbuf_printf(&sb, "\" ");
		type = "error";
		break;
	default:
		type = "error";
		break;
	}

	if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
		sbuf_printf(&sb, "CDB=\"");
		scsi_cdb_sbuf(scsiio_cdb_ptr(&ccb->csio), &sb);
		sbuf_printf(&sb, "\" ");
	} else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
		sbuf_printf(&sb, "ACB=\"");
		ata_cmd_sbuf(&ccb->ataio.cmd, &sb);
		sbuf_printf(&sb, "\" ");
	}

	if (sbuf_finish(&sb) == 0)
		devctl_notify("CAM", "periph", type, sbuf_data(&sb));
	sbuf_delete(&sb);
	free(sbmsg, M_CAMPERIPH);
}
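
/*
 * The notification assembled above reaches userland through devd(8).
 * A resulting event looks roughly like the following (an illustration,
 * not captured output; the exact fields depend on CCB type and status):
 *
 *	!system=CAM subsystem=periph type=timeout device=da0
 *	    serial="XYZ123" cam_status="0x4b" timeout=30000
 *	    CDB="28 00 00 12 34 56 00 00 08 00"
 */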