/*-
 * Common functions for CAM "type" (peripheral) drivers.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/cam/cam_periph.c 168882 2007-04-19 23:34:51Z scottl $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/linker_set.h>
#include <sys/bio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/devicestat.h>
#include <sys/bus.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

static	u_int		camperiphnextunit(struct periph_driver *p_drv,
					  u_int newunit, int wired,
					  path_id_t pathid, target_id_t target,
					  lun_id_t lun);
static	u_int		camperiphunit(struct periph_driver *p_drv,
				      path_id_t pathid, target_id_t target,
				      lun_id_t lun);
static	void		camperiphdone(struct cam_periph *periph,
					union ccb *done_ccb);
static  void		camperiphfree(struct cam_periph *periph);
static int		camperiphscsistatuserror(union ccb *ccb,
						 cam_flags camflags,
						 u_int32_t sense_flags,
						 union ccb *save_ccb,
						 int *openings,
						 u_int32_t *relsim_flags,
						 u_int32_t *timeout);
static	int		camperiphscsisenseerror(union ccb *ccb,
					        cam_flags camflags,
					        u_int32_t sense_flags,
					        union ccb *save_ccb,
					        int *openings,
					        u_int32_t *relsim_flags,
					        u_int32_t *timeout);

static int nperiph_drivers;
struct periph_driver **periph_drivers;

MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");

static int periph_selto_delay = 1000;
TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
static int periph_noresrc_delay = 500;
TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
static int periph_busy_delay = 500;
TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
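
/*
 * These delay values are in milliseconds; they are handed to
 * cam_release_devq() as RELSIM_RELEASE_AFTER_TIMEOUT timeouts below.
 * As a sketch (values illustrative only), they could be overridden
 * from loader.conf using the tunable names declared above:
 *
 *	kern.cam.periph_selto_delay="2000"
 *	kern.cam.periph_busy_delay="250"
 */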

void
periphdriver_register(void *data)
{
	struct periph_driver **newdrivers, **old;
	int ndrivers;

	ndrivers = nperiph_drivers + 2;
	newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_TEMP, M_WAITOK);
	if (periph_drivers)
		bcopy(periph_drivers, newdrivers,
		      sizeof(*newdrivers) * nperiph_drivers);
	newdrivers[nperiph_drivers] = (struct periph_driver *)data;
	newdrivers[nperiph_drivers + 1] = NULL;
	old = periph_drivers;
	periph_drivers = newdrivers;
	if (old)
		free(old, M_TEMP);
	nperiph_drivers++;
}
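
/*
 * Peripheral drivers hand a struct periph_driver to the function above,
 * normally via the PERIPHDRIVER_DECLARE() macro from cam_periph.h rather
 * than by calling it directly.  A minimal sketch (the "foo" driver and
 * its fooinit() routine are hypothetical):
 *
 *	static struct periph_driver foodriver = {
 *		fooinit, "foo",
 *		TAILQ_HEAD_INITIALIZER(foodriver.units), 0
 *	};
 *	PERIPHDRIVER_DECLARE(foo, foodriver);
 */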

cam_status
cam_periph_alloc(periph_ctor_t *periph_ctor,
		 periph_oninv_t *periph_oninvalidate,
		 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
		 char *name, cam_periph_type type, struct cam_path *path,
		 ac_callback_t *ac_callback, ac_code code, void *arg)
{
	struct		periph_driver **p_drv;
	struct		cam_sim *sim;
	struct		cam_periph *periph;
	struct		cam_periph *cur_periph;
	path_id_t	path_id;
	target_id_t	target_id;
	lun_id_t	lun_id;
	cam_status	status;
	u_int		init_level;

	init_level = 0;
	/*
	 * Handle Hot-Plug scenarios.  If there is already a peripheral
	 * of our type assigned to this path, we are likely waiting for
	 * final close on an old, invalidated, peripheral.  If this is
	 * the case, queue up a deferred call to the peripheral's async
	 * handler.  If it looks like a mistaken re-allocation, complain.
	 */
	if ((periph = cam_periph_find(path, name)) != NULL) {

		if ((periph->flags & CAM_PERIPH_INVALID) != 0
		 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
			periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
			periph->deferred_callback = ac_callback;
			periph->deferred_ac = code;
			return (CAM_REQ_INPROG);
		} else {
			printf("cam_periph_alloc: attempt to re-allocate "
			       "valid device %s%d rejected\n",
			       periph->periph_name, periph->unit_number);
		}
		return (CAM_REQ_INVALID);
	}

	periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
					     M_NOWAIT);

	if (periph == NULL)
		return (CAM_RESRC_UNAVAIL);

	init_level++;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, name) == 0)
			break;
	}
	xpt_unlock_buses();

	sim = xpt_path_sim(path);
	path_id = xpt_path_path_id(path);
	target_id = xpt_path_target_id(path);
	lun_id = xpt_path_lun_id(path);
	bzero(periph, sizeof(*periph));
	cam_init_pinfo(&periph->pinfo);
	periph->periph_start = periph_start;
	periph->periph_dtor = periph_dtor;
	periph->periph_oninval = periph_oninvalidate;
	periph->type = type;
	periph->periph_name = name;
	periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
	periph->immediate_priority = CAM_PRIORITY_NONE;
	periph->refcount = 0;
	periph->sim = sim;
	SLIST_INIT(&periph->ccb_list);
	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
	if (status != CAM_REQ_CMP)
		goto failure;

	periph->path = path;
	init_level++;

	status = xpt_add_periph(periph);

	if (status != CAM_REQ_CMP)
		goto failure;

	cur_periph = TAILQ_FIRST(&(*p_drv)->units);
	while (cur_periph != NULL
	    && cur_periph->unit_number < periph->unit_number)
		cur_periph = TAILQ_NEXT(cur_periph, unit_links);

	if (cur_periph != NULL)
		TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
	else {
		TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
		(*p_drv)->generation++;
	}

	init_level++;

	status = periph_ctor(periph, arg);

	if (status == CAM_REQ_CMP)
		init_level++;

failure:
	switch (init_level) {
	case 4:
		/* Initialized successfully */
		break;
	case 3:
		TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
		xpt_remove_periph(periph);
		/* FALLTHROUGH */
	case 2:
		xpt_free_path(periph->path);
		/* FALLTHROUGH */
	case 1:
		free(periph, M_CAMPERIPH);
		/* FALLTHROUGH */
	case 0:
		/* No cleanup to perform. */
		break;
	default:
		panic("cam_periph_alloc: Unknown init level");
	}
	return(status);
}

/*
 * Find a peripheral structure with the specified path, target, lun,
 * and (optionally) type.  If the name is NULL, this function will return
 * the first peripheral driver that matches the specified path.
 */
struct cam_periph *
cam_periph_find(struct cam_path *path, char *name)
{
	struct periph_driver **p_drv;
	struct cam_periph *periph;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {

		if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
			continue;

		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) == 0) {
				xpt_unlock_buses();
				return(periph);
			}
		}
		if (name != NULL) {
			xpt_unlock_buses();
			return(NULL);
		}
	}
	xpt_unlock_buses();
	return(NULL);
}

cam_status
cam_periph_acquire(struct cam_periph *periph)
{

	if (periph == NULL)
		return(CAM_REQ_CMP_ERR);

	xpt_lock_buses();
	periph->refcount++;
	xpt_unlock_buses();

	return(CAM_REQ_CMP);
}

void
cam_periph_release(struct cam_periph *periph)
{

	if (periph == NULL)
		return;

	xpt_lock_buses();
	if ((--periph->refcount == 0)
	 && (periph->flags & CAM_PERIPH_INVALID)) {
		camperiphfree(periph);
	}
	xpt_unlock_buses();

}
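
/*
 * Sketch of the reference-counting pattern these two functions support,
 * for code that hands a periph to an asynchronous context:
 *
 *	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
 *		return (ENXIO);
 *	...the periph cannot be freed while the reference is held...
 *	cam_periph_release(periph);
 *
 * The final release of an invalidated periph is what triggers
 * camperiphfree() below.
 */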

int
cam_periph_hold(struct cam_periph *periph, int priority)
{
	struct mtx *mtx;
	int error;

	mtx_assert(periph->sim->mtx, MA_OWNED);

	/*
	 * Increment the reference count on the peripheral
	 * while we wait for our lock attempt to succeed
	 * to ensure the peripheral doesn't disappear out
	 * from under us while we sleep.
	 */

	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return (ENXIO);

	mtx = periph->sim->mtx;
	if (mtx == &Giant)
		mtx = NULL;

	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
		periph->flags |= CAM_PERIPH_LOCK_WANTED;
		if ((error = msleep(periph, mtx, priority, "caplck", 0)) != 0) {
			cam_periph_release(periph);
			return (error);
		}
	}

	periph->flags |= CAM_PERIPH_LOCKED;
	return (0);
}

void
cam_periph_unhold(struct cam_periph *periph)
{

	mtx_assert(periph->sim->mtx, MA_OWNED);

	periph->flags &= ~CAM_PERIPH_LOCKED;
	if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
		periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
		wakeup(periph);
	}

	cam_periph_release(periph);
}
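
/*
 * cam_periph_hold() and cam_periph_unhold() implement a sleepable,
 * single-holder lock layered above the SIM mutex, typically bracketing
 * open/close processing.  A sketch, with the SIM lock held as the
 * functions assert (the priority value is illustrative):
 *
 *	if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0)
 *		return (error);
 *	...perform open-time configuration...
 *	cam_periph_unhold(periph);
 */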

/*
 * Look for the next unit number that is not currently in use for this
 * peripheral type starting at "newunit".  Also exclude unit numbers that
 * are reserved for future "hardwiring" unless we already know that this
 * is a potential wired device.  Only assume that the device is "wired" the
 * first time through the loop since after that we'll be looking at unit
 * numbers that did not match a wiring entry.
 */
static u_int
camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
		  path_id_t pathid, target_id_t target, lun_id_t lun)
{
	struct	cam_periph *periph;
	char	*periph_name;
	int	i, val, dunit, r;
	const char *dname, *strval;

	periph_name = p_drv->driver_name;
	for (;;newunit++) {

		for (periph = TAILQ_FIRST(&p_drv->units);
		     periph != NULL && periph->unit_number != newunit;
		     periph = TAILQ_NEXT(periph, unit_links))
			;

		if (periph != NULL && periph->unit_number == newunit) {
			if (wired != 0) {
				xpt_print(periph->path, "Duplicate Wired "
				    "Device entry!\n");
				xpt_print(periph->path, "Second device (%s "
				    "device at scbus%d target %d lun %d) will "
				    "not be wired\n", periph_name, pathid,
				    target, lun);
				wired = 0;
			}
			continue;
		}
		if (wired)
			break;

		/*
		 * Don't match entries like "da 4" as a wired down
		 * device, but do match entries like "da 4 target 5"
		 * or even "da 4 scbus 1".
		 */
		i = 0;
		dname = periph_name;
		for (;;) {
			r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
			if (r != 0)
				break;
			/* if no "target" and no specific scbus, skip */
			if (resource_int_value(dname, dunit, "target", &val) &&
			    (resource_string_value(dname, dunit, "at", &strval) ||
			     strcmp(strval, "scbus") == 0))
				continue;
			if (newunit == dunit)
				break;
		}
		if (r != 0)
			break;
	}
	return (newunit);
}

static u_int
camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
	      target_id_t target, lun_id_t lun)
{
	u_int	unit;
	int	wired, i, val, dunit;
	const char *dname, *strval;
	char	pathbuf[32], *periph_name;

	periph_name = p_drv->driver_name;
	snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
	unit = 0;
	i = 0;
	dname = periph_name;
	for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
	     wired = 0) {
		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
			if (strcmp(strval, pathbuf) != 0)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "target", &val) == 0) {
			if (val != target)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "lun", &val) == 0) {
			if (val != lun)
				continue;
			wired++;
		}
		if (wired != 0) {
			unit = dunit;
			break;
		}
	}

	/*
	 * Either start from 0 looking for the next unit or from
	 * the unit number given in the resource config.  This way,
	 * if we have wildcard matches, we don't return the same
	 * unit number twice.
	 */
	unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);

	return (unit);
}
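
/*
 * The wiring entries consulted above come from the kernel environment,
 * e.g. /boot/device.hints.  A sketch of wiring unit da4 to a specific
 * bus/target/lun (values illustrative only):
 *
 *	hint.da.4.at="scbus1"
 *	hint.da.4.target="5"
 *	hint.da.4.lun="0"
 *
 * An entry naming only a unit, with no "at" or "target" hint, is
 * deliberately not treated as wired; see camperiphnextunit() above.
 */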

void
cam_periph_invalidate(struct cam_periph *periph)
{

	/*
	 * We only call this routine the first time a peripheral is
	 * invalidated.
	 */
	if (((periph->flags & CAM_PERIPH_INVALID) == 0)
	 && (periph->periph_oninval != NULL))
		periph->periph_oninval(periph);

	periph->flags |= CAM_PERIPH_INVALID;
	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;

	xpt_lock_buses();
	if (periph->refcount == 0)
		camperiphfree(periph);
	else if (periph->refcount < 0)
		printf("cam_invalidate_periph: refcount < 0!!\n");
	xpt_unlock_buses();
}

static void
camperiphfree(struct cam_periph *periph)
{
	struct periph_driver **p_drv;

	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("camperiphfree: attempt to free non-existent periph\n");
		return;
	}

	TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
	(*p_drv)->generation++;
	xpt_unlock_buses();

	if (periph->periph_dtor != NULL)
		periph->periph_dtor(periph);
	xpt_remove_periph(periph);

	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
		union ccb ccb;
		void *arg;

		switch (periph->deferred_ac) {
		case AC_FOUND_DEVICE:
			ccb.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		case AC_PATH_REGISTERED:
			ccb.ccb_h.func_code = XPT_PATH_INQ;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		default:
			arg = NULL;
			break;
		}
		periph->deferred_callback(NULL, periph->deferred_ac,
					  periph->path, arg);
	}
	xpt_free_path(periph->path);
	free(periph, M_CAMPERIPH);
	xpt_lock_buses();
}

/*
 * Map user virtual pointers into kernel virtual address space, so we can
 * access the memory.  This won't work on physical pointers, for now it's
 * up to the caller to check for that.  (XXX KDM -- should we do that here
 * instead?)  This also only works for up to DFLTPHYS memory.  Since we use
 * buffers to map stuff in and out, we're limited to the buffer size.
 */
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i, j;
	int flags[CAM_PERIPH_MAXMAPS];
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
	u_int32_t dirs[CAM_PERIPH_MAXMAPS];

	switch(ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.match_buf_len == 0) {
			printf("cam_periph_mapmem: invalid match buffer "
			       "length 0\n");
			return(EINVAL);
		}
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);

		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	default:
		return(EINVAL);
		break; /* NOTREACHED */
	}

	/*
	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	 */
	for (i = 0; i < numbufs; i++) {

		flags[i] = 0;

		/*
		 * The userland data pointer passed in may not be page
		 * aligned.  vmapbuf() truncates the address to a page
		 * boundary, so if the address isn't page aligned, we'll
		 * need enough space for the given transfer length, plus
		 * whatever extra space is necessary to make it to the page
		 * boundary.
		 */
		if ((lengths[i] +
		    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > DFLTPHYS) {
			printf("cam_periph_mapmem: attempt to map %lu bytes, "
			       "which is greater than DFLTPHYS(%d)\n",
			       (long)(lengths[i] +
			       (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
			       DFLTPHYS);
			return(E2BIG);
		}

		if (dirs[i] & CAM_DIR_OUT) {
			flags[i] = BIO_WRITE;
		}

		if (dirs[i] & CAM_DIR_IN) {
			flags[i] = BIO_READ;
		}

	}

	/* this keeps the current process from getting swapped */
	/*
	 * XXX KDM should I use P_NOSWAP instead?
	 */
	PHOLD(curproc);

	for (i = 0; i < numbufs; i++) {
		/*
		 * Get the buffer.
		 */
		mapinfo->bp[i] = getpbuf(NULL);

		/* save the buffer's data address */
		mapinfo->bp[i]->b_saveaddr = mapinfo->bp[i]->b_data;

		/* put our pointer in the data slot */
		mapinfo->bp[i]->b_data = *data_ptrs[i];

		/* set the transfer length, we know it's < DFLTPHYS */
		mapinfo->bp[i]->b_bufsize = lengths[i];

		/* set the direction */
		mapinfo->bp[i]->b_iocmd = flags[i];

		/*
		 * Map the buffer into kernel memory.
		 *
		 * Note that useracc() alone is not a sufficient test.
		 * vmapbuf() can still fail due to a smaller file mapped
		 * into a larger area of VM, or if userland races against
		 * vmapbuf() after the useracc() check.
		 */
		if (vmapbuf(mapinfo->bp[i]) < 0) {
			for (j = 0; j < i; ++j) {
				*data_ptrs[j] = mapinfo->bp[j]->b_saveaddr;
				vunmapbuf(mapinfo->bp[j]);
				relpbuf(mapinfo->bp[j], NULL);
			}
			relpbuf(mapinfo->bp[i], NULL);
			PRELE(curproc);
			return(EACCES);
		}

		/* set our pointer to the new mapped area */
		*data_ptrs[i] = mapinfo->bp[i]->b_data;

		mapinfo->num_bufs_used++;
	}

	/*
	 * Now that we've gotten this far, change ownership to the kernel
	 * of the buffers so that we don't run afoul of returning to user
	 * space with locks (on the buffer) held.
	 */
	for (i = 0; i < numbufs; i++) {
		BUF_KERNPROC(mapinfo->bp[i]);
	}

	return(0);
}

/*
 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().
 */
void
cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i;
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];

	if (mapinfo->num_bufs_used <= 0) {
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		numbufs = min(mapinfo->num_bufs_used, 2);

		if (numbufs == 1) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		data_ptrs[0] = &ccb->csio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		break;
	default:
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
		break; /* NOTREACHED */
	}

	for (i = 0; i < numbufs; i++) {
		/* Set the user's pointer back to the original value */
		*data_ptrs[i] = mapinfo->bp[i]->b_saveaddr;

		/* unmap the buffer */
		vunmapbuf(mapinfo->bp[i]);

		/* release the buffer */
		relpbuf(mapinfo->bp[i], NULL);
	}

	/* allow ourselves to be swapped once again */
	PRELE(curproc);
}
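
/*
 * Sketch of the expected calling pattern around the two functions above,
 * as used when dispatching a CCB whose data pointer refers to user space
 * (the pass(4) driver is the main consumer; the error routine, flags, and
 * devstat argument shown are illustrative):
 *
 *	struct cam_periph_map_info mapinfo;
 *
 *	bzero(&mapinfo, sizeof(mapinfo));
 *	if ((error = cam_periph_mapmem(ccb, &mapinfo)) != 0)
 *		return (error);
 *	error = cam_periph_runccb(ccb, fooerror, CAM_RETRY_SELTO,
 *	    SF_RETRY_UA, softc->device_stats);
 *	cam_periph_unmapmem(ccb, &mapinfo);
 *
 * num_bufs_used must start at zero, which the bzero() guarantees.
 */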

union ccb *
cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
{
	struct ccb_hdr *ccb_h;
	struct mtx *mtx;

	mtx_assert(periph->sim->mtx, MA_OWNED);
	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdgetccb\n"));

	while (SLIST_FIRST(&periph->ccb_list) == NULL) {
		if (periph->immediate_priority > priority)
			periph->immediate_priority = priority;
		xpt_schedule(periph, priority);
		if ((SLIST_FIRST(&periph->ccb_list) != NULL)
		 && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
			break;
		mtx_assert(periph->sim->mtx, MA_OWNED);
		if (periph->sim->mtx == &Giant)
			mtx = NULL;
		else
			mtx = periph->sim->mtx;
		msleep(&periph->ccb_list, mtx, PRIBIO, "cgticb", 0);
	}

	ccb_h = SLIST_FIRST(&periph->ccb_list);
	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
	return ((union ccb *)ccb_h);
}

void
cam_periph_ccbwait(union ccb *ccb)
{
	struct mtx *mtx;
	struct cam_sim *sim;

	sim = xpt_path_sim(ccb->ccb_h.path);
	if (sim->mtx == &Giant)
		mtx = NULL;
	else
		mtx = sim->mtx;
	if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
	 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
		msleep(&ccb->ccb_h.cbfcnp, mtx, PRIBIO, "cbwait", 0);
}

int
cam_periph_ioctl(struct cam_periph *periph, int cmd, caddr_t addr,
		 int (*error_routine)(union ccb *ccb,
				      cam_flags camflags,
				      u_int32_t sense_flags))
{
	union ccb 	     *ccb;
	int 		     error;
	int		     found;

	error = found = 0;

	switch(cmd) {
	case CAMGETPASSTHRU:
		ccb = cam_periph_getccb(periph, /* priority */ 1);
		xpt_setup_ccb(&ccb->ccb_h,
			      ccb->ccb_h.path,
			      /*priority*/1);
		ccb->ccb_h.func_code = XPT_GDEVLIST;

		/*
		 * Basically, the point of this is that we go through
		 * getting the list of devices, until we find a passthrough
		 * device.  In the current version of the CAM code, the
		 * only way to determine what type of device we're dealing
		 * with is by its name.
		 */
		while (found == 0) {
			ccb->cgdl.index = 0;
			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {

				/* we want the next device in the list */
				xpt_action(ccb);
				if (strncmp(ccb->cgdl.periph_name,
				    "pass", 4) == 0) {
					found = 1;
					break;
				}
			}
			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
			    (found == 0)) {
				ccb->cgdl.periph_name[0] = '\0';
				ccb->cgdl.unit_number = 0;
				break;
			}
		}

		/* copy the result back out */
		bcopy(ccb, addr, sizeof(union ccb));

		/* and release the ccb */
		xpt_release_ccb(ccb);

		break;
	default:
		error = ENOTTY;
		break;
	}
	return(error);
}

int
cam_periph_runccb(union ccb *ccb,
		  int (*error_routine)(union ccb *ccb,
				       cam_flags camflags,
				       u_int32_t sense_flags),
		  cam_flags camflags, u_int32_t sense_flags,
		  struct devstat *ds)
{
	struct cam_sim *sim;
	int error;

	error = 0;
	sim = xpt_path_sim(ccb->ccb_h.path);
	mtx_assert(sim->mtx, MA_OWNED);

	/*
	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	 */
	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_start_transaction(ds, NULL);

	xpt_action(ccb);

	do {
		cam_periph_ccbwait(ccb);
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
			error = 0;
		else if (error_routine != NULL)
			error = (*error_routine)(ccb, camflags, sense_flags);
		else
			error = 0;

	} while (error == ERESTART);

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(ccb->ccb_h.path,
				 /* relsim_flags */0,
				 /* openings */0,
				 /* timeout */0,
				 /* getcount_only */ FALSE);

	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_end_transaction(ds,
					ccb->csio.dxfer_len,
					ccb->csio.tag_action & 0xf,
					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
					CAM_DIR_NONE) ?  DEVSTAT_NO_DATA :
					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
					DEVSTAT_WRITE :
					DEVSTAT_READ, NULL, NULL);

	return(error);
}
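
/*
 * Sketch of a synchronous command issued through cam_periph_getccb() and
 * cam_periph_runccb(), SIM lock held.  The completion callback (here the
 * hypothetical periph done routine foodone) and the error routine (here
 * fooerror, typically a thin wrapper around cam_periph_error()) are
 * whatever the driver normally uses; parameter values are illustrative:
 *
 *	union ccb *ccb;
 *	int error;
 *
 *	ccb = cam_periph_getccb(periph, 1);
 *	scsi_test_unit_ready(&ccb->csio, 4, foodone,
 *	    MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, 5000);
 *	error = cam_periph_runccb(ccb, fooerror, 0, SF_RETRY_UA,
 *	    softc->device_stats);
 *	xpt_release_ccb(ccb);
 */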

void
cam_freeze_devq(struct cam_path *path)
{
	struct ccb_hdr ccb_h;

	xpt_setup_ccb(&ccb_h, path, /*priority*/1);
	ccb_h.func_code = XPT_NOOP;
	ccb_h.flags = CAM_DEV_QFREEZE;
	xpt_action((union ccb *)&ccb_h);
}

u_int32_t
cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
		 u_int32_t openings, u_int32_t timeout,
		 int getcount_only)
{
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, path,
		      /*priority*/1);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
	crs.release_flags = relsim_flags;
	crs.openings = openings;
	crs.release_timeout = timeout;
	xpt_action((union ccb *)&crs);
	return (crs.qfrozen_cnt);
}

#define saved_ccb_ptr ppriv_ptr0
static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
	union ccb      *saved_ccb;
	cam_status	status;
	int		frozen;
	int		sense;
	struct scsi_start_stop_unit *scsi_cmd;
	u_int32_t	relsim_flags, timeout;
	u_int32_t	qfrozen_cnt;
	int		xpt_done_ccb;

	xpt_done_ccb = FALSE;
	status = done_ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	sense  = (status & CAM_AUTOSNS_VALID) != 0;
	status &= CAM_STATUS_MASK;

	timeout = 0;
	relsim_flags = 0;
	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;

	/*
	 * Unfreeze the queue once if it is already frozen.
	 */
	if (frozen != 0) {
		qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
					      /*relsim_flags*/0,
					      /*openings*/0,
					      /*timeout*/0,
					      /*getcount_only*/0);
	}

	switch (status) {
	case CAM_REQ_CMP:
	{
		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get
		 * the inquiry information.  Many devices (mostly disks)
		 * don't properly report their inquiry information unless
		 * they are spun up.
		 *
		 * If we manually retrieved sense into a CCB and got
		 * something other than "NO SENSE" send the updated CCB
		 * back to the client via xpt_done() to be processed via
		 * the error recovery code again.
		 */
		if (done_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_cmd = (struct scsi_start_stop_unit *)
					&done_ccb->csio.cdb_io.cdb_bytes;

			if (scsi_cmd->opcode == START_STOP_UNIT)
				xpt_async(AC_INQ_CHANGED,
					  done_ccb->ccb_h.path, NULL);
			if (scsi_cmd->opcode == REQUEST_SENSE) {
				u_int sense_key;

				sense_key = saved_ccb->csio.sense_data.flags;
				sense_key &= SSD_KEY;
				if (sense_key != SSD_KEY_NO_SENSE) {
					saved_ccb->ccb_h.status |=
					    CAM_AUTOSNS_VALID;
#if 0
					xpt_print(saved_ccb->ccb_h.path,
					    "Recovered Sense\n");
					scsi_sense_print(&saved_ccb->csio);
					cam_error_print(saved_ccb, CAM_ESF_ALL,
							CAM_EPF_ALL);
#endif
					xpt_done_ccb = TRUE;
				}
			}
		}
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		      sizeof(union ccb));

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		if (xpt_done_ccb == FALSE)
			xpt_action(done_ccb);

		break;
	}
	case CAM_SCSI_STATUS_ERROR:
		scsi_cmd = (struct scsi_start_stop_unit *)
				&done_ccb->csio.cdb_io.cdb_bytes;
		if (sense != 0) {
			struct ccb_getdev cgd;
			struct scsi_sense_data *sense;
			int    error_code, sense_key, asc, ascq;
			scsi_sense_action err_action;

			sense = &done_ccb->csio.sense_data;
			scsi_extract_sense(sense, &error_code,
					   &sense_key, &asc, &ascq);

			/*
			 * Grab the inquiry data for this device.
			 */
			xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path,
				      /*priority*/ 1);
			cgd.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action((union ccb *)&cgd);
			err_action = scsi_error_action(&done_ccb->csio,
						       &cgd.inq_data, 0);

			/*
			 * If the error is "invalid field in CDB",
			 * and the load/eject flag is set, turn the
			 * flag off and try again.  This is just in
			 * case the drive in question barfs on the
			 * load eject flag.  The CAM code should set
			 * the load/eject flag by default for
			 * removable media.
			 */

			/* XXX KDM
			 * Should we check to see what the specific
			 * scsi status is??  Or does it not matter
			 * since we already know that there was an
			 * error, and we know what the specific
			 * error code was, and we know what the
			 * opcode is..
			 */
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			     (asc == 0x24) && (ascq == 0x00) &&
			     (done_ccb->ccb_h.retry_count > 0)) {

				scsi_cmd->how &= ~SSS_LOEJ;

				xpt_action(done_ccb);

			} else if ((done_ccb->ccb_h.retry_count > 1)
				&& ((err_action & SS_MASK) != SS_FAIL)) {

				/*
				 * In this case, the error recovery
				 * command failed, but we've got
				 * some retries left on it.  Give
				 * it another try unless this is an
				 * unretryable error.
				 */

				/* set the timeout to .5 sec */
				relsim_flags =
					RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = 500;

				xpt_action(done_ccb);

				break;

			} else {
				/*
				 * Perform the final retry with the original
				 * CCB so that final error processing is
				 * performed by the owner of the CCB.
				 */
				bcopy(done_ccb->ccb_h.saved_ccb_ptr,
				      done_ccb, sizeof(union ccb));

				periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

				xpt_action(done_ccb);
			}
		} else {
			/*
			 * Eh??  The command failed, but we don't
			 * have any sense.  What's up with that?
			 * Fire the CCB again to return it to the
			 * caller.
			 */
			bcopy(done_ccb->ccb_h.saved_ccb_ptr,
			      done_ccb, sizeof(union ccb));

			periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

			xpt_action(done_ccb);

		}
		break;
	default:
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		      sizeof(union ccb));

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		xpt_action(done_ccb);

		break;
	}

	/* decrement the retry count */
	/*
	 * XXX This isn't appropriate in all cases.  Restructure,
	 *     so that the retry count is only decremented on an
	 *     actual retry.  Remember that the original ccb had its
	 *     retry count dropped before entering recovery, so
	 *     doing it again is a bug.
	 */
	if (done_ccb->ccb_h.retry_count > 0)
		done_ccb->ccb_h.retry_count--;

	qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
				      /*relsim_flags*/relsim_flags,
				      /*openings*/0,
				      /*timeout*/timeout,
				      /*getcount_only*/0);
	if (xpt_done_ccb == TRUE)
		(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
}

/*
 * Generic Async Event handler.  Peripheral drivers usually
 * filter out the events that require personal attention,
 * and leave the rest to this function.
 */
void
cam_periph_async(struct cam_periph *periph, u_int32_t code,
		 struct cam_path *path, void *arg)
{
	switch (code) {
	case AC_LOST_DEVICE:
		cam_periph_invalidate(periph);
		break;
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		cam_periph_bus_settle(periph, scsi_delay);
		break;
	}
	default:
		break;
	}
}
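
/*
 * Sketch of a driver async callback that handles the codes it cares
 * about and defers the rest to cam_periph_async() above (the fooasync
 * name and attach logic are hypothetical):
 *
 *	static void
 *	fooasync(void *callback_arg, u_int32_t code,
 *	    struct cam_path *path, void *arg)
 *	{
 *		struct cam_periph *periph;
 *
 *		periph = (struct cam_periph *)callback_arg;
 *		switch (code) {
 *		case AC_FOUND_DEVICE:
 *			...attach to the new device...
 *			break;
 *		default:
 *			cam_periph_async(periph, code, path, arg);
 *			break;
 *		}
 *	}
 */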

void
cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
{
	struct ccb_getdevstats cgds;

	xpt_setup_ccb(&cgds.ccb_h, periph->path, /*priority*/1);
	cgds.ccb_h.func_code = XPT_GDEV_STATS;
	xpt_action((union ccb *)&cgds);
	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
}

void
cam_periph_freeze_after_event(struct cam_periph *periph,
			      struct timeval* event_time, u_int duration_ms)
{
	struct timeval delta;
	struct timeval duration_tv;

	microtime(&delta);
	timevalsub(&delta, event_time);
	duration_tv.tv_sec = duration_ms / 1000;
	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
	if (timevalcmp(&delta, &duration_tv, <)) {
		timevalsub(&duration_tv, &delta);

		duration_ms = duration_tv.tv_sec * 1000;
		duration_ms += duration_tv.tv_usec / 1000;
		cam_freeze_devq(periph->path);
		cam_release_devq(periph->path,
				RELSIM_RELEASE_AFTER_TIMEOUT,
				/*reduction*/0,
				/*timeout*/duration_ms,
				/*getcount_only*/0);
	}

}
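
/*
 * Worked example of the arithmetic above: with a settle time of 1000ms
 * and an event that happened 300ms ago, delta (300ms) is less than
 * duration_tv (1000ms), so the queue is frozen and released after the
 * remaining 700ms.  If the event is already older than the settle time,
 * nothing is frozen at all.
 */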

static int
camperiphscsistatuserror(union ccb *ccb, cam_flags camflags,
			 u_int32_t sense_flags, union ccb *save_ccb,
			 int *openings, u_int32_t *relsim_flags,
			 u_int32_t *timeout)
{
	int error;

	switch (ccb->csio.scsi_status) {
	case SCSI_STATUS_OK:
	case SCSI_STATUS_COND_MET:
	case SCSI_STATUS_INTERMED:
	case SCSI_STATUS_INTERMED_COND_MET:
		error = 0;
		break;
	case SCSI_STATUS_CMD_TERMINATED:
	case SCSI_STATUS_CHECK_COND:
		error = camperiphscsisenseerror(ccb,
					        camflags,
					        sense_flags,
					        save_ccb,
					        openings,
					        relsim_flags,
					        timeout);
		break;
	case SCSI_STATUS_QUEUE_FULL:
	{
		/* no decrement */
		struct ccb_getdevstats cgds;

		/*
		 * First off, find out what the current
		 * transaction counts are.
		 */
		xpt_setup_ccb(&cgds.ccb_h,
			      ccb->ccb_h.path,
			      /*priority*/1);
		cgds.ccb_h.func_code = XPT_GDEV_STATS;
		xpt_action((union ccb *)&cgds);

		/*
		 * If we were the only transaction active, treat
		 * the QUEUE FULL as if it were a BUSY condition.
		 */
		if (cgds.dev_active != 0) {
			int total_openings;

			/*
			 * Reduce the number of openings to
			 * be 1 less than the amount it took
			 * to get a queue full bounded by the
			 * minimum allowed tag count for this
			 * device.
			 */
			total_openings = cgds.dev_active + cgds.dev_openings;
			*openings = cgds.dev_active;
			if (*openings < cgds.mintags)
				*openings = cgds.mintags;
			if (*openings < total_openings)
				*relsim_flags = RELSIM_ADJUST_OPENINGS;
			else {
				/*
				 * Some devices report queue full for
				 * temporary resource shortages.  For
				 * this reason, we allow a minimum
				 * tag count to be entered via a
				 * quirk entry to prevent the queue
				 * count on these devices from falling
				 * to a pessimistically low value.  We
				 * still wait for the next successful
				 * completion, however, before queueing
				 * more transactions to the device.
				 */
				*relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
			}
			*timeout = 0;
			error = ERESTART;
			if (bootverbose) {
				xpt_print(ccb->ccb_h.path, "Queue Full\n");
			}
			break;
		}
		/* FALLTHROUGH */
	}
	case SCSI_STATUS_BUSY:
		/*
		 * Restart the queue after either another
		 * command completes or a 1 second timeout.
		 */
		if (bootverbose) {
			xpt_print(ccb->ccb_h.path, "Device Busy\n");
		}
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
				      | RELSIM_RELEASE_AFTER_CMDCMPLT;
			*timeout = 1000;
		} else {
			error = EIO;
		}
		break;
	case SCSI_STATUS_RESERV_CONFLICT:
		xpt_print(ccb->ccb_h.path, "Reservation Conflict\n");
		error = EIO;
		break;
	default:
		xpt_print(ccb->ccb_h.path, "SCSI Status 0x%x\n",
		    ccb->csio.scsi_status);
		error = EIO;
		break;
	}
	return (error);
}

static int
camperiphscsisenseerror(union ccb *ccb, cam_flags camflags,
			u_int32_t sense_flags, union ccb *save_ccb,
		       int *openings, u_int32_t *relsim_flags,
		       u_int32_t *timeout)
{
	struct cam_periph *periph;
	int error;

	periph = xpt_path_periph(ccb->ccb_h.path);
	if (periph->flags & CAM_PERIPH_RECOVERY_INPROG) {

		/*
		 * If error recovery is already in progress, don't attempt
		 * to process this error, but requeue it unconditionally
		 * and attempt to process it once error recovery has
		 * completed.  This failed command is probably related to
		 * the error that caused the currently active error recovery
		 * action, so our current recovery efforts should also
		 * address this command.  Be aware that the error recovery
		 * code assumes that only one recovery action is in progress
		 * on a particular peripheral instance at any given time
		 * (e.g. only one saved CCB for error recovery) so it is
		 * imperative that we don't violate this assumption.
		 */
		error = ERESTART;
	} else {
		scsi_sense_action err_action;
		struct ccb_getdev cgd;
		const char *action_string;
		union ccb* print_ccb;

		/* A description of the error recovery action performed */
		action_string = NULL;

		/*
		 * The location of the original ccb
		 * for sense printing purposes.
		 */
		print_ccb = ccb;

		/*
		 * Grab the inquiry data for this device.
		 */
		xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, /*priority*/ 1);
		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)&cgd);

		if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
			err_action = scsi_error_action(&ccb->csio,
						       &cgd.inq_data,
						       sense_flags);
		else if ((ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
			err_action = SS_REQSENSE;
		else
			err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;

		error = err_action & SS_ERRMASK;

		/*
		 * If the recovery action will consume a retry,
		 * make sure we actually have retries available.
		 */
		if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
			if (ccb->ccb_h.retry_count > 0)
				ccb->ccb_h.retry_count--;
			else {
				action_string = "Retries Exhausted";
				goto sense_error_done;
			}
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Do common portions of commands that
			 * use recovery CCBs.
			 */
			if (save_ccb == NULL) {
				action_string = "No recovery CCB supplied";
				goto sense_error_done;
			}
			bcopy(ccb, save_ccb, sizeof(*save_ccb));
			print_ccb = save_ccb;
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
		}

		switch (err_action & SS_MASK) {
		case SS_NOP:
			action_string = "No Recovery Action Needed";
			error = 0;
			break;
		case SS_RETRY:
			action_string = "Retrying Command (per Sense Data)";
			error = ERESTART;
			break;
		case SS_FAIL:
			action_string = "Unretryable error";
			break;
		case SS_START:
		{
			int le;

			/*
			 * Send a start unit command to the device, and
			 * then retry the command.
			 */
			action_string = "Attempting to Start Unit";

			/*
			 * Check for removable media and set
			 * load/eject flag appropriately.
			 */
			if (SID_IS_REMOVABLE(&cgd.inq_data))
				le = TRUE;
			else
				le = FALSE;

			scsi_start_stop(&ccb->csio,
					/*retries*/1,
					camperiphdone,
					MSG_SIMPLE_Q_TAG,
					/*start*/TRUE,
					/*load/eject*/le,
					/*immediate*/FALSE,
					SSD_FULL_SIZE,
					/*timeout*/50000);
			break;
		}
		case SS_TUR:
		{
			/*
			 * Send a Test Unit Ready to the device.
			 * If the 'many' flag is set, we send 120
			 * test unit ready commands, one every half
			 * second.  Otherwise, we just send one TUR.
			 * We only want to do this if the retry
			 * count has not been exhausted.
			 */
			int retries;

			if ((err_action & SSQ_MANY) != 0) {
				action_string = "Polling device for readiness";
				retries = 120;
			} else {
				action_string = "Testing device for readiness";
				retries = 1;
			}
			scsi_test_unit_ready(&ccb->csio,
					     retries,
					     camperiphdone,
					     MSG_SIMPLE_Q_TAG,
					     SSD_FULL_SIZE,
					     /*timeout*/5000);

			/*
			 * Accomplish our 500ms delay by deferring
			 * the release of our device queue appropriately.
			 */
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
			*timeout = 500;
			break;
		}
		case SS_REQSENSE:
		{
			/*
			 * Send a Request Sense to the device.  We
			 * assume that we are in a contingent allegiance
			 * condition so we do not tag this request.
			 */
			scsi_request_sense(&ccb->csio, /*retries*/1,
					   camperiphdone,
					   &save_ccb->csio.sense_data,
					   sizeof(save_ccb->csio.sense_data),
					   CAM_TAG_ACTION_NONE,
					   /*sense_len*/SSD_FULL_SIZE,
					   /*timeout*/5000);
			break;
		}
		default:
			panic("Unhandled error action %x", err_action);
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Drop the priority to 0 so that the recovery
			 * CCB is the first to execute.  Freeze the queue
			 * after this command is sent so that we can
			 * restore the old csio and have it queued in
			 * the proper order before we release normal
			 * transactions to the device.
			 */
			ccb->ccb_h.pinfo.priority = 0;
			ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			ccb->ccb_h.saved_ccb_ptr = save_ccb;
			error = ERESTART;
		}

sense_error_done:
		if ((err_action & SSQ_PRINT_SENSE) != 0
		 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0) {
			cam_error_print(print_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
			xpt_print_path(ccb->ccb_h.path);
			if (bootverbose)
				scsi_sense_print(&print_ccb->csio);
			printf("%s\n", action_string);
		}
	}
	return (error);
}

/*
 * Generic error handler.  Peripheral drivers usually filter
 * out the errors that they handle in a unique manner, then
 * call this function.
 */
int
cam_periph_error(union ccb *ccb, cam_flags camflags,
		 u_int32_t sense_flags, union ccb *save_ccb)
{
	const char *action_string;
	cam_status  status;
	int	    frozen;
	int	    error, printed = 0;
	int         openings;
	u_int32_t   relsim_flags;
	u_int32_t   timeout = 0;

	action_string = NULL;
	status = ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	status &= CAM_STATUS_MASK;
	openings = relsim_flags = 0;

	switch (status) {
	case CAM_REQ_CMP:
		error = 0;
		break;
	case CAM_SCSI_STATUS_ERROR:
		error = camperiphscsistatuserror(ccb,
						 camflags,
						 sense_flags,
						 save_ccb,
						 &openings,
						 &relsim_flags,
						 &timeout);
		break;
	case CAM_AUTOSENSE_FAIL:
		xpt_print(ccb->ccb_h.path, "AutoSense Failed\n");
		error = EIO;	/* we have to kill the command */
		break;
	case CAM_REQ_CMP_ERR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path,
			    "Request completed with CAM_REQ_CMP_ERR\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_CMD_TIMEOUT:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Command timed out\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_UNEXP_BUSFREE:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Unexpected Bus Free\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_UNCOR_PARITY:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path,
			    "Uncorrected Parity Error\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_DATA_RUN_ERR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Data Overrun\n");
			printed++;
		}
		error = EIO;	/* we have to kill the command */
		/* decrement the number of retries */
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
		} else {
			action_string = "Retries Exhausted";
			error = EIO;
		}
		break;
	case CAM_UA_ABORT:
	case CAM_UA_TERMIO:
	case CAM_MSG_REJECT_REC:
		/* XXX Don't know that these are correct */
		error = EIO;
		break;
	case CAM_SEL_TIMEOUT:
	{
		struct cam_path *newpath;

		if ((camflags & CAM_RETRY_SELTO) != 0) {
			if (ccb->ccb_h.retry_count > 0) {

				ccb->ccb_h.retry_count--;
				error = ERESTART;
				if (bootverbose && printed == 0) {
					xpt_print(ccb->ccb_h.path,
					    "Selection Timeout\n");
					printed++;
				}

				/*
				 * Wait a bit to give the device
				 * time to recover before we try again.
				 */
				relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = periph_selto_delay;
				break;
			}
		}
		error = ENXIO;
		/* Should we do more if we can't create the path?? */
		if (xpt_create_path(&newpath, xpt_path_periph(ccb->ccb_h.path),
				    xpt_path_path_id(ccb->ccb_h.path),
				    xpt_path_target_id(ccb->ccb_h.path),
				    CAM_LUN_WILDCARD) != CAM_REQ_CMP)
			break;

		/*
		 * Let peripheral drivers know that this device has gone
		 * away.
		 */
		xpt_async(AC_LOST_DEVICE, newpath, NULL);
		xpt_free_path(newpath);
		break;
	}
	case CAM_REQ_INVALID:
	case CAM_PATH_INVALID:
	case CAM_DEV_NOT_THERE:
	case CAM_NO_HBA:
	case CAM_PROVIDE_FAIL:
	case CAM_REQ_TOO_BIG:
	case CAM_LUN_INVALID:
	case CAM_TID_INVALID:
		error = EINVAL;
		break;
	case CAM_SCSI_BUS_RESET:
	case CAM_BDR_SENT:
		/*
		 * Commands that repeatedly timeout and cause these
		 * kinds of error recovery actions, should return
		 * CAM_CMD_TIMEOUT, which allows us to safely assume
		 * that this command was an innocent bystander to
		 * these events and should be unconditionally
		 * retried.
		 */
		if (bootverbose && printed == 0) {
			xpt_print_path(ccb->ccb_h.path);
			if (status == CAM_BDR_SENT)
				printf("Bus Device Reset sent\n");
			else
				printf("Bus Reset issued\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_REQUEUE_REQ:
		/* Unconditional requeue */
		error = ERESTART;
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Request Requeued\n");
			printed++;
		}
		break;
	case CAM_RESRC_UNAVAIL:
		/* Wait a bit for the resource shortage to abate. */
		timeout = periph_noresrc_delay;
		/* FALLTHROUGH */
	case CAM_BUSY:
		if (timeout == 0) {
			/* Wait a bit for the busy condition to abate. */
			timeout = periph_busy_delay;
		}
		relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
		/* FALLTHROUGH */
	default:
		/* decrement the number of retries */
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
			if (bootverbose && printed == 0) {
				xpt_print(ccb->ccb_h.path, "CAM Status 0x%x\n",
				    status);
				printed++;
			}
		} else {
			error = EIO;
			action_string = "Retries Exhausted";
		}
		break;
	}

	/* Attempt a retry */
	if (error == ERESTART || error == 0) {
		if (frozen != 0)
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;

		if (error == ERESTART) {
			action_string = "Retrying Command";
			xpt_action(ccb);
		}

		if (frozen != 0)
			cam_release_devq(ccb->ccb_h.path,
					 relsim_flags,
					 openings,
					 timeout,
					 /*getcount_only*/0);
	}

	/*
	 * If we have an error and are booting verbosely, whine
	 * *unless* this was a non-retryable selection timeout.
	 */
	if (error != 0 && bootverbose &&
	    !(status == CAM_SEL_TIMEOUT && (camflags & CAM_RETRY_SELTO) == 0)) {

		if (action_string == NULL)
			action_string = "Unretryable Error";
		if (error != ERESTART) {
			xpt_print(ccb->ccb_h.path, "error %d\n", error);
		}
		xpt_print(ccb->ccb_h.path, "%s\n", action_string);
	}

	return (error);
}
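
/*
 * Peripheral drivers normally reach cam_periph_error() through a thin
 * wrapper that supplies their saved recovery CCB, in the style of the
 * sketch below (the foo softc and its saved_ccb member are hypothetical):
 *
 *	static int
 *	fooerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
 *	{
 *		struct foo_softc *softc;
 *		struct cam_periph *periph;
 *
 *		periph = xpt_path_periph(ccb->ccb_h.path);
 *		softc = (struct foo_softc *)periph->softc;
 *
 *		return (cam_periph_error(ccb, cam_flags, sense_flags,
 *			&softc->saved_ccb));
 *	}
 */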
1768