1/*-
2 * Common functions for CAM "type" (peripheral) drivers.
3 *
4 * Copyright (c) 1997, 1998 Justin T. Gibbs.
5 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification, immediately at the beginning of the file.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/cam/cam_periph.c 224496 2011-07-29 20:30:28Z mav $");
31__FBSDID("$FreeBSD: head/sys/cam/cam_periph.c 225950 2011-10-03 20:32:55Z ken $");
32
33#include <sys/param.h>
34#include <sys/systm.h>
35#include <sys/types.h>
36#include <sys/malloc.h>
37#include <sys/kernel.h>
38#include <sys/bio.h>
39#include <sys/lock.h>
40#include <sys/mutex.h>
41#include <sys/buf.h>
42#include <sys/proc.h>
43#include <sys/devicestat.h>
44#include <sys/bus.h>
45#include <sys/sbuf.h>
46#include <vm/vm.h>
47#include <vm/vm_extern.h>
48
49#include <cam/cam.h>
50#include <cam/cam_ccb.h>
51#include <cam/cam_queue.h>
52#include <cam/cam_xpt_periph.h>
53#include <cam/cam_periph.h>
54#include <cam/cam_debug.h>
55#include <cam/cam_sim.h>
56
57#include <cam/scsi/scsi_all.h>
58#include <cam/scsi/scsi_message.h>
59#include <cam/scsi/scsi_pass.h>
60
61static u_int camperiphnextunit(struct periph_driver *p_drv,
62 u_int newunit, int wired,
63 path_id_t pathid, target_id_t target,
64 lun_id_t lun);
65static u_int camperiphunit(struct periph_driver *p_drv,
66 path_id_t pathid, target_id_t target,
67 lun_id_t lun);
68static void camperiphdone(struct cam_periph *periph,
69 union ccb *done_ccb);
70static void camperiphfree(struct cam_periph *periph);
71static int camperiphscsistatuserror(union ccb *ccb,
72 cam_flags camflags,
73 u_int32_t sense_flags,
74 int *openings,
75 u_int32_t *relsim_flags,
76 u_int32_t *timeout,
77 const char **action_string);
78static int camperiphscsisenseerror(union ccb *ccb,
79 cam_flags camflags,
80 u_int32_t sense_flags,
81 int *openings,
82 u_int32_t *relsim_flags,
83 u_int32_t *timeout,
84 const char **action_string);
85
86static int nperiph_drivers;
87static int initialized = 0;
88struct periph_driver **periph_drivers;
89
90MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");
91
92static int periph_selto_delay = 1000;
93TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
94static int periph_noresrc_delay = 500;
95TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
96static int periph_busy_delay = 500;
97TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
98
99
100void
101periphdriver_register(void *data)
102{
103 struct periph_driver *drv = (struct periph_driver *)data;
104 struct periph_driver **newdrivers, **old;
105 int ndrivers;
106
107 ndrivers = nperiph_drivers + 2;
108 newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
109 M_WAITOK);
110 if (periph_drivers)
111 bcopy(periph_drivers, newdrivers,
112 sizeof(*newdrivers) * nperiph_drivers);
113 newdrivers[nperiph_drivers] = drv;
114 newdrivers[nperiph_drivers + 1] = NULL;
115 old = periph_drivers;
116 periph_drivers = newdrivers;
117 if (old)
118 free(old, M_CAMPERIPH);
119 nperiph_drivers++;
120 /* If this driver's initialization stage has already run, initialize it now. */
121 if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
122 initialized > 1)
123 (*drv->init)();
124}
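/*
 * Illustrative sketch (hypothetical "foo" driver, not part of this file):
 * peripheral drivers normally reach periphdriver_register() by declaring
 * a periph_driver structure and passing it to the PERIPHDRIVER_DECLARE()
 * macro from cam_periph.h:
 *
 *	static struct periph_driver foodriver = {
 *		fooinit, "foo",
 *		TAILQ_HEAD_INITIALIZER(foodriver.units), 0
 *	};
 *	PERIPHDRIVER_DECLARE(foo, foodriver);
 */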
125
126void
127periphdriver_init(int level)
128{
129 int i, early;
130
131 initialized = max(initialized, level);
132 for (i = 0; periph_drivers[i] != NULL; i++) {
133 early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2;
134 if (early == initialized)
135 (*periph_drivers[i]->init)();
136 }
137}
138
139cam_status
140cam_periph_alloc(periph_ctor_t *periph_ctor,
141 periph_oninv_t *periph_oninvalidate,
142 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
143 char *name, cam_periph_type type, struct cam_path *path,
144 ac_callback_t *ac_callback, ac_code code, void *arg)
145{
146 struct periph_driver **p_drv;
147 struct cam_sim *sim;
148 struct cam_periph *periph;
149 struct cam_periph *cur_periph;
150 path_id_t path_id;
151 target_id_t target_id;
152 lun_id_t lun_id;
153 cam_status status;
154 u_int init_level;
155
156 init_level = 0;
157 /*
158 * Handle Hot-Plug scenarios. If there is already a peripheral
159 * of our type assigned to this path, we are likely waiting for
160 * final close on an old, invalidated peripheral. If this is
161 * the case, queue up a deferred call to the peripheral's async
162 * handler. If it looks like a mistaken re-allocation, complain.
163 */
164 if ((periph = cam_periph_find(path, name)) != NULL) {
165
166 if ((periph->flags & CAM_PERIPH_INVALID) != 0
167 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
168 periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
169 periph->deferred_callback = ac_callback;
170 periph->deferred_ac = code;
171 return (CAM_REQ_INPROG);
172 } else {
173 printf("cam_periph_alloc: attempt to re-allocate "
174 "valid device %s%d rejected\n",
175 periph->periph_name, periph->unit_number);
176 }
177 return (CAM_REQ_INVALID);
178 }
179
180 periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
181 M_NOWAIT);
182
183 if (periph == NULL)
184 return (CAM_RESRC_UNAVAIL);
185
186 init_level++;
187
188
189 sim = xpt_path_sim(path);
190 path_id = xpt_path_path_id(path);
191 target_id = xpt_path_target_id(path);
192 lun_id = xpt_path_lun_id(path);
193 bzero(periph, sizeof(*periph));
194 cam_init_pinfo(&periph->pinfo);
195 periph->periph_start = periph_start;
196 periph->periph_dtor = periph_dtor;
197 periph->periph_oninval = periph_oninvalidate;
198 periph->type = type;
199 periph->periph_name = name;
200 periph->immediate_priority = CAM_PRIORITY_NONE;
201 periph->refcount = 0;
202 periph->sim = sim;
203 SLIST_INIT(&periph->ccb_list);
204 status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
205 if (status != CAM_REQ_CMP)
206 goto failure;
207 periph->path = path;
208
209 xpt_lock_buses();
210 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
211 if (strcmp((*p_drv)->driver_name, name) == 0)
212 break;
213 }
214 if (*p_drv == NULL) {
215 printf("cam_periph_alloc: invalid periph name '%s'\n", name);
216 xpt_free_path(periph->path);
217 free(periph, M_CAMPERIPH);
218 xpt_unlock_buses();
219 return (CAM_REQ_INVALID);
220 }
221 periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
222 cur_periph = TAILQ_FIRST(&(*p_drv)->units);
223 while (cur_periph != NULL
224 && cur_periph->unit_number < periph->unit_number)
225 cur_periph = TAILQ_NEXT(cur_periph, unit_links);
226 if (cur_periph != NULL) {
227 KASSERT(cur_periph->unit_number != periph->unit_number, ("duplicate units on periph list"));
228 TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
229 } else {
230 TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
231 (*p_drv)->generation++;
232 }
233 xpt_unlock_buses();
234
235 init_level++;
236
237 status = xpt_add_periph(periph);
238 if (status != CAM_REQ_CMP)
239 goto failure;
240
241 init_level++;
242
243 status = periph_ctor(periph, arg);
244
245 if (status == CAM_REQ_CMP)
246 init_level++;
247
248failure:
249 switch (init_level) {
250 case 4:
251 /* Initialized successfully */
252 break;
253 case 3:
254 xpt_remove_periph(periph);
255 /* FALLTHROUGH */
256 case 2:
257 xpt_lock_buses();
258 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
259 xpt_unlock_buses();
260 xpt_free_path(periph->path);
261 /* FALLTHROUGH */
262 case 1:
263 free(periph, M_CAMPERIPH);
264 /* FALLTHROUGH */
265 case 0:
266 /* No cleanup to perform. */
267 break;
268 default:
269 panic("cam_periph_alloc: Unkown init level");
270 }
271 return(status);
272}
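/*
 * Illustrative sketch (hypothetical "foo" driver, not part of this file):
 * cam_periph_alloc() is typically called from a driver's async callback
 * when a new device is reported; "cgd" is assumed to be the struct
 * ccb_getdev async argument, and the callback is passed back in so that
 * deferred notifications can be replayed:
 *
 *	status = cam_periph_alloc(fooregister, foooninvalidate, foocleanup,
 *	    foostart, "foo", CAM_PERIPH_BIO, cgd->ccb_h.path, fooasync,
 *	    AC_FOUND_DEVICE, cgd);
 *	if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG)
 *		printf("fooasync: unable to attach new device\n");
 */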
273
274/*
275 * Find a peripheral structure with the specified path, target, lun,
276 * and (optionally) name. If the name is NULL, this function will return
277 * the first peripheral driver that matches the specified path.
278 */
279struct cam_periph *
280cam_periph_find(struct cam_path *path, char *name)
281{
282 struct periph_driver **p_drv;
283 struct cam_periph *periph;
284
285 xpt_lock_buses();
286 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
287
288 if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
289 continue;
290
291 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
292 if (xpt_path_comp(periph->path, path) == 0) {
293 xpt_unlock_buses();
294 mtx_assert(periph->sim->mtx, MA_OWNED);
295 return(periph);
296 }
297 }
298 if (name != NULL) {
299 xpt_unlock_buses();
300 return(NULL);
301 }
302 }
303 xpt_unlock_buses();
304 return(NULL);
305}
306
307/*
308 * Build a comma-separated list of the peripheral drivers attached to
309 * the specified path, appending each driver name and unit number to
310 * the supplied sbuf. Returns the number of peripherals found.
311 */
312int
313cam_periph_list(struct cam_path *path, struct sbuf *sb)
314{
315 struct periph_driver **p_drv;
316 struct cam_periph *periph;
317 int count;
318
319 count = 0;
320 xpt_lock_buses();
321 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
322
323 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
324 if (xpt_path_comp(periph->path, path) != 0)
325 continue;
326
327 if (sbuf_len(sb) != 0)
328 sbuf_cat(sb, ",");
329
330 sbuf_printf(sb, "%s%d", periph->periph_name,
331 periph->unit_number);
332 count++;
333 }
334 }
335 xpt_unlock_buses();
336 return (count);
337}
338
339cam_status
340cam_periph_acquire(struct cam_periph *periph)
341{
342
343 if (periph == NULL)
344 return(CAM_REQ_CMP_ERR);
345
346 xpt_lock_buses();
347 periph->refcount++;
348 xpt_unlock_buses();
349
350 return(CAM_REQ_CMP);
351}
352
353void
354cam_periph_release_locked(struct cam_periph *periph)
355{
356
357 if (periph == NULL)
358 return;
359
360 xpt_lock_buses();
361 if (periph->refcount != 0) {
362 periph->refcount--;
363 } else {
364 xpt_print(periph->path, "%s: release %p when refcount is zero\n ", __func__, periph);
365 }
366 if (periph->refcount == 0
367 && (periph->flags & CAM_PERIPH_INVALID)) {
368 camperiphfree(periph);
369 }
370 xpt_unlock_buses();
371}
372
373void
374cam_periph_release(struct cam_periph *periph)
375{
376 struct cam_sim *sim;
377
378 if (periph == NULL)
379 return;
380
381 sim = periph->sim;
382 mtx_assert(sim->mtx, MA_NOTOWNED);
383 mtx_lock(sim->mtx);
384 cam_periph_release_locked(periph);
385 mtx_unlock(sim->mtx);
386}
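/*
 * Illustrative note (not part of this file): acquire/release bracket any
 * reference to a periph that outlives the current call, e.g. before
 * queueing deferred work:
 *
 *	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
 *		return (ENXIO);
 *	... queue work that calls cam_periph_release(periph) when done ...
 */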
387
388int
389cam_periph_hold(struct cam_periph *periph, int priority)
390{
391 int error;
392
393 /*
394 * Increment the reference count on the peripheral
395 * while we wait for our lock attempt to succeed
396 * to ensure the peripheral doesn't disappear out
397 * from under us while we sleep.
398 */
399
400 if (cam_periph_acquire(periph) != CAM_REQ_CMP)
401 return (ENXIO);
402
403 mtx_assert(periph->sim->mtx, MA_OWNED);
404 while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
405 periph->flags |= CAM_PERIPH_LOCK_WANTED;
406 if ((error = mtx_sleep(periph, periph->sim->mtx, priority,
407 "caplck", 0)) != 0) {
408 cam_periph_release_locked(periph);
409 return (error);
410 }
411 }
412
413 periph->flags |= CAM_PERIPH_LOCKED;
414 return (0);
415}
416
417void
418cam_periph_unhold(struct cam_periph *periph)
419{
420
421 mtx_assert(periph->sim->mtx, MA_OWNED);
422
423 periph->flags &= ~CAM_PERIPH_LOCKED;
424 if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
425 periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
426 wakeup(periph);
427 }
428
429 cam_periph_release_locked(periph);
430}
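/*
 * Illustrative sketch (not part of this file): open/close style entry
 * points serialize against each other with hold/unhold while holding the
 * SIM lock, roughly:
 *
 *	cam_periph_lock(periph);
 *	if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) {
 *		cam_periph_unlock(periph);
 *		return (error);
 *	}
 *	... probe or change media state ...
 *	cam_periph_unhold(periph);
 *	cam_periph_unlock(periph);
 */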
431
432/*
433 * Look for the next unit number that is not currently in use for this
434 * peripheral type starting at "newunit". Also exclude unit numbers that
435 * are reserved for future "hardwiring" unless we already know that this
436 * is a potential wired device. Only assume that the device is "wired" the
437 * first time through the loop since after that we'll be looking at unit
438 * numbers that did not match a wiring entry.
439 */
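/*
 * Illustrative note (not part of this file): wiring entries come from
 * device.hints(5) / the kernel environment; e.g. wiring unit 4 of a
 * hypothetical "foo" driver to target 5 on scbus1 looks like:
 *
 *	hint.foo.4.at="scbus1"
 *	hint.foo.4.target="5"
 */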
440static u_int
441camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
442 path_id_t pathid, target_id_t target, lun_id_t lun)
443{
444 struct cam_periph *periph;
445 char *periph_name;
446 int i, val, dunit, r;
447 const char *dname, *strval;
448
449 periph_name = p_drv->driver_name;
450 for (;;newunit++) {
451
452 for (periph = TAILQ_FIRST(&p_drv->units);
453 periph != NULL && periph->unit_number != newunit;
454 periph = TAILQ_NEXT(periph, unit_links))
455 ;
456
457 if (periph != NULL && periph->unit_number == newunit) {
458 if (wired != 0) {
459 xpt_print(periph->path, "Duplicate Wired "
460 "Device entry!\n");
461 xpt_print(periph->path, "Second device (%s "
462 "device at scbus%d target %d lun %d) will "
463 "not be wired\n", periph_name, pathid,
464 target, lun);
465 wired = 0;
466 }
467 continue;
468 }
469 if (wired)
470 break;
471
472 /*
473 * Don't match entries like "da 4" as a wired down
474 * device, but do match entries like "da 4 target 5"
475 * or even "da 4 scbus 1".
476 */
477 i = 0;
478 dname = periph_name;
479 for (;;) {
480 r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
481 if (r != 0)
482 break;
483 /* if no "target" and no specific scbus, skip */
484 if (resource_int_value(dname, dunit, "target", &val) &&
485 (resource_string_value(dname, dunit, "at", &strval) ||
486 strcmp(strval, "scbus") == 0))
487 continue;
488 if (newunit == dunit)
489 break;
490 }
491 if (r != 0)
492 break;
493 }
494 return (newunit);
495}
496
497static u_int
498camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
499 target_id_t target, lun_id_t lun)
500{
501 u_int unit;
502 int wired, i, val, dunit;
503 const char *dname, *strval;
504 char pathbuf[32], *periph_name;
505
506 periph_name = p_drv->driver_name;
507 snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
508 unit = 0;
509 i = 0;
510 dname = periph_name;
511 for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
512 wired = 0) {
513 if (resource_string_value(dname, dunit, "at", &strval) == 0) {
514 if (strcmp(strval, pathbuf) != 0)
515 continue;
516 wired++;
517 }
518 if (resource_int_value(dname, dunit, "target", &val) == 0) {
519 if (val != target)
520 continue;
521 wired++;
522 }
523 if (resource_int_value(dname, dunit, "lun", &val) == 0) {
524 if (val != lun)
525 continue;
526 wired++;
527 }
528 if (wired != 0) {
529 unit = dunit;
530 break;
531 }
532 }
533
534 /*
535 * Either start from 0 looking for the next unit or from
536 * the unit number given in the resource config. This way,
537 * if we have wildcard matches, we don't return the same
538 * unit number twice.
539 */
540 unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);
541
542 return (unit);
543}
544
545void
546cam_periph_invalidate(struct cam_periph *periph)
547{
548
549 /*
550 * We only call this routine the first time a peripheral is
551 * invalidated.
552 */
553 if (((periph->flags & CAM_PERIPH_INVALID) == 0)
554 && (periph->periph_oninval != NULL))
555 periph->periph_oninval(periph);
556
557 periph->flags |= CAM_PERIPH_INVALID;
558 periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
559
560 xpt_lock_buses();
561 if (periph->refcount == 0)
562 camperiphfree(periph);
563 xpt_unlock_buses();
564}
565
566static void
567camperiphfree(struct cam_periph *periph)
568{
569 struct periph_driver **p_drv;
570
571 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
572 if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
573 break;
574 }
575 if (*p_drv == NULL) {
576 printf("camperiphfree: attempt to free non-existant periph\n");
577 return;
578 }
579
580 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
581 (*p_drv)->generation++;
582 xpt_unlock_buses();
583
584 if (periph->periph_dtor != NULL)
585 periph->periph_dtor(periph);
586 xpt_remove_periph(periph);
587
588 if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
589 union ccb ccb;
590 void *arg;
591
592 switch (periph->deferred_ac) {
593 case AC_FOUND_DEVICE:
594 ccb.ccb_h.func_code = XPT_GDEV_TYPE;
595 xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
596 xpt_action(&ccb);
597 arg = &ccb;
598 break;
599 case AC_PATH_REGISTERED:
600 ccb.ccb_h.func_code = XPT_PATH_INQ;
601 xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
602 xpt_action(&ccb);
603 arg = &ccb;
604 break;
605 default:
606 arg = NULL;
607 break;
608 }
609 periph->deferred_callback(NULL, periph->deferred_ac,
610 periph->path, arg);
611 }
612 xpt_free_path(periph->path);
613 free(periph, M_CAMPERIPH);
614 xpt_lock_buses();
615}
616
617/*
618 * Map user virtual pointers into kernel virtual address space, so we can
619 * access the memory. This won't work on physical pointers; for now it's
620 * up to the caller to check for that. (XXX KDM -- should we do that here
621 * instead?) This also only works for up to MAXPHYS memory. Since we use
622 * buffers to map stuff in and out, we're limited to the buffer size.
623 */
624int
625cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
626{
627 int numbufs, i, j;
628 int flags[CAM_PERIPH_MAXMAPS];
629 u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
630 u_int32_t lengths[CAM_PERIPH_MAXMAPS];
631 u_int32_t dirs[CAM_PERIPH_MAXMAPS];
632 /* Some controllers may not be able to handle more data. */
633 size_t maxmap = DFLTPHYS;
634
635 switch(ccb->ccb_h.func_code) {
636 case XPT_DEV_MATCH:
637 if (ccb->cdm.match_buf_len == 0) {
638 printf("cam_periph_mapmem: invalid match buffer "
639 "length 0\n");
640 return(EINVAL);
641 }
642 if (ccb->cdm.pattern_buf_len > 0) {
643 data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
644 lengths[0] = ccb->cdm.pattern_buf_len;
645 dirs[0] = CAM_DIR_OUT;
646 data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
647 lengths[1] = ccb->cdm.match_buf_len;
648 dirs[1] = CAM_DIR_IN;
649 numbufs = 2;
650 } else {
651 data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
652 lengths[0] = ccb->cdm.match_buf_len;
653 dirs[0] = CAM_DIR_IN;
654 numbufs = 1;
655 }
656 /*
657 * This request will not go to the hardware, no reason
658 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
659 */
660 maxmap = MAXPHYS;
661 break;
662 case XPT_SCSI_IO:
663 case XPT_CONT_TARGET_IO:
664 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
665 return(0);
666
667 data_ptrs[0] = &ccb->csio.data_ptr;
668 lengths[0] = ccb->csio.dxfer_len;
669 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
670 numbufs = 1;
671 break;
672 case XPT_ATA_IO:
673 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
674 return(0);
675
676 data_ptrs[0] = &ccb->ataio.data_ptr;
677 lengths[0] = ccb->ataio.dxfer_len;
678 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
679 numbufs = 1;
680 break;
681 case XPT_SMP_IO:
682 data_ptrs[0] = &ccb->smpio.smp_request;
683 lengths[0] = ccb->smpio.smp_request_len;
684 dirs[0] = CAM_DIR_OUT;
685 data_ptrs[1] = &ccb->smpio.smp_response;
686 lengths[1] = ccb->smpio.smp_response_len;
687 dirs[1] = CAM_DIR_IN;
688 numbufs = 2;
689 break;
690 case XPT_DEV_ADVINFO:
691 if (ccb->cdai.bufsiz == 0)
692 return (0);
693
694 data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
695 lengths[0] = ccb->cdai.bufsiz;
696 dirs[0] = CAM_DIR_IN;
697 numbufs = 1;
698
699 /*
700 * This request will not go to the hardware, no reason
701 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
702 */
703 maxmap = MAXPHYS;
704 break;
705 default:
706 return(EINVAL);
707 break; /* NOTREACHED */
708 }
709
710 /*
711 * Check the transfer length and permissions first, so we don't
712 * have to unmap any previously mapped buffers.
713 */
714 for (i = 0; i < numbufs; i++) {
715
716 flags[i] = 0;
717
718 /*
719 * The userland data pointer passed in may not be page
720 * aligned. vmapbuf() truncates the address to a page
721 * boundary, so if the address isn't page aligned, we'll
722 * need enough space for the given transfer length, plus
723 * whatever extra space is necessary to make it to the page
724 * boundary.
725 */
726 if ((lengths[i] +
727 (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > maxmap){
728 printf("cam_periph_mapmem: attempt to map %lu bytes, "
729 "which is greater than %lu\n",
730 (long)(lengths[i] +
731 (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
732 (u_long)maxmap);
733 return(E2BIG);
734 }
735
736 if (dirs[i] & CAM_DIR_OUT) {
737 flags[i] = BIO_WRITE;
738 }
739
740 if (dirs[i] & CAM_DIR_IN) {
741 flags[i] = BIO_READ;
742 }
743
744 }
745
746 /* this keeps the current process from getting swapped */
747 /*
748 * XXX KDM should I use P_NOSWAP instead?
749 */
750 PHOLD(curproc);
751
752 for (i = 0; i < numbufs; i++) {
753 /*
754 * Get the buffer.
755 */
756 mapinfo->bp[i] = getpbuf(NULL);
757
758 /* save the buffer's data address */
759 mapinfo->bp[i]->b_saveaddr = mapinfo->bp[i]->b_data;
760
761 /* put our pointer in the data slot */
762 mapinfo->bp[i]->b_data = *data_ptrs[i];
763
764 /* set the transfer length, we know it's < MAXPHYS */
765 mapinfo->bp[i]->b_bufsize = lengths[i];
766
767 /* set the direction */
768 mapinfo->bp[i]->b_iocmd = flags[i];
769
770 /*
771 * Map the buffer into kernel memory.
772 *
773 * Note that useracc() alone is not a sufficient test.
774 * vmapbuf() can still fail due to a smaller file mapped
775 * into a larger area of VM, or if userland races against
776 * vmapbuf() after the useracc() check.
777 */
778 if (vmapbuf(mapinfo->bp[i]) < 0) {
779 for (j = 0; j < i; ++j) {
780 *data_ptrs[j] = mapinfo->bp[j]->b_saveaddr;
781 vunmapbuf(mapinfo->bp[j]);
782 relpbuf(mapinfo->bp[j], NULL);
783 }
784 relpbuf(mapinfo->bp[i], NULL);
785 PRELE(curproc);
786 return(EACCES);
787 }
788
789 /* set our pointer to the new mapped area */
790 *data_ptrs[i] = mapinfo->bp[i]->b_data;
791
792 mapinfo->num_bufs_used++;
793 }
794
795 /*
796 * Now that we've gotten this far, change ownership of the buffers
797 * to the kernel so that we don't run afoul of returning to user
798 * space with locks (on the buffer) held.
799 */
800 for (i = 0; i < numbufs; i++) {
801 BUF_KERNPROC(mapinfo->bp[i]);
802 }
803
804
805 return(0);
806}
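/*
 * Illustrative sketch (not part of this file): an ioctl path executing a
 * user-constructed CCB brackets it with map/unmap; "fooerror" and "softc"
 * are hypothetical driver names:
 *
 *	struct cam_periph_map_info mapinfo;
 *
 *	bzero(&mapinfo, sizeof(mapinfo));
 *	if ((error = cam_periph_mapmem(ccb, &mapinfo)) != 0)
 *		return (error);
 *	error = cam_periph_runccb(ccb, fooerror, CAM_RETRY_SELTO,
 *	    SF_RETRY_UA, softc->device_stats);
 *	cam_periph_unmapmem(ccb, &mapinfo);
 */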
807
808/*
809 * Unmap memory segments mapped into kernel virtual address space by
810 * cam_periph_mapmem().
811 */
812void
813cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
814{
815 int numbufs, i;
816 u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
817
818 if (mapinfo->num_bufs_used <= 0) {
819 /* allow ourselves to be swapped once again */
820 PRELE(curproc);
821 return;
822 }
823
824 switch (ccb->ccb_h.func_code) {
825 case XPT_DEV_MATCH:
826 numbufs = min(mapinfo->num_bufs_used, 2);
827
828 if (numbufs == 1) {
829 data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
830 } else {
831 data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
832 data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
833 }
834 break;
835 case XPT_SCSI_IO:
836 case XPT_CONT_TARGET_IO:
837 data_ptrs[0] = &ccb->csio.data_ptr;
838 numbufs = min(mapinfo->num_bufs_used, 1);
839 break;
840 case XPT_ATA_IO:
841 data_ptrs[0] = &ccb->ataio.data_ptr;
842 numbufs = min(mapinfo->num_bufs_used, 1);
843 break;
844 case XPT_SMP_IO:
845 numbufs = min(mapinfo->num_bufs_used, 2);
846 data_ptrs[0] = &ccb->smpio.smp_request;
847 data_ptrs[1] = &ccb->smpio.smp_response;
848 break;
849 case XPT_DEV_ADVINFO:
850 numbufs = min(mapinfo->num_bufs_used, 1);
851 data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
852 break;
853 default:
854 /* allow ourselves to be swapped once again */
855 PRELE(curproc);
856 return;
857 break; /* NOTREACHED */
858 }
859
860 for (i = 0; i < numbufs; i++) {
861 /* Set the user's pointer back to the original value */
862 *data_ptrs[i] = mapinfo->bp[i]->b_saveaddr;
863
864 /* unmap the buffer */
865 vunmapbuf(mapinfo->bp[i]);
866
867 /* release the buffer */
868 relpbuf(mapinfo->bp[i], NULL);
869 }
870
871 /* allow ourselves to be swapped once again */
872 PRELE(curproc);
873}
874
875union ccb *
876cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
877{
878 struct ccb_hdr *ccb_h;
879
880 mtx_assert(periph->sim->mtx, MA_OWNED);
881 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cam_periph_getccb\n"));
882
883 while (SLIST_FIRST(&periph->ccb_list) == NULL) {
884 if (periph->immediate_priority > priority)
885 periph->immediate_priority = priority;
886 xpt_schedule(periph, priority);
887 if ((SLIST_FIRST(&periph->ccb_list) != NULL)
888 && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
889 break;
890 mtx_assert(periph->sim->mtx, MA_OWNED);
891 mtx_sleep(&periph->ccb_list, periph->sim->mtx, PRIBIO, "cgticb",
892 0);
893 }
894
895 ccb_h = SLIST_FIRST(&periph->ccb_list);
896 SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
897 return ((union ccb *)ccb_h);
898}
899
900void
901cam_periph_ccbwait(union ccb *ccb)
902{
903 struct cam_sim *sim;
904
905 sim = xpt_path_sim(ccb->ccb_h.path);
906 if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
907 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
908 mtx_sleep(&ccb->ccb_h.cbfcnp, sim->mtx, PRIBIO, "cbwait", 0);
909}
910
911int
912cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
913 int (*error_routine)(union ccb *ccb,
914 cam_flags camflags,
915 u_int32_t sense_flags))
916{
917 union ccb *ccb;
918 int error;
919 int found;
920
921 error = found = 0;
922
923 switch(cmd){
924 case CAMGETPASSTHRU:
925 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
926 xpt_setup_ccb(&ccb->ccb_h,
927 ccb->ccb_h.path,
928 CAM_PRIORITY_NORMAL);
929 ccb->ccb_h.func_code = XPT_GDEVLIST;
930
931 /*
932 * Basically, the point of this is that we go through
933 * getting the list of devices until we find a passthrough
934 * device. In the current version of the CAM code, the
935 * only way to determine what type of device we're dealing
936 * with is by its name.
937 */
938 while (found == 0) {
939 ccb->cgdl.index = 0;
940 ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
941 while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {
942
943 /* we want the next device in the list */
944 xpt_action(ccb);
945 if (strncmp(ccb->cgdl.periph_name,
946 "pass", 4) == 0){
947 found = 1;
948 break;
949 }
950 }
951 if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
952 (found == 0)) {
953 ccb->cgdl.periph_name[0] = '\0';
954 ccb->cgdl.unit_number = 0;
955 break;
956 }
957 }
958
959 /* copy the result back out */
960 bcopy(ccb, addr, sizeof(union ccb));
961
962 /* and release the ccb */
963 xpt_release_ccb(ccb);
964
965 break;
966 default:
967 error = ENOTTY;
968 break;
969 }
970 return(error);
971}
972
973int
974cam_periph_runccb(union ccb *ccb,
975 int (*error_routine)(union ccb *ccb,
976 cam_flags camflags,
977 u_int32_t sense_flags),
978 cam_flags camflags, u_int32_t sense_flags,
979 struct devstat *ds)
980{
981 struct cam_sim *sim;
982 int error;
983
984 error = 0;
985 sim = xpt_path_sim(ccb->ccb_h.path);
986 mtx_assert(sim->mtx, MA_OWNED);
987
988 /*
989 * If the user has supplied a stats structure, and if we understand
990 * this particular type of ccb, record the transaction start.
991 */
992 if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO ||
993 ccb->ccb_h.func_code == XPT_ATA_IO))
994 devstat_start_transaction(ds, NULL);
995
996 xpt_action(ccb);
997
998 do {
999 cam_periph_ccbwait(ccb);
1000 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
1001 error = 0;
1002 else if (error_routine != NULL)
1003 error = (*error_routine)(ccb, camflags, sense_flags);
1004 else
1005 error = 0;
1006
1007 } while (error == ERESTART);
1008
1009 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1010 cam_release_devq(ccb->ccb_h.path,
1011 /* relsim_flags */0,
1012 /* openings */0,
1013 /* timeout */0,
1014 /* getcount_only */ FALSE);
1015 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1016 }
1017
1018 if (ds != NULL) {
1019 if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
1020 devstat_end_transaction(ds,
1021 ccb->csio.dxfer_len,
1022 ccb->csio.tag_action & 0x3,
1023 ((ccb->ccb_h.flags & CAM_DIR_MASK) ==
1024 CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
1025 (ccb->ccb_h.flags & CAM_DIR_OUT) ?
1026 DEVSTAT_WRITE :
1027 DEVSTAT_READ, NULL, NULL);
1028 } else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1029 devstat_end_transaction(ds,
1030 ccb->ataio.dxfer_len,
1031 ccb->ataio.tag_action & 0x3,
1032 ((ccb->ccb_h.flags & CAM_DIR_MASK) ==
1033 CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
1034 (ccb->ccb_h.flags & CAM_DIR_OUT) ?
1035 DEVSTAT_WRITE :
1036 DEVSTAT_READ, NULL, NULL);
1037 }
1038 }
1039
1040 return(error);
1041}
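/*
 * Illustrative sketch (not part of this file), modeled loosely on the
 * da(4) READ CAPACITY path; "foodone", "fooerror" and "rcap" are
 * hypothetical. For a synchronous CCB like this, the driver's done
 * routine is expected to wakeup(&ccb->ccb_h.cbfcnp) so that
 * cam_periph_ccbwait() returns.
 *
 *	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 *	scsi_read_capacity(&ccb->csio, 4, foodone, MSG_SIMPLE_Q_TAG,
 *	    rcap, SSD_FULL_SIZE, 60000);
 *	error = cam_periph_runccb(ccb, fooerror, CAM_RETRY_SELTO,
 *	    SF_RETRY_UA, NULL);
 *	xpt_release_ccb(ccb);
 */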
1042
1043void
1044cam_freeze_devq(struct cam_path *path)
1045{
1046
1047 cam_freeze_devq_arg(path, 0, 0);
1048}
1049
1050void
1051cam_freeze_devq_arg(struct cam_path *path, uint32_t flags, uint32_t arg)
1052{
1053 struct ccb_relsim crs;
1054
1055 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NONE);
1056 crs.ccb_h.func_code = XPT_FREEZE_QUEUE;
1057 crs.release_flags = flags;
1058 crs.openings = arg;
1059 crs.release_timeout = arg;
1060 xpt_action((union ccb *)&crs);
1061}
1062
1063u_int32_t
1064cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
1065 u_int32_t openings, u_int32_t arg,
1066 int getcount_only)
1067{
1068 struct ccb_relsim crs;
1069
1070 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
1071 crs.ccb_h.func_code = XPT_REL_SIMQ;
1072 crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
1073 crs.release_flags = relsim_flags;
1074 crs.openings = openings;
1075 crs.release_timeout = arg;
1076 xpt_action((union ccb *)&crs);
1077 return (crs.qfrozen_cnt);
1078}
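/*
 * Illustrative note (not part of this file): freeze and release come in
 * pairs; quiescing a device for 100ms could look like:
 *
 *	cam_freeze_devq(periph->path);
 *	cam_release_devq(periph->path, RELSIM_RELEASE_AFTER_TIMEOUT,
 *	    0, 100, 0);
 *
 * which is the same pattern cam_periph_freeze_after_event() uses below.
 */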
1079
1080#define saved_ccb_ptr ppriv_ptr0
1081#define recovery_depth ppriv_field1
1082static void
1083camperiphsensedone(struct cam_periph *periph, union ccb *done_ccb)
1084{
1085 union ccb *saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
1086 cam_status status;
1087 int frozen = 0;
1088 int depth = done_ccb->ccb_h.recovery_depth;
1089
1090 status = done_ccb->ccb_h.status;
1091 if (status & CAM_DEV_QFRZN) {
1092 frozen = 1;
1093 /*
1094 * Clear the freeze flag now in case we retry;
1095 * the freeze will be dropped later.
1096 */
1097 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1098 }
1099 status &= CAM_STATUS_MASK;
1100 switch (status) {
1101 case CAM_REQ_CMP:
1102 {
1103 int error_code, sense_key, asc, ascq;
1104
1105 scsi_extract_sense_len(&saved_ccb->csio.sense_data,
1106 saved_ccb->csio.sense_len -
1107 saved_ccb->csio.sense_resid,
1108 &error_code, &sense_key, &asc, &ascq,
1109 /*show_errors*/ 1);
1110 /*
1111 * If we manually retrieved sense into a CCB and got
1112 * something other than "NO SENSE", send the updated CCB
1113 * back to the client via xpt_done() to be processed via
1114 * the error recovery code again.
1115 */
1116 if ((sense_key != -1)
1117 && (sense_key != SSD_KEY_NO_SENSE)) {
1118 saved_ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1119 } else {
1116 saved_ccb->ccb_h.status &=
1117 ~CAM_STATUS_MASK;
1118 saved_ccb->ccb_h.status |=
1119 CAM_AUTOSENSE_FAIL;
1120 saved_ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1121 saved_ccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
1122 }
1123 saved_ccb->csio.sense_resid = done_ccb->csio.resid;
1124 bcopy(saved_ccb, done_ccb, sizeof(union ccb));
1125 xpt_free_ccb(saved_ccb);
1126 break;
1127 }
1128 default:
1129 bcopy(saved_ccb, done_ccb, sizeof(union ccb));
1130 xpt_free_ccb(saved_ccb);
1131 done_ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1132 done_ccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
1133 break;
1134 }
1135 periph->flags &= ~CAM_PERIPH_SENSE_INPROG;
1136 /*
1137 * If this is the end of recovery, drop the freeze taken due
1138 * to the CAM_DEV_QFREEZE flag set on the recovery request.
1139 */
1140 if (depth == 0) {
1141 cam_release_devq(done_ccb->ccb_h.path,
1142 /*relsim_flags*/0,
1143 /*openings*/0,
1144 /*timeout*/0,
1145 /*getcount_only*/0);
1146 }
1147 /*
1148 * Copy frozen flag from recovery request if it is set there
1149 * for some reason.
1150 */
1151 if (frozen != 0)
1152 done_ccb->ccb_h.status |= CAM_DEV_QFRZN;
1153 (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
1154}
1155
1156static void
1157camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
1158{
1159 union ccb *saved_ccb, *save_ccb;
1160 cam_status status;
1161 int frozen = 0;
1162 struct scsi_start_stop_unit *scsi_cmd;
1163 u_int32_t relsim_flags, timeout;
1164
1165 status = done_ccb->ccb_h.status;
1166 if (status & CAM_DEV_QFRZN) {
1167 frozen = 1;
1168 /*
1169 * Clear the freeze flag now in case we retry;
1170 * the freeze will be dropped later.
1171 */
1172 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1173 }
1174
1175 timeout = 0;
1176 relsim_flags = 0;
1177 saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
1178
1179 switch (status & CAM_STATUS_MASK) {
1180 case CAM_REQ_CMP:
1181 {
1182 /*
1183 * If we have successfully taken a device from the not
1184 * ready to ready state, re-scan the device and re-get
1185 * the inquiry information. Many devices (mostly disks)
1186 * don't properly report their inquiry information unless
1187 * they are spun up.
1188 */
1189 scsi_cmd = (struct scsi_start_stop_unit *)
1190 &done_ccb->csio.cdb_io.cdb_bytes;
1191
1192 if (scsi_cmd->opcode == START_STOP_UNIT)
1193 xpt_async(AC_INQ_CHANGED,
1194 done_ccb->ccb_h.path, NULL);
1195 goto final;
1196 }
1197 case CAM_SCSI_STATUS_ERROR:
1198 scsi_cmd = (struct scsi_start_stop_unit *)
1199 &done_ccb->csio.cdb_io.cdb_bytes;
1200 if (status & CAM_AUTOSNS_VALID) {
1201 struct ccb_getdev cgd;
1202 struct scsi_sense_data *sense;
1203 int error_code, sense_key, asc, ascq, sense_len;
1204 scsi_sense_action err_action;
1205
1206 sense = &done_ccb->csio.sense_data;
1207 sense_len = done_ccb->csio.sense_len -
1208 done_ccb->csio.sense_resid;
1209 scsi_extract_sense_len(sense, sense_len, &error_code,
1210 &sense_key, &asc, &ascq,
1211 /*show_errors*/ 1);
1212 /*
1213 * Grab the inquiry data for this device.
1214 */
1215 xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path,
1216 CAM_PRIORITY_NORMAL);
1217 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1218 xpt_action((union ccb *)&cgd);
1219 err_action = scsi_error_action(&done_ccb->csio,
1220 &cgd.inq_data, 0);
1221 /*
1222 * If the error is "invalid field in CDB",
1223 * and the load/eject flag is set, turn the
1224 * flag off and try again. This is just in
1225 * case the drive in question barfs on the
1226 * load eject flag. The CAM code should set
1227 * the load/eject flag by default for
1228 * removable media.
1229 */
1230 /* XXX KDM
1231 * Should we check to see what the specific
1232 * SCSI status is? Or does it not matter,
1233 * since we already know that there was an
1234 * error, and we know what the specific
1235 * error code was, and we know what the
1236 * opcode is?
1237 */
1238 if ((scsi_cmd->opcode == START_STOP_UNIT) &&
1239 ((scsi_cmd->how & SSS_LOEJ) != 0) &&
1240 (asc == 0x24) && (ascq == 0x00) &&
1241 (done_ccb->ccb_h.retry_count > 0)) {
1242
1243 scsi_cmd->how &= ~SSS_LOEJ;
1244 xpt_action(done_ccb);
1245 } else if ((done_ccb->ccb_h.retry_count > 1)
1246 && ((err_action & SS_MASK) != SS_FAIL)) {
1247
1248 /*
1249 * In this case, the error recovery
1250 * command failed, but we've got
1251 * some retries left on it. Give
1252 * it another try unless this is an
1253 * unretryable error.
1254 */
1255 /* set the timeout to .5 sec */
1256 relsim_flags =
1257 RELSIM_RELEASE_AFTER_TIMEOUT;
1258 timeout = 500;
1259 xpt_action(done_ccb);
1260 break;
1261 } else {
1262 /*
1263 * Perform the final retry with the original
1264 * CCB so that final error processing is
1265 * performed by the owner of the CCB.
1266 */
1267 goto final;
1268 }
1269 } else {
1270 save_ccb = xpt_alloc_ccb_nowait();
1271 if (save_ccb == NULL)
1272 goto final;
1273 bcopy(done_ccb, save_ccb, sizeof(*save_ccb));
1274 periph->flags |= CAM_PERIPH_SENSE_INPROG;
1275 /*
1276 * Send a Request Sense to the device. We
1277 * assume that we are in a contingent allegiance
1278 * condition so we do not tag this request.
1279 */
1280 scsi_request_sense(&done_ccb->csio, /*retries*/1,
1281 camperiphsensedone,
1282 &save_ccb->csio.sense_data,
1283 save_ccb->csio.sense_len,
1284 CAM_TAG_ACTION_NONE,
1285 /*sense_len*/SSD_FULL_SIZE,
1286 /*timeout*/5000);
1287 done_ccb->ccb_h.pinfo.priority--;
1288 done_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1289 done_ccb->ccb_h.saved_ccb_ptr = save_ccb;
1290 done_ccb->ccb_h.recovery_depth++;
1291 xpt_action(done_ccb);
1292 }
1293 break;
1294 default:
1295final:
1296 bcopy(saved_ccb, done_ccb, sizeof(*done_ccb));
1297 xpt_free_ccb(saved_ccb);
1298 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
1299 xpt_action(done_ccb);
1300 break;
1301 }
1302
1303 /* decrement the retry count */
1304 /*
1305 * XXX This isn't appropriate in all cases. Restructure,
1306 * so that the retry count is only decremented on an
1307 * actual retry. Remember that the original ccb had its
1308 * retry count dropped before entering recovery, so
1309 * doing it again is a bug.
1310 */
1311 if (done_ccb->ccb_h.retry_count > 0)
1312 done_ccb->ccb_h.retry_count--;
1313 /*
1314 * Drop the freeze taken due to the CAM_DEV_QFREEZE flag set
1315 * on the recovery request.
1316 */
1317 cam_release_devq(done_ccb->ccb_h.path,
1318 /*relsim_flags*/relsim_flags,
1319 /*openings*/0,
1320 /*timeout*/timeout,
1321 /*getcount_only*/0);
1322 /* Drop the freeze taken above if this recovery request returned an error. */
1323 if (frozen != 0) {
1324 cam_release_devq(done_ccb->ccb_h.path,
1325 /*relsim_flags*/0,
1326 /*openings*/0,
1327 /*timeout*/0,
1328 /*getcount_only*/0);
1329 }
1330}
1331
1332/*
1333 * Generic Async Event handler. Peripheral drivers usually
1334 * filter out the events that require personal attention,
1335 * and leave the rest to this function.
1336 */
1337void
1338cam_periph_async(struct cam_periph *periph, u_int32_t code,
1339 struct cam_path *path, void *arg)
1340{
1341 switch (code) {
1342 case AC_LOST_DEVICE:
1343 cam_periph_invalidate(periph);
1344 break;
1345 default:
1346 break;
1347 }
1348}
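/*
 * Illustrative sketch (hypothetical "fooasync", not part of this file):
 * a driver's async callback handles the events it cares about and
 * forwards the rest here:
 *
 *	static void
 *	fooasync(void *callback_arg, u_int32_t code,
 *	    struct cam_path *path, void *arg)
 *	{
 *		struct cam_periph *periph = (struct cam_periph *)callback_arg;
 *
 *		switch (code) {
 *		case AC_FOUND_DEVICE:
 *			... call cam_periph_alloc() as sketched above ...
 *			break;
 *		default:
 *			cam_periph_async(periph, code, path, arg);
 *			break;
 *		}
 *	}
 */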
1349
1350void
1351cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
1352{
1353 struct ccb_getdevstats cgds;
1354
1355 xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1356 cgds.ccb_h.func_code = XPT_GDEV_STATS;
1357 xpt_action((union ccb *)&cgds);
1358 cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
1359}
1360
1361void
1362cam_periph_freeze_after_event(struct cam_periph *periph,
1363 struct timeval* event_time, u_int duration_ms)
1364{
1365 struct timeval delta;
1366 struct timeval duration_tv;
1367
1368 microtime(&delta);
1369 timevalsub(&delta, event_time);
1370 duration_tv.tv_sec = duration_ms / 1000;
1371 duration_tv.tv_usec = (duration_ms % 1000) * 1000;
1372 if (timevalcmp(&delta, &duration_tv, <)) {
1373 timevalsub(&duration_tv, &delta);
1374
1375 duration_ms = duration_tv.tv_sec * 1000;
1376 duration_ms += duration_tv.tv_usec / 1000;
1377 cam_freeze_devq(periph->path);
1378 cam_release_devq(periph->path,
1379 RELSIM_RELEASE_AFTER_TIMEOUT,
1380 /*reduction*/0,
1381 /*timeout*/duration_ms,
1382 /*getcount_only*/0);
1383 }
1384
1385}
1386
1387static int
1388camperiphscsistatuserror(union ccb *ccb, cam_flags camflags,
1389 u_int32_t sense_flags,
1390 int *openings, u_int32_t *relsim_flags,
1391 u_int32_t *timeout, const char **action_string)
1392{
1393 int error;
1394
1395 switch (ccb->csio.scsi_status) {
1396 case SCSI_STATUS_OK:
1397 case SCSI_STATUS_COND_MET:
1398 case SCSI_STATUS_INTERMED:
1399 case SCSI_STATUS_INTERMED_COND_MET:
1400 error = 0;
1401 break;
1402 case SCSI_STATUS_CMD_TERMINATED:
1403 case SCSI_STATUS_CHECK_COND:
1404 if (bootverbose)
1405 xpt_print(ccb->ccb_h.path, "SCSI status error\n");
1406 error = camperiphscsisenseerror(ccb,
1407 camflags,
1408 sense_flags,
1409 openings,
1410 relsim_flags,
1411 timeout,
1412 action_string);
1413 break;
1414 case SCSI_STATUS_QUEUE_FULL:
1415 {
1416 /* no decrement */
1417 struct ccb_getdevstats cgds;
1418
1419 /*
1420 * First off, find out what the current
1421 * transaction counts are.
1422 */
1423 xpt_setup_ccb(&cgds.ccb_h,
1424 ccb->ccb_h.path,
1425 CAM_PRIORITY_NORMAL);
1426 cgds.ccb_h.func_code = XPT_GDEV_STATS;
1427 xpt_action((union ccb *)&cgds);
1428
1429 /*
1430 * If we were the only transaction active, treat
1431 * the QUEUE FULL as if it were a BUSY condition.
1432 */
1433 if (cgds.dev_active != 0) {
1434 int total_openings;
1435
1436 /*
1437 * Reduce the number of openings to
1438 * be 1 less than the amount it took
1439 * to get a queue full, bounded by the
1440 * minimum allowed tag count for this
1441 * device.
1442 */
1443 total_openings = cgds.dev_active + cgds.dev_openings;
1444 *openings = cgds.dev_active;
1445 if (*openings < cgds.mintags)
1446 *openings = cgds.mintags;
1447 if (*openings < total_openings)
1448 *relsim_flags = RELSIM_ADJUST_OPENINGS;
1449 else {
1450 /*
1451 * Some devices report queue full for
1452 * temporary resource shortages. For
1453 * this reason, we allow a minimum
1454 * tag count to be entered via a
1455 * quirk entry to prevent the queue
1456 * count on these devices from falling
1457 * to a pessimistically low value. We
1458 * still wait for the next successful
1459 * completion, however, before queueing
1460 * more transactions to the device.
1461 */
1462 *relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
1463 }
1464 *timeout = 0;
1465 error = ERESTART;
1466 if (bootverbose) {
1467 xpt_print(ccb->ccb_h.path, "Queue full\n");
1468 }
1469 break;
1470 }
1471 /* FALLTHROUGH */
1472 }
1473 case SCSI_STATUS_BUSY:
1474 /*
1475 * Restart the queue after either another
1476 * command completes or a 1 second timeout.
1477 */
1478 if (bootverbose) {
1479 xpt_print(ccb->ccb_h.path, "Device busy\n");
1480 }
1481 if (ccb->ccb_h.retry_count > 0) {
1482 ccb->ccb_h.retry_count--;
1483 error = ERESTART;
1484 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
1485 | RELSIM_RELEASE_AFTER_CMDCMPLT;
1486 *timeout = 1000;
1487 } else {
1488 error = EIO;
1489 }
1490 break;
1491 case SCSI_STATUS_RESERV_CONFLICT:
1492 xpt_print(ccb->ccb_h.path, "Reservation conflict\n");
1493 error = EIO;
1494 break;
1495 default:
1496 xpt_print(ccb->ccb_h.path, "SCSI status 0x%x\n",
1497 ccb->csio.scsi_status);
1498 error = EIO;
1499 break;
1500 }
1501 return (error);
1502}
1503
1504static int
1505camperiphscsisenseerror(union ccb *ccb, cam_flags camflags,
1506 u_int32_t sense_flags,
1507 int *openings, u_int32_t *relsim_flags,
1508 u_int32_t *timeout, const char **action_string)
1509{
1510 struct cam_periph *periph;
1511 union ccb *orig_ccb = ccb;
1512 int error;
1513
1514 periph = xpt_path_periph(ccb->ccb_h.path);
1515 if (periph->flags &
1516 (CAM_PERIPH_RECOVERY_INPROG | CAM_PERIPH_SENSE_INPROG)) {
1517 /*
1518 * If error recovery is already in progress, don't attempt
1519 * to process this error, but requeue it unconditionally
1520 * and attempt to process it once error recovery has
1521 * completed. This failed command is probably related to
1522 * the error that caused the currently active error recovery
1523 * action so our current recovery efforts should also
1524 * address this command. Be aware that the error recovery
1525 * code assumes that only one recovery action is in progress
1526 * on a particular peripheral instance at any given time
1527 * (e.g. only one saved CCB for error recovery) so it is
1528 * imperative that we don't violate this assumption.
1529 */
1530 error = ERESTART;
1531 } else {
1532 scsi_sense_action err_action;
1533 struct ccb_getdev cgd;
1534
1535 /*
1536 * Grab the inquiry data for this device.
1537 */
1538 xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL);
1539 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1540 xpt_action((union ccb *)&cgd);
1541
1542 if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
1543 err_action = scsi_error_action(&ccb->csio,
1544 &cgd.inq_data,
1545 sense_flags);
1546 else if ((ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
1547 err_action = SS_REQSENSE;
1548 else
1549 err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;
1550
1551 error = err_action & SS_ERRMASK;
1552
1553 /*
1554 * If the recovery action will consume a retry,
1555 * make sure we actually have retries available.
1556 */
1557 if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
1558 if (ccb->ccb_h.retry_count > 0 &&
1559 (periph->flags & CAM_PERIPH_INVALID) == 0)
1560 ccb->ccb_h.retry_count--;
1561 else {
1562 *action_string = "Retries exhausted";
1563 goto sense_error_done;
1564 }
1565 }
1566
1567 if ((err_action & SS_MASK) >= SS_START) {
1568 /*
1569 * Do common portions of commands that
1570 * use recovery CCBs.
1571 */
1572 orig_ccb = xpt_alloc_ccb_nowait();
1573 if (orig_ccb == NULL) {
1574 *action_string = "Can't allocate recovery CCB";
1575 goto sense_error_done;
1576 }
1577 /*
1578 * Clear freeze flag for original request here, as
1579 * this freeze will be dropped as part of ERESTART.
1580 */
1581 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1582 bcopy(ccb, orig_ccb, sizeof(*orig_ccb));
1583 }
1584
1585 switch (err_action & SS_MASK) {
1586 case SS_NOP:
1587 *action_string = "No recovery action needed";
1588 error = 0;
1589 break;
1590 case SS_RETRY:
1591 *action_string = "Retrying command (per sense data)";
1592 error = ERESTART;
1593 break;
1594 case SS_FAIL:
1595 *action_string = "Unretryable error";
1596 break;
1597 case SS_START:
1598 {
1599 int le;
1600 if (SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) {
1601 xpt_free_ccb(orig_ccb);
1602 ccb->ccb_h.status |= CAM_DEV_QFRZN;
1603 *action_string = "Will not autostart a "
1604 "sequential access device";
1605 err_action = SS_FAIL;
1606 error = EIO;
1607 break;
1608 }
1609
1610 /*
1611 * Send a start unit command to the device, and
1612 * then retry the command.
1613 */
1614 *action_string = "Attempting to start unit";
1615 periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
1616
1617 /*
1618 * Check for removable media and set
1619 * load/eject flag appropriately.
1620 */
1621 if (SID_IS_REMOVABLE(&cgd.inq_data))
1622 le = TRUE;
1623 else
1624 le = FALSE;
1625
1626 scsi_start_stop(&ccb->csio,
1627 /*retries*/1,
1628 camperiphdone,
1629 MSG_SIMPLE_Q_TAG,
1630 /*start*/TRUE,
1631 /*load/eject*/le,
1632 /*immediate*/FALSE,
1633 SSD_FULL_SIZE,
1634 /*timeout*/50000);
1635 break;
1636 }
1637 case SS_TUR:
1638 {
1639 /*
1640 * Send a Test Unit Ready to the device.
1641 * If the 'many' flag is set, we send 120
1642 * test unit ready commands, one every half
1643 * second. Otherwise, we just send one TUR.
1644 * We only want to do this if the retry
1645 * count has not been exhausted.
1646 */
1647 int retries;
1648
1649 if ((err_action & SSQ_MANY) != 0) {
1650 *action_string = "Polling device for readiness";
1651 retries = 120;
1652 } else {
1653 *action_string = "Testing device for readiness";
1654 retries = 1;
1655 }
1656 periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
1657 scsi_test_unit_ready(&ccb->csio,
1658 retries,
1659 camperiphdone,
1660 MSG_SIMPLE_Q_TAG,
1661 SSD_FULL_SIZE,
1662 /*timeout*/5000);
1663
1664 /*
1665 * Accomplish our 500ms delay by deferring
1666 * the release of our device queue appropriately.
1667 */
1668 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1669 *timeout = 500;
1670 break;
1671 }
1672 case SS_REQSENSE:
1673 {
1674 *action_string = "Requesting SCSI sense data";
1675 periph->flags |= CAM_PERIPH_SENSE_INPROG;
1676 /*
1677 * Send a Request Sense to the device. We
1678 * assume that we are in a contingent allegiance
1679 * condition so we do not tag this request.
1680 */
1681 scsi_request_sense(&ccb->csio, /*retries*/1,
1682 camperiphsensedone,
1683 &orig_ccb->csio.sense_data,
1684 orig_ccb->csio.sense_len,
1685 CAM_TAG_ACTION_NONE,
1686 /*sense_len*/SSD_FULL_SIZE,
1687 /*timeout*/5000);
1688 break;
1689 }
1690 default:
1691 panic("Unhandled error action %x", err_action);
1692 }
1693
1694 if ((err_action & SS_MASK) >= SS_START) {
1695 /*
1696 * Drop the priority, so that the recovery
1697 * CCB is the first to execute. Freeze the queue
1698 * after this command is sent so that we can
1699 * restore the old csio and have it queued in
1700 * the proper order before we release normal
1701 * transactions to the device.
1702 */
1703 ccb->ccb_h.pinfo.priority--;
1704 ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1705 ccb->ccb_h.saved_ccb_ptr = orig_ccb;
1706 ccb->ccb_h.recovery_depth = 0;
1707 error = ERESTART;
1708 }
1709
1710sense_error_done:
1711 if ((err_action & SSQ_PRINT_SENSE) != 0
1712 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
1713 cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
1714 }
1715 return (error);
1716}
1717
1718/*
1719 * Generic error handler. Peripheral drivers usually filter
1720 * out the errors that they handle in a unique manner, then
1721 * call this function.
1722 */
1723int
1724cam_periph_error(union ccb *ccb, cam_flags camflags,
1725 u_int32_t sense_flags, union ccb *save_ccb)
1726{
1727 struct cam_periph *periph;
1728 const char *action_string;
1729 cam_status status;
1730 int frozen;
1731 int error, printed = 0;
1732 int openings;
1733 u_int32_t relsim_flags;
1734 u_int32_t timeout = 0;
1735
1736 periph = xpt_path_periph(ccb->ccb_h.path);
1737 action_string = NULL;
1738 status = ccb->ccb_h.status;
1739 frozen = (status & CAM_DEV_QFRZN) != 0;
1740 status &= CAM_STATUS_MASK;
1741 openings = relsim_flags = 0;
1742
1743 switch (status) {
1744 case CAM_REQ_CMP:
1745 error = 0;
1746 break;
1747 case CAM_SCSI_STATUS_ERROR:
1748 error = camperiphscsistatuserror(ccb,
1749 camflags,
1750 sense_flags,
1751 &openings,
1752 &relsim_flags,
1753 &timeout,
1754 &action_string);
1755 break;
1756 case CAM_AUTOSENSE_FAIL:
1757 xpt_print(ccb->ccb_h.path, "AutoSense failed\n");
1758 error = EIO; /* we have to kill the command */
1759 break;
1760 case CAM_ATA_STATUS_ERROR:
1761 if (bootverbose && printed == 0) {
1762 xpt_print(ccb->ccb_h.path, "ATA status error\n");
1763 cam_error_print(ccb, CAM_ESF_ALL, CAM_EPF_ALL);
1764 printed++;
1765 }
1766 /* FALLTHROUGH */
1767 case CAM_REQ_CMP_ERR:
1768 if (bootverbose && printed == 0) {
1769 xpt_print(ccb->ccb_h.path,
1770 "Request completed with CAM_REQ_CMP_ERR\n");
1771 printed++;
1772 }
1773 /* FALLTHROUGH */
1774 case CAM_CMD_TIMEOUT:
1775 if (bootverbose && printed == 0) {
1776 xpt_print(ccb->ccb_h.path, "Command timed out\n");
1777 printed++;
1778 }
1779 /* FALLTHROUGH */
1780 case CAM_UNEXP_BUSFREE:
1781 if (bootverbose && printed == 0) {
1782 xpt_print(ccb->ccb_h.path, "Unexpected Bus Free\n");
1783 printed++;
1784 }
1785 /* FALLTHROUGH */
1786 case CAM_UNCOR_PARITY:
1787 if (bootverbose && printed == 0) {
1788 xpt_print(ccb->ccb_h.path,
1789 "Uncorrected parity error\n");
1790 printed++;
1791 }
1792 /* FALLTHROUGH */
1793 case CAM_DATA_RUN_ERR:
1794 if (bootverbose && printed == 0) {
1795 xpt_print(ccb->ccb_h.path, "Data overrun\n");
1796 printed++;
1797 }
1798 /* decrement the number of retries */
1799 if (ccb->ccb_h.retry_count > 0 &&
1800 (periph->flags & CAM_PERIPH_INVALID) == 0) {
1801 ccb->ccb_h.retry_count--;
1802 error = ERESTART;
1803 } else {
1804 action_string = "Retries exhausted";
1805 error = EIO;
1806 }
1807 break;
1808 case CAM_UA_ABORT:
1809 case CAM_UA_TERMIO:
1810 case CAM_MSG_REJECT_REC:
1811 /* XXX Don't know that these are correct */
1812 error = EIO;
1813 break;
1814 case CAM_SEL_TIMEOUT:
1815 {
1816 struct cam_path *newpath;
1817
1818 if ((camflags & CAM_RETRY_SELTO) != 0) {
1819 if (ccb->ccb_h.retry_count > 0 &&
1820 (periph->flags & CAM_PERIPH_INVALID) == 0) {
1821
1822 ccb->ccb_h.retry_count--;
1823 error = ERESTART;
1824 if (bootverbose && printed == 0) {
1825 xpt_print(ccb->ccb_h.path,
1826 "Selection timeout\n");
1827 printed++;
1828 }
1829
1830 /*
1831 * Wait a bit to give the device
1832 * time to recover before we try again.
1833 */
1834 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1835 timeout = periph_selto_delay;
1836 break;
1837 }
1838 action_string = "Retries exhausted";
1839 }
1840 error = ENXIO;
1841 /* Should we do more if we can't create the path? */
1842 if (xpt_create_path(&newpath, periph,
1843 xpt_path_path_id(ccb->ccb_h.path),
1844 xpt_path_target_id(ccb->ccb_h.path),
1845 CAM_LUN_WILDCARD) != CAM_REQ_CMP)
1846 break;
1847
1848 /*
1849 * Let peripheral drivers know that this device has gone
1850 * away.
1851 */
1852 xpt_async(AC_LOST_DEVICE, newpath, NULL);
1853 xpt_free_path(newpath);
1854 break;
1855 }
1856 case CAM_REQ_INVALID:
1857 case CAM_PATH_INVALID:
1858 case CAM_DEV_NOT_THERE:
1859 case CAM_NO_HBA:
1860 case CAM_PROVIDE_FAIL:
1861 case CAM_REQ_TOO_BIG:
1862 case CAM_LUN_INVALID:
1863 case CAM_TID_INVALID:
1864 error = EINVAL;
1865 break;
1866 case CAM_SCSI_BUS_RESET:
1867 case CAM_BDR_SENT:
1868 /*
1869 * Commands that repeatedly timeout and cause these
1870 * kinds of error recovery actions, should return
1871 * CAM_CMD_TIMEOUT, which allows us to safely assume
1872 * that this command was an innocent bystander to
1873 * these events and should be unconditionally
1874 * retried.
1875 */
1876 if (bootverbose && printed == 0) {
1877 xpt_print_path(ccb->ccb_h.path);
1878 if (status == CAM_BDR_SENT)
1879 printf("Bus Device Reset sent\n");
1880 else
1881 printf("Bus Reset issued\n");
1882 printed++;
1883 }
1884 /* FALLTHROUGH */
1885 case CAM_REQUEUE_REQ:
1886 /* Unconditional requeue */
1887 if (bootverbose && printed == 0) {
1888 xpt_print(ccb->ccb_h.path, "Request requeued\n");
1889 printed++;
1890 }
1891 if ((periph->flags & CAM_PERIPH_INVALID) == 0)
1892 error = ERESTART;
1893 else {
1894 action_string = "Retries exhausted";
1895 error = EIO;
1896 }
1897 break;
1898 case CAM_RESRC_UNAVAIL:
1899 /* Wait a bit for the resource shortage to abate. */
1900 timeout = periph_noresrc_delay;
1901 /* FALLTHROUGH */
1902 case CAM_BUSY:
1903 if (timeout == 0) {
1904 /* Wait a bit for the busy condition to abate. */
1905 timeout = periph_busy_delay;
1906 }
1907 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1908 /* FALLTHROUGH */
1909 default:
1910 /* decrement the number of retries */
1911 if (ccb->ccb_h.retry_count > 0 &&
1912 (periph->flags & CAM_PERIPH_INVALID) == 0) {
1913 ccb->ccb_h.retry_count--;
1914 error = ERESTART;
1915 if (bootverbose && printed == 0) {
1916 xpt_print(ccb->ccb_h.path, "CAM status 0x%x\n",
1917 status);
1918 printed++;
1919 }
1920 } else {
1921 error = EIO;
1922 action_string = "Retries exhausted";
1923 }
1924 break;
1925 }
1926
1927 /*
1928 * If we have an error and are booting verbosely, whine
1929 * *unless* this was a non-retryable selection timeout.
1930 */
1931 if (error != 0 && bootverbose &&
1932 !(status == CAM_SEL_TIMEOUT && (camflags & CAM_RETRY_SELTO) == 0)) {
1933 if (error != ERESTART) {
1934 if (action_string == NULL)
1935 action_string = "Unretryable error";
1936 xpt_print(ccb->ccb_h.path, "Error %d, %s\n",
1937 error, action_string);
1938 } else if (action_string != NULL)
1939 xpt_print(ccb->ccb_h.path, "%s\n", action_string);
1940 else
1941 xpt_print(ccb->ccb_h.path, "Retrying command\n");
1942 }
1943
1944 /* Attempt a retry */
1945 if (error == ERESTART || error == 0) {
1946 if (frozen != 0)
1947 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1948 if (error == ERESTART)
1949 xpt_action(ccb);
1950 if (frozen != 0)
1951 cam_release_devq(ccb->ccb_h.path,
1952 relsim_flags,
1953 openings,
1954 timeout,
1955 /*getcount_only*/0);
1956 }
1957
1958 return (error);
1959}
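/*
 * Illustrative sketch (hypothetical "fooerror", not part of this file):
 * a driver's error routine screens for conditions it wants to handle
 * itself and hands the rest to cam_periph_error():
 *
 *	static int
 *	fooerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
 *	{
 *		struct foo_softc *softc;
 *		struct cam_periph *periph;
 *
 *		periph = xpt_path_periph(ccb->ccb_h.path);
 *		softc = (struct foo_softc *)periph->softc;
 *
 *		return (cam_periph_error(ccb, cam_flags, sense_flags,
 *		    &softc->saved_ccb));
 *	}
 */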