/* (non-source residue from the diff viewer this file was captured from) */
1/*-
2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2015 Avago Technologies
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
29 *
30 * $FreeBSD: stable/10/sys/dev/mps/mps_sas.c 322661 2017-08-18 15:38:08Z ken $
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: stable/10/sys/dev/mps/mps_sas.c 322661 2017-08-18 15:38:08Z ken $");
35
36/* Communications core for Avago Technologies (LSI) MPT2 */
37
38/* TODO Move headers to mpsvar */
39#include <sys/types.h>
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/kernel.h>
43#include <sys/selinfo.h>
44#include <sys/module.h>
45#include <sys/bus.h>
46#include <sys/conf.h>
47#include <sys/bio.h>
48#include <sys/malloc.h>
49#include <sys/uio.h>
50#include <sys/sysctl.h>
51#include <sys/endian.h>
52#include <sys/queue.h>
53#include <sys/kthread.h>
54#include <sys/taskqueue.h>
55#include <sys/sbuf.h>
56
57#include <machine/bus.h>
58#include <machine/resource.h>
59#include <sys/rman.h>
60
61#include <machine/stdarg.h>
62
63#include <cam/cam.h>
64#include <cam/cam_ccb.h>
65#include <cam/cam_xpt.h>
66#include <cam/cam_debug.h>
67#include <cam/cam_sim.h>
68#include <cam/cam_xpt_sim.h>
69#include <cam/cam_xpt_periph.h>
70#include <cam/cam_periph.h>
71#include <cam/scsi/scsi_all.h>
72#include <cam/scsi/scsi_message.h>
73#if __FreeBSD_version >= 900026
74#include <cam/scsi/smp_all.h>
75#endif
76
77#include <dev/mps/mpi/mpi2_type.h>
78#include <dev/mps/mpi/mpi2.h>
79#include <dev/mps/mpi/mpi2_ioc.h>
80#include <dev/mps/mpi/mpi2_sas.h>
81#include <dev/mps/mpi/mpi2_cnfg.h>
82#include <dev/mps/mpi/mpi2_init.h>
83#include <dev/mps/mpi/mpi2_tool.h>
84#include <dev/mps/mps_ioctl.h>
85#include <dev/mps/mpsvar.h>
86#include <dev/mps/mps_table.h>
87#include <dev/mps/mps_sas.h>
88
89#define MPSSAS_DISCOVERY_TIMEOUT 20
90#define MPSSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
91
92/*
93 * static array to check SCSI OpCode for EEDP protection bits
94 */
95#define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
96#define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
97#define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
98static uint8_t op_code_prot[256] = {
99 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
102 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
108 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
109 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
110 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
114 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
115};
116
117MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
118
119static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122static void mpssas_poll(struct cam_sim *sim);
123static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
124 struct mps_command *cm);
125static void mpssas_scsiio_timeout(void *data);
126static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
127static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
128 struct mps_command *cm, union ccb *ccb);
129static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
130static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
131static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
132#if __FreeBSD_version >= 900026
133static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
134static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
135 uint64_t sasaddr);
136static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
137#endif //FreeBSD_version >= 900026
138static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
139static void mpssas_async(void *callback_arg, uint32_t code,
140 struct cam_path *path, void *arg);
141#if (__FreeBSD_version < 901503) || \
142 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
143static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
144 struct ccb_getdev *cgd);
145static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
146#endif
147static int mpssas_send_portenable(struct mps_softc *sc);
148static void mpssas_portenable_complete(struct mps_softc *sc,
149 struct mps_command *cm);
150
151struct mpssas_target *
152mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
153{
154 struct mpssas_target *target;
155 int i;
156
157 for (i = start; i < sassc->maxtargets; i++) {
158 target = &sassc->targets[i];
159 if (target->handle == handle)
160 return (target);
161 }
162
163 return (NULL);
164}
165
166/* we need to freeze the simq during attach and diag reset, to avoid failing
167 * commands before device handles have been found by discovery. Since
168 * discovery involves reading config pages and possibly sending commands,
169 * discovery actions may continue even after we receive the end of discovery
170 * event, so refcount discovery actions instead of assuming we can unfreeze
171 * the simq when we get the event.
172 */
173void
174mpssas_startup_increment(struct mpssas_softc *sassc)
175{
176 MPS_FUNCTRACE(sassc->sc);
177
178 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
179 if (sassc->startup_refcount++ == 0) {
180 /* just starting, freeze the simq */
181 mps_dprint(sassc->sc, MPS_INIT,
182 "%s freezing simq\n", __func__);
183#if __FreeBSD_version >= 1000039
184 xpt_hold_boot();
185#endif
186 xpt_freeze_simq(sassc->sim, 1);
187 }
188 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
189 sassc->startup_refcount);
190 }
191}
192
193void
194mpssas_release_simq_reinit(struct mpssas_softc *sassc)
195{
196 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
197 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
198 xpt_release_simq(sassc->sim, 1);
199 mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
200 }
201}
202
203void
204mpssas_startup_decrement(struct mpssas_softc *sassc)
205{
206 MPS_FUNCTRACE(sassc->sc);
207
208 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
209 if (--sassc->startup_refcount == 0) {
210 /* finished all discovery-related actions, release
211 * the simq and rescan for the latest topology.
212 */
213 mps_dprint(sassc->sc, MPS_INIT,
214 "%s releasing simq\n", __func__);
215 sassc->flags &= ~MPSSAS_IN_STARTUP;
216 xpt_release_simq(sassc->sim, 1);
217#if __FreeBSD_version >= 1000039
218 xpt_release_boot();
219#else
220 mpssas_rescan_target(sassc->sc, NULL);
221#endif
222 }
223 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
224 sassc->startup_refcount);
225 }
226}
227
228/* The firmware requires us to stop sending commands when we're doing task
229 * management, so refcount the TMs and keep the simq frozen when any are in
230 * use.
231 */
/*
 * Allocate a task-management command from the dedicated high-priority pool;
 * returns NULL when the pool is exhausted.
 */
struct mps_command *
mpssas_alloc_tm(struct mps_softc *sc)
{
	return (mps_alloc_high_priority_command(sc));
}
240
241void
242mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
243{
244 int target_id = 0xFFFFFFFF;
245
246 if (tm == NULL)
247 return;
248
249 /*
250 * For TM's the devq is frozen for the device. Unfreeze it here and
251 * free the resources used for freezing the devq. Must clear the
252 * INRESET flag as well or scsi I/O will not work.
253 */
254 if (tm->cm_targ != NULL) {
255 tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
256 target_id = tm->cm_targ->tid;
257 }
258 if (tm->cm_ccb) {
259 mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n",
260 target_id);
261 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
262 xpt_free_path(tm->cm_ccb->ccb_h.path);
263 xpt_free_ccb(tm->cm_ccb);
264 }
265
266 mps_free_high_priority_command(sc, tm);
267}
268
269void
270mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
271{
272 struct mpssas_softc *sassc = sc->sassc;
273 path_id_t pathid;
274 target_id_t targetid;
275 union ccb *ccb;
276
277 MPS_FUNCTRACE(sc);
278 pathid = cam_sim_path(sassc->sim);
279 if (targ == NULL)
280 targetid = CAM_TARGET_WILDCARD;
281 else
282 targetid = targ - sassc->targets;
283
284 /*
285 * Allocate a CCB and schedule a rescan.
286 */
287 ccb = xpt_alloc_ccb_nowait();
288 if (ccb == NULL) {
289 mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
290 return;
291 }
292
293 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
294 targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
295 mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
296 xpt_free_ccb(ccb);
297 return;
298 }
299
300 if (targetid == CAM_TARGET_WILDCARD)
301 ccb->ccb_h.func_code = XPT_SCAN_BUS;
302 else
303 ccb->ccb_h.func_code = XPT_SCAN_TGT;
304
305 mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
306 xpt_rescan(ccb);
307}
308
/*
 * Log a printf-style message about a command, prefixed with its CAM path
 * (or a synthesized "noperiph" sim:bus:target:lun tuple when no CCB is
 * attached) and the request SMID.  Output is suppressed unless 'level'
 * is enabled in the controller's mps_debug mask.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mps_debug & level) == 0)
		return;

	/* Fixed-length sbuf backed by the on-stack buffer. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		/* Describe the request via its CAM path; for SCSI I/O also
		 * append the decoded CDB and transfer length. */
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB: fall back to a sim-derived address prefix. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
354
355
356static void
357mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
358{
359 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
360 struct mpssas_target *targ;
361 uint16_t handle;
362
363 MPS_FUNCTRACE(sc);
364
365 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
366 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
367 targ = tm->cm_targ;
368
369 if (reply == NULL) {
370 /* XXX retry the remove after the diag reset completes? */
371 mps_dprint(sc, MPS_FAULT,
372 "%s NULL reply resetting device 0x%04x\n", __func__,
373 handle);
374 mpssas_free_tm(sc, tm);
375 return;
376 }
377
378 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
379 MPI2_IOCSTATUS_SUCCESS) {
380 mps_dprint(sc, MPS_ERROR,
381 "IOCStatus = 0x%x while resetting device 0x%x\n",
382 le16toh(reply->IOCStatus), handle);
383 }
384
385 mps_dprint(sc, MPS_XINFO,
386 "Reset aborted %u commands\n", reply->TerminationCount);
387 mps_free_reply(sc, tm->cm_reply_data);
388 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
389
390 mps_dprint(sc, MPS_XINFO,
391 "clearing target %u handle 0x%04x\n", targ->tid, handle);
392
393 /*
394 * Don't clear target if remove fails because things will get confusing.
395 * Leave the devname and sasaddr intact so that we know to avoid reusing
396 * this target id if possible, and so we can assign the same target id
397 * to this device if it comes back in the future.
398 */
399 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
400 MPI2_IOCSTATUS_SUCCESS) {
401 targ = tm->cm_targ;
402 targ->handle = 0x0;
403 targ->encl_handle = 0x0;
404 targ->encl_slot = 0x0;
405 targ->exp_dev_handle = 0x0;
406 targ->phy_num = 0x0;
407 targ->linkrate = 0x0;
408 targ->devinfo = 0x0;
409 targ->flags = 0x0;
410 }
411
412 mpssas_free_tm(sc, tm);
413}
414
415
416/*
417 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
418 * Otherwise Volume Delete is same as Bare Drive Removal.
419 */
420void
421mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
422{
423 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
424 struct mps_softc *sc;
425 struct mps_command *cm;
426 struct mpssas_target *targ = NULL;
427
428 MPS_FUNCTRACE(sassc->sc);
429 sc = sassc->sc;
430
431#ifdef WD_SUPPORT
432 /*
433 * If this is a WD controller, determine if the disk should be exposed
434 * to the OS or not. If disk should be exposed, return from this
435 * function without doing anything.
436 */
437 if (sc->WD_available && (sc->WD_hide_expose ==
438 MPS_WD_EXPOSE_ALWAYS)) {
439 return;
440 }
441#endif //WD_SUPPORT
442
443 targ = mpssas_find_target_by_handle(sassc, 0, handle);
444 if (targ == NULL) {
445 /* FIXME: what is the action? */
446 /* We don't know about this device? */
447 mps_dprint(sc, MPS_ERROR,
448 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
449 return;
450 }
451
452 targ->flags |= MPSSAS_TARGET_INREMOVAL;
453
454 cm = mpssas_alloc_tm(sc);
455 if (cm == NULL) {
456 mps_dprint(sc, MPS_ERROR,
457 "%s: command alloc failure\n", __func__);
458 return;
459 }
460
461 mpssas_rescan_target(sc, targ);
462
463 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
464 req->DevHandle = targ->handle;
465 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
466 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
467
468 /* SAS Hard Link Reset / SATA Link Reset */
469 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
470
471 cm->cm_targ = targ;
472 cm->cm_data = NULL;
473 cm->cm_desc.HighPriority.RequestFlags =
474 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
475 cm->cm_complete = mpssas_remove_volume;
476 cm->cm_complete_data = (void *)(uintptr_t)handle;
477
478 mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
479 __func__, targ->tid);
480 mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
481
482 mps_map_command(sc, cm);
483}
484
485/*
486 * The MPT2 firmware performs debounce on the link to avoid transient link
487 * errors and false removals. When it does decide that link has been lost
488 * and a device need to go away, it expects that the host will perform a
489 * target reset and then an op remove. The reset has the side-effect of
490 * aborting any outstanding requests for the device, which is required for
491 * the op-remove to succeed. It's not clear if the host should check for
492 * the device coming back alive after the reset.
493 */
494void
495mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
496{
497 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
498 struct mps_softc *sc;
499 struct mps_command *cm;
500 struct mpssas_target *targ = NULL;
501
502 MPS_FUNCTRACE(sassc->sc);
503
504 sc = sassc->sc;
505
506 targ = mpssas_find_target_by_handle(sassc, 0, handle);
507 if (targ == NULL) {
508 /* FIXME: what is the action? */
509 /* We don't know about this device? */
510 mps_dprint(sc, MPS_ERROR,
511 "%s : invalid handle 0x%x \n", __func__, handle);
512 return;
513 }
514
515 targ->flags |= MPSSAS_TARGET_INREMOVAL;
516
517 cm = mpssas_alloc_tm(sc);
518 if (cm == NULL) {
519 mps_dprint(sc, MPS_ERROR,
520 "%s: command alloc failure\n", __func__);
521 return;
522 }
523
524 mpssas_rescan_target(sc, targ);
525
526 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
527 memset(req, 0, sizeof(*req));
528 req->DevHandle = htole16(targ->handle);
529 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
530 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
531
532 /* SAS Hard Link Reset / SATA Link Reset */
533 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
534
535 cm->cm_targ = targ;
536 cm->cm_data = NULL;
537 cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
538 cm->cm_complete = mpssas_remove_device;
539 cm->cm_complete_data = (void *)(uintptr_t)handle;
540
541 mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
542 __func__, targ->tid);
543 mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
544
545 mps_map_command(sc, cm);
546}
547
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_remove().  Reuses the same command to send the follow-up
 * SAS_OP_REMOVE_DEVICE IO-unit-control request, then completes any commands
 * still queued on the target with CAM_DEV_NOT_THERE.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply resetting device 0x%04x\n", __func__,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_ERROR,
		    "IOCStatus = 0x%x while resetting device 0x%x\n",
		    le16toh(reply->IOCStatus), handle);
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command: rebuild it as an op-remove request. */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	/*
	 * The reset aborted everything the firmware had outstanding for the
	 * target; fail any of its commands still on our queue.  Note that
	 * 'tm' is deliberately reused as the iteration cursor here — the TM
	 * itself has already been handed to mps_map_command() above.
	 */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mpssas_scsiio_complete(sc, tm);
	}
}
620
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE request sent by
 * mpssas_remove_device().  On success, clear the target's firmware-derived
 * state and free its LUN records so the target slot can be reused.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
			   "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Release the per-LUN bookkeeping attached to the target. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}


	mpssas_free_tm(sc, tm);
}
688
689static int
690mpssas_register_events(struct mps_softc *sc)
691{
692 u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
693
694 bzero(events, 16);
695 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
696 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
697 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
698 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
699 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
700 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
701 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
702 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
703 setbit(events, MPI2_EVENT_IR_VOLUME);
704 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
705 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
706 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
707
708 mps_register_events(sc, events, mpssas_evt_handler, NULL,
709 &sc->sassc->mpssas_eh);
710
711 return (0);
712}
713
/*
 * Attach the SAS/CAM layer for a controller: allocate the softc and target
 * array, create and register the SIM, start the event-handling taskqueue,
 * freeze the simq until discovery completes, and register for async events
 * used to probe EEDP capabilities.  Returns 0 on success or an errno; on
 * failure, mps_detach_sas() cleans up whatever was set up.
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	cam_status status;
	int unit, error = 0;

	MPS_FUNCTRACE(sc);

	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
	/* NOTE(review): M_WAITOK malloc cannot return NULL; this check is
	 * dead code kept for safety/style. */
	if(!sassc) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		__func__, __LINE__);
		return (ENOMEM);
	}

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mpssas_target) *
	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
	if(!sassc->targets) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		__func__, __LINE__);
		free(sassc, M_MPT2);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	/* One simq slot per firmware request credit. */
	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	mps_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
		mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
		    error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mpssas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mps_printf(sc, "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

		/* Newer CAM reports EEDP via ADVINFO; older via FOUND_DEVICE. */
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED;
#else
		event = AC_FOUND_DEVICE;
#endif
		status = xpt_register_async(event, mpssas_async, sc,
					    sassc->path);
		if (status != CAM_REQ_CMP) {
			mps_dprint(sc, MPS_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mps_printf(sc, "EEDP capabilities disabled.\n");
	}

	mps_unlock(sc);

	mpssas_register_events(sc);
out:
	if (error)
		mps_detach_sas(sc);
	return (error);
}
842
/*
 * Tear down the SAS/CAM attachment built by mps_attach_sas().  Safe to call
 * on a partially-attached instance: each resource is checked before being
 * released.  Always returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* If startup never finished, the simq is still frozen; release it. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free every target's LUN list, then the target array itself. */
	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
902
903void
904mpssas_discovery_end(struct mpssas_softc *sassc)
905{
906 struct mps_softc *sc = sassc->sc;
907
908 MPS_FUNCTRACE(sc);
909
910 if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
911 callout_stop(&sassc->discovery_callout);
912
913 /*
914 * After discovery has completed, check the mapping table for any
915 * missing devices and update their missing counts. Only do this once
916 * whenever the driver is initialized so that missing counts aren't
917 * updated unnecessarily. Note that just because discovery has
918 * completed doesn't mean that events have been processed yet. The
919 * check_devices function is a callout timer that checks if ALL devices
920 * are missing. If so, it will wait a little longer for events to
921 * complete and keep resetting itself until some device in the mapping
922 * table is not missing, meaning that event processing has started.
923 */
924 if (sc->track_mapping_events) {
925 mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has "
926 "completed. Check for missing devices in the mapping "
927 "table.\n");
928 callout_reset(&sc->device_check_callout,
929 MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
930 sc);
931 }
932}
933
/*
 * CAM action entry point for the SIM.  Dispatches CCBs by function code;
 * path/transfer-settings/geometry queries are answered inline, while SCSI
 * I/O, SMP I/O, and device resets are handed to their dedicated handlers
 * (which complete the CCB themselves and therefore return directly).
 * Called with the controller mutex held.
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Describe the HBA: capabilities, limits, and identity. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mps_softc *sc = sassc->sc;
		uint8_t sges_per_frame;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if __FreeBSD_version >= 1000039
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;

		/*
		 * Max IO Size is Page Size * the following:
		 * ((SGEs per frame - 1 for chain element) *
		 * Max Chain Depth) + 1 for no chain needed in last frame
		 *
		 * If user suggests a Max IO size to use, use the smaller of the
		 * user's value and the calculated value as long as the user's
		 * value is larger than 0. The user's value is in pages.
		 */
		sges_per_frame = ((sc->facts->IOCRequestFrameSize * 4) /
		    sizeof(MPI2_SGE_SIMPLE64)) - 1;
		cpi->maxio = (sges_per_frame * sc->facts->MaxChainDepth) + 1;
		cpi->maxio *= PAGE_SIZE;
		if ((sc->max_io_pages > 0) && (sc->max_io_pages * PAGE_SIZE <
		    cpi->maxio))
			cpi->maxio = sc->max_io_pages * PAGE_SIZE;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report per-target SAS link rate and tagged-queueing. */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		if (targ->handle == 0x0) {
			/* Handle 0 means no device is mapped at this ID. */
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/* Firmware link-rate codes map to Mb/s. */
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* Not supported by the firmware interface; report success. */
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		mpssas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1076
1077static void
1078mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1079 target_id_t target_id, lun_id_t lun_id)
1080{
1081 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1082 struct cam_path *path;
1083
1084 mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1085 ac_code, target_id, (uintmax_t)lun_id);
1086
1087 if (xpt_create_path(&path, NULL,
1088 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1089 mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1090 "notification\n");
1091 return;
1092 }
1093
1094 xpt_async(ac_code, path, NULL);
1095 xpt_free_path(path);
1096}
1097
/*
 * Force-complete every in-use command after a controller diag reset.
 * The hardware will never reply to these requests, so each command is
 * completed with a NULL reply: commands with a completion callback are
 * invoked, polled/sleeping submitters are flagged or woken, and anything
 * left in a non-free state with neither mechanism is logged as an
 * anomaly.  Called with the softc mutex held.
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* polled submitters spin waiting for MPS_CM_FLAGS_COMPLETE */
		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
		}

		/* sleeping submitters asked to be woken via wakeup(cm) */
		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if (cm->cm_sc->io_cmds_active != 0)
			cm->cm_sc->io_cmds_active--;

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm, MPS_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
1146
/*
 * Post-diag-reset fixup: re-enter startup/discovery mode, announce a bus
 * reset to CAM, force-complete all outstanding commands, and invalidate
 * cached per-target state.  Device handles change across a reset, so all
 * targets must be rediscovered before I/O can resume.
 */
void
mpssas_handle_reinit(struct mps_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPSSAS_IN_STARTUP;
	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
	mpssas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mpssas_complete_all_commands(sc);

	mps_dprint(sc, MPS_INIT,
	    "%s startup %u after command completion\n", __func__,
	    sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
	}
}
1190
1191static void
1192mpssas_tm_timeout(void *data)
1193{
1194 struct mps_command *tm = data;
1195 struct mps_softc *sc = tm->cm_sc;
1196
1197 mtx_assert(&sc->mps_mtx, MA_OWNED);
1198
1199 mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1200 "task mgmt %p timed out\n", tm);
1201 mps_reinit(sc);
1202}
1203
1204static void
1205mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1206{
1207 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1208 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1209 unsigned int cm_count = 0;
1210 struct mps_command *cm;
1211 struct mpssas_target *targ;
1212
1213 callout_stop(&tm->cm_callout);
1214
1215 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1216 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1217 targ = tm->cm_targ;
1218
1219 /*
1220 * Currently there should be no way we can hit this case. It only
1221 * happens when we have a failure to allocate chain frames, and
1222 * task management commands don't have S/G lists.
1223 * XXXSL So should it be an assertion?
1224 */
1225 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1226 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! "
1227 "This should not happen!\n", __func__, tm->cm_flags);
1228 mpssas_free_tm(sc, tm);
1229 return;
1230 }
1231
1232 if (reply == NULL) {
1233 mpssas_log_command(tm, MPS_RECOVERY,
1234 "NULL reset reply for tm %p\n", tm);
1235 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1236 /* this completion was due to a reset, just cleanup */
1237 targ->tm = NULL;
1238 mpssas_free_tm(sc, tm);
1239 }
1240 else {
1241 /* we should have gotten a reply. */
1242 mps_reinit(sc);
1243 }
1244 return;
1245 }
1246
1247 mpssas_log_command(tm, MPS_RECOVERY,
1248 "logical unit reset status 0x%x code 0x%x count %u\n",
1249 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1250 le32toh(reply->TerminationCount));
1251
1252 /* See if there are any outstanding commands for this LUN.
1253 * This could be made more efficient by using a per-LU data
1254 * structure of some sort.
1255 */
1256 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1257 if (cm->cm_lun == tm->cm_lun)
1258 cm_count++;
1259 }
1260
1261 if (cm_count == 0) {
1262 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1263 "logical unit %u finished recovery after reset\n",
1264 tm->cm_lun, tm);
1265
1266 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1267 tm->cm_lun);
1268
1269 /* we've finished recovery for this logical unit. check and
1270 * see if some other logical unit has a timedout command
1271 * that needs to be processed.
1272 */
1273 cm = TAILQ_FIRST(&targ->timedout_commands);
1274 if (cm) {
1275 mpssas_send_abort(sc, tm, cm);
1276 }
1277 else {
1278 targ->tm = NULL;
1279 mpssas_free_tm(sc, tm);
1280 }
1281 }
1282 else {
1283 /* if we still have commands for this LUN, the reset
1284 * effectively failed, regardless of the status reported.
1285 * Escalate to a target reset.
1286 */
1287 mpssas_log_command(tm, MPS_RECOVERY,
1288 "logical unit reset complete for tm %p, but still have %u command(s)\n",
1289 tm, cm_count);
1290 mpssas_send_reset(sc, tm,
1291 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1292 }
1293}
1294
/*
 * Completion handler for a TARGET RESET task management request.  If the
 * target has no outstanding commands left, recovery is complete: the BDR
 * is announced to CAM and the TM frame is released.  Otherwise the reset
 * effectively failed and we escalate to a full controller reinit.
 */
static void
mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
			   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL reset reply for tm %p\n", tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "recovery finished after target reset\n");

		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "target reset complete for tm %p, but still have %u command(s)\n",
		    tm, targ->outstanding);
		mps_reinit(sc);
	}
}
1364
1365#define MPS_RESET_TIMEOUT 30
1366
1367int
1368mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1369{
1370 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1371 struct mpssas_target *target;
1372 int err;
1373
1374 target = tm->cm_targ;
1375 if (target->handle == 0) {
1376 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1377 __func__, target->tid);
1378 return -1;
1379 }
1380
1381 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1382 req->DevHandle = htole16(target->handle);
1383 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1384 req->TaskType = type;
1385
1386 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1387 /* XXX Need to handle invalid LUNs */
1388 MPS_SET_LUN(req->LUN, tm->cm_lun);
1389 tm->cm_targ->logical_unit_resets++;
1390 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1391 "sending logical unit reset\n");
1392 tm->cm_complete = mpssas_logical_unit_reset_complete;
1393 mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1394 }
1395 else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1396 /*
1397 * Target reset method =
1398 * SAS Hard Link Reset / SATA Link Reset
1399 */
1400 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1401 tm->cm_targ->target_resets++;
1402 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1403 "sending target reset\n");
1404 tm->cm_complete = mpssas_target_reset_complete;
1405 mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1406 }
1407 else {
1408 mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
1409 return -1;
1410 }
1411
1412 tm->cm_data = NULL;
1413 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1414 tm->cm_complete_data = (void *)tm;
1415
1416 callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1417 mpssas_tm_timeout, tm);
1418
1419 err = mps_map_command(sc, tm);
1420 if (err)
1421 mpssas_log_command(tm, MPS_RECOVERY,
1422 "error %d sending reset type %u\n",
1423 err, type);
1424
1425 return err;
1426}
1427
1428
/*
 * Completion handler for an ABORT TASK task management request.  Looks at
 * the head of the target's timedout command list to decide the next step:
 * empty list -> recovery done, release the TM; head is a different command
 * -> the abort succeeded, abort the next timedout command; head is still
 * the command we tried to abort -> the abort failed, escalate to a logical
 * unit reset.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n", 
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n", 
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));
		
		mpssas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1510
1511#define MPS_ABORT_TIMEOUT 5
1512
1513static int
1514mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1515{
1516 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1517 struct mpssas_target *targ;
1518 int err;
1519
1520 targ = cm->cm_targ;
1521 if (targ->handle == 0) {
1522 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1523 __func__, cm->cm_ccb->ccb_h.target_id);
1524 return -1;
1525 }
1526
1527 mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
1528 "Aborting command %p\n", cm);
1529
1530 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1531 req->DevHandle = htole16(targ->handle);
1532 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1533 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1534
1535 /* XXX Need to handle invalid LUNs */
1536 MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1537
1538 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1539
1540 tm->cm_data = NULL;
1541 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1542 tm->cm_complete = mpssas_abort_complete;
1543 tm->cm_complete_data = (void *)tm;
1544 tm->cm_targ = cm->cm_targ;
1545 tm->cm_lun = cm->cm_lun;
1546
1547 callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1548 mpssas_tm_timeout, tm);
1549
1550 targ->aborts++;
1551
1552 mps_dprint(sc, MPS_INFO, "Sending reset from %s for target ID %d\n",
1553 __func__, targ->tid);
1554 mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1555
1556 err = mps_map_command(sc, tm);
1557 if (err)
1558 mps_dprint(sc, MPS_RECOVERY,
1559 "error %d sending abort for cm %p SMID %u\n",
1560 err, cm, req->TaskMID);
1561 return err;
1562}
1563
1564static void
1565mpssas_scsiio_timeout(void *data)
1566{
1567 struct mps_softc *sc;
1568 struct mps_command *cm;
1569 struct mpssas_target *targ;
1570
1571 cm = (struct mps_command *)data;
1572 sc = cm->cm_sc;
1573
1574 MPS_FUNCTRACE(sc);
1575 mtx_assert(&sc->mps_mtx, MA_OWNED);
1576
1577 mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc);
1578
1579 /*
1580 * Run the interrupt handler to make sure it's not pending. This
1581 * isn't perfect because the command could have already completed
1582 * and been re-used, though this is unlikely.
1583 */
1584 mps_intr_locked(sc);
1585 if (cm->cm_state == MPS_CM_STATE_FREE) {
1586 mpssas_log_command(cm, MPS_XINFO,
1587 "SCSI command %p almost timed out\n", cm);
1588 return;
1589 }
1590
1591 if (cm->cm_ccb == NULL) {
1592 mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1593 return;
1594 }
1595
1596 targ = cm->cm_targ;
1597 targ->timeouts++;
1598
1599 mpssas_log_command(cm, MPS_ERROR, "command timeout %d cm %p target "
1600 "%u, handle(0x%04x)\n", cm->cm_ccb->ccb_h.timeout, cm, targ->tid,
1601 targ->handle);
1602
1603 /* XXX first, check the firmware state, to see if it's still
1604 * operational. if not, do a diag reset.
1605 */
1606 mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1607 cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1608 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1609
1610 if (targ->tm != NULL) {
1611 /* target already in recovery, just queue up another
1612 * timedout command to be processed later.
1613 */
1614 mps_dprint(sc, MPS_RECOVERY,
1615 "queued timedout cm %p for processing by tm %p\n",
1616 cm, targ->tm);
1617 }
1618 else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1619 mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1620 cm, targ->tm);
1621
1622 /* start recovery by aborting the first timedout command */
1623 mpssas_send_abort(sc, targ->tm, cm);
1624 }
1625 else {
1626 /* XXX queue this target up for recovery once a TM becomes
1627 * available. The firmware only has a limited number of
1628 * HighPriority credits for the high priority requests used
1629 * for task management, and we ran out.
1630 *
1631 * Isilon: don't worry about this for now, since we have
1632 * more credits than disks in an enclosure, and limit
1633 * ourselves to one TM per target for recovery.
1634 */
1635 mps_dprint(sc, MPS_RECOVERY,
1636 "timedout cm %p failed to allocate a tm\n", cm);
1637 }
1638
1639}
1640
/*
 * Translate an XPT_SCSI_IO CCB into an MPI2 SCSI IO request and queue it
 * to the controller.  Validates the target, handles special target states
 * (RAID component, in-removal, reset-in-progress, driver shutdown), sets
 * up EEDP protection information when the LUN is formatted for it, arms
 * the per-command timeout, and hands the command to mps_map_command()
 * for DMA mapping and submission.  Completion is via
 * mpssas_scsiio_complete().
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	    csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	if (targ->handle == 0x0) {
		/* no device handle: target was never discovered or is gone */
		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n", 
		    __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		/* RAID members only take I/O through the volume target */
		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPSSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
		/* out of commands (or mid diag reset): freeze the simq and
		 * ask CAM to requeue this CCB later.
		 */
		if (cm != NULL) {
			mps_free_command(sc, cm);
		}
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}
 
	if (csio->cdb_len == 32)
                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);
	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	/* NOTE(review): IoFlags was already set above; this repeats it */
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		/* lun is NULL here if the loop above found no match */
		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
				    htole32(req->CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	/* bytes left for the SGL after the 24-dword header (frame is
	 * presumably 32 dwords — TODO confirm against request frame size)
	 */
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message.  If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
		}
	}

	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mpssas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1909
1910static void
1911mps_response_code(struct mps_softc *sc, u8 response_code)
1912{
1913 char *desc;
1914
1915 switch (response_code) {
1916 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1917 desc = "task management request completed";
1918 break;
1919 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1920 desc = "invalid frame";
1921 break;
1922 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1923 desc = "task management request not supported";
1924 break;
1925 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1926 desc = "task management request failed";
1927 break;
1928 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1929 desc = "task management request succeeded";
1930 break;
1931 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1932 desc = "invalid lun";
1933 break;
1934 case 0xA:
1935 desc = "overlapped tag attempted";
1936 break;
1937 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1938 desc = "task queued, however not sent to target";
1939 break;
1940 default:
1941 desc = "unknown";
1942 break;
1943 }
1944 mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
1945 response_code, desc);
1946}
1947/**
1948 * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1949 */
1950static void
1951mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1952 Mpi2SCSIIOReply_t *mpi_reply)
1953{
1954 u32 response_info;
1955 u8 *response_bytes;
1956 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1957 MPI2_IOCSTATUS_MASK;
1958 u8 scsi_state = mpi_reply->SCSIState;
1959 u8 scsi_status = mpi_reply->SCSIStatus;
1960 char *desc_ioc_state = NULL;
1961 char *desc_scsi_status = NULL;
1962 char *desc_scsi_state = sc->tmp_string;
1963 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1964
1965 if (log_info == 0x31170000)
1966 return;
1967
1968 switch (ioc_status) {
1969 case MPI2_IOCSTATUS_SUCCESS:
1970 desc_ioc_state = "success";
1971 break;
1972 case MPI2_IOCSTATUS_INVALID_FUNCTION:
1973 desc_ioc_state = "invalid function";
1974 break;
1975 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1976 desc_ioc_state = "scsi recovered error";
1977 break;
1978 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1979 desc_ioc_state = "scsi invalid dev handle";
1980 break;
1981 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1982 desc_ioc_state = "scsi device not there";
1983 break;
1984 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1985 desc_ioc_state = "scsi data overrun";
1986 break;
1987 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1988 desc_ioc_state = "scsi data underrun";
1989 break;
1990 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1991 desc_ioc_state = "scsi io data error";
1992 break;
1993 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1994 desc_ioc_state = "scsi protocol error";
1995 break;
1996 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1997 desc_ioc_state = "scsi task terminated";
1998 break;
1999 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2000 desc_ioc_state = "scsi residual mismatch";
2001 break;
2002 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2003 desc_ioc_state = "scsi task mgmt failed";
2004 break;
2005 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2006 desc_ioc_state = "scsi ioc terminated";
2007 break;
2008 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2009 desc_ioc_state = "scsi ext terminated";
2010 break;
2011 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2012 desc_ioc_state = "eedp guard error";
2013 break;
2014 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2015 desc_ioc_state = "eedp ref tag error";
2016 break;
2017 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2018 desc_ioc_state = "eedp app tag error";
2019 break;
2020 default:
2021 desc_ioc_state = "unknown";
2022 break;
2023 }
2024
2025 switch (scsi_status) {
2026 case MPI2_SCSI_STATUS_GOOD:
2027 desc_scsi_status = "good";
2028 break;
2029 case MPI2_SCSI_STATUS_CHECK_CONDITION:
2030 desc_scsi_status = "check condition";
2031 break;
2032 case MPI2_SCSI_STATUS_CONDITION_MET:
2033 desc_scsi_status = "condition met";
2034 break;
2035 case MPI2_SCSI_STATUS_BUSY:
2036 desc_scsi_status = "busy";
2037 break;
2038 case MPI2_SCSI_STATUS_INTERMEDIATE:
2039 desc_scsi_status = "intermediate";
2040 break;
2041 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2042 desc_scsi_status = "intermediate condmet";
2043 break;
2044 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2045 desc_scsi_status = "reservation conflict";
2046 break;
2047 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2048 desc_scsi_status = "command terminated";
2049 break;
2050 case MPI2_SCSI_STATUS_TASK_SET_FULL:
2051 desc_scsi_status = "task set full";
2052 break;
2053 case MPI2_SCSI_STATUS_ACA_ACTIVE:
2054 desc_scsi_status = "aca active";
2055 break;
2056 case MPI2_SCSI_STATUS_TASK_ABORTED:
2057 desc_scsi_status = "task aborted";
2058 break;
2059 default:
2060 desc_scsi_status = "unknown";
2061 break;
2062 }
2063
2064 desc_scsi_state[0] = '\0';
2065 if (!scsi_state)
2066 desc_scsi_state = " ";
2067 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2068 strcat(desc_scsi_state, "response info ");
2069 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2070 strcat(desc_scsi_state, "state terminated ");
2071 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2072 strcat(desc_scsi_state, "no status ");
2073 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2074 strcat(desc_scsi_state, "autosense failed ");
2075 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2076 strcat(desc_scsi_state, "autosense valid ");
2077
2078 mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2079 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2080 /* We can add more detail about underflow data here
2081 * TO-DO
2082 * */
2083 mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
2084 "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2085 desc_scsi_state, scsi_state);
2086
2087 if (sc->mps_debug & MPS_XINFO &&
2088 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2089 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
2090 scsi_sense_print(csio);
2091 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
2092 }
2093
2094 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2095 response_info = le32toh(mpi_reply->ResponseInfo);
2096 response_bytes = (u8 *)&response_info;
2097 mps_response_code(sc,response_bytes[0]);
2098 }
2099}
2100
/*
 * Completion handler for XPT_SCSI_IO commands (and for SCSI I/O commands
 * that failed before reaching the hardware).  Called with the MPS lock
 * held (asserted below).
 *
 * Responsibilities, in order:
 *  - cancel the per-command timeout and sync/unload the data DMA map;
 *  - update per-target accounting and recovery bookkeeping;
 *  - fast-path completion when there is no MPI reply frame (mapping
 *    error or diag reset);
 *  - for Direct Drive (WarpDrive) I/O errors, reissue the CCB to the
 *    original IR volume;
 *  - otherwise translate the MPI2 IOCStatus/SCSIStatus/SCSIState into a
 *    CAM CCB status, copy autosense data, and complete the CCB.
 */
static void
mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mpssas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on;
	int dir = 0, i;
	u16 alloc_len;
	struct mpssas_target *target;
	target_id_t target_id;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	/* The command is done one way or another; its timeout is moot. */
	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	target_id = csio->ccb_h.target_id;
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here? It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* Per-target accounting: this command is no longer outstanding. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

	/* Log completions that happen while error recovery is in progress. */
	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
		mpssas_log_command(cm, MPS_RECOVERY,
		    "reset completed cm %p ccb %p\n",
		    cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);

		/*
		 * Currently the only error included in the mask is
		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
			mps_dprint(sc, MPS_XINFO, "Error sending command, "
			    "freezing SIM queue\n");
		}
	}

	/*
	 * If this is a Start Stop Unit command and it was issued by the driver
	 * during shutdown, decrement the refcount to account for all of the
	 * commands that were sent.  All SSU commands should be completed before
	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
	 * is TRUE.
	 */
	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
		mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
		sc->SSU_refcount--;
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
				mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
			else {
				/* No reply and no recorded error: success. */
				mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
				ccb->csio.scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
				mps_dprint(sc, MPS_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
		 */
		if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mps_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mpssas_log_command(cm, MPS_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/*
	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
	 * Volume if an error occurred (normal I/O retry).  Use the original
	 * CCB, but set a flag that this will be a retry so that it's sent to
	 * the original volume.  Free the command but reuse the CCB.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
		mps_free_command(sc, cm);
		ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
		mpssas_action_scsiio(sassc, ccb);
		return;
	} else
		ccb->ccb_h.sim_priv.entries[0].field = 0;

	/* Translate the MPI2 IOCStatus into a CAM CCB status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) &
		    MPI2_SCSI_RI_MASK_REASONCODE) ==
		    MPS_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		/* Copy firmware-supplied autosense data into the CCB. */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
				    returned_sense_len;
			else
				ccb->csio.sense_resid = 0;

			sense_len = min(returned_sense_len,
			    ccb->csio.sense_len - ccb->csio.sense_resid);
			bzero(&ccb->csio.sense_data,
			    sizeof(ccb->csio.sense_data));
			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
		    (sc->control_TLR) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			/* Allocation length is CDB bytes 3-4 (big-endian). */
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			alloc_len -= csio->resid;
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}

		/*
		 * If this is a SATA direct-access end device, mark it so that
		 * a SCSI StartStopUnit command will be sent to it when the
		 * driver is being shutdown.
		 *
		 * NOTE(review): unlike the TLR check above, this condition
		 * dereferences csio->data_ptr without first checking for
		 * CAM_DATA_VADDR or NULL — presumably safe for the INQUIRY
		 * CCBs that reach here, but worth confirming against callers.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
		    ((sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
			target = &sassc->targets[target_id];
			target->supports_SSU = TRUE;
			mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
			    target_id);
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mps_print_scsiio_cmd(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
			mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
		else
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * These can sometimes be transient transport-related
		 * errors, and sometimes persistent drive-related errors.
		 * We used to retry these without decrementing the retry
		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
		 * we hit a persistent drive problem that returns one of
		 * these error codes, we would retry indefinitely.  So,
		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
		 * count and avoid infinite retries.  We're taking the
		 * potential risk of flagging false failures in the event
		 * of a topology-related error (e.g. a SAS expander problem
		 * causes a command addressed to a drive to fail), but
		 * avoiding getting into an infinite retry loop.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		mpssas_log_command(cm, MPS_INFO,
		    "terminated ioc %x loginfo %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mpssas_log_command(cm, MPS_XINFO,
		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		break;
	}

	/* Log reply details (prints at MPS_XINFO regardless of status). */
	mps_sc_failed_io_info(sc,csio,rep);

	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
		mps_dprint(sc, MPS_XINFO, "Command completed, "
		    "unfreezing SIM queue\n");
	}

	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2503
2504/* All Request reached here are Endian safe */
2505static void
2506mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2507 union ccb *ccb) {
2508 pMpi2SCSIIORequest_t pIO_req;
2509 struct mps_softc *sc = sassc->sc;
2510 uint64_t virtLBA;
2511 uint32_t physLBA, stripe_offset, stripe_unit;
2512 uint32_t io_size, column;
2513 uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB;
2514
2515 /*
2516 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2517 * Write10, or Write16), build a direct I/O message. Otherwise, the I/O
2518 * will be sent to the IR volume itself. Since Read6 and Write6 are a
2519 * bit different than the 10/16 CDBs, handle them separately.
2520 */
2521 pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2522 CDB = pIO_req->CDB.CDB32;
2523
2524 /*
2525 * Handle 6 byte CDBs.
2526 */
2527 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2528 (CDB[0] == WRITE_6))) {
2529 /*
2530 * Get the transfer size in blocks.
2531 */
2532 io_size = (cm->cm_length >> sc->DD_block_exponent);
2533
2534 /*
2535 * Get virtual LBA given in the CDB.
2536 */
2537 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2538 ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2539
2540 /*
2541 * Check that LBA range for I/O does not exceed volume's
2542 * MaxLBA.
2543 */
2544 if ((virtLBA + (uint64_t)io_size - 1) <=
2545 sc->DD_max_lba) {
2546 /*
2547 * Check if the I/O crosses a stripe boundary. If not,
2548 * translate the virtual LBA to a physical LBA and set
2549 * the DevHandle for the PhysDisk to be used. If it
2550 * does cross a boundry, do normal I/O. To get the
2551 * right DevHandle to use, get the map number for the
2552 * column, then use that map number to look up the
2553 * DevHandle of the PhysDisk.
2554 */
2555 stripe_offset = (uint32_t)virtLBA &
2556 (sc->DD_stripe_size - 1);
2557 if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2558 physLBA = (uint32_t)virtLBA >>
2559 sc->DD_stripe_exponent;
2560 stripe_unit = physLBA / sc->DD_num_phys_disks;
2561 column = physLBA % sc->DD_num_phys_disks;
2562 pIO_req->DevHandle =
2563 htole16(sc->DD_column_map[column].dev_handle);
2564 /* ???? Is this endian safe*/
2565 cm->cm_desc.SCSIIO.DevHandle =
2566 pIO_req->DevHandle;
2567
2568 physLBA = (stripe_unit <<
2569 sc->DD_stripe_exponent) + stripe_offset;
2570 ptrLBA = &pIO_req->CDB.CDB32[1];
2571 physLBA_byte = (uint8_t)(physLBA >> 16);
2572 *ptrLBA = physLBA_byte;
2573 ptrLBA = &pIO_req->CDB.CDB32[2];
2574 physLBA_byte = (uint8_t)(physLBA >> 8);
2575 *ptrLBA = physLBA_byte;
2576 ptrLBA = &pIO_req->CDB.CDB32[3];
2577 physLBA_byte = (uint8_t)physLBA;
2578 *ptrLBA = physLBA_byte;
2579
2580 /*
2581 * Set flag that Direct Drive I/O is
2582 * being done.
2583 */
2584 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2585 }
2586 }
2587 return;
2588 }
2589
2590 /*
2591 * Handle 10, 12 or 16 byte CDBs.
2592 */
2593 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2594 (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2595 (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2596 (CDB[0] == WRITE_12))) {
2597 /*
2598 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2599 * are 0. If not, this is accessing beyond 2TB so handle it in
2600 * the else section. 10-byte and 12-byte CDB's are OK.
2601 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2602 * ready to accept 12byte CDB for Direct IOs.
2603 */
2604 if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2605 (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2606 !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2607 /*
2608 * Get the transfer size in blocks.
2609 */
2610 io_size = (cm->cm_length >> sc->DD_block_exponent);
2611
2612 /*
2613 * Get virtual LBA. Point to correct lower 4 bytes of
2614 * LBA in the CDB depending on command.
2615 */
2616 lba_idx = ((CDB[0] == READ_12) ||
2617 (CDB[0] == WRITE_12) ||
2618 (CDB[0] == READ_10) ||
2619 (CDB[0] == WRITE_10))? 2 : 6;
2620 virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2621 ((uint64_t)CDB[lba_idx + 1] << 16) |
2622 ((uint64_t)CDB[lba_idx + 2] << 8) |
2623 (uint64_t)CDB[lba_idx + 3];
2624
2625 /*
2626 * Check that LBA range for I/O does not exceed volume's
2627 * MaxLBA.
2628 */
2629 if ((virtLBA + (uint64_t)io_size - 1) <=
2630 sc->DD_max_lba) {
2631 /*
2632 * Check if the I/O crosses a stripe boundary.
2633 * If not, translate the virtual LBA to a
2634 * physical LBA and set the DevHandle for the
2635 * PhysDisk to be used. If it does cross a
2636 * boundry, do normal I/O. To get the right
2637 * DevHandle to use, get the map number for the
2638 * column, then use that map number to look up
2639 * the DevHandle of the PhysDisk.
2640 */
2641 stripe_offset = (uint32_t)virtLBA &
2642 (sc->DD_stripe_size - 1);
2643 if ((stripe_offset + io_size) <=
2644 sc->DD_stripe_size) {
2645 physLBA = (uint32_t)virtLBA >>
2646 sc->DD_stripe_exponent;
2647 stripe_unit = physLBA /
2648 sc->DD_num_phys_disks;
2649 column = physLBA %
2650 sc->DD_num_phys_disks;
2651 pIO_req->DevHandle =
2652 htole16(sc->DD_column_map[column].
2653 dev_handle);
2654 cm->cm_desc.SCSIIO.DevHandle =
2655 pIO_req->DevHandle;
2656
2657 physLBA = (stripe_unit <<
2658 sc->DD_stripe_exponent) +
2659 stripe_offset;
2660 ptrLBA =
2661 &pIO_req->CDB.CDB32[lba_idx];
2662 physLBA_byte = (uint8_t)(physLBA >> 24);
2663 *ptrLBA = physLBA_byte;
2664 ptrLBA =
2665 &pIO_req->CDB.CDB32[lba_idx + 1];
2666 physLBA_byte = (uint8_t)(physLBA >> 16);
2667 *ptrLBA = physLBA_byte;
2668 ptrLBA =
2669 &pIO_req->CDB.CDB32[lba_idx + 2];
2670 physLBA_byte = (uint8_t)(physLBA >> 8);
2671 *ptrLBA = physLBA_byte;
2672 ptrLBA =
2673 &pIO_req->CDB.CDB32[lba_idx + 3];
2674 physLBA_byte = (uint8_t)physLBA;
2675 *ptrLBA = physLBA_byte;
2676
2677 /*
2678 * Set flag that Direct Drive I/O is
2679 * being done.
2680 */
2681 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2682 }
2683 }
2684 } else {
2685 /*
2686 * 16-byte CDB and the upper 4 bytes of the CDB are not
2687 * 0. Get the transfer size in blocks.
2688 */
2689 io_size = (cm->cm_length >> sc->DD_block_exponent);
2690
2691 /*
2692 * Get virtual LBA.
2693 */
2694 virtLBA = ((uint64_t)CDB[2] << 54) |
2695 ((uint64_t)CDB[3] << 48) |
2696 ((uint64_t)CDB[4] << 40) |
2697 ((uint64_t)CDB[5] << 32) |
2698 ((uint64_t)CDB[6] << 24) |
2699 ((uint64_t)CDB[7] << 16) |
2700 ((uint64_t)CDB[8] << 8) |
2701 (uint64_t)CDB[9];
2702
2703 /*
2704 * Check that LBA range for I/O does not exceed volume's
2705 * MaxLBA.
2706 */
2707 if ((virtLBA + (uint64_t)io_size - 1) <=
2708 sc->DD_max_lba) {
2709 /*
2710 * Check if the I/O crosses a stripe boundary.
2711 * If not, translate the virtual LBA to a
2712 * physical LBA and set the DevHandle for the
2713 * PhysDisk to be used. If it does cross a
2714 * boundry, do normal I/O. To get the right
2715 * DevHandle to use, get the map number for the
2716 * column, then use that map number to look up
2717 * the DevHandle of the PhysDisk.
2718 */
2719 stripe_offset = (uint32_t)virtLBA &
2720 (sc->DD_stripe_size - 1);
2721 if ((stripe_offset + io_size) <=
2722 sc->DD_stripe_size) {
2723 physLBA = (uint32_t)(virtLBA >>
2724 sc->DD_stripe_exponent);
2725 stripe_unit = physLBA /
2726 sc->DD_num_phys_disks;
2727 column = physLBA %
2728 sc->DD_num_phys_disks;
2729 pIO_req->DevHandle =
2730 htole16(sc->DD_column_map[column].
2731 dev_handle);
2732 cm->cm_desc.SCSIIO.DevHandle =
2733 pIO_req->DevHandle;
2734
2735 physLBA = (stripe_unit <<
2736 sc->DD_stripe_exponent) +
2737 stripe_offset;
2738
2739 /*
2740 * Set upper 4 bytes of LBA to 0. We
2741 * assume that the phys disks are less
2742 * than 2 TB's in size. Then, set the
2743 * lower 4 bytes.
2744 */
2745 pIO_req->CDB.CDB32[2] = 0;
2746 pIO_req->CDB.CDB32[3] = 0;
2747 pIO_req->CDB.CDB32[4] = 0;
2748 pIO_req->CDB.CDB32[5] = 0;
2749 ptrLBA = &pIO_req->CDB.CDB32[6];
2750 physLBA_byte = (uint8_t)(physLBA >> 24);
2751 *ptrLBA = physLBA_byte;
2752 ptrLBA = &pIO_req->CDB.CDB32[7];
2753 physLBA_byte = (uint8_t)(physLBA >> 16);
2754 *ptrLBA = physLBA_byte;
2755 ptrLBA = &pIO_req->CDB.CDB32[8];
2756 physLBA_byte = (uint8_t)(physLBA >> 8);
2757 *ptrLBA = physLBA_byte;
2758 ptrLBA = &pIO_req->CDB.CDB32[9];
2759 physLBA_byte = (uint8_t)physLBA;
2760 *ptrLBA = physLBA_byte;
2761
2762 /*
2763 * Set flag that Direct Drive I/O is
2764 * being done.
2765 */
2766 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2767 }
2768 }
2769 }
2770 }
2771}
2772
2773#if __FreeBSD_version >= 900026
2774static void
2775mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2776{
2777 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2778 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2779 uint64_t sasaddr;
2780 union ccb *ccb;
2781
2782 ccb = cm->cm_complete_data;
2783
2784 /*
2785 * Currently there should be no way we can hit this case. It only
2786 * happens when we have a failure to allocate chain frames, and SMP
2787 * commands require two S/G elements only. That should be handled
2788 * in the standard request size.
2789 */
2790 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2791 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2792 __func__, cm->cm_flags);
2793 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2794 goto bailout;
2795 }
2796
2797 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2798 if (rpl == NULL) {
2799 mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2800 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2801 goto bailout;
2802 }
2803
2804 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2805 sasaddr = le32toh(req->SASAddress.Low);
2806 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2807
2808 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2809 MPI2_IOCSTATUS_SUCCESS ||
2810 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2811 mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2812 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2813 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2814 goto bailout;
2815 }
2816
2817 mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2818 "%#jx completed successfully\n", __func__,
2819 (uintmax_t)sasaddr);
2820
2821 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2822 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2823 else
2824 mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2825
2826bailout:
2827 /*
2828 * We sync in both directions because we had DMAs in the S/G list
2829 * in both directions.
2830 */
2831 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2832 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2833 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2834 mps_free_command(sc, cm);
2835 xpt_done(ccb);
2836}
2837
/*
 * Build and dispatch an SMP passthrough request for an XPT_SMP_IO CCB.
 * Resolves the request/response buffer addresses from the CCB's data
 * transfer mode (virtual address or a single-entry S/G list; physical
 * addresses are rejected), allocates a driver command, fills in the
 * MPI2_SMP_PASSTHROUGH_REQUEST, and maps it with a two-element uio (one
 * iovec for the request buffer, one for the response buffer).
 * Completion is handled by mpssas_smpio_complete().  On any setup
 * failure the CCB is completed immediately with an error status.
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	int error;

	sc = sassc->sc;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		mps_dprint(sc, MPS_ERROR,
		    "%s: physical addresses not supported\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_dprint(sc, MPS_ERROR,
			    "%s: multiple request or response "
			    "buffer segments not supported for SMP\n",
			    __func__);
			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cannot allocate command\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
	    "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 is the outbound request; iovec 1 receives the response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
	    cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
	    MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: error %d returned from mps_map_command()\n",
		    __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mps_free_command(sc, cm);
	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;

}
3008
/*
 * Handle an XPT_SMP_IO CCB: figure out which SAS address the SMP request
 * should be routed to and hand the CCB off to mpssas_send_smpcmd().
 *
 * If the target itself contains an SMP target, the request goes directly
 * to it.  Otherwise the request is routed to the target's parent, which
 * is normally the expander the device hangs off of.  The CCB is completed
 * with an error status if no usable SAS address can be determined.
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly; otherwise we need to figure out what the expander's
	 * address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		/* Look the parent up by handle and use its cached info. */
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/* The parent devinfo/sasaddr are cached on the target. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3144#endif //__FreeBSD_version >= 900026
3145
3146static void
3147mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3148{
3149 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3150 struct mps_softc *sc;
3151 struct mps_command *tm;
3152 struct mpssas_target *targ;
3153
3154 MPS_FUNCTRACE(sassc->sc);
3155 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3156
3157 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3158 ("Target %d out of bounds in XPT_RESET_DEV\n",
3159 ccb->ccb_h.target_id));
3160 sc = sassc->sc;
3161 tm = mps_alloc_command(sc);
3162 if (tm == NULL) {
3163 mps_dprint(sc, MPS_ERROR,
3164 "command alloc failure in mpssas_action_resetdev\n");
3165 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3166 xpt_done(ccb);
3167 return;
3168 }
3169
3170 targ = &sassc->targets[ccb->ccb_h.target_id];
3171 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3172 req->DevHandle = htole16(targ->handle);
3173 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3174 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3175
3176 /* SAS Hard Link Reset / SATA Link Reset */
3177 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3178
3179 tm->cm_data = NULL;
3180 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3181 tm->cm_complete = mpssas_resetdev_complete;
3182 tm->cm_complete_data = ccb;
3183 tm->cm_targ = targ;
3184 targ->flags |= MPSSAS_TARGET_INRESET;
3185
3186 mps_map_command(sc, tm);
3187}
3188
3189static void
3190mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3191{
3192 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3193 union ccb *ccb;
3194
3195 MPS_FUNCTRACE(sc);
3196 mtx_assert(&sc->mps_mtx, MA_OWNED);
3197
3198 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3199 ccb = tm->cm_complete_data;
3200
3201 /*
3202 * Currently there should be no way we can hit this case. It only
3203 * happens when we have a failure to allocate chain frames, and
3204 * task management commands don't have S/G lists.
3205 */
3206 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3207 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3208
3209 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3210
3211 mps_dprint(sc, MPS_ERROR,
3212 "%s: cm_flags = %#x for reset of handle %#04x! "
3213 "This should not happen!\n", __func__, tm->cm_flags,
3214 req->DevHandle);
3215 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3216 goto bailout;
3217 }
3218
3219 mps_dprint(sc, MPS_XINFO,
3220 "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3221 le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3222
3223 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3224 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
3225 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3226 CAM_LUN_WILDCARD);
3227 }
3228 else
3229 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3230
3231bailout:
3232
3233 mpssas_free_tm(sc, tm);
3234 xpt_done(ccb);
3235}
3236
3237static void
3238mpssas_poll(struct cam_sim *sim)
3239{
3240 struct mpssas_softc *sassc;
3241
3242 sassc = cam_sim_softc(sim);
3243
3244 if (sassc->sc->mps_debug & MPS_TRACE) {
3245 /* frequent debug messages during a panic just slow
3246 * everything down too much.
3247 */
3248 mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3249 sassc->sc->mps_debug &= ~MPS_TRACE;
3250 }
3251
3252 mps_intr_locked(sassc->sc);
3253}
3254
/*
 * CAM async event callback, registered at attach time.  On newer CAM
 * versions this watches for AC_ADVINFO_CHANGED (READ CAPACITY(16) data
 * changed) and refreshes the per-LUN EEDP state; on older versions it
 * watches for AC_FOUND_DEVICE and probes EEDP via mpssas_check_eedp().
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the LUN record for this path, if we already have one. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			/* First time we've seen this LUN; track it. */
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read capacity data from the
		 * transport layer via an XPT_DEV_ADVINFO CCB.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/* Record whether the LUN is formatted with protection info. */
		if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#else
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		cgd = arg;
		mpssas_check_eedp(sc, path, cgd);
		break;
	}
#endif
	default:
		break;
	}
}
3359
3360#if (__FreeBSD_version < 901503) || \
3361 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3362static void
3363mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3364 struct ccb_getdev *cgd)
3365{
3366 struct mpssas_softc *sassc = sc->sassc;
3367 struct ccb_scsiio *csio;
3368 struct scsi_read_capacity_16 *scsi_cmd;
3369 struct scsi_read_capacity_eedp *rcap_buf;
3370 path_id_t pathid;
3371 target_id_t targetid;
3372 lun_id_t lunid;
3373 union ccb *ccb;
3374 struct cam_path *local_path;
3375 struct mpssas_target *target;
3376 struct mpssas_lun *lun;
3377 uint8_t found_lun;
3378 char path_str[64];
3379
3380 sassc = sc->sassc;
3381 pathid = cam_sim_path(sassc->sim);
3382 targetid = xpt_path_target_id(path);
3383 lunid = xpt_path_lun_id(path);
3384
3385 KASSERT(targetid < sassc->maxtargets,
3386 ("Target %d out of bounds in mpssas_check_eedp\n",
3387 targetid));
3388 target = &sassc->targets[targetid];
3389 if (target->handle == 0x0)
3390 return;
3391
3392 /*
3393 * Determine if the device is EEDP capable.
3394 *
3395 * If this flag is set in the inquiry data,
3396 * the device supports protection information,
3397 * and must support the 16 byte read
3398 * capacity command, otherwise continue without
3399 * sending read cap 16
3400 */
3401 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3402 return;
3403
3404 /*
3405 * Issue a READ CAPACITY 16 command. This info
3406 * is used to determine if the LUN is formatted
3407 * for EEDP support.
3408 */
3409 ccb = xpt_alloc_ccb_nowait();
3410 if (ccb == NULL) {
3411 mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3412 "for EEDP support.\n");
3413 return;
3414 }
3415
3416 if (xpt_create_path(&local_path, xpt_periph,
3417 pathid, targetid, lunid) != CAM_REQ_CMP) {
3418 mps_dprint(sc, MPS_ERROR, "Unable to create "
3419 "path for EEDP support\n");
3420 xpt_free_ccb(ccb);
3421 return;
3422 }
3423
3424 /*
3425 * If LUN is already in list, don't create a new
3426 * one.
3427 */
3428 found_lun = FALSE;
3429 SLIST_FOREACH(lun, &target->luns, lun_link) {
3430 if (lun->lun_id == lunid) {
3431 found_lun = TRUE;
3432 break;
3433 }
3434 }
3435 if (!found_lun) {
3436 lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3437 M_NOWAIT | M_ZERO);
3438 if (lun == NULL) {
3439 mps_dprint(sc, MPS_ERROR,
3440 "Unable to alloc LUN for EEDP support.\n");
3441 xpt_free_path(local_path);
3442 xpt_free_ccb(ccb);
3443 return;
3444 }
3445 lun->lun_id = lunid;
3446 SLIST_INSERT_HEAD(&target->luns, lun,
3447 lun_link);
3448 }
3449
3450 xpt_path_string(local_path, path_str, sizeof(path_str));
3451
3452 mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3453 path_str, target->handle);
3454
3455 /*
3456 * Issue a READ CAPACITY 16 command for the LUN.
3457 * The mpssas_read_cap_done function will load
3458 * the read cap info into the LUN struct.
3459 */
3460 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3461 M_MPT2, M_NOWAIT | M_ZERO);
3462 if (rcap_buf == NULL) {
3463 mps_dprint(sc, MPS_FAULT,
3464 "Unable to alloc read capacity buffer for EEDP support.\n");
3465 xpt_free_path(ccb->ccb_h.path);
3466 xpt_free_ccb(ccb);
3467 return;
3468 }
3469 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3470 csio = &ccb->csio;
3471 csio->ccb_h.func_code = XPT_SCSI_IO;
3472 csio->ccb_h.flags = CAM_DIR_IN;
3473 csio->ccb_h.retry_count = 4;
3474 csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3475 csio->ccb_h.timeout = 60000;
3476 csio->data_ptr = (uint8_t *)rcap_buf;
3477 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3478 csio->sense_len = MPS_SENSE_LEN;
3479 csio->cdb_len = sizeof(*scsi_cmd);
3480 csio->tag_action = MSG_SIMPLE_Q_TAG;
3481
3482 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3483 bzero(scsi_cmd, sizeof(*scsi_cmd));
3484 scsi_cmd->opcode = 0x9E;
3485 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3486 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3487
3488 ccb->ccb_h.ppriv_ptr1 = sassc;
3489 xpt_action(ccb);
3490}
3491
3492static void
3493mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3494{
3495 struct mpssas_softc *sassc;
3496 struct mpssas_target *target;
3497 struct mpssas_lun *lun;
3498 struct scsi_read_capacity_eedp *rcap_buf;
3499
3500 if (done_ccb == NULL)
3501 return;
3502
3503 /* Driver need to release devq, it Scsi command is
3504 * generated by driver internally.
3505 * Currently there is a single place where driver
3506 * calls scsi command internally. In future if driver
3507 * calls more scsi command internally, it needs to release
3508 * devq internally, since those command will not go back to
3509 * cam_periph.
3510 */
3511 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3512 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3513 xpt_release_devq(done_ccb->ccb_h.path,
3514 /*count*/ 1, /*run_queue*/TRUE);
3515 }
3516
3517 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3518
3519 /*
3520 * Get the LUN ID for the path and look it up in the LUN list for the
3521 * target.
3522 */
3523 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3524 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
3525 ("Target %d out of bounds in mpssas_read_cap_done\n",
3526 done_ccb->ccb_h.target_id));
3527 target = &sassc->targets[done_ccb->ccb_h.target_id];
3528 SLIST_FOREACH(lun, &target->luns, lun_link) {
3529 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3530 continue;
3531
3532 /*
3533 * Got the LUN in the target's LUN list. Fill it in
3534 * with EEDP info. If the READ CAP 16 command had some
3535 * SCSI error (common if command is not supported), mark
3536 * the lun as not supporting EEDP and set the block size
3537 * to 0.
3538 */
3539 if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP)
3540 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3541 lun->eedp_formatted = FALSE;
3542 lun->eedp_block_size = 0;
3543 break;
3544 }
3545
3546 if (rcap_buf->protect & 0x01) {
3547 mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
3548 "target ID %d is formatted for EEDP "
3549 "support.\n", done_ccb->ccb_h.target_lun,
3550 done_ccb->ccb_h.target_id);
3551 lun->eedp_formatted = TRUE;
3552 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3553 }
3554 break;
3555 }
3556
3557 // Finished with this CCB and path.
3558 free(rcap_buf, M_MPT2);
3559 xpt_free_path(done_ccb->ccb_h.path);
3560 xpt_free_ccb(done_ccb);
3561}
3562#endif /* (__FreeBSD_version < 901503) || \
3563 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3564
3565void
3566mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
3567 struct mpssas_target *target, lun_id_t lun_id)
3568{
3569 union ccb *ccb;
3570 path_id_t path_id;
3571
3572 /*
3573 * Set the INRESET flag for this target so that no I/O will be sent to
3574 * the target until the reset has completed. If an I/O request does
3575 * happen, the devq will be frozen. The CCB holds the path which is
3576 * used to release the devq. The devq is released and the CCB is freed
3577 * when the TM completes.
3578 */
3579 ccb = xpt_alloc_ccb_nowait();
3580 if (ccb) {
3581 path_id = cam_sim_path(sc->sassc->sim);
3582 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3583 target->tid, lun_id) != CAM_REQ_CMP) {
3584 xpt_free_ccb(ccb);
3585 } else {
3586 tm->cm_ccb = ccb;
3587 tm->cm_targ = target;
3588 target->flags |= MPSSAS_TARGET_INRESET;
3589 }
3590 }
3591}
3592
3593int
3594mpssas_startup(struct mps_softc *sc)
3595{
3596
3597 /*
3598 * Send the port enable message and set the wait_for_port_enable flag.
3599 * This flag helps to keep the simq frozen until all discovery events
3600 * are processed.
3601 */
3602 sc->wait_for_port_enable = 1;
3603 mpssas_send_portenable(sc);
3604 return (0);
3605}
3606
3607static int
3608mpssas_send_portenable(struct mps_softc *sc)
3609{
3610 MPI2_PORT_ENABLE_REQUEST *request;
3611 struct mps_command *cm;
3612
3613 MPS_FUNCTRACE(sc);
3614
3615 if ((cm = mps_alloc_command(sc)) == NULL)
3616 return (EBUSY);
3617 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3618 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3619 request->MsgFlags = 0;
3620 request->VP_ID = 0;
3621 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3622 cm->cm_complete = mpssas_portenable_complete;
3623 cm->cm_data = NULL;
3624 cm->cm_sge = NULL;
3625
3626 mps_map_command(sc, cm);
3627 mps_dprint(sc, MPS_XINFO,
3628 "mps_send_portenable finished cm %p req %p complete %p\n",
3629 cm, cm->cm_req, cm->cm_complete);
3630 return (0);
3631}
3632
3633static void
3634mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3635{
3636 MPI2_PORT_ENABLE_REPLY *reply;
3637 struct mpssas_softc *sassc;
3638
3639 MPS_FUNCTRACE(sc);
3640 sassc = sc->sassc;
3641
3642 /*
3643 * Currently there should be no way we can hit this case. It only
3644 * happens when we have a failure to allocate chain frames, and
3645 * port enable commands don't have S/G lists.
3646 */
3647 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3648 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3649 "This should not happen!\n", __func__, cm->cm_flags);
3650 }
3651
3652 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3653 if (reply == NULL)
3654 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3655 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3656 MPI2_IOCSTATUS_SUCCESS)
3657 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3658
3659 mps_free_command(sc, cm);
3660 if (sc->mps_ich.ich_arg != NULL) {
3661 mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n");
3662 config_intrhook_disestablish(&sc->mps_ich);
3663 sc->mps_ich.ich_arg = NULL;
3664 }
3665
3666 /*
3667 * Get WarpDrive info after discovery is complete but before the scan
3668 * starts. At this point, all devices are ready to be exposed to the
3669 * OS. If devices should be hidden instead, take them out of the
3670 * 'targets' array before the scan. The devinfo for a disk will have
3671 * some info and a volume's will be 0. Use that to remove disks.
3672 */
3673 mps_wd_config_pages(sc);
3674
3675 /*
3676 * Done waiting for port enable to complete. Decrement the refcount.
3677 * If refcount is 0, discovery is complete and a rescan of the bus can
3678 * take place. Since the simq was explicitly frozen before port
3679 * enable, it must be explicitly released here to keep the
3680 * freeze/release count in sync.
3681 */
3682 sc->wait_for_port_enable = 0;
3683 sc->port_enable_complete = 1;
3684 wakeup(&sc->port_enable_complete);
3685 mpssas_startup_decrement(sassc);
3686}
3687
3688int
3689mpssas_check_id(struct mpssas_softc *sassc, int id)
3690{
3691 struct mps_softc *sc = sassc->sc;
3692 char *ids;
3693 char *name;
3694
3695 ids = &sc->exclude_ids[0];
3696 while((name = strsep(&ids, ",")) != NULL) {
3697 if (name[0] == '\0')
3698 continue;
3699 if (strtol(name, NULL, 0) == (long)id)
3700 return (1);
3701 }
3702
3703 return (0);
3704}
3705
3706void
3707mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3708{
3709 struct mpssas_softc *sassc;
3710 struct mpssas_lun *lun, *lun_tmp;
3711 struct mpssas_target *targ;
3712 int i;
3713
3714 sassc = sc->sassc;
3715 /*
3716 * The number of targets is based on IOC Facts, so free all of
3717 * the allocated LUNs for each target and then the target buffer
3718 * itself.
3719 */
3720 for (i=0; i< maxtargets; i++) {
3721 targ = &sassc->targets[i];
3722 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3723 free(lun, M_MPT2);
3724 }
3725 }
3726 free(sassc->targets, M_MPT2);
3727
3728 sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3729 M_MPT2, M_WAITOK|M_ZERO);
3730 if (!sassc->targets) {
3731 panic("%s failed to alloc targets with error %d\n",
3732 __func__, ENOMEM);
3733 }
3734}