/* mps_sas.c revision 329189 */
1/*-
2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2015 Avago Technologies
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
29 *
30 * $FreeBSD: stable/11/sys/dev/mps/mps_sas.c 329189 2018-02-13 02:11:39Z mav $
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: stable/11/sys/dev/mps/mps_sas.c 329189 2018-02-13 02:11:39Z mav $");
35
36/* Communications core for Avago Technologies (LSI) MPT2 */
37
38/* TODO Move headers to mpsvar */
39#include <sys/types.h>
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/kernel.h>
43#include <sys/selinfo.h>
44#include <sys/module.h>
45#include <sys/bus.h>
46#include <sys/conf.h>
47#include <sys/bio.h>
48#include <sys/malloc.h>
49#include <sys/uio.h>
50#include <sys/sysctl.h>
51#include <sys/endian.h>
52#include <sys/queue.h>
53#include <sys/kthread.h>
54#include <sys/taskqueue.h>
55#include <sys/sbuf.h>
56
57#include <machine/bus.h>
58#include <machine/resource.h>
59#include <sys/rman.h>
60
61#include <machine/stdarg.h>
62
63#include <cam/cam.h>
64#include <cam/cam_ccb.h>
65#include <cam/cam_xpt.h>
66#include <cam/cam_debug.h>
67#include <cam/cam_sim.h>
68#include <cam/cam_xpt_sim.h>
69#include <cam/cam_xpt_periph.h>
70#include <cam/cam_periph.h>
71#include <cam/scsi/scsi_all.h>
72#include <cam/scsi/scsi_message.h>
73#if __FreeBSD_version >= 900026
74#include <cam/scsi/smp_all.h>
75#endif
76
77#include <dev/mps/mpi/mpi2_type.h>
78#include <dev/mps/mpi/mpi2.h>
79#include <dev/mps/mpi/mpi2_ioc.h>
80#include <dev/mps/mpi/mpi2_sas.h>
81#include <dev/mps/mpi/mpi2_cnfg.h>
82#include <dev/mps/mpi/mpi2_init.h>
83#include <dev/mps/mpi/mpi2_tool.h>
84#include <dev/mps/mps_ioctl.h>
85#include <dev/mps/mpsvar.h>
86#include <dev/mps/mps_table.h>
87#include <dev/mps/mps_sas.h>
88
89#define MPSSAS_DISCOVERY_TIMEOUT	20
90#define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
91
92/*
93 * static array to check SCSI OpCode for EEDP protection bits
94 */
/*
 * EEDP flags applied per opcode class: PRO_R for read-type commands
 * (check and strip protection info), PRO_W/PRO_V for write/verify-type
 * commands (insert protection info).
 */
#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Lookup table indexed by SCSI CDB opcode (0x00-0xFF).  A nonzero entry
 * gives the EEDP flags to set in the SCSI IO request for that opcode;
 * nonzero rows cover the READ/WRITE/WRITE&VERIFY/VERIFY 10/16/12
 * variants (0x28/0x2A/0x2E/0x2F, 0x88/0x8A/0x8E/0x8F, 0xA8/0xAA/0xAE/0xAF)
 * plus 0x41 and 0x93 (WRITE SAME(10)/(16)).
 */
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
116
117MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
118
119static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122static void mpssas_poll(struct cam_sim *sim);
123static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
124    struct mps_command *cm);
125static void mpssas_scsiio_timeout(void *data);
126static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
127static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
128    struct mps_command *cm, union ccb *ccb);
129static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
130static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
131static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
132#if __FreeBSD_version >= 900026
133static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
134static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
135			       uint64_t sasaddr);
136static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
137#endif //FreeBSD_version >= 900026
138static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
139static void mpssas_async(void *callback_arg, uint32_t code,
140			 struct cam_path *path, void *arg);
141#if (__FreeBSD_version < 901503) || \
142    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
143static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
144			      struct ccb_getdev *cgd);
145static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
146#endif
147static int mpssas_send_portenable(struct mps_softc *sc);
148static void mpssas_portenable_complete(struct mps_softc *sc,
149    struct mps_command *cm);
150
151struct mpssas_target *
152mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
153{
154	struct mpssas_target *target;
155	int i;
156
157	for (i = start; i < sassc->maxtargets; i++) {
158		target = &sassc->targets[i];
159		if (target->handle == handle)
160			return (target);
161	}
162
163	return (NULL);
164}
165
166/* we need to freeze the simq during attach and diag reset, to avoid failing
167 * commands before device handles have been found by discovery.  Since
168 * discovery involves reading config pages and possibly sending commands,
169 * discovery actions may continue even after we receive the end of discovery
170 * event, so refcount discovery actions instead of assuming we can unfreeze
171 * the simq when we get the event.
172 */
/*
 * Take a reference on in-progress discovery activity.  The first
 * reference freezes the SIM queue (and holds boot on CAM versions that
 * support it) so no SCSI commands are dispatched before discovery has
 * populated device handles.  Only does anything while MPSSAS_IN_STARTUP
 * is set; balanced by mpssas_startup_decrement().
 */
void
mpssas_startup_increment(struct mpssas_softc *sassc)
{
	MPS_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mps_dprint(sassc->sc, MPS_INIT,
			    "%s freezing simq\n", __func__);
#if __FreeBSD_version >= 1000039
			xpt_hold_boot();
#endif
			xpt_freeze_simq(sassc->sim, 1);
		}
		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
192
/*
 * Release the SIM queue if it was frozen by the driver (flagged with
 * MPSSAS_QUEUE_FROZEN, e.g. during diag reset/reinit).  Clears the flag
 * so the release happens at most once per freeze.
 */
void
mpssas_release_simq_reinit(struct mpssas_softc *sassc)
{
	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
		xpt_release_simq(sassc->sim, 1);
		mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
	}
}
202
/*
 * Drop a discovery reference taken by mpssas_startup_increment().  When
 * the last reference goes away, startup is considered complete: clear
 * MPSSAS_IN_STARTUP, release the SIM queue, and either release the boot
 * hold (newer CAM) or trigger a full-bus rescan (older CAM, which has no
 * boot-hold mechanism).
 */
void
mpssas_startup_decrement(struct mpssas_softc *sassc)
{
	MPS_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mps_dprint(sassc->sc, MPS_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPSSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
#if __FreeBSD_version >= 1000039
			xpt_release_boot();
#else
			mpssas_rescan_target(sassc->sc, NULL);
#endif
		}
		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
227
228/* The firmware requires us to stop sending commands when we're doing task
229 * management, so refcount the TMs and keep the simq frozen when any are in
230 * use.
231 */
/*
 * Allocate a command from the high-priority pool for task-management
 * use.  Returns NULL when the pool is exhausted; callers must handle
 * that case.  Paired with mpssas_free_tm().
 */
struct mps_command *
mpssas_alloc_tm(struct mps_softc *sc)
{
	return (mps_alloc_high_priority_command(sc));
}
240
/*
 * Release a task-management command allocated with mpssas_alloc_tm().
 * Undoes the per-device freeze set up for the TM (clears INRESET,
 * releases the devq) and frees the CCB/path used to carry the freeze,
 * then returns the command to the high-priority pool.  NULL is a no-op.
 */
void
mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
{
	/* Sentinel; prints as -1 via %d below when there is no target. */
	int target_id = 0xFFFFFFFF;

	if (tm == NULL)
		return;

	/*
	 * For TM's the devq is frozen for the device.  Unfreeze it here and
	 * free the resources used for freezing the devq.  Must clear the
	 * INRESET flag as well or scsi I/O will not work.
	 */
	if (tm->cm_targ != NULL) {
		tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
		target_id = tm->cm_targ->tid;
	}
	if (tm->cm_ccb) {
		mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n",
		    target_id);
		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
		xpt_free_path(tm->cm_ccb->ccb_h.path);
		xpt_free_ccb(tm->cm_ccb);
	}

	mps_free_high_priority_command(sc, tm);
}
268
/*
 * Schedule an asynchronous CAM rescan of a single target, or of the
 * whole bus when targ is NULL.  The target ID is derived from the
 * target's index in the sassc->targets array.  Allocation failures are
 * logged and the rescan is silently dropped (CAM will pick up the
 * change on a later rescan).
 */
void
mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
{
	struct mpssas_softc *sassc = sc->sassc;
	path_id_t pathid;
	target_id_t targetid;
	union ccb *ccb;

	MPS_FUNCTRACE(sc);
	pathid = cam_sim_path(sassc->sim);
	if (targ == NULL)
		targetid = CAM_TARGET_WILDCARD;
	else
		targetid = targ - sassc->targets;	/* index == target ID */

	/*
	 * Allocate a CCB and schedule a rescan.
	 */
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
		return;
	}

	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
		xpt_free_ccb(ccb);
		return;
	}

	/* Wildcard target means "scan the whole bus". */
	if (targetid == CAM_TARGET_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else
		ccb->ccb_h.func_code = XPT_SCAN_TGT;

	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
	/* xpt_rescan() takes ownership of the CCB and path. */
	xpt_rescan(ccb);
}
308
/*
 * printf-style debug logging for a command.  Prefixes the message with
 * the CAM path (and SCSI CDB/length for SCSI I/O) when a CCB is
 * attached, otherwise with a synthesized sim:bus:target:lun tag, plus
 * the command's SMID.  Returns early when 'cm' is NULL or when the
 * requested debug 'level' is not enabled, so callers may invoke this
 * unconditionally.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mps_debug & level) == 0)
		return;

	/* Fixed-size, stack-backed sbuf: output is truncated, not allocated. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB: synthesize a path-like tag; 0xFFFFFFFF == no target. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
354
355
356static void
357mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
358{
359	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
360	struct mpssas_target *targ;
361	uint16_t handle;
362
363	MPS_FUNCTRACE(sc);
364
365	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
366	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
367	targ = tm->cm_targ;
368
369	if (reply == NULL) {
370		/* XXX retry the remove after the diag reset completes? */
371		mps_dprint(sc, MPS_FAULT,
372		    "%s NULL reply resetting device 0x%04x\n", __func__,
373		    handle);
374		mpssas_free_tm(sc, tm);
375		return;
376	}
377
378	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
379	    MPI2_IOCSTATUS_SUCCESS) {
380		mps_dprint(sc, MPS_ERROR,
381		   "IOCStatus = 0x%x while resetting device 0x%x\n",
382		   le16toh(reply->IOCStatus), handle);
383	}
384
385	mps_dprint(sc, MPS_XINFO,
386	    "Reset aborted %u commands\n", reply->TerminationCount);
387	mps_free_reply(sc, tm->cm_reply_data);
388	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
389
390	mps_dprint(sc, MPS_XINFO,
391	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
392
393	/*
394	 * Don't clear target if remove fails because things will get confusing.
395	 * Leave the devname and sasaddr intact so that we know to avoid reusing
396	 * this target id if possible, and so we can assign the same target id
397	 * to this device if it comes back in the future.
398	 */
399	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
400	    MPI2_IOCSTATUS_SUCCESS) {
401		targ = tm->cm_targ;
402		targ->handle = 0x0;
403		targ->encl_handle = 0x0;
404		targ->encl_slot = 0x0;
405		targ->exp_dev_handle = 0x0;
406		targ->phy_num = 0x0;
407		targ->linkrate = 0x0;
408		targ->devinfo = 0x0;
409		targ->flags = 0x0;
410	}
411
412	mpssas_free_tm(sc, tm);
413}
414
415
416/*
417 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
418 * Otherwise Volume Delete is same as Bare Drive Removal.
419 */
420void
421mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
422{
423	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
424	struct mps_softc *sc;
425	struct mps_command *cm;
426	struct mpssas_target *targ = NULL;
427
428	MPS_FUNCTRACE(sassc->sc);
429	sc = sassc->sc;
430
431#ifdef WD_SUPPORT
432	/*
433	 * If this is a WD controller, determine if the disk should be exposed
434	 * to the OS or not.  If disk should be exposed, return from this
435	 * function without doing anything.
436	 */
437	if (sc->WD_available && (sc->WD_hide_expose ==
438	    MPS_WD_EXPOSE_ALWAYS)) {
439		return;
440	}
441#endif //WD_SUPPORT
442
443	targ = mpssas_find_target_by_handle(sassc, 0, handle);
444	if (targ == NULL) {
445		/* FIXME: what is the action? */
446		/* We don't know about this device? */
447		mps_dprint(sc, MPS_ERROR,
448		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
449		return;
450	}
451
452	targ->flags |= MPSSAS_TARGET_INREMOVAL;
453
454	cm = mpssas_alloc_tm(sc);
455	if (cm == NULL) {
456		mps_dprint(sc, MPS_ERROR,
457		    "%s: command alloc failure\n", __func__);
458		return;
459	}
460
461	mpssas_rescan_target(sc, targ);
462
463	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
464	req->DevHandle = targ->handle;
465	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
466	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
467
468	/* SAS Hard Link Reset / SATA Link Reset */
469	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
470
471	cm->cm_targ = targ;
472	cm->cm_data = NULL;
473	cm->cm_desc.HighPriority.RequestFlags =
474	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
475	cm->cm_complete = mpssas_remove_volume;
476	cm->cm_complete_data = (void *)(uintptr_t)handle;
477
478	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
479	    __func__, targ->tid);
480	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
481
482	mps_map_command(sc, cm);
483}
484
485/*
486 * The MPT2 firmware performs debounce on the link to avoid transient link
487 * errors and false removals.  When it does decide that link has been lost
488 * and a device need to go away, it expects that the host will perform a
489 * target reset and then an op remove.  The reset has the side-effect of
490 * aborting any outstanding requests for the device, which is required for
491 * the op-remove to succeed.  It's not clear if the host should check for
492 * the device coming back alive after the reset.
493 */
/*
 * The MPT2 firmware performs debounce on the link to avoid transient link
 * errors and false removals.  When it does decide that link has been lost
 * and a device need to go away, it expects that the host will perform a
 * target reset and then an op remove.  The reset has the side-effect of
 * aborting any outstanding requests for the device, which is required for
 * the op-remove to succeed.  It's not clear if the host should check for
 * the device coming back alive after the reset.
 */
void
mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ = NULL;

	MPS_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mpssas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mps_dprint(sc, MPS_ERROR,
		    "%s : invalid handle 0x%x \n", __func__, handle);
		return;
	}

	targ->flags |= MPSSAS_TARGET_INREMOVAL;

	/* TM commands come from the high-priority pool. */
	cm = mpssas_alloc_tm(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: command alloc failure\n", __func__);
		return;
	}

	/* Kick off a rescan so CAM notices the device going away. */
	mpssas_rescan_target(sc, targ);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = htole16(targ->handle);	/* little-endian on the wire */
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	/* The op-remove is issued from the completion handler. */
	cm->cm_complete = mpssas_remove_device;
	cm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);

	mps_map_command(sc, cm);
}
547
/*
 * Completion handler for the target-reset TM sent by
 * mpssas_prepare_remove().  Reuses the same command frame to issue the
 * SAS IO-unit-control REMOVE_DEVICE operation, then completes any
 * commands still queued on the target with CAM_DEV_NOT_THERE (the reset
 * should have aborted them in firmware).
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply resetting device 0x%04x\n", __func__,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_ERROR,
		   "IOCStatus = 0x%x while resetting device 0x%x\n",
		   le16toh(reply->IOCStatus), handle);
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
	/*
	 * 'tm' is reused below purely as the loop iterator; the TM command
	 * itself now belongs to mps_map_command() above.
	 */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mpssas_scsiio_complete(sc, tm);
	}
}
620
/*
 * Completion handler for the REMOVE_DEVICE IO-unit-control operation
 * issued by mpssas_remove_device().  On success, clears the target
 * structure and frees its LUN list; on failure the target is left
 * intact (see comment below).  Always frees the TM command.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
			   "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Free all remembered LUNs on the departed target. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}


	mpssas_free_tm(sc, tm);
}
688
689static int
690mpssas_register_events(struct mps_softc *sc)
691{
692	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
693
694	bzero(events, 16);
695	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
696	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
697	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
698	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
699	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
700	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
701	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
702	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
703	setbit(events, MPI2_EVENT_IR_VOLUME);
704	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
705	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
706	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
707
708	mps_register_events(sc, events, mpssas_evt_handler, NULL,
709	    &sc->sassc->mpssas_eh);
710
711	return (0);
712}
713
/*
 * Attach the CAM/SAS layer: allocate the per-SAS softc and target array,
 * create the SIM queue and SIM, register the SCSI bus, freeze the SIM
 * queue until discovery completes, register for CAM async events (for
 * EEDP detection), and subscribe to firmware events.  Returns 0 on
 * success or an errno; on error, mps_detach_sas() tears down whatever
 * was set up.
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	cam_status status;
	int unit, error = 0, reqs;

	MPS_FUNCTRACE(sc);

	/* M_WAITOK allocation; the NULL check below is belt-and-suspenders. */
	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
	if(!sassc) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		__func__, __LINE__);
		return (ENOMEM);
	}

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mpssas_target) *
	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
	if(!sassc->targets) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		__func__, __LINE__);
		free(sassc, M_MPT2);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	/* Reserve the high-priority requests and one spare from the SIMQ depth. */
	reqs = sc->num_reqs - sc->num_prireqs - 1;
	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_mtx, reqs, reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	mps_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
		mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
		    error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mpssas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mps_printf(sc, "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

		/* Older CAM lacks AC_ADVINFO_CHANGED; fall back to FOUND_DEVICE. */
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED;
#else
		event = AC_FOUND_DEVICE;
#endif
		status = xpt_register_async(event, mpssas_async, sc,
					    sassc->path);
		if (status != CAM_REQ_CMP) {
			mps_dprint(sc, MPS_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mps_printf(sc, "EEDP capabilities disabled.\n");
	}

	mps_unlock(sc);

	mpssas_register_events(sc);
out:
	if (error)
		mps_detach_sas(sc);
	return (error);
}
843
/*
 * Tear down the CAM/SAS layer: deregister firmware events, drain the
 * event taskqueue (with the driver lock dropped to avoid deadlock),
 * deregister the async handler, bus and SIM, free the SIM queue, and
 * free all per-target LUN lists plus the softc itself.  Safe to call
 * on a partially-attached instance (every pointer is checked) and
 * idempotent once sc->sassc is NULL.  Always returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Release the startup-time SIMQ freeze if attach never finished. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free any LUN records hanging off each target slot. */
	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
903
/*
 * Called when firmware discovery finishes.  Cancels any pending
 * discovery-timeout callout and, on the first initialization only
 * (track_mapping_events), schedules the mapping-table missing-device
 * check.
 */
void
mpssas_discovery_end(struct mpssas_softc *sassc)
{
	struct mps_softc *sc = sassc->sc;

	MPS_FUNCTRACE(sc);

	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
		callout_stop(&sassc->discovery_callout);

	/*
	 * After discovery has completed, check the mapping table for any
	 * missing devices and update their missing counts. Only do this once
	 * whenever the driver is initialized so that missing counts aren't
	 * updated unnecessarily. Note that just because discovery has
	 * completed doesn't mean that events have been processed yet. The
	 * check_devices function is a callout timer that checks if ALL devices
	 * are missing. If so, it will wait a little longer for events to
	 * complete and keep resetting itself until some device in the mapping
	 * table is not missing, meaning that event processing has started.
	 */
	if (sc->track_mapping_events) {
		mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has "
		    "completed. Check for missing devices in the mapping "
		    "table.\n");
		callout_reset(&sc->device_check_callout,
		    MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
		    sc);
	}
}
934
/*
 * CAM action entry point for the SIM.  Dispatches on the CCB function
 * code.  XPT_SCSI_IO, XPT_SMP_IO and XPT_RESET_DEV hand the CCB off to
 * helpers that complete it asynchronously (hence the early returns);
 * every other case is completed inline via xpt_done() at the bottom.
 * Must be called with the softc mutex held.
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report the SIM/HBA capabilities and limits to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mps_softc *sc = sassc->sc;
		uint8_t sges_per_frame;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if __FreeBSD_version >= 1000039
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;

		/*
		 * Max IO Size is Page Size * the following:
		 * ((SGEs per frame - 1 for chain element) *
		 * Max Chain Depth) + 1 for no chain needed in last frame
		 *
		 * If user suggests a Max IO size to use, use the smaller of the
		 * user's value and the calculated value as long as the user's
		 * value is larger than 0. The user's value is in pages.
		 */
		sges_per_frame = ((sc->facts->IOCRequestFrameSize * 4) /
		    sizeof(MPI2_SGE_SIMPLE64)) - 1;
		cpi->maxio = (sges_per_frame * sc->facts->MaxChainDepth) + 1;
		cpi->maxio *= PAGE_SIZE;
		if ((sc->max_io_pages > 0) && (sc->max_io_pages * PAGE_SIZE <
		    cpi->maxio))
			cpi->maxio = sc->max_io_pages * PAGE_SIZE;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		if (targ->handle == 0x0) {
			/* No firmware device handle: target isn't present. */
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/* Translate the firmware link-rate code into a bit rate. */
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			/* Unknown rate: report the speed field as invalid. */
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		/* resetdev completes the CCB itself; skip xpt_done() below. */
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		mpssas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1077
1078static void
1079mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1080    target_id_t target_id, lun_id_t lun_id)
1081{
1082	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1083	struct cam_path *path;
1084
1085	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1086	    ac_code, target_id, (uintmax_t)lun_id);
1087
1088	if (xpt_create_path(&path, NULL,
1089		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1090		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1091			   "notification\n");
1092		return;
1093	}
1094
1095	xpt_async(ac_code, path, NULL);
1096	xpt_free_path(path);
1097}
1098
/*
 * Force-complete every in-use command after a diag reset.  No real reply
 * will ever arrive, so each command's reply pointer is NULLed, then its
 * completion callback is invoked and/or any sleeping waiter is woken.
 * Commands that were neither completed nor woken but are not in the FREE
 * state are logged, since that indicates a stuck/leaked command.
 * Must be called with the softc mutex held.
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* Polled commands spin on COMPLETE rather than sleeping. */
		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
		}

		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		/* Keep the active-I/O counter from underflowing. */
		if (cm->cm_sc->io_cmds_active != 0)
			cm->cm_sc->io_cmds_active--;

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm, MPS_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
1147
1148void
1149mpssas_handle_reinit(struct mps_softc *sc)
1150{
1151	int i;
1152
1153	/* Go back into startup mode and freeze the simq, so that CAM
1154	 * doesn't send any commands until after we've rediscovered all
1155	 * targets and found the proper device handles for them.
1156	 *
1157	 * After the reset, portenable will trigger discovery, and after all
1158	 * discovery-related activities have finished, the simq will be
1159	 * released.
1160	 */
1161	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
1162	sc->sassc->flags |= MPSSAS_IN_STARTUP;
1163	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
1164	mpssas_startup_increment(sc->sassc);
1165
1166	/* notify CAM of a bus reset */
1167	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1168	    CAM_LUN_WILDCARD);
1169
1170	/* complete and cleanup after all outstanding commands */
1171	mpssas_complete_all_commands(sc);
1172
1173	mps_dprint(sc, MPS_INIT,
1174	    "%s startup %u after command completion\n", __func__,
1175	    sc->sassc->startup_refcount);
1176
1177	/* zero all the target handles, since they may change after the
1178	 * reset, and we have to rediscover all the targets and use the new
1179	 * handles.
1180	 */
1181	for (i = 0; i < sc->sassc->maxtargets; i++) {
1182		if (sc->sassc->targets[i].outstanding != 0)
1183			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
1184			    i, sc->sassc->targets[i].outstanding);
1185		sc->sassc->targets[i].handle = 0x0;
1186		sc->sassc->targets[i].exp_dev_handle = 0x0;
1187		sc->sassc->targets[i].outstanding = 0;
1188		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
1189	}
1190}
1191
1192static void
1193mpssas_tm_timeout(void *data)
1194{
1195	struct mps_command *tm = data;
1196	struct mps_softc *sc = tm->cm_sc;
1197
1198	mtx_assert(&sc->mps_mtx, MA_OWNED);
1199
1200	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1201	    "task mgmt %p timed out\n", tm);
1202	mps_reinit(sc);
1203}
1204
1205static void
1206mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1207{
1208	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1209	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1210	unsigned int cm_count = 0;
1211	struct mps_command *cm;
1212	struct mpssas_target *targ;
1213
1214	callout_stop(&tm->cm_callout);
1215
1216	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1217	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1218	targ = tm->cm_targ;
1219
1220	/*
1221	 * Currently there should be no way we can hit this case.  It only
1222	 * happens when we have a failure to allocate chain frames, and
1223	 * task management commands don't have S/G lists.
1224	 * XXXSL So should it be an assertion?
1225	 */
1226	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1227		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! "
1228			   "This should not happen!\n", __func__, tm->cm_flags);
1229		mpssas_free_tm(sc, tm);
1230		return;
1231	}
1232
1233	if (reply == NULL) {
1234		mpssas_log_command(tm, MPS_RECOVERY,
1235		    "NULL reset reply for tm %p\n", tm);
1236		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1237			/* this completion was due to a reset, just cleanup */
1238			targ->tm = NULL;
1239			mpssas_free_tm(sc, tm);
1240		}
1241		else {
1242			/* we should have gotten a reply. */
1243			mps_reinit(sc);
1244		}
1245		return;
1246	}
1247
1248	mpssas_log_command(tm, MPS_RECOVERY,
1249	    "logical unit reset status 0x%x code 0x%x count %u\n",
1250	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1251	    le32toh(reply->TerminationCount));
1252
1253	/* See if there are any outstanding commands for this LUN.
1254	 * This could be made more efficient by using a per-LU data
1255	 * structure of some sort.
1256	 */
1257	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1258		if (cm->cm_lun == tm->cm_lun)
1259			cm_count++;
1260	}
1261
1262	if (cm_count == 0) {
1263		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1264		    "logical unit %u finished recovery after reset\n",
1265		    tm->cm_lun, tm);
1266
1267		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1268		    tm->cm_lun);
1269
1270		/* we've finished recovery for this logical unit.  check and
1271		 * see if some other logical unit has a timedout command
1272		 * that needs to be processed.
1273		 */
1274		cm = TAILQ_FIRST(&targ->timedout_commands);
1275		if (cm) {
1276			mpssas_send_abort(sc, tm, cm);
1277		}
1278		else {
1279			targ->tm = NULL;
1280			mpssas_free_tm(sc, tm);
1281		}
1282	}
1283	else {
1284		/* if we still have commands for this LUN, the reset
1285		 * effectively failed, regardless of the status reported.
1286		 * Escalate to a target reset.
1287		 */
1288		mpssas_log_command(tm, MPS_RECOVERY,
1289		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
1290		    tm, cm_count);
1291		mpssas_send_reset(sc, tm,
1292		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1293	}
1294}
1295
/*
 * Completion handler for a TARGET_RESET task management request.  If the
 * target has no commands left outstanding, recovery is finished and a
 * BDR async event (wildcard LUN) is announced; otherwise the reset
 * effectively failed and the driver escalates to a controller reinit.
 */
static void
mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed, so disarm the escalation timer. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
			   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL reset reply for tm %p\n", tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "recovery finished after target reset\n");

		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "target reset complete for tm %p, but still have %u command(s)\n",
		    tm, targ->outstanding);
		mps_reinit(sc);
	}
}
1365
1366#define MPS_RESET_TIMEOUT 30
1367
/*
 * Build and send a SCSI task management reset of the given type
 * (LOGICAL_UNIT_RESET or TARGET_RESET) for the target attached to 'tm'.
 * Installs the matching completion handler, arms the TM escalation
 * timeout, and maps the command to hardware.
 *
 * Returns -1 for a missing device handle or unknown reset type,
 * otherwise the result of mps_map_command().
 */
int
mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *target;
	int err;

	target = tm->cm_targ;
	if (target->handle == 0) {
		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
		    __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPS_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "sending logical unit reset\n");
		tm->cm_complete = mpssas_logical_unit_reset_complete;
		mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	}
	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 * 	SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "sending target reset\n");
		tm->cm_complete = mpssas_target_reset_complete;
		mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	/* TM requests carry no data and go out on the high-priority queue. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete_data = (void *)tm;

	/* If the TM itself never completes, escalate via mpssas_tm_timeout. */
	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
	    mpssas_tm_timeout, tm);

	err = mps_map_command(sc, tm);
	if (err)
		mpssas_log_command(tm, MPS_RECOVERY,
		    "error %d sending reset type %u\n",
		    err, type);

	return err;
}
1428
1429
/*
 * Completion handler for an ABORT_TASK task management request.  Based on
 * the target's remaining timed-out commands this either finishes recovery
 * (none left), sends the next abort (the just-aborted command is gone but
 * others remain), or — if the aborted command did not actually complete —
 * escalates to a logical unit reset.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed, so disarm the escalation timer. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mpssas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1511
1512#define MPS_ABORT_TIMEOUT 5
1513
1514static int
1515mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1516{
1517	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1518	struct mpssas_target *targ;
1519	int err;
1520
1521	targ = cm->cm_targ;
1522	if (targ->handle == 0) {
1523		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1524		    __func__, cm->cm_ccb->ccb_h.target_id);
1525		return -1;
1526	}
1527
1528	mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
1529	    "Aborting command %p\n", cm);
1530
1531	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1532	req->DevHandle = htole16(targ->handle);
1533	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1534	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1535
1536	/* XXX Need to handle invalid LUNs */
1537	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1538
1539	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1540
1541	tm->cm_data = NULL;
1542	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1543	tm->cm_complete = mpssas_abort_complete;
1544	tm->cm_complete_data = (void *)tm;
1545	tm->cm_targ = cm->cm_targ;
1546	tm->cm_lun = cm->cm_lun;
1547
1548	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1549	    mpssas_tm_timeout, tm);
1550
1551	targ->aborts++;
1552
1553	mps_dprint(sc, MPS_INFO, "Sending reset from %s for target ID %d\n",
1554	    __func__, targ->tid);
1555	mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1556
1557	err = mps_map_command(sc, tm);
1558	if (err)
1559		mps_dprint(sc, MPS_RECOVERY,
1560		    "error %d sending abort for cm %p SMID %u\n",
1561		    err, cm, req->TaskMID);
1562	return err;
1563}
1564
1565static void
1566mpssas_scsiio_timeout(void *data)
1567{
1568	struct mps_softc *sc;
1569	struct mps_command *cm;
1570	struct mpssas_target *targ;
1571
1572	cm = (struct mps_command *)data;
1573	sc = cm->cm_sc;
1574
1575	MPS_FUNCTRACE(sc);
1576	mtx_assert(&sc->mps_mtx, MA_OWNED);
1577
1578	mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc);
1579
1580	/*
1581	 * Run the interrupt handler to make sure it's not pending.  This
1582	 * isn't perfect because the command could have already completed
1583	 * and been re-used, though this is unlikely.
1584	 */
1585	mps_intr_locked(sc);
1586	if (cm->cm_state == MPS_CM_STATE_FREE) {
1587		mpssas_log_command(cm, MPS_XINFO,
1588		    "SCSI command %p almost timed out\n", cm);
1589		return;
1590	}
1591
1592	if (cm->cm_ccb == NULL) {
1593		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1594		return;
1595	}
1596
1597	targ = cm->cm_targ;
1598	targ->timeouts++;
1599
1600	mpssas_log_command(cm, MPS_ERROR, "command timeout %d cm %p target "
1601	    "%u, handle(0x%04x)\n", cm->cm_ccb->ccb_h.timeout, cm,  targ->tid,
1602	    targ->handle);
1603
1604	/* XXX first, check the firmware state, to see if it's still
1605	 * operational.  if not, do a diag reset.
1606	 */
1607	mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1608	cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1609	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1610
1611	if (targ->tm != NULL) {
1612		/* target already in recovery, just queue up another
1613		 * timedout command to be processed later.
1614		 */
1615		mps_dprint(sc, MPS_RECOVERY,
1616		    "queued timedout cm %p for processing by tm %p\n",
1617		    cm, targ->tm);
1618	}
1619	else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1620		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1621		    cm, targ->tm);
1622
1623		/* start recovery by aborting the first timedout command */
1624		mpssas_send_abort(sc, targ->tm, cm);
1625	}
1626	else {
1627		/* XXX queue this target up for recovery once a TM becomes
1628		 * available.  The firmware only has a limited number of
1629		 * HighPriority credits for the high priority requests used
1630		 * for task management, and we ran out.
1631		 *
1632		 * Isilon: don't worry about this for now, since we have
1633		 * more credits than disks in an enclosure, and limit
1634		 * ourselves to one TM per target for recovery.
1635		 */
1636		mps_dprint(sc, MPS_RECOVERY,
1637		    "timedout cm %p failed to allocate a tm\n", cm);
1638	}
1639
1640}
1641
/*
 * Handle an XPT_SCSI_IO CCB: validate the target, allocate a driver
 * command, translate the CCB into an MPI2 SCSI_IO request (direction,
 * tagging, LUN, CDB, optional EEDP/protection setup), arm the per-command
 * timeout, and map the command to hardware.  The CCB is completed
 * asynchronously by mpssas_scsiio_complete(), or immediately via
 * xpt_done() on the early-error paths.
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	if (targ->handle == 0x0) {
		/* No firmware device handle: target isn't present. */
		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		/* RAID component members may not receive direct SCSI I/O. */
		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPSSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
		/* Out of commands (or mid-reset): freeze the simq and ask
		 * CAM to requeue this CCB later. */
		if (cm != NULL) {
			mps_free_command(sc, cm);
		}
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Build the MPI2 SCSI_IO request frame from the CCB. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	if (csio->cdb_len == 32)
                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);
	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	/* NOTE(review): IoFlags was already set to this value above;
	 * this assignment appears redundant. */
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
		}
	}

	/* Arm the per-command timeout before handing it to hardware. */
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mpssas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1910
1911static void
1912mps_response_code(struct mps_softc *sc, u8 response_code)
1913{
1914        char *desc;
1915
1916        switch (response_code) {
1917        case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1918                desc = "task management request completed";
1919                break;
1920        case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1921                desc = "invalid frame";
1922                break;
1923        case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1924                desc = "task management request not supported";
1925                break;
1926        case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1927                desc = "task management request failed";
1928                break;
1929        case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1930                desc = "task management request succeeded";
1931                break;
1932        case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1933                desc = "invalid lun";
1934                break;
1935        case 0xA:
1936                desc = "overlapped tag attempted";
1937                break;
1938        case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1939                desc = "task queued, however not sent to target";
1940                break;
1941        default:
1942                desc = "unknown";
1943                break;
1944        }
1945		mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
1946                response_code, desc);
1947}
1948/**
1949 * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1950 */
static void
mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
    Mpi2SCSIIOReply_t *mpi_reply)
{
	u32 response_info;
	u8 *response_bytes;
	/* Reply fields are little-endian on the wire; convert before use. */
	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	/* Scratch buffer in the softc, used to build the SCSIState text. */
	char *desc_scsi_state = sc->tmp_string;
	u32 log_info = le32toh(mpi_reply->IOCLogInfo);

	/*
	 * Suppress output for this specific LogInfo value.
	 * NOTE(review): magic constant carried over as-is -- presumably a
	 * known-benign firmware log code that would otherwise be noisy;
	 * confirm against the firmware LogInfo documentation.
	 */
	if (log_info == 0x31170000)
		return;

	/* Map the masked IOCStatus to a printable description. */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	/* Map the SCSI status byte to a printable description. */
	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	/*
	 * Build a space-separated list of the set SCSIState flag names
	 * in sc->tmp_string.  When no flags are set, point at a literal
	 * instead so the printout below still has a (blank) field.
	 */
	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
	/* We can add more detail about underflow data here
	 * TO-DO
	 * */
	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
	    desc_scsi_state, scsi_state);

	/* Dump the autosense buffer when tracing is enabled and it is valid. */
	if (sc->mps_debug & MPS_XINFO &&
		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
		scsi_sense_print(csio);
		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
	}

	/* Decode byte 0 of the response info when the firmware says it
	 * is valid. */
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32toh(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		mps_response_code(sc,response_bytes[0]);
	}
}
2101
2102static void
2103mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
2104{
2105	MPI2_SCSI_IO_REPLY *rep;
2106	union ccb *ccb;
2107	struct ccb_scsiio *csio;
2108	struct mpssas_softc *sassc;
2109	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2110	u8 *TLR_bits, TLR_on;
2111	int dir = 0, i;
2112	u16 alloc_len;
2113	struct mpssas_target *target;
2114	target_id_t target_id;
2115
2116	MPS_FUNCTRACE(sc);
2117	mps_dprint(sc, MPS_TRACE,
2118	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2119	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2120	    cm->cm_targ->outstanding);
2121
2122	callout_stop(&cm->cm_callout);
2123	mtx_assert(&sc->mps_mtx, MA_OWNED);
2124
2125	sassc = sc->sassc;
2126	ccb = cm->cm_complete_data;
2127	csio = &ccb->csio;
2128	target_id = csio->ccb_h.target_id;
2129	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2130	/*
2131	 * XXX KDM if the chain allocation fails, does it matter if we do
2132	 * the sync and unload here?  It is simpler to do it in every case,
2133	 * assuming it doesn't cause problems.
2134	 */
2135	if (cm->cm_data != NULL) {
2136		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
2137			dir = BUS_DMASYNC_POSTREAD;
2138		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
2139			dir = BUS_DMASYNC_POSTWRITE;
2140		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2141		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2142	}
2143
2144	cm->cm_targ->completed++;
2145	cm->cm_targ->outstanding--;
2146	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2147	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2148
2149	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
2150		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2151		if (cm->cm_reply != NULL)
2152			mpssas_log_command(cm, MPS_RECOVERY,
2153			    "completed timedout cm %p ccb %p during recovery "
2154			    "ioc %x scsi %x state %x xfer %u\n",
2155			    cm, cm->cm_ccb,
2156			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2157			    le32toh(rep->TransferCount));
2158		else
2159			mpssas_log_command(cm, MPS_RECOVERY,
2160			    "completed timedout cm %p ccb %p during recovery\n",
2161			    cm, cm->cm_ccb);
2162	} else if (cm->cm_targ->tm != NULL) {
2163		if (cm->cm_reply != NULL)
2164			mpssas_log_command(cm, MPS_RECOVERY,
2165			    "completed cm %p ccb %p during recovery "
2166			    "ioc %x scsi %x state %x xfer %u\n",
2167			    cm, cm->cm_ccb,
2168			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2169			    le32toh(rep->TransferCount));
2170		else
2171			mpssas_log_command(cm, MPS_RECOVERY,
2172			    "completed cm %p ccb %p during recovery\n",
2173			    cm, cm->cm_ccb);
2174	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
2175		mpssas_log_command(cm, MPS_RECOVERY,
2176		    "reset completed cm %p ccb %p\n",
2177		    cm, cm->cm_ccb);
2178	}
2179
2180	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2181		/*
2182		 * We ran into an error after we tried to map the command,
2183		 * so we're getting a callback without queueing the command
2184		 * to the hardware.  So we set the status here, and it will
2185		 * be retained below.  We'll go through the "fast path",
2186		 * because there can be no reply when we haven't actually
2187		 * gone out to the hardware.
2188		 */
2189		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2190
2191		/*
2192		 * Currently the only error included in the mask is
2193		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
2194		 * chain frames.  We need to freeze the queue until we get
2195		 * a command that completed without this error, which will
2196		 * hopefully have some chain frames attached that we can
2197		 * use.  If we wanted to get smarter about it, we would
2198		 * only unfreeze the queue in this condition when we're
2199		 * sure that we're getting some chain frames back.  That's
2200		 * probably unnecessary.
2201		 */
2202		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
2203			xpt_freeze_simq(sassc->sim, 1);
2204			sassc->flags |= MPSSAS_QUEUE_FROZEN;
2205			mps_dprint(sc, MPS_XINFO, "Error sending command, "
2206				   "freezing SIM queue\n");
2207		}
2208	}
2209
2210	/*
2211	 * If this is a Start Stop Unit command and it was issued by the driver
2212	 * during shutdown, decrement the refcount to account for all of the
2213	 * commands that were sent.  All SSU commands should be completed before
2214	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2215	 * is TRUE.
2216	 */
2217	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2218		mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
2219		sc->SSU_refcount--;
2220	}
2221
2222	/* Take the fast path to completion */
2223	if (cm->cm_reply == NULL) {
2224		if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2225			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
2226				mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2227			else {
2228				mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2229				ccb->csio.scsi_status = SCSI_STATUS_OK;
2230			}
2231			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2232				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2233				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2234				mps_dprint(sc, MPS_XINFO,
2235				    "Unfreezing SIM queue\n");
2236			}
2237		}
2238
2239		/*
2240		 * There are two scenarios where the status won't be
2241		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
2242		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
2243		 */
2244		if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2245			/*
2246			 * Freeze the dev queue so that commands are
2247			 * executed in the correct order after error
2248			 * recovery.
2249			 */
2250			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2251			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2252		}
2253		mps_free_command(sc, cm);
2254		xpt_done(ccb);
2255		return;
2256	}
2257
2258	mpssas_log_command(cm, MPS_XINFO,
2259	    "ioc %x scsi %x state %x xfer %u\n",
2260	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2261	    le32toh(rep->TransferCount));
2262
2263	/*
2264	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
2265	 * Volume if an error occurred (normal I/O retry).  Use the original
2266	 * CCB, but set a flag that this will be a retry so that it's sent to
2267	 * the original volume.  Free the command but reuse the CCB.
2268	 */
2269	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
2270		mps_free_command(sc, cm);
2271		ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
2272		mpssas_action_scsiio(sassc, ccb);
2273		return;
2274	} else
2275		ccb->ccb_h.sim_priv.entries[0].field = 0;
2276
2277	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2278	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2279		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2280		/* FALLTHROUGH */
2281	case MPI2_IOCSTATUS_SUCCESS:
2282	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2283
2284		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2285		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2286			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");
2287
2288		/* Completion failed at the transport level. */
2289		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2290		    MPI2_SCSI_STATE_TERMINATED)) {
2291			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2292			break;
2293		}
2294
2295		/* In a modern packetized environment, an autosense failure
2296		 * implies that there's not much else that can be done to
2297		 * recover the command.
2298		 */
2299		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2300			mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2301			break;
2302		}
2303
2304		/*
2305		 * CAM doesn't care about SAS Response Info data, but if this is
2306		 * the state check if TLR should be done.  If not, clear the
2307		 * TLR_bits for the target.
2308		 */
2309		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2310		    ((le32toh(rep->ResponseInfo) &
2311		    MPI2_SCSI_RI_MASK_REASONCODE) ==
2312		    MPS_SCSI_RI_INVALID_FRAME)) {
2313			sc->mapping_table[target_id].TLR_bits =
2314			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2315		}
2316
2317		/*
2318		 * Intentionally override the normal SCSI status reporting
2319		 * for these two cases.  These are likely to happen in a
2320		 * multi-initiator environment, and we want to make sure that
2321		 * CAM retries these commands rather than fail them.
2322		 */
2323		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2324		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2325			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2326			break;
2327		}
2328
2329		/* Handle normal status and sense */
2330		csio->scsi_status = rep->SCSIStatus;
2331		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2332			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2333		else
2334			mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2335
2336		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2337			int sense_len, returned_sense_len;
2338
2339			returned_sense_len = min(le32toh(rep->SenseCount),
2340			    sizeof(struct scsi_sense_data));
2341			if (returned_sense_len < ccb->csio.sense_len)
2342				ccb->csio.sense_resid = ccb->csio.sense_len -
2343					returned_sense_len;
2344			else
2345				ccb->csio.sense_resid = 0;
2346
2347			sense_len = min(returned_sense_len,
2348			    ccb->csio.sense_len - ccb->csio.sense_resid);
2349			bzero(&ccb->csio.sense_data,
2350			      sizeof(ccb->csio.sense_data));
2351			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2352			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2353		}
2354
2355		/*
2356		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2357		 * and it's page code 0 (Supported Page List), and there is
2358		 * inquiry data, and this is for a sequential access device, and
2359		 * the device is an SSP target, and TLR is supported by the
2360		 * controller, turn the TLR_bits value ON if page 0x90 is
2361		 * supported.
2362		 */
2363		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2364		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2365		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2366		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2367		    (csio->data_ptr != NULL) &&
2368		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2369		    (sc->control_TLR) &&
2370		    (sc->mapping_table[target_id].device_info &
2371		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2372			vpd_list = (struct scsi_vpd_supported_page_list *)
2373			    csio->data_ptr;
2374			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2375			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2376			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2377			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2378			    csio->cdb_io.cdb_bytes[4];
2379			alloc_len -= csio->resid;
2380			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2381				if (vpd_list->list[i] == 0x90) {
2382					*TLR_bits = TLR_on;
2383					break;
2384				}
2385			}
2386		}
2387
2388		/*
2389		 * If this is a SATA direct-access end device, mark it so that
2390		 * a SCSI StartStopUnit command will be sent to it when the
2391		 * driver is being shutdown.
2392		 */
2393		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2394		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2395		    (sc->mapping_table[target_id].device_info &
2396		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2397		    ((sc->mapping_table[target_id].device_info &
2398		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2399		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2400			target = &sassc->targets[target_id];
2401			target->supports_SSU = TRUE;
2402			mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
2403			    target_id);
2404		}
2405		break;
2406	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2407	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2408		/*
2409		 * If devinfo is 0 this will be a volume.  In that case don't
2410		 * tell CAM that the volume is not there.  We want volumes to
2411		 * be enumerated until they are deleted/removed, not just
2412		 * failed.
2413		 */
2414		if (cm->cm_targ->devinfo == 0)
2415			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2416		else
2417			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2418		break;
2419	case MPI2_IOCSTATUS_INVALID_SGL:
2420		mps_print_scsiio_cmd(sc, cm);
2421		mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2422		break;
2423	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2424		/*
2425		 * This is one of the responses that comes back when an I/O
2426		 * has been aborted.  If it is because of a timeout that we
2427		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2428		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2429		 * command is the same (it gets retried, subject to the
2430		 * retry counter), the only difference is what gets printed
2431		 * on the console.
2432		 */
2433		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
2434			mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2435		else
2436			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2437		break;
2438	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2439		/* resid is ignored for this condition */
2440		csio->resid = 0;
2441		mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2442		break;
2443	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2444	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2445		/*
2446		 * These can sometimes be transient transport-related
2447		 * errors, and sometimes persistent drive-related errors.
2448		 * We used to retry these without decrementing the retry
2449		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2450		 * we hit a persistent drive problem that returns one of
2451		 * these error codes, we would retry indefinitely.  So,
2452		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2453		 * count and avoid infinite retries.  We're taking the
2454		 * potential risk of flagging false failures in the event
2455		 * of a topology-related error (e.g. a SAS expander problem
2456		 * causes a command addressed to a drive to fail), but
2457		 * avoiding getting into an infinite retry loop.
2458		 */
2459		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2460		mpssas_log_command(cm, MPS_INFO,
2461		    "terminated ioc %x loginfo %x scsi %x state %x xfer %u\n",
2462		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2463		    rep->SCSIStatus, rep->SCSIState,
2464		    le32toh(rep->TransferCount));
2465		break;
2466	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2467	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2468	case MPI2_IOCSTATUS_INVALID_VPID:
2469	case MPI2_IOCSTATUS_INVALID_FIELD:
2470	case MPI2_IOCSTATUS_INVALID_STATE:
2471	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2472	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2473	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2474	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2475	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2476	default:
2477		mpssas_log_command(cm, MPS_XINFO,
2478		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2479		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2480		    rep->SCSIStatus, rep->SCSIState,
2481		    le32toh(rep->TransferCount));
2482		csio->resid = cm->cm_length;
2483		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2484		break;
2485	}
2486
2487	mps_sc_failed_io_info(sc,csio,rep);
2488
2489	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2490		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2491		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2492		mps_dprint(sc, MPS_XINFO, "Command completed, "
2493		    "unfreezing SIM queue\n");
2494	}
2495
2496	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2497		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2498		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2499	}
2500
2501	mps_free_command(sc, cm);
2502	xpt_done(ccb);
2503}
2504
/* All requests reaching here are endian-safe. */
2506static void
2507mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2508    union ccb *ccb) {
2509	pMpi2SCSIIORequest_t	pIO_req;
2510	struct mps_softc	*sc = sassc->sc;
2511	uint64_t		virtLBA;
2512	uint32_t		physLBA, stripe_offset, stripe_unit;
2513	uint32_t		io_size, column;
2514	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2515
2516	/*
2517	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2518	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2519	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2520	 * bit different than the 10/16 CDBs, handle them separately.
2521	 */
2522	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2523	CDB = pIO_req->CDB.CDB32;
2524
2525	/*
2526	 * Handle 6 byte CDBs.
2527	 */
2528	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2529	    (CDB[0] == WRITE_6))) {
2530		/*
2531		 * Get the transfer size in blocks.
2532		 */
2533		io_size = (cm->cm_length >> sc->DD_block_exponent);
2534
2535		/*
2536		 * Get virtual LBA given in the CDB.
2537		 */
2538		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2539		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2540
2541		/*
2542		 * Check that LBA range for I/O does not exceed volume's
2543		 * MaxLBA.
2544		 */
2545		if ((virtLBA + (uint64_t)io_size - 1) <=
2546		    sc->DD_max_lba) {
2547			/*
2548			 * Check if the I/O crosses a stripe boundary.  If not,
2549			 * translate the virtual LBA to a physical LBA and set
2550			 * the DevHandle for the PhysDisk to be used.  If it
2551			 * does cross a boundary, do normal I/O.  To get the
2552			 * right DevHandle to use, get the map number for the
2553			 * column, then use that map number to look up the
2554			 * DevHandle of the PhysDisk.
2555			 */
2556			stripe_offset = (uint32_t)virtLBA &
2557			    (sc->DD_stripe_size - 1);
2558			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2559				physLBA = (uint32_t)virtLBA >>
2560				    sc->DD_stripe_exponent;
2561				stripe_unit = physLBA / sc->DD_num_phys_disks;
2562				column = physLBA % sc->DD_num_phys_disks;
2563				pIO_req->DevHandle =
2564				    htole16(sc->DD_column_map[column].dev_handle);
2565				/* ???? Is this endian safe*/
2566				cm->cm_desc.SCSIIO.DevHandle =
2567				    pIO_req->DevHandle;
2568
2569				physLBA = (stripe_unit <<
2570				    sc->DD_stripe_exponent) + stripe_offset;
2571				ptrLBA = &pIO_req->CDB.CDB32[1];
2572				physLBA_byte = (uint8_t)(physLBA >> 16);
2573				*ptrLBA = physLBA_byte;
2574				ptrLBA = &pIO_req->CDB.CDB32[2];
2575				physLBA_byte = (uint8_t)(physLBA >> 8);
2576				*ptrLBA = physLBA_byte;
2577				ptrLBA = &pIO_req->CDB.CDB32[3];
2578				physLBA_byte = (uint8_t)physLBA;
2579				*ptrLBA = physLBA_byte;
2580
2581				/*
2582				 * Set flag that Direct Drive I/O is
2583				 * being done.
2584				 */
2585				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2586			}
2587		}
2588		return;
2589	}
2590
2591	/*
2592	 * Handle 10, 12 or 16 byte CDBs.
2593	 */
2594	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2595	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2596	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2597	    (CDB[0] == WRITE_12))) {
2598		/*
2599		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2600		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2601		 * the else section.  10-byte and 12-byte CDB's are OK.
2602		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2603		 * ready to accept 12byte CDB for Direct IOs.
2604		 */
2605		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2606		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2607		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2608			/*
2609			 * Get the transfer size in blocks.
2610			 */
2611			io_size = (cm->cm_length >> sc->DD_block_exponent);
2612
2613			/*
2614			 * Get virtual LBA.  Point to correct lower 4 bytes of
2615			 * LBA in the CDB depending on command.
2616			 */
2617			lba_idx = ((CDB[0] == READ_12) ||
2618				(CDB[0] == WRITE_12) ||
2619				(CDB[0] == READ_10) ||
2620				(CDB[0] == WRITE_10))? 2 : 6;
2621			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2622			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2623			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2624			    (uint64_t)CDB[lba_idx + 3];
2625
2626			/*
2627			 * Check that LBA range for I/O does not exceed volume's
2628			 * MaxLBA.
2629			 */
2630			if ((virtLBA + (uint64_t)io_size - 1) <=
2631			    sc->DD_max_lba) {
2632				/*
2633				 * Check if the I/O crosses a stripe boundary.
2634				 * If not, translate the virtual LBA to a
2635				 * physical LBA and set the DevHandle for the
2636				 * PhysDisk to be used.  If it does cross a
2637				 * boundary, do normal I/O.  To get the right
2638				 * DevHandle to use, get the map number for the
2639				 * column, then use that map number to look up
2640				 * the DevHandle of the PhysDisk.
2641				 */
2642				stripe_offset = (uint32_t)virtLBA &
2643				    (sc->DD_stripe_size - 1);
2644				if ((stripe_offset + io_size) <=
2645				    sc->DD_stripe_size) {
2646					physLBA = (uint32_t)virtLBA >>
2647					    sc->DD_stripe_exponent;
2648					stripe_unit = physLBA /
2649					    sc->DD_num_phys_disks;
2650					column = physLBA %
2651					    sc->DD_num_phys_disks;
2652					pIO_req->DevHandle =
2653					    htole16(sc->DD_column_map[column].
2654					    dev_handle);
2655					cm->cm_desc.SCSIIO.DevHandle =
2656					    pIO_req->DevHandle;
2657
2658					physLBA = (stripe_unit <<
2659					    sc->DD_stripe_exponent) +
2660					    stripe_offset;
2661					ptrLBA =
2662					    &pIO_req->CDB.CDB32[lba_idx];
2663					physLBA_byte = (uint8_t)(physLBA >> 24);
2664					*ptrLBA = physLBA_byte;
2665					ptrLBA =
2666					    &pIO_req->CDB.CDB32[lba_idx + 1];
2667					physLBA_byte = (uint8_t)(physLBA >> 16);
2668					*ptrLBA = physLBA_byte;
2669					ptrLBA =
2670					    &pIO_req->CDB.CDB32[lba_idx + 2];
2671					physLBA_byte = (uint8_t)(physLBA >> 8);
2672					*ptrLBA = physLBA_byte;
2673					ptrLBA =
2674					    &pIO_req->CDB.CDB32[lba_idx + 3];
2675					physLBA_byte = (uint8_t)physLBA;
2676					*ptrLBA = physLBA_byte;
2677
2678					/*
2679					 * Set flag that Direct Drive I/O is
2680					 * being done.
2681					 */
2682					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2683				}
2684			}
2685		} else {
2686			/*
2687			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2688			 * 0.  Get the transfer size in blocks.
2689			 */
2690			io_size = (cm->cm_length >> sc->DD_block_exponent);
2691
2692			/*
2693			 * Get virtual LBA.
2694			 */
2695			virtLBA = ((uint64_t)CDB[2] << 54) |
2696			    ((uint64_t)CDB[3] << 48) |
2697			    ((uint64_t)CDB[4] << 40) |
2698			    ((uint64_t)CDB[5] << 32) |
2699			    ((uint64_t)CDB[6] << 24) |
2700			    ((uint64_t)CDB[7] << 16) |
2701			    ((uint64_t)CDB[8] << 8) |
2702			    (uint64_t)CDB[9];
2703
2704			/*
2705			 * Check that LBA range for I/O does not exceed volume's
2706			 * MaxLBA.
2707			 */
2708			if ((virtLBA + (uint64_t)io_size - 1) <=
2709			    sc->DD_max_lba) {
2710				/*
2711				 * Check if the I/O crosses a stripe boundary.
2712				 * If not, translate the virtual LBA to a
2713				 * physical LBA and set the DevHandle for the
2714				 * PhysDisk to be used.  If it does cross a
2715				 * boundary, do normal I/O.  To get the right
2716				 * DevHandle to use, get the map number for the
2717				 * column, then use that map number to look up
2718				 * the DevHandle of the PhysDisk.
2719				 */
2720				stripe_offset = (uint32_t)virtLBA &
2721				    (sc->DD_stripe_size - 1);
2722				if ((stripe_offset + io_size) <=
2723				    sc->DD_stripe_size) {
2724					physLBA = (uint32_t)(virtLBA >>
2725					    sc->DD_stripe_exponent);
2726					stripe_unit = physLBA /
2727					    sc->DD_num_phys_disks;
2728					column = physLBA %
2729					    sc->DD_num_phys_disks;
2730					pIO_req->DevHandle =
2731					    htole16(sc->DD_column_map[column].
2732					    dev_handle);
2733					cm->cm_desc.SCSIIO.DevHandle =
2734					    pIO_req->DevHandle;
2735
2736					physLBA = (stripe_unit <<
2737					    sc->DD_stripe_exponent) +
2738					    stripe_offset;
2739
2740					/*
2741					 * Set upper 4 bytes of LBA to 0.  We
2742					 * assume that the phys disks are less
2743					 * than 2 TB's in size.  Then, set the
2744					 * lower 4 bytes.
2745					 */
2746					pIO_req->CDB.CDB32[2] = 0;
2747					pIO_req->CDB.CDB32[3] = 0;
2748					pIO_req->CDB.CDB32[4] = 0;
2749					pIO_req->CDB.CDB32[5] = 0;
2750					ptrLBA = &pIO_req->CDB.CDB32[6];
2751					physLBA_byte = (uint8_t)(physLBA >> 24);
2752					*ptrLBA = physLBA_byte;
2753					ptrLBA = &pIO_req->CDB.CDB32[7];
2754					physLBA_byte = (uint8_t)(physLBA >> 16);
2755					*ptrLBA = physLBA_byte;
2756					ptrLBA = &pIO_req->CDB.CDB32[8];
2757					physLBA_byte = (uint8_t)(physLBA >> 8);
2758					*ptrLBA = physLBA_byte;
2759					ptrLBA = &pIO_req->CDB.CDB32[9];
2760					physLBA_byte = (uint8_t)physLBA;
2761					*ptrLBA = physLBA_byte;
2762
2763					/*
2764					 * Set flag that Direct Drive I/O is
2765					 * being done.
2766					 */
2767					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2768				}
2769			}
2770		}
2771	}
2772}
2773
2774#if __FreeBSD_version >= 900026
2775static void
2776mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2777{
2778	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2779	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2780	uint64_t sasaddr;
2781	union ccb *ccb;
2782
2783	ccb = cm->cm_complete_data;
2784
2785	/*
2786	 * Currently there should be no way we can hit this case.  It only
2787	 * happens when we have a failure to allocate chain frames, and SMP
2788	 * commands require two S/G elements only.  That should be handled
2789	 * in the standard request size.
2790	 */
2791	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2792		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2793			   __func__, cm->cm_flags);
2794		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2795		goto bailout;
2796        }
2797
2798	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2799	if (rpl == NULL) {
2800		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2801		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2802		goto bailout;
2803	}
2804
2805	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2806	sasaddr = le32toh(req->SASAddress.Low);
2807	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2808
2809	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2810	    MPI2_IOCSTATUS_SUCCESS ||
2811	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2812		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2813		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2814		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2815		goto bailout;
2816	}
2817
2818	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2819		   "%#jx completed successfully\n", __func__,
2820		   (uintmax_t)sasaddr);
2821
2822	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2823		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2824	else
2825		mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2826
2827bailout:
2828	/*
2829	 * We sync in both directions because we had DMAs in the S/G list
2830	 * in both directions.
2831	 */
2832	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2833			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2834	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2835	mps_free_command(sc, cm);
2836	xpt_done(ccb);
2837}
2838
/*
 * Build and dispatch an MPI2 SMP passthrough request to the device at
 * 'sasaddr' on behalf of the XPT_SMP_IO CCB 'ccb'.  The request and
 * response buffers from the CCB are mapped as a two-element uio so a
 * single mps_map_command() call sets up both DMA directions.  Completion
 * is handled by mpssas_smpio_complete(), which finishes the CCB.
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	int error;

	sc = sassc->sc;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		mps_dprint(sc, MPS_ERROR,
			   "%s: physical addresses not supported\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: multiple request or response "
				   "buffer segments not supported for SMP\n",
				   __func__);
			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cannot allocate command\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
	    "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the SMP request, iovec 1 the response buffer. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: error %d returned from mps_map_command()\n",
			   __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	/* Mapping failed outright; release the command and fail the CCB. */
	mps_free_command(sc, cm);
	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;

}
3009
/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address the SMP request
 * should be routed to and hand off to mpssas_send_smpcmd().  If the
 * addressed device is not itself an SMP target, the request is directed
 * at its parent (normally the attached expander) instead.
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.  Otherwise we have to figure out what the expander's
	 * address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		/* Old probe path: look the parent up in the target table. */
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/* Current path: parent info is cached on the target itself. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3145#endif //__FreeBSD_version >= 900026
3146
3147static void
3148mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3149{
3150	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3151	struct mps_softc *sc;
3152	struct mps_command *tm;
3153	struct mpssas_target *targ;
3154
3155	MPS_FUNCTRACE(sassc->sc);
3156	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3157
3158	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3159	    ("Target %d out of bounds in XPT_RESET_DEV\n",
3160	     ccb->ccb_h.target_id));
3161	sc = sassc->sc;
3162	tm = mps_alloc_command(sc);
3163	if (tm == NULL) {
3164		mps_dprint(sc, MPS_ERROR,
3165		    "command alloc failure in mpssas_action_resetdev\n");
3166		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3167		xpt_done(ccb);
3168		return;
3169	}
3170
3171	targ = &sassc->targets[ccb->ccb_h.target_id];
3172	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3173	req->DevHandle = htole16(targ->handle);
3174	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3175	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3176
3177	/* SAS Hard Link Reset / SATA Link Reset */
3178	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3179
3180	tm->cm_data = NULL;
3181	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3182	tm->cm_complete = mpssas_resetdev_complete;
3183	tm->cm_complete_data = ccb;
3184	tm->cm_targ = targ;
3185	targ->flags |= MPSSAS_TARGET_INRESET;
3186
3187	mps_map_command(sc, tm);
3188}
3189
/*
 * Completion handler for the target-reset task management request issued
 * by mpssas_action_resetdev().  Translates the TM response code into a
 * CAM status on the original XPT_RESET_DEV CCB, announces the reset to
 * CAM on success, frees the TM command, and completes the CCB.
 */
static void
mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
	union ccb *ccb;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	ccb = tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		MPI2_SCSI_TASK_MANAGE_REQUEST *req;

		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;

		mps_dprint(sc, MPS_ERROR,
			   "%s: cm_flags = %#x for reset of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   req->DevHandle);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/*
	 * NOTE(review): resp is dereferenced below without a NULL check;
	 * unlike mpssas_smpio_complete(), a missing reply frame
	 * (cm_reply == NULL) would fault here — confirm TM commands always
	 * complete with a reply.  Also, ResponseCode is a single byte in
	 * the MPI2 reply structure, so the le32toh() on it looks suspect
	 * for big-endian hosts — verify against the MPI2 headers.
	 */
	mps_dprint(sc, MPS_XINFO,
	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));

	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		/* Tell CAM a bus device reset was delivered to every LUN. */
		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);
	}
	else
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);

bailout:

	mpssas_free_tm(sc, tm);
	xpt_done(ccb);
}
3237
3238static void
3239mpssas_poll(struct cam_sim *sim)
3240{
3241	struct mpssas_softc *sassc;
3242
3243	sassc = cam_sim_softc(sim);
3244
3245	if (sassc->sc->mps_debug & MPS_TRACE) {
3246		/* frequent debug messages during a panic just slow
3247		 * everything down too much.
3248		 */
3249		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3250		sassc->sc->mps_debug &= ~MPS_TRACE;
3251	}
3252
3253	mps_intr_locked(sassc->sc);
3254}
3255
/*
 * CAM async event callback.  On newer CAM stacks we watch for
 * AC_ADVINFO_CHANGED notifications carrying long read-capacity data and
 * use them to track per-LUN EEDP formatting; on older stacks we react to
 * AC_FOUND_DEVICE and probe the device ourselves via mpssas_check_eedp().
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the LUN record for this path, if we already have one. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read-capacity data for this LUN
		 * from the transport layer via an XPT_DEV_ADVINFO CCB.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/* Record whether the LUN is formatted with protection info. */
		if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#else
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		cgd = arg;
		/* Older CAM: probe the new device for EEDP support ourselves. */
		mpssas_check_eedp(sc, path, cgd);
		break;
	}
#endif
	default:
		break;
	}
}
3360
3361#if (__FreeBSD_version < 901503) || \
3362    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3363static void
3364mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3365		  struct ccb_getdev *cgd)
3366{
3367	struct mpssas_softc *sassc = sc->sassc;
3368	struct ccb_scsiio *csio;
3369	struct scsi_read_capacity_16 *scsi_cmd;
3370	struct scsi_read_capacity_eedp *rcap_buf;
3371	path_id_t pathid;
3372	target_id_t targetid;
3373	lun_id_t lunid;
3374	union ccb *ccb;
3375	struct cam_path *local_path;
3376	struct mpssas_target *target;
3377	struct mpssas_lun *lun;
3378	uint8_t	found_lun;
3379	char path_str[64];
3380
3381	sassc = sc->sassc;
3382	pathid = cam_sim_path(sassc->sim);
3383	targetid = xpt_path_target_id(path);
3384	lunid = xpt_path_lun_id(path);
3385
3386	KASSERT(targetid < sassc->maxtargets,
3387	    ("Target %d out of bounds in mpssas_check_eedp\n",
3388	     targetid));
3389	target = &sassc->targets[targetid];
3390	if (target->handle == 0x0)
3391		return;
3392
3393	/*
3394	 * Determine if the device is EEDP capable.
3395	 *
3396	 * If this flag is set in the inquiry data,
3397	 * the device supports protection information,
3398	 * and must support the 16 byte read
3399	 * capacity command, otherwise continue without
3400	 * sending read cap 16
3401	 */
3402	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3403		return;
3404
3405	/*
3406	 * Issue a READ CAPACITY 16 command.  This info
3407	 * is used to determine if the LUN is formatted
3408	 * for EEDP support.
3409	 */
3410	ccb = xpt_alloc_ccb_nowait();
3411	if (ccb == NULL) {
3412		mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3413		    "for EEDP support.\n");
3414		return;
3415	}
3416
3417	if (xpt_create_path(&local_path, xpt_periph,
3418	    pathid, targetid, lunid) != CAM_REQ_CMP) {
3419		mps_dprint(sc, MPS_ERROR, "Unable to create "
3420		    "path for EEDP support\n");
3421		xpt_free_ccb(ccb);
3422		return;
3423	}
3424
3425	/*
3426	 * If LUN is already in list, don't create a new
3427	 * one.
3428	 */
3429	found_lun = FALSE;
3430	SLIST_FOREACH(lun, &target->luns, lun_link) {
3431		if (lun->lun_id == lunid) {
3432			found_lun = TRUE;
3433			break;
3434		}
3435	}
3436	if (!found_lun) {
3437		lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3438		    M_NOWAIT | M_ZERO);
3439		if (lun == NULL) {
3440			mps_dprint(sc, MPS_ERROR,
3441			    "Unable to alloc LUN for EEDP support.\n");
3442			xpt_free_path(local_path);
3443			xpt_free_ccb(ccb);
3444			return;
3445		}
3446		lun->lun_id = lunid;
3447		SLIST_INSERT_HEAD(&target->luns, lun,
3448		    lun_link);
3449	}
3450
3451	xpt_path_string(local_path, path_str, sizeof(path_str));
3452
3453	mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3454	    path_str, target->handle);
3455
3456	/*
3457	 * Issue a READ CAPACITY 16 command for the LUN.
3458	 * The mpssas_read_cap_done function will load
3459	 * the read cap info into the LUN struct.
3460	 */
3461	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3462	    M_MPT2, M_NOWAIT | M_ZERO);
3463	if (rcap_buf == NULL) {
3464		mps_dprint(sc, MPS_FAULT,
3465		    "Unable to alloc read capacity buffer for EEDP support.\n");
3466		xpt_free_path(ccb->ccb_h.path);
3467		xpt_free_ccb(ccb);
3468		return;
3469	}
3470	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3471	csio = &ccb->csio;
3472	csio->ccb_h.func_code = XPT_SCSI_IO;
3473	csio->ccb_h.flags = CAM_DIR_IN;
3474	csio->ccb_h.retry_count = 4;
3475	csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3476	csio->ccb_h.timeout = 60000;
3477	csio->data_ptr = (uint8_t *)rcap_buf;
3478	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3479	csio->sense_len = MPS_SENSE_LEN;
3480	csio->cdb_len = sizeof(*scsi_cmd);
3481	csio->tag_action = MSG_SIMPLE_Q_TAG;
3482
3483	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3484	bzero(scsi_cmd, sizeof(*scsi_cmd));
3485	scsi_cmd->opcode = 0x9E;
3486	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3487	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3488
3489	ccb->ccb_h.ppriv_ptr1 = sassc;
3490	xpt_action(ccb);
3491}
3492
/*
 * Completion callback for the READ CAPACITY 16 command issued by
 * mpssas_check_eedp().  Looks up the LUN on the target addressed by the
 * CCB and records whether it is formatted for EEDP, then frees the data
 * buffer, the path, and the CCB that mpssas_check_eedp() allocated.
 */
static void
mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/* The driver needs to release the devq itself when the SCSI
	 * command was generated internally by the driver.
	 * Currently there is a single place where the driver
	 * issues a SCSI command internally.  In the future, if the driver
	 * issues more internal SCSI commands, it needs to release the
	 * devq for those as well, since such commands will not go back to
	 * cam_periph.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in mpssas_read_cap_done\n",
	     done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in
		 * with EEDP info.  If the READ CAP 16 command had some
		 * SCSI error (common if command is not supported), mark
		 * the lun as not supporting EEDP and set the block size
		 * to 0.
		 */
		if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP)
		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* Bit 0 of the protect byte is the P_TYPE/PROT_EN flag. */
		if (rcap_buf->protect & 0x01) {
			mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
 			    "target ID %d is formatted for EEDP "
 			    "support.\n", done_ccb->ccb_h.target_lun,
 			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	// Finished with this CCB and path.
	free(rcap_buf, M_MPT2);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3563#endif /* (__FreeBSD_version < 901503) || \
3564          ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3565
3566void
3567mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
3568    struct mpssas_target *target, lun_id_t lun_id)
3569{
3570	union ccb *ccb;
3571	path_id_t path_id;
3572
3573	/*
3574	 * Set the INRESET flag for this target so that no I/O will be sent to
3575	 * the target until the reset has completed.  If an I/O request does
3576	 * happen, the devq will be frozen.  The CCB holds the path which is
3577	 * used to release the devq.  The devq is released and the CCB is freed
3578	 * when the TM completes.
3579	 */
3580	ccb = xpt_alloc_ccb_nowait();
3581	if (ccb) {
3582		path_id = cam_sim_path(sc->sassc->sim);
3583		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3584		    target->tid, lun_id) != CAM_REQ_CMP) {
3585			xpt_free_ccb(ccb);
3586		} else {
3587			tm->cm_ccb = ccb;
3588			tm->cm_targ = target;
3589			target->flags |= MPSSAS_TARGET_INRESET;
3590		}
3591	}
3592}
3593
3594int
3595mpssas_startup(struct mps_softc *sc)
3596{
3597
3598	/*
3599	 * Send the port enable message and set the wait_for_port_enable flag.
3600	 * This flag helps to keep the simq frozen until all discovery events
3601	 * are processed.
3602	 */
3603	sc->wait_for_port_enable = 1;
3604	mpssas_send_portenable(sc);
3605	return (0);
3606}
3607
3608static int
3609mpssas_send_portenable(struct mps_softc *sc)
3610{
3611	MPI2_PORT_ENABLE_REQUEST *request;
3612	struct mps_command *cm;
3613
3614	MPS_FUNCTRACE(sc);
3615
3616	if ((cm = mps_alloc_command(sc)) == NULL)
3617		return (EBUSY);
3618	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3619	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3620	request->MsgFlags = 0;
3621	request->VP_ID = 0;
3622	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3623	cm->cm_complete = mpssas_portenable_complete;
3624	cm->cm_data = NULL;
3625	cm->cm_sge = NULL;
3626
3627	mps_map_command(sc, cm);
3628	mps_dprint(sc, MPS_XINFO,
3629	    "mps_send_portenable finished cm %p req %p complete %p\n",
3630	    cm, cm->cm_req, cm->cm_complete);
3631	return (0);
3632}
3633
3634static void
3635mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3636{
3637	MPI2_PORT_ENABLE_REPLY *reply;
3638	struct mpssas_softc *sassc;
3639
3640	MPS_FUNCTRACE(sc);
3641	sassc = sc->sassc;
3642
3643	/*
3644	 * Currently there should be no way we can hit this case.  It only
3645	 * happens when we have a failure to allocate chain frames, and
3646	 * port enable commands don't have S/G lists.
3647	 */
3648	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3649		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3650			   "This should not happen!\n", __func__, cm->cm_flags);
3651	}
3652
3653	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3654	if (reply == NULL)
3655		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3656	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3657	    MPI2_IOCSTATUS_SUCCESS)
3658		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3659
3660	mps_free_command(sc, cm);
3661	if (sc->mps_ich.ich_arg != NULL) {
3662		mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n");
3663		config_intrhook_disestablish(&sc->mps_ich);
3664		sc->mps_ich.ich_arg = NULL;
3665	}
3666
3667	/*
3668	 * Get WarpDrive info after discovery is complete but before the scan
3669	 * starts.  At this point, all devices are ready to be exposed to the
3670	 * OS.  If devices should be hidden instead, take them out of the
3671	 * 'targets' array before the scan.  The devinfo for a disk will have
3672	 * some info and a volume's will be 0.  Use that to remove disks.
3673	 */
3674	mps_wd_config_pages(sc);
3675
3676	/*
3677	 * Done waiting for port enable to complete.  Decrement the refcount.
3678	 * If refcount is 0, discovery is complete and a rescan of the bus can
3679	 * take place.  Since the simq was explicitly frozen before port
3680	 * enable, it must be explicitly released here to keep the
3681	 * freeze/release count in sync.
3682	 */
3683	sc->wait_for_port_enable = 0;
3684	sc->port_enable_complete = 1;
3685	wakeup(&sc->port_enable_complete);
3686	mpssas_startup_decrement(sassc);
3687}
3688
3689int
3690mpssas_check_id(struct mpssas_softc *sassc, int id)
3691{
3692	struct mps_softc *sc = sassc->sc;
3693	char *ids;
3694	char *name;
3695
3696	ids = &sc->exclude_ids[0];
3697	while((name = strsep(&ids, ",")) != NULL) {
3698		if (name[0] == '\0')
3699			continue;
3700		if (strtol(name, NULL, 0) == (long)id)
3701			return (1);
3702	}
3703
3704	return (0);
3705}
3706
3707void
3708mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3709{
3710	struct mpssas_softc *sassc;
3711	struct mpssas_lun *lun, *lun_tmp;
3712	struct mpssas_target *targ;
3713	int i;
3714
3715	sassc = sc->sassc;
3716	/*
3717	 * The number of targets is based on IOC Facts, so free all of
3718	 * the allocated LUNs for each target and then the target buffer
3719	 * itself.
3720	 */
3721	for (i=0; i< maxtargets; i++) {
3722		targ = &sassc->targets[i];
3723		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3724			free(lun, M_MPT2);
3725		}
3726	}
3727	free(sassc->targets, M_MPT2);
3728
3729	sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3730	    M_MPT2, M_WAITOK|M_ZERO);
3731	if (!sassc->targets) {
3732		panic("%s failed to alloc targets with error %d\n",
3733		    __func__, ENOMEM);
3734	}
3735}
3736