143561Skato/*-
243561Skato * Copyright (c) 2009 Yahoo! Inc.
343561Skato * Copyright (c) 2011-2015 LSI Corp.
443561Skato * Copyright (c) 2013-2015 Avago Technologies
543561Skato * All rights reserved.
643561Skato *
743561Skato * Redistribution and use in source and binary forms, with or without
843561Skato * modification, are permitted provided that the following conditions
943561Skato * are met:
1043561Skato * 1. Redistributions of source code must retain the above copyright
1143561Skato *    notice, this list of conditions and the following disclaimer.
1243561Skato * 2. Redistributions in binary form must reproduce the above copyright
1343561Skato *    notice, this list of conditions and the following disclaimer in the
1443561Skato *    documentation and/or other materials provided with the distribution.
1543561Skato *
1643561Skato * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
1743561Skato * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1843561Skato * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1943561Skato * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
2043561Skato * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2143561Skato * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2243561Skato * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2343561Skato * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2443561Skato * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2543561Skato * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2643561Skato * SUCH DAMAGE.
27119880Sobrien *
28119880Sobrien * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
29119880Sobrien *
3043561Skato * $FreeBSD: stable/10/sys/dev/mps/mps_sas.c 322661 2017-08-18 15:38:08Z ken $
3143561Skato */
3243561Skato
3343561Skato#include <sys/cdefs.h>
3443561Skato__FBSDID("$FreeBSD: stable/10/sys/dev/mps/mps_sas.c 322661 2017-08-18 15:38:08Z ken $");
3543561Skato
3643561Skato/* Communications core for Avago Technologies (LSI) MPT2 */
3743561Skato
3843561Skato/* TODO Move headers to mpsvar */
3943561Skato#include <sys/types.h>
4043561Skato#include <sys/param.h>
4143561Skato#include <sys/systm.h>
4243561Skato#include <sys/kernel.h>
43104621Snyan#include <sys/selinfo.h>
44113083Sphk#include <sys/module.h>
4543561Skato#include <sys/bus.h>
4643561Skato#include <sys/conf.h>
4743561Skato#include <sys/bio.h>
4843561Skato#include <sys/malloc.h>
4943561Skato#include <sys/uio.h>
5043561Skato#include <sys/sysctl.h>
5143561Skato#include <sys/endian.h>
5263101Snyan#include <sys/queue.h>
5343561Skato#include <sys/kthread.h>
5443561Skato#include <sys/taskqueue.h>
5543561Skato#include <sys/sbuf.h>
5643561Skato
5743561Skato#include <machine/bus.h>
5843561Skato#include <machine/resource.h>
5943561Skato#include <sys/rman.h>
6043561Skato
6143561Skato#include <machine/stdarg.h>
6243561Skato
6343561Skato#include <cam/cam.h>
6487599Sobrien#include <cam/cam_ccb.h>
6543561Skato#include <cam/cam_xpt.h>
6643561Skato#include <cam/cam_debug.h>
6743561Skato#include <cam/cam_sim.h>
6843561Skato#include <cam/cam_xpt_sim.h>
6943561Skato#include <cam/cam_xpt_periph.h>
7043561Skato#include <cam/cam_periph.h>
7143561Skato#include <cam/scsi/scsi_all.h>
7243561Skato#include <cam/scsi/scsi_message.h>
7343561Skato#if __FreeBSD_version >= 900026
7443561Skato#include <cam/scsi/smp_all.h>
7543561Skato#endif
7643561Skato
7759777Snyan#include <dev/mps/mpi/mpi2_type.h>
7859777Snyan#include <dev/mps/mpi/mpi2.h>
7959777Snyan#include <dev/mps/mpi/mpi2_ioc.h>
8063101Snyan#include <dev/mps/mpi/mpi2_sas.h>
8159777Snyan#include <dev/mps/mpi/mpi2_cnfg.h>
8259777Snyan#include <dev/mps/mpi/mpi2_init.h>
8359777Snyan#include <dev/mps/mpi/mpi2_tool.h>
84108791Snyan#include <dev/mps/mps_ioctl.h>
8543561Skato#include <dev/mps/mpsvar.h>
8659777Snyan#include <dev/mps/mps_table.h>
87109638Snyan#include <dev/mps/mps_sas.h>
8843561Skato
8943561Skato#define MPSSAS_DISCOVERY_TIMEOUT	20
9043561Skato#define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
9143561Skato
9243561Skato/*
9343561Skato * static array to check SCSI OpCode for EEDP protection bits
9443561Skato */
9543561Skato#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
9643561Skato#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
9743561Skato#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
9843561Skatostatic uint8_t op_code_prot[256] = {
9953207Snyan	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
10043561Skato	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
10143561Skato	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
10243561Skato	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
10343561Skato	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
10459777Snyan	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
10559777Snyan	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
10687734Snyan	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
10787734Snyan	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
10843561Skato	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
10943561Skato	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
11043561Skato	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111108650Snyan	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
11268358Snyan	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
11368358Snyan	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
11468358Snyan	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
11543561Skato};
11643561Skato
11759777SnyanMALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
11868358Snyan
11959777Snyanstatic void mpssas_remove_device(struct mps_softc *, struct mps_command *);
12068358Snyanstatic void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
12143561Skatostatic void mpssas_action(struct cam_sim *sim, union ccb *ccb);
12243561Skatostatic void mpssas_poll(struct cam_sim *sim);
12343561Skatostatic int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
12443561Skato    struct mps_command *cm);
12543561Skatostatic void mpssas_scsiio_timeout(void *data);
12643561Skatostatic void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
12743561Skatostatic void mpssas_direct_drive_io(struct mpssas_softc *sassc,
12843561Skato    struct mps_command *cm, union ccb *ccb);
12943561Skatostatic void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
13043561Skatostatic void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
13143561Skatostatic void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
13243561Skato#if __FreeBSD_version >= 900026
13368358Snyanstatic void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
13468358Snyanstatic void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
13543561Skato			       uint64_t sasaddr);
13643561Skatostatic void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
13743561Skato#endif //FreeBSD_version >= 900026
13843561Skatostatic void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
13959777Snyanstatic void mpssas_async(void *callback_arg, uint32_t code,
14059777Snyan			 struct cam_path *path, void *arg);
14143561Skato#if (__FreeBSD_version < 901503) || \
14243561Skato    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
14343561Skatostatic void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
14443561Skato			      struct ccb_getdev *cgd);
14543561Skatostatic void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
14643561Skato#endif
14743561Skatostatic int mpssas_send_portenable(struct mps_softc *sc);
14843561Skatostatic void mpssas_portenable_complete(struct mps_softc *sc,
14943561Skato    struct mps_command *cm);
15043561Skato
15143561Skatostruct mpssas_target *
15243561Skatompssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
15343561Skato{
15443561Skato	struct mpssas_target *target;
15543561Skato	int i;
15643561Skato
15743561Skato	for (i = start; i < sassc->maxtargets; i++) {
15843561Skato		target = &sassc->targets[i];
15943561Skato		if (target->handle == handle)
16043561Skato			return (target);
16143561Skato	}
16243561Skato
16343561Skato	return (NULL);
16443561Skato}
16543561Skato
16643561Skato/* we need to freeze the simq during attach and diag reset, to avoid failing
16743561Skato * commands before device handles have been found by discovery.  Since
16843561Skato * discovery involves reading config pages and possibly sending commands,
16943561Skato * discovery actions may continue even after we receive the end of discovery
17043561Skato * event, so refcount discovery actions instead of assuming we can unfreeze
17143561Skato * the simq when we get the event.
17243561Skato */
17343561Skatovoid
17463101Snyanmpssas_startup_increment(struct mpssas_softc *sassc)
17543561Skato{
17643561Skato	MPS_FUNCTRACE(sassc->sc);
17743561Skato
17843561Skato	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
17943561Skato		if (sassc->startup_refcount++ == 0) {
18043561Skato			/* just starting, freeze the simq */
18143561Skato			mps_dprint(sassc->sc, MPS_INIT,
18244463Skato			    "%s freezing simq\n", __func__);
18349425Skato#if __FreeBSD_version >= 1000039
18449425Skato			xpt_hold_boot();
18544463Skato#endif
18644463Skato			xpt_freeze_simq(sassc->sim, 1);
18744463Skato		}
18844463Skato		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
18943561Skato		    sassc->startup_refcount);
19043561Skato	}
19144467Skato}
19259777Snyan
19344467Skatovoid
19444467Skatompssas_release_simq_reinit(struct mpssas_softc *sassc)
19544467Skato{
19644467Skato	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
19743561Skato		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
19849426Skato		xpt_release_simq(sassc->sim, 1);
199108791Snyan		mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
20049426Skato	}
20149426Skato}
20243561Skato
20343561Skatovoid
20449426Skatompssas_startup_decrement(struct mpssas_softc *sassc)
20543561Skato{
20643561Skato	MPS_FUNCTRACE(sassc->sc);
20743561Skato
20843561Skato	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
20943561Skato		if (--sassc->startup_refcount == 0) {
21043561Skato			/* finished all discovery-related actions, release
21143561Skato			 * the simq and rescan for the latest topology.
21243561Skato			 */
21343561Skato			mps_dprint(sassc->sc, MPS_INIT,
21443561Skato			    "%s releasing simq\n", __func__);
21543561Skato			sassc->flags &= ~MPSSAS_IN_STARTUP;
21643561Skato			xpt_release_simq(sassc->sim, 1);
21743561Skato#if __FreeBSD_version >= 1000039
21863101Snyan			xpt_release_boot();
21959777Snyan#else
22043561Skato			mpssas_rescan_target(sassc->sc, NULL);
22159777Snyan#endif
22243561Skato		}
22343561Skato		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
22443561Skato		    sassc->startup_refcount);
22543561Skato	}
22643561Skato}
22743561Skato
22843561Skato/* The firmware requires us to stop sending commands when we're doing task
22943561Skato * management, so refcount the TMs and keep the simq frozen when any are in
23043561Skato * use.
231108791Snyan */
232108791Snyanstruct mps_command *
233108791Snyanmpssas_alloc_tm(struct mps_softc *sc)
234108791Snyan{
235108791Snyan	struct mps_command *tm;
236108791Snyan
237108791Snyan	tm = mps_alloc_high_priority_command(sc);
238108791Snyan	return tm;
23943561Skato}
24043561Skato
24143561Skatovoid
24243561Skatompssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
24343561Skato{
24443561Skato	int target_id = 0xFFFFFFFF;
24543561Skato
24643561Skato	if (tm == NULL)
24743561Skato		return;
24843561Skato
24943561Skato	/*
25043561Skato	 * For TM's the devq is frozen for the device.  Unfreeze it here and
25143561Skato	 * free the resources used for freezing the devq.  Must clear the
252108650Snyan	 * INRESET flag as well or scsi I/O will not work.
25343561Skato	 */
25443561Skato	if (tm->cm_targ != NULL) {
25549426Skato		tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
25643561Skato		target_id = tm->cm_targ->tid;
25743561Skato	}
25843561Skato	if (tm->cm_ccb) {
259163897Smarcel		mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n",
26043561Skato		    target_id);
26143561Skato		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
26243561Skato		xpt_free_path(tm->cm_ccb->ccb_h.path);
26343561Skato		xpt_free_ccb(tm->cm_ccb);
26443561Skato	}
26543561Skato
26643561Skato	mps_free_high_priority_command(sc, tm);
26759777Snyan}
26843561Skato
26968358Snyanvoid
27059777Snyanmpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
27143561Skato{
27243561Skato	struct mpssas_softc *sassc = sc->sassc;
27343561Skato	path_id_t pathid;
27459777Snyan	target_id_t targetid;
27559777Snyan	union ccb *ccb;
27659777Snyan
27768358Snyan	MPS_FUNCTRACE(sc);
27843561Skato	pathid = cam_sim_path(sassc->sim);
27943561Skato	if (targ == NULL)
280130601Sphk		targetid = CAM_TARGET_WILDCARD;
28143561Skato	else
28243561Skato		targetid = targ - sassc->targets;
28343561Skato
28443561Skato	/*
28543561Skato	 * Allocate a CCB and schedule a rescan.
28643561Skato	 */
28743561Skato	ccb = xpt_alloc_ccb_nowait();
28843561Skato	if (ccb == NULL) {
28959777Snyan		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
29068358Snyan		return;
29168358Snyan	}
29268358Snyan
29359777Snyan	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
29468358Snyan	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
29568358Snyan		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
29659777Snyan		xpt_free_ccb(ccb);
29743561Skato		return;
29868358Snyan	}
29943561Skato
30043561Skato	if (targetid == CAM_TARGET_WILDCARD)
30143561Skato		ccb->ccb_h.func_code = XPT_SCAN_BUS;
30243561Skato	else
30343561Skato		ccb->ccb_h.func_code = XPT_SCAN_TGT;
30443561Skato
30543561Skato	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
30643561Skato	xpt_rescan(ccb);
30763101Snyan}
30843561Skato
30943561Skatostatic void
31043561Skatompssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
31143561Skato{
31243561Skato	struct sbuf sb;
31343561Skato	va_list ap;
31468358Snyan	char str[192];
31568358Snyan	char path_str[64];
31668358Snyan
31768358Snyan	if (cm == NULL)
31868358Snyan		return;
31968358Snyan
32063101Snyan	/* No need to be in here if debugging isn't enabled */
32163101Snyan	if ((cm->cm_sc->mps_debug & level) == 0)
32263101Snyan		return;
32343561Skato
32468358Snyan	sbuf_new(&sb, str, sizeof(str), 0);
32568358Snyan
32668358Snyan	va_start(ap, fmt);
32768358Snyan
32868358Snyan	if (cm->cm_ccb != NULL) {
32963101Snyan		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
33068358Snyan				sizeof(path_str));
33168358Snyan		sbuf_cat(&sb, path_str);
33268358Snyan		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
33368358Snyan			scsi_command_string(&cm->cm_ccb->csio, &sb);
33468358Snyan			sbuf_printf(&sb, "length %d ",
33568358Snyan				    cm->cm_ccb->csio.dxfer_len);
33668358Snyan		}
33768358Snyan	}
33868358Snyan	else {
33968358Snyan		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
34043561Skato		    cam_sim_name(cm->cm_sc->sassc->sim),
34143561Skato		    cam_sim_unit(cm->cm_sc->sassc->sim),
34243561Skato		    cam_sim_bus(cm->cm_sc->sassc->sim),
34343561Skato		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
34443561Skato		    cm->cm_lun);
34543561Skato	}
34643561Skato
34743561Skato	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
34843561Skato	sbuf_vprintf(&sb, fmt, ap);
34943561Skato	sbuf_finish(&sb);
35043561Skato	mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
35143561Skato
35243561Skato	va_end(ap);
35343561Skato}
35443561Skato
35543561Skato
35643561Skatostatic void
35743561Skatompssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
35843561Skato{
35943561Skato	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
36043561Skato	struct mpssas_target *targ;
36143561Skato	uint16_t handle;
36243561Skato
36343561Skato	MPS_FUNCTRACE(sc);
36443561Skato
36543561Skato	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
36643561Skato	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
36743561Skato	targ = tm->cm_targ;
36843561Skato
36943561Skato	if (reply == NULL) {
37043561Skato		/* XXX retry the remove after the diag reset completes? */
37143561Skato		mps_dprint(sc, MPS_FAULT,
37243561Skato		    "%s NULL reply resetting device 0x%04x\n", __func__,
37343561Skato		    handle);
37443561Skato		mpssas_free_tm(sc, tm);
37543561Skato		return;
37643561Skato	}
37743561Skato
37843561Skato	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
37943561Skato	    MPI2_IOCSTATUS_SUCCESS) {
38043561Skato		mps_dprint(sc, MPS_ERROR,
381108650Snyan		   "IOCStatus = 0x%x while resetting device 0x%x\n",
38243561Skato		   le16toh(reply->IOCStatus), handle);
38343561Skato	}
38443561Skato
38543561Skato	mps_dprint(sc, MPS_XINFO,
38668358Snyan	    "Reset aborted %u commands\n", reply->TerminationCount);
38743561Skato	mps_free_reply(sc, tm->cm_reply_data);
388163897Smarcel	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
38943561Skato
39043561Skato	mps_dprint(sc, MPS_XINFO,
39143561Skato	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
39243561Skato
39343561Skato	/*
39443561Skato	 * Don't clear target if remove fails because things will get confusing.
39543561Skato	 * Leave the devname and sasaddr intact so that we know to avoid reusing
39643561Skato	 * this target id if possible, and so we can assign the same target id
39743561Skato	 * to this device if it comes back in the future.
39843561Skato	 */
39943561Skato	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
400163897Smarcel	    MPI2_IOCSTATUS_SUCCESS) {
40143561Skato		targ = tm->cm_targ;
40243561Skato		targ->handle = 0x0;
40343561Skato		targ->encl_handle = 0x0;
40459777Snyan		targ->encl_slot = 0x0;
40543561Skato		targ->exp_dev_handle = 0x0;
40643561Skato		targ->phy_num = 0x0;
407163897Smarcel		targ->linkrate = 0x0;
40843561Skato		targ->devinfo = 0x0;
40943561Skato		targ->flags = 0x0;
41043561Skato	}
41143561Skato
41243561Skato	mpssas_free_tm(sc, tm);
41343561Skato}
41443561Skato
41543561Skato
41643561Skato/*
41743561Skato * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
41843561Skato * Otherwise Volume Delete is same as Bare Drive Removal.
41943561Skato */
42043561Skatovoid
42143561Skatompssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
42243561Skato{
42343561Skato	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
42443561Skato	struct mps_softc *sc;
42543561Skato	struct mps_command *cm;
42643561Skato	struct mpssas_target *targ = NULL;
42743561Skato
42843561Skato	MPS_FUNCTRACE(sassc->sc);
42943561Skato	sc = sassc->sc;
43043561Skato
43143561Skato#ifdef WD_SUPPORT
43243561Skato	/*
43343561Skato	 * If this is a WD controller, determine if the disk should be exposed
43443561Skato	 * to the OS or not.  If disk should be exposed, return from this
43543561Skato	 * function without doing anything.
43643561Skato	 */
43743561Skato	if (sc->WD_available && (sc->WD_hide_expose ==
43843561Skato	    MPS_WD_EXPOSE_ALWAYS)) {
43968358Snyan		return;
44043561Skato	}
44143561Skato#endif //WD_SUPPORT
44243561Skato
44343561Skato	targ = mpssas_find_target_by_handle(sassc, 0, handle);
44443561Skato	if (targ == NULL) {
44543561Skato		/* FIXME: what is the action? */
44643561Skato		/* We don't know about this device? */
44743561Skato		mps_dprint(sc, MPS_ERROR,
44843561Skato		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
44943561Skato		return;
45043561Skato	}
45143561Skato
45243561Skato	targ->flags |= MPSSAS_TARGET_INREMOVAL;
45343561Skato
45459777Snyan	cm = mpssas_alloc_tm(sc);
45559777Snyan	if (cm == NULL) {
45659777Snyan		mps_dprint(sc, MPS_ERROR,
45759777Snyan		    "%s: command alloc failure\n", __func__);
45859777Snyan		return;
459108650Snyan	}
46059777Snyan
46143561Skato	mpssas_rescan_target(sc, targ);
46259777Snyan
46343561Skato	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
46443561Skato	req->DevHandle = targ->handle;
46543561Skato	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
46643561Skato	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
46743561Skato
46843561Skato	/* SAS Hard Link Reset / SATA Link Reset */
46943561Skato	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
47059777Snyan
47159777Snyan	cm->cm_targ = targ;
47259777Snyan	cm->cm_data = NULL;
47359777Snyan	cm->cm_desc.HighPriority.RequestFlags =
47459777Snyan	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
47559777Snyan	cm->cm_complete = mpssas_remove_volume;
47659777Snyan	cm->cm_complete_data = (void *)(uintptr_t)handle;
47759777Snyan
47859777Snyan	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
47959777Snyan	    __func__, targ->tid);
48059777Snyan	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
48143561Skato
48259777Snyan	mps_map_command(sc, cm);
48359777Snyan}
48459777Snyan
48559777Snyan/*
48659777Snyan * The MPT2 firmware performs debounce on the link to avoid transient link
48759777Snyan * errors and false removals.  When it does decide that link has been lost
48859777Snyan * and a device need to go away, it expects that the host will perform a
48959777Snyan * target reset and then an op remove.  The reset has the side-effect of
49059777Snyan * aborting any outstanding requests for the device, which is required for
49159777Snyan * the op-remove to succeed.  It's not clear if the host should check for
49259777Snyan * the device coming back alive after the reset.
49343561Skato */
49443561Skatovoid
49543561Skatompssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
49643561Skato{
49743561Skato	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
49859777Snyan	struct mps_softc *sc;
49959777Snyan	struct mps_command *cm;
50043561Skato	struct mpssas_target *targ = NULL;
50159777Snyan
50259777Snyan	MPS_FUNCTRACE(sassc->sc);
50359777Snyan
50459777Snyan	sc = sassc->sc;
50543561Skato
50643561Skato	targ = mpssas_find_target_by_handle(sassc, 0, handle);
50743561Skato	if (targ == NULL) {
50843561Skato		/* FIXME: what is the action? */
50943561Skato		/* We don't know about this device? */
51043561Skato		mps_dprint(sc, MPS_ERROR,
51143561Skato		    "%s : invalid handle 0x%x \n", __func__, handle);
51243561Skato		return;
51343561Skato	}
51443561Skato
51543561Skato	targ->flags |= MPSSAS_TARGET_INREMOVAL;
51643561Skato
51743561Skato	cm = mpssas_alloc_tm(sc);
51843561Skato	if (cm == NULL) {
51943561Skato		mps_dprint(sc, MPS_ERROR,
52043561Skato		    "%s: command alloc failure\n", __func__);
52143561Skato		return;
52243561Skato	}
52343561Skato
52453207Snyan	mpssas_rescan_target(sc, targ);
52543561Skato
52643561Skato	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
52743561Skato	memset(req, 0, sizeof(*req));
52843561Skato	req->DevHandle = htole16(targ->handle);
52943561Skato	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
53043561Skato	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
53143561Skato
53243561Skato	/* SAS Hard Link Reset / SATA Link Reset */
53343561Skato	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
53443561Skato
53543561Skato	cm->cm_targ = targ;
53643561Skato	cm->cm_data = NULL;
53743561Skato	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
53843561Skato	cm->cm_complete = mpssas_remove_device;
53943561Skato	cm->cm_complete_data = (void *)(uintptr_t)handle;
54043561Skato
54143561Skato	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
54243561Skato	    __func__, targ->tid);
54343561Skato	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
54443561Skato
54543561Skato	mps_map_command(sc, cm);
54643561Skato}
54743561Skato
54868358Snyanstatic void
54968358Snyanmpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
55043561Skato{
55168358Snyan	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
55243561Skato	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
55368358Snyan	struct mpssas_target *targ;
55443561Skato	struct mps_command *next_cm;
555146010Snyan	uint16_t handle;
556146010Snyan
557146010Snyan	MPS_FUNCTRACE(sc);
558146010Snyan
55943561Skato	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
56043561Skato	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
56143561Skato	targ = tm->cm_targ;
56243561Skato
56343561Skato	/*
56443561Skato	 * Currently there should be no way we can hit this case.  It only
56543561Skato	 * happens when we have a failure to allocate chain frames, and
56643561Skato	 * task management commands don't have S/G lists.
56743561Skato	 */
56843561Skato	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
56943561Skato		mps_dprint(sc, MPS_ERROR,
57043561Skato		    "%s: cm_flags = %#x for remove of handle %#04x! "
57143561Skato		    "This should not happen!\n", __func__, tm->cm_flags,
57243561Skato		    handle);
57343561Skato	}
57443561Skato
57559777Snyan	if (reply == NULL) {
57659777Snyan		/* XXX retry the remove after the diag reset completes? */
57759777Snyan		mps_dprint(sc, MPS_FAULT,
57859777Snyan		    "%s NULL reply resetting device 0x%04x\n", __func__,
57943561Skato		    handle);
58059777Snyan		mpssas_free_tm(sc, tm);
58159777Snyan		return;
58259777Snyan	}
58359777Snyan
58459777Snyan	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
58559777Snyan	    MPI2_IOCSTATUS_SUCCESS) {
58659777Snyan		mps_dprint(sc, MPS_ERROR,
58759777Snyan		   "IOCStatus = 0x%x while resetting device 0x%x\n",
58843561Skato		   le16toh(reply->IOCStatus), handle);
58959777Snyan	}
59059777Snyan
59159777Snyan	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
59243561Skato	    le32toh(reply->TerminationCount));
59359777Snyan	mps_free_reply(sc, tm->cm_reply_data);
59443561Skato	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
595108650Snyan
59659777Snyan	/* Reuse the existing command */
59759777Snyan	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
59853207Snyan	memset(req, 0, sizeof(*req));
59959777Snyan	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
60059777Snyan	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
60143561Skato	req->DevHandle = htole16(handle);
60259777Snyan	tm->cm_data = NULL;
60359777Snyan	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
60459777Snyan	tm->cm_complete = mpssas_remove_complete;
60559777Snyan	tm->cm_complete_data = (void *)(uintptr_t)handle;
60659777Snyan
60759777Snyan	mps_map_command(sc, tm);
60859777Snyan
60959777Snyan	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
61059777Snyan		   targ->tid, handle);
61159777Snyan	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
61259777Snyan		union ccb *ccb;
61359777Snyan
61459777Snyan		mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
61559777Snyan		ccb = tm->cm_complete_data;
61659777Snyan		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
61759777Snyan		mpssas_scsiio_complete(sc, tm);
61859777Snyan	}
61959777Snyan}
62059777Snyan
62159777Snyanstatic void
62259777Snyanmpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
62359777Snyan{
62459777Snyan	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
62559777Snyan	uint16_t handle;
62659777Snyan	struct mpssas_target *targ;
62759777Snyan	struct mpssas_lun *lun;
62859777Snyan
62959777Snyan	MPS_FUNCTRACE(sc);
63059777Snyan
63159777Snyan	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
63243561Skato	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
63359777Snyan
63443561Skato	/*
63587734Snyan	 * Currently there should be no way we can hit this case.  It only
63643561Skato	 * happens when we have a failure to allocate chain frames, and
63743561Skato	 * task management commands don't have S/G lists.
63843561Skato	 */
63943561Skato	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
64043561Skato		mps_dprint(sc, MPS_XINFO,
64143561Skato			   "%s: cm_flags = %#x for remove of handle %#04x! "
64243561Skato			   "This should not happen!\n", __func__, tm->cm_flags,
64343561Skato			   handle);
64443561Skato		mpssas_free_tm(sc, tm);
64543561Skato		return;
64643561Skato	}
64743561Skato
64843561Skato	if (reply == NULL) {
64943561Skato		/* most likely a chip reset */
65043561Skato		mps_dprint(sc, MPS_FAULT,
65143561Skato		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
65243561Skato		mpssas_free_tm(sc, tm);
65343561Skato		return;
65443561Skato	}
65543561Skato
65643561Skato	mps_dprint(sc, MPS_XINFO,
65743561Skato	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
65868358Snyan	    handle, le16toh(reply->IOCStatus));
65943561Skato
66043561Skato	/*
66158165Snyan	 * Don't clear target if remove fails because things will get confusing.
66258165Snyan	 * Leave the devname and sasaddr intact so that we know to avoid reusing
66343561Skato	 * this target id if possible, and so we can assign the same target id
66443561Skato	 * to this device if it comes back in the future.
66558165Snyan	 */
66643561Skato	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
66743561Skato	    MPI2_IOCSTATUS_SUCCESS) {
66843561Skato		targ = tm->cm_targ;
66968358Snyan		targ->handle = 0x0;
67043561Skato		targ->encl_handle = 0x0;
67143561Skato		targ->encl_slot = 0x0;
67243561Skato		targ->exp_dev_handle = 0x0;
67343561Skato		targ->phy_num = 0x0;
67443561Skato		targ->linkrate = 0x0;
67543561Skato		targ->devinfo = 0x0;
67643561Skato		targ->flags = 0x0;
67743561Skato
67843561Skato		while(!SLIST_EMPTY(&targ->luns)) {
67943561Skato			lun = SLIST_FIRST(&targ->luns);
68043561Skato			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
68143561Skato			free(lun, M_MPT2);
68243561Skato		}
68343561Skato	}
68443561Skato
68543561Skato
68643561Skato	mpssas_free_tm(sc, tm);
687172925Snyan}
688172925Snyan
689172925Snyanstatic int
690172925Snyanmpssas_register_events(struct mps_softc *sc)
691172925Snyan{
692172925Snyan	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
693172925Snyan
694172925Snyan	bzero(events, 16);
695172925Snyan	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
69643561Skato	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
697172925Snyan	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
698172925Snyan	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
699172925Snyan	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
700172925Snyan	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
701172925Snyan	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
702172925Snyan	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
703172925Snyan	setbit(events, MPI2_EVENT_IR_VOLUME);
70443561Skato	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
705172925Snyan	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
706172925Snyan	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
707172925Snyan
70887734Snyan	mps_register_events(sc, events, mpssas_evt_handler, NULL,
709172925Snyan	    &sc->sassc->mpssas_eh);
710172925Snyan
711172925Snyan	return (0);
712172925Snyan}
71387734Snyan
71487734Snyanint
715172925Snyanmps_attach_sas(struct mps_softc *sc)
716172925Snyan{
71787734Snyan	struct mpssas_softc *sassc;
71887734Snyan	cam_status status;
719172925Snyan	int unit, error = 0;
720172925Snyan
721172925Snyan	MPS_FUNCTRACE(sc);
722172925Snyan
723172925Snyan	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
72487734Snyan	if(!sassc) {
72587734Snyan		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
72687734Snyan		__func__, __LINE__);
72787734Snyan		return (ENOMEM);
72843561Skato	}
72943561Skato
73043561Skato	/*
73143561Skato	 * XXX MaxTargets could change during a reinit.  Since we don't
73243561Skato	 * resize the targets[] array during such an event, cache the value
73343561Skato	 * of MaxTargets here so that we don't get into trouble later.  This
73443561Skato	 * should move into the reinit logic.
73543561Skato	 */
73668358Snyan	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
73743561Skato	sassc->targets = malloc(sizeof(struct mpssas_target) *
73843561Skato	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
73968358Snyan	if(!sassc->targets) {
74068358Snyan		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
74168358Snyan		__func__, __LINE__);
74268358Snyan		free(sassc, M_MPT2);
74343561Skato		return (ENOMEM);
74443561Skato	}
74543561Skato	sc->sassc = sassc;
74643561Skato	sassc->sc = sc;
74743561Skato
748153598Snyan	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
749153598Snyan		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
75043561Skato		error = ENOMEM;
75143561Skato		goto out;
752153598Snyan	}
753153598Snyan
75443561Skato	unit = device_get_unit(sc->mps_dev);
75543561Skato	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
75643561Skato	    unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
75743561Skato	if (sassc->sim == NULL) {
75868358Snyan		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
759153598Snyan		error = EINVAL;
760146010Snyan		goto out;
76143561Skato	}
76243561Skato
76343561Skato	TAILQ_INIT(&sassc->ev_queue);
76443561Skato
76543561Skato	/* Initialize taskqueue for Event Handling */
76643561Skato	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
76768358Snyan	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
76843561Skato	    taskqueue_thread_enqueue, &sassc->ev_tq);
76943561Skato	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
77043561Skato	    device_get_nameunit(sc->mps_dev));
77143561Skato
77243561Skato	mps_lock(sc);
77343561Skato
77443561Skato	/*
77543561Skato	 * XXX There should be a bus for every port on the adapter, but since
77643561Skato	 * we're just going to fake the topology for now, we'll pretend that
77743561Skato	 * everything is just a target on a single bus.
77843561Skato	 */
77943561Skato	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
78043561Skato		mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
78143561Skato		    error);
78243561Skato		mps_unlock(sc);
78343561Skato		goto out;
78443561Skato	}
78543561Skato
78643561Skato	/*
78743561Skato	 * Assume that discovery events will start right away.
78843561Skato	 *
78943561Skato	 * Hold off boot until discovery is complete.
79043561Skato	 */
79143561Skato	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
79243561Skato	sc->sassc->startup_refcount = 0;
79343561Skato	mpssas_startup_increment(sassc);
79451586Skato
79551586Skato	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
79651586Skato
79743561Skato	/*
79843561Skato	 * Register for async events so we can determine the EEDP
79943561Skato	 * capabilities of devices.
80051586Skato	 */
80143561Skato	status = xpt_create_path(&sassc->path, /*periph*/NULL,
80243561Skato	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
80343561Skato	    CAM_LUN_WILDCARD);
80443561Skato	if (status != CAM_REQ_CMP) {
80543561Skato		mps_printf(sc, "Error %#x creating sim path\n", status);
80643561Skato		sassc->path = NULL;
80743561Skato	} else {
80843561Skato		int event;
80943561Skato
810108791Snyan#if (__FreeBSD_version >= 1000006) || \
811108791Snyan    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
812108791Snyan		event = AC_ADVINFO_CHANGED;
813108791Snyan#else
814108791Snyan		event = AC_FOUND_DEVICE;
815108791Snyan#endif
816108791Snyan		status = xpt_register_async(event, mpssas_async, sc,
81743561Skato					    sassc->path);
81843561Skato		if (status != CAM_REQ_CMP) {
81943561Skato			mps_dprint(sc, MPS_ERROR,
82043561Skato			    "Error %#x registering async handler for "
82143561Skato			    "AC_ADVINFO_CHANGED events\n", status);
82243561Skato			xpt_free_path(sassc->path);
82343561Skato			sassc->path = NULL;
82443561Skato		}
82543561Skato	}
82643561Skato	if (status != CAM_REQ_CMP) {
82743561Skato		/*
82843561Skato		 * EEDP use is the exception, not the rule.
82943561Skato		 * Warn the user, but do not fail to attach.
83043561Skato		 */
83143561Skato		mps_printf(sc, "EEDP capabilities disabled.\n");
83243561Skato	}
83343561Skato
83443561Skato	mps_unlock(sc);
83543561Skato
83643561Skato	mpssas_register_events(sc);
83743561Skatoout:
83843561Skato	if (error)
83943561Skato		mps_detach_sas(sc);
84043561Skato	return (error);
84143561Skato}
84243561Skato
/*
 * Tear down the SAS/CAM attachment created by mps_attach_sas(): deregister
 * event handlers, drain the event taskqueue, detach from CAM, and free all
 * per-target/per-LUN state.  Safe to call on a partially-attached softc
 * (it is the error-unwind path for mps_attach_sas()).  Always returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	/* Nothing attached (or attach failed before sc->sassc was set). */
	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		/* Passing event code 0 removes the async registration. */
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Release the simq freeze taken while discovery was in progress. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free every LUN record hanging off each target slot. */
	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
90287734Snyan
90387734Snyanvoid
90487734Snyanmpssas_discovery_end(struct mpssas_softc *sassc)
90587734Snyan{
90687734Snyan	struct mps_softc *sc = sassc->sc;
90787734Snyan
90887734Snyan	MPS_FUNCTRACE(sc);
90987734Snyan
91087734Snyan	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
91187734Snyan		callout_stop(&sassc->discovery_callout);
91287734Snyan
91387734Snyan	/*
91487734Snyan	 * After discovery has completed, check the mapping table for any
91587734Snyan	 * missing devices and update their missing counts. Only do this once
91687734Snyan	 * whenever the driver is initialized so that missing counts aren't
91787734Snyan	 * updated unnecessarily. Note that just because discovery has
91887734Snyan	 * completed doesn't mean that events have been processed yet. The
91987734Snyan	 * check_devices function is a callout timer that checks if ALL devices
92087734Snyan	 * are missing. If so, it will wait a little longer for events to
92187734Snyan	 * complete and keep resetting itself until some device in the mapping
92287734Snyan	 * table is not missing, meaning that event processing has started.
92387734Snyan	 */
92487734Snyan	if (sc->track_mapping_events) {
92587734Snyan		mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has "
92687734Snyan		    "completed. Check for missing devices in the mapping "
92787734Snyan		    "table.\n");
92887734Snyan		callout_reset(&sc->device_check_callout,
92987734Snyan		    MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
93087734Snyan		    sc);
93187734Snyan	}
93287734Snyan}
93387734Snyan
/*
 * CAM action entry point for the mps(4) SIM.  Dispatches on the CCB
 * function code.  Most requests are completed inline with xpt_done();
 * SCSI I/O, SMP I/O and device resets are handed off to dedicated
 * handlers, which complete the CCB themselves (note the early returns).
 * Called with the controller mutex held.
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report HBA capabilities and limits to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mps_softc *sc = sassc->sc;
		uint8_t sges_per_frame;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if __FreeBSD_version >= 1000039
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;

		/*
		 * Max IO Size is Page Size * the following:
		 * ((SGEs per frame - 1 for chain element) *
		 * Max Chain Depth) + 1 for no chain needed in last frame
		 *
		 * If user suggests a Max IO size to use, use the smaller of the
		 * user's value and the calculated value as long as the user's
		 * value is larger than 0. The user's value is in pages.
		 */
		sges_per_frame = ((sc->facts->IOCRequestFrameSize * 4) /
		    sizeof(MPI2_SGE_SIMPLE64)) - 1;
		cpi->maxio = (sges_per_frame * sc->facts->MaxChainDepth) + 1;
		cpi->maxio *= PAGE_SIZE;
		if ((sc->max_io_pages > 0) && (sc->max_io_pages * PAGE_SIZE <
		    cpi->maxio))
			cpi->maxio = sc->max_io_pages * PAGE_SIZE;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* handle == 0 means the target slot is unpopulated. */
		if (targ->handle == 0x0) {
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/*
		 * Map the cached link rate code to a bitrate; 0x08/0x09/0x0a
		 * correspond to 1.5/3/6 Gbps links (presumably MPI2
		 * negotiated-link-rate encoding — confirm against the spec).
		 */
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		/* Completed asynchronously by the resetdev handler. */
		mpssas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* Completed asynchronously by the SCSI I/O handler. */
		mpssas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
107643561Skato
107743561Skatostatic void
107843561Skatompssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
107943561Skato    target_id_t target_id, lun_id_t lun_id)
108043561Skato{
108143561Skato	path_id_t path_id = cam_sim_path(sc->sassc->sim);
108243561Skato	struct cam_path *path;
108343561Skato
108443561Skato	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
108568358Snyan	    ac_code, target_id, (uintmax_t)lun_id);
108668358Snyan
1087163897Smarcel	if (xpt_create_path(&path, NULL,
108868358Snyan		path_id, target_id, lun_id) != CAM_REQ_CMP) {
108968358Snyan		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
109068358Snyan			   "notification\n");
109143561Skato		return;
109243561Skato	}
109343561Skato
109443561Skato	xpt_async(ac_code, path, NULL);
109543561Skato	xpt_free_path(path);
109643561Skato}
109743561Skato
109843561Skatostatic void
1099172921Sjhbmpssas_complete_all_commands(struct mps_softc *sc)
1100172921Sjhb{
110143561Skato	struct mps_command *cm;
110243561Skato	int i;
110343561Skato	int completed;
1104
1105	MPS_FUNCTRACE(sc);
1106	mtx_assert(&sc->mps_mtx, MA_OWNED);
1107
1108	/* complete all commands with a NULL reply */
1109	for (i = 1; i < sc->num_reqs; i++) {
1110		cm = &sc->commands[i];
1111		cm->cm_reply = NULL;
1112		completed = 0;
1113
1114		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
1115			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
1116
1117		if (cm->cm_complete != NULL) {
1118			mpssas_log_command(cm, MPS_RECOVERY,
1119			    "completing cm %p state %x ccb %p for diag reset\n",
1120			    cm, cm->cm_state, cm->cm_ccb);
1121
1122			cm->cm_complete(sc, cm);
1123			completed = 1;
1124		}
1125
1126		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
1127			mpssas_log_command(cm, MPS_RECOVERY,
1128			    "waking up cm %p state %x ccb %p for diag reset\n",
1129			    cm, cm->cm_state, cm->cm_ccb);
1130			wakeup(cm);
1131			completed = 1;
1132		}
1133
1134		if (cm->cm_sc->io_cmds_active != 0)
1135			cm->cm_sc->io_cmds_active--;
1136
1137		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
1138			/* this should never happen, but if it does, log */
1139			mpssas_log_command(cm, MPS_RECOVERY,
1140			    "cm %p state %x flags 0x%x ccb %p during diag "
1141			    "reset\n", cm, cm->cm_state, cm->cm_flags,
1142			    cm->cm_ccb);
1143		}
1144	}
1145}
1146
/*
 * Re-initialize SAS attachment state after a controller diag reset:
 * re-enter startup/discovery mode, announce a bus reset to CAM, force
 * completion of all outstanding commands, and invalidate every cached
 * device handle so that targets are rediscovered with fresh handles.
 */
void
mpssas_handle_reinit(struct mps_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPSSAS_IN_STARTUP;
	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
	mpssas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mpssas_complete_all_commands(sc);

	mps_dprint(sc, MPS_INIT,
	    "%s startup %u after command completion\n", __func__,
	    sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
	}
}
1190
1191static void
1192mpssas_tm_timeout(void *data)
1193{
1194	struct mps_command *tm = data;
1195	struct mps_softc *sc = tm->cm_sc;
1196
1197	mtx_assert(&sc->mps_mtx, MA_OWNED);
1198
1199	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1200	    "task mgmt %p timed out\n", tm);
1201	mps_reinit(sc);
1202}
1203
1204static void
1205mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1206{
1207	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1208	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1209	unsigned int cm_count = 0;
1210	struct mps_command *cm;
1211	struct mpssas_target *targ;
1212
1213	callout_stop(&tm->cm_callout);
1214
1215	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1216	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1217	targ = tm->cm_targ;
1218
1219	/*
1220	 * Currently there should be no way we can hit this case.  It only
1221	 * happens when we have a failure to allocate chain frames, and
1222	 * task management commands don't have S/G lists.
1223	 * XXXSL So should it be an assertion?
1224	 */
1225	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1226		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! "
1227			   "This should not happen!\n", __func__, tm->cm_flags);
1228		mpssas_free_tm(sc, tm);
1229		return;
1230	}
1231
1232	if (reply == NULL) {
1233		mpssas_log_command(tm, MPS_RECOVERY,
1234		    "NULL reset reply for tm %p\n", tm);
1235		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1236			/* this completion was due to a reset, just cleanup */
1237			targ->tm = NULL;
1238			mpssas_free_tm(sc, tm);
1239		}
1240		else {
1241			/* we should have gotten a reply. */
1242			mps_reinit(sc);
1243		}
1244		return;
1245	}
1246
1247	mpssas_log_command(tm, MPS_RECOVERY,
1248	    "logical unit reset status 0x%x code 0x%x count %u\n",
1249	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1250	    le32toh(reply->TerminationCount));
1251
1252	/* See if there are any outstanding commands for this LUN.
1253	 * This could be made more efficient by using a per-LU data
1254	 * structure of some sort.
1255	 */
1256	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1257		if (cm->cm_lun == tm->cm_lun)
1258			cm_count++;
1259	}
1260
1261	if (cm_count == 0) {
1262		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1263		    "logical unit %u finished recovery after reset\n",
1264		    tm->cm_lun, tm);
1265
1266		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1267		    tm->cm_lun);
1268
1269		/* we've finished recovery for this logical unit.  check and
1270		 * see if some other logical unit has a timedout command
1271		 * that needs to be processed.
1272		 */
1273		cm = TAILQ_FIRST(&targ->timedout_commands);
1274		if (cm) {
1275			mpssas_send_abort(sc, tm, cm);
1276		}
1277		else {
1278			targ->tm = NULL;
1279			mpssas_free_tm(sc, tm);
1280		}
1281	}
1282	else {
1283		/* if we still have commands for this LUN, the reset
1284		 * effectively failed, regardless of the status reported.
1285		 * Escalate to a target reset.
1286		 */
1287		mpssas_log_command(tm, MPS_RECOVERY,
1288		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
1289		    tm, cm_count);
1290		mpssas_send_reset(sc, tm,
1291		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1292	}
1293}
1294
/*
 * Completion handler for a TARGET_RESET task management request.  If the
 * target has no commands outstanding, recovery is finished and the TM
 * frame is released; otherwise the target reset effectively failed and
 * the only remaining escalation is a full controller reinit.
 */
static void
mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed, so its mpssas_tm_timeout callout is moot. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
			   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL reset reply for tm %p\n", tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "recovery finished after target reset\n");

		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "target reset complete for tm %p, but still have %u command(s)\n",
		    tm, targ->outstanding);
		mps_reinit(sc);
	}
}
1364
1365#define MPS_RESET_TIMEOUT 30
1366
/*
 * Build and send a SCSI task management reset request of the given type
 * (LOGICAL_UNIT_RESET or TARGET_RESET) using the TM frame 'tm'.  Sets the
 * appropriate completion handler, arms the MPS_RESET_TIMEOUT callout, and
 * maps the command.  Returns 0 on success, -1 for a missing device handle
 * or unknown type, or the mps_map_command() error.
 */
int
mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *target;
	int err;

	target = tm->cm_targ;
	/* A zero handle means the target is gone; nothing to reset. */
	if (target->handle == 0) {
		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
		    __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPS_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "sending logical unit reset\n");
		tm->cm_complete = mpssas_logical_unit_reset_complete;
		mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	}
	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 * 	SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "sending target reset\n");
		tm->cm_complete = mpssas_target_reset_complete;
		mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	/* TM requests carry no data and go out on the high-priority queue. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete_data = (void *)tm;

	/* If the firmware never answers, mpssas_tm_timeout() reinits. */
	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
	    mpssas_tm_timeout, tm);

	err = mps_map_command(sc, tm);
	if (err)
		mpssas_log_command(tm, MPS_RECOVERY,
		    "error %d sending reset type %u\n",
		    err, type);

	return err;
}
1427
1428
/*
 * Completion handler for an ABORT_TASK task management request.  Decides
 * the next recovery step: done (no more timed-out commands), continue
 * (abort the next timed-out command), or escalate to a logical unit reset
 * (the aborted command itself never completed).
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed, so its mpssas_tm_timeout callout is moot. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mpssas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1510
1511#define MPS_ABORT_TIMEOUT 5
1512
1513static int
1514mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1515{
1516	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1517	struct mpssas_target *targ;
1518	int err;
1519
1520	targ = cm->cm_targ;
1521	if (targ->handle == 0) {
1522		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1523		    __func__, cm->cm_ccb->ccb_h.target_id);
1524		return -1;
1525	}
1526
1527	mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
1528	    "Aborting command %p\n", cm);
1529
1530	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1531	req->DevHandle = htole16(targ->handle);
1532	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1533	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1534
1535	/* XXX Need to handle invalid LUNs */
1536	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1537
1538	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1539
1540	tm->cm_data = NULL;
1541	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1542	tm->cm_complete = mpssas_abort_complete;
1543	tm->cm_complete_data = (void *)tm;
1544	tm->cm_targ = cm->cm_targ;
1545	tm->cm_lun = cm->cm_lun;
1546
1547	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1548	    mpssas_tm_timeout, tm);
1549
1550	targ->aborts++;
1551
1552	mps_dprint(sc, MPS_INFO, "Sending reset from %s for target ID %d\n",
1553	    __func__, targ->tid);
1554	mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1555
1556	err = mps_map_command(sc, tm);
1557	if (err)
1558		mps_dprint(sc, MPS_RECOVERY,
1559		    "error %d sending abort for cm %p SMID %u\n",
1560		    err, cm, req->TaskMID);
1561	return err;
1562}
1563
1564static void
1565mpssas_scsiio_timeout(void *data)
1566{
1567	struct mps_softc *sc;
1568	struct mps_command *cm;
1569	struct mpssas_target *targ;
1570
1571	cm = (struct mps_command *)data;
1572	sc = cm->cm_sc;
1573
1574	MPS_FUNCTRACE(sc);
1575	mtx_assert(&sc->mps_mtx, MA_OWNED);
1576
1577	mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc);
1578
1579	/*
1580	 * Run the interrupt handler to make sure it's not pending.  This
1581	 * isn't perfect because the command could have already completed
1582	 * and been re-used, though this is unlikely.
1583	 */
1584	mps_intr_locked(sc);
1585	if (cm->cm_state == MPS_CM_STATE_FREE) {
1586		mpssas_log_command(cm, MPS_XINFO,
1587		    "SCSI command %p almost timed out\n", cm);
1588		return;
1589	}
1590
1591	if (cm->cm_ccb == NULL) {
1592		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1593		return;
1594	}
1595
1596	targ = cm->cm_targ;
1597	targ->timeouts++;
1598
1599	mpssas_log_command(cm, MPS_ERROR, "command timeout %d cm %p target "
1600	    "%u, handle(0x%04x)\n", cm->cm_ccb->ccb_h.timeout, cm,  targ->tid,
1601	    targ->handle);
1602
1603	/* XXX first, check the firmware state, to see if it's still
1604	 * operational.  if not, do a diag reset.
1605	 */
1606	mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1607	cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1608	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1609
1610	if (targ->tm != NULL) {
1611		/* target already in recovery, just queue up another
1612		 * timedout command to be processed later.
1613		 */
1614		mps_dprint(sc, MPS_RECOVERY,
1615		    "queued timedout cm %p for processing by tm %p\n",
1616		    cm, targ->tm);
1617	}
1618	else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1619		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1620		    cm, targ->tm);
1621
1622		/* start recovery by aborting the first timedout command */
1623		mpssas_send_abort(sc, targ->tm, cm);
1624	}
1625	else {
1626		/* XXX queue this target up for recovery once a TM becomes
1627		 * available.  The firmware only has a limited number of
1628		 * HighPriority credits for the high priority requests used
1629		 * for task management, and we ran out.
1630		 *
1631		 * Isilon: don't worry about this for now, since we have
1632		 * more credits than disks in an enclosure, and limit
1633		 * ourselves to one TM per target for recovery.
1634		 */
1635		mps_dprint(sc, MPS_RECOVERY,
1636		    "timedout cm %p failed to allocate a tm\n", cm);
1637	}
1638
1639}
1640
1641static void
1642mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1643{
1644	MPI2_SCSI_IO_REQUEST *req;
1645	struct ccb_scsiio *csio;
1646	struct mps_softc *sc;
1647	struct mpssas_target *targ;
1648	struct mpssas_lun *lun;
1649	struct mps_command *cm;
1650	uint8_t i, lba_byte, *ref_tag_addr;
1651	uint16_t eedp_flags;
1652	uint32_t mpi_control;
1653
1654	sc = sassc->sc;
1655	MPS_FUNCTRACE(sc);
1656	mtx_assert(&sc->mps_mtx, MA_OWNED);
1657
1658	csio = &ccb->csio;
1659	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1660	    ("Target %d out of bounds in XPT_SCSI_IO\n",
1661	     csio->ccb_h.target_id));
1662	targ = &sassc->targets[csio->ccb_h.target_id];
1663	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
1664	if (targ->handle == 0x0) {
1665		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
1666		    __func__, csio->ccb_h.target_id);
1667		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1668		xpt_done(ccb);
1669		return;
1670	}
1671	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
1672		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
1673		    "supported %u\n", __func__, csio->ccb_h.target_id);
1674		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1675		xpt_done(ccb);
1676		return;
1677	}
1678	/*
1679	 * Sometimes, it is possible to get a command that is not "In
1680	 * Progress" and was actually aborted by the upper layer.  Check for
1681	 * this here and complete the command without error.
1682	 */
1683	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1684		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
1685		    "target %u\n", __func__, csio->ccb_h.target_id);
1686		xpt_done(ccb);
1687		return;
1688	}
1689	/*
1690	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
1691	 * that the volume has timed out.  We want volumes to be enumerated
1692	 * until they are deleted/removed, not just failed.
1693	 */
1694	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1695		if (targ->devinfo == 0)
1696			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1697		else
1698			mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
1699		xpt_done(ccb);
1700		return;
1701	}
1702
1703	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1704		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
1705		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1706		xpt_done(ccb);
1707		return;
1708	}
1709
1710	/*
1711	 * If target has a reset in progress, freeze the devq and return.  The
1712	 * devq will be released when the TM reset is finished.
1713	 */
1714	if (targ->flags & MPSSAS_TARGET_INRESET) {
1715		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1716		mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
1717		    __func__, targ->tid);
1718		xpt_freeze_devq(ccb->ccb_h.path, 1);
1719		xpt_done(ccb);
1720		return;
1721	}
1722
1723	cm = mps_alloc_command(sc);
1724	if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
1725		if (cm != NULL) {
1726			mps_free_command(sc, cm);
1727		}
1728		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1729			xpt_freeze_simq(sassc->sim, 1);
1730			sassc->flags |= MPSSAS_QUEUE_FROZEN;
1731		}
1732		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1733		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1734		xpt_done(ccb);
1735		return;
1736	}
1737
1738	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1739	bzero(req, sizeof(*req));
1740	req->DevHandle = htole16(targ->handle);
1741	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1742	req->MsgFlags = 0;
1743	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1744	req->SenseBufferLength = MPS_SENSE_LEN;
1745	req->SGLFlags = 0;
1746	req->ChainOffset = 0;
1747	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
1748	req->SGLOffset1= 0;
1749	req->SGLOffset2= 0;
1750	req->SGLOffset3= 0;
1751	req->SkipCount = 0;
1752	req->DataLength = htole32(csio->dxfer_len);
1753	req->BidirectionalDataLength = 0;
1754	req->IoFlags = htole16(csio->cdb_len);
1755	req->EEDPFlags = 0;
1756
1757	/* Note: BiDirectional transfers are not supported */
1758	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1759	case CAM_DIR_IN:
1760		mpi_control = MPI2_SCSIIO_CONTROL_READ;
1761		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1762		break;
1763	case CAM_DIR_OUT:
1764		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1765		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1766		break;
1767	case CAM_DIR_NONE:
1768	default:
1769		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1770		break;
1771	}
1772
1773	if (csio->cdb_len == 32)
1774                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1775	/*
1776	 * It looks like the hardware doesn't require an explicit tag
1777	 * number for each transaction.  SAM Task Management not supported
1778	 * at the moment.
1779	 */
1780	switch (csio->tag_action) {
1781	case MSG_HEAD_OF_Q_TAG:
1782		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1783		break;
1784	case MSG_ORDERED_Q_TAG:
1785		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1786		break;
1787	case MSG_ACA_TASK:
1788		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1789		break;
1790	case CAM_TAG_ACTION_NONE:
1791	case MSG_SIMPLE_Q_TAG:
1792	default:
1793		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
1794		break;
1795	}
1796	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1797	req->Control = htole32(mpi_control);
1798	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1799		mps_free_command(sc, cm);
1800		mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
1801		xpt_done(ccb);
1802		return;
1803	}
1804
1805	if (csio->ccb_h.flags & CAM_CDB_POINTER)
1806		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1807	else
1808		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1809	req->IoFlags = htole16(csio->cdb_len);
1810
1811	/*
1812	 * Check if EEDP is supported and enabled.  If it is then check if the
1813	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
1814	 * is formatted for EEDP support.  If all of this is true, set CDB up
1815	 * for EEDP transfer.
1816	 */
1817	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1818	if (sc->eedp_enabled && eedp_flags) {
1819		SLIST_FOREACH(lun, &targ->luns, lun_link) {
1820			if (lun->lun_id == csio->ccb_h.target_lun) {
1821				break;
1822			}
1823		}
1824
1825		if ((lun != NULL) && (lun->eedp_formatted)) {
1826			req->EEDPBlockSize = htole16(lun->eedp_block_size);
1827			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1828			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1829			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1830			req->EEDPFlags = htole16(eedp_flags);
1831
1832			/*
1833			 * If CDB less than 32, fill in Primary Ref Tag with
1834			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
1835			 * already there.  Also, set protection bit.  FreeBSD
1836			 * currently does not support CDBs bigger than 16, but
1837			 * the code doesn't hurt, and will be here for the
1838			 * future.
1839			 */
1840			if (csio->cdb_len != 32) {
1841				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1842				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1843				    PrimaryReferenceTag;
1844				for (i = 0; i < 4; i++) {
1845					*ref_tag_addr =
1846					    req->CDB.CDB32[lba_byte + i];
1847					ref_tag_addr++;
1848				}
1849				req->CDB.EEDP32.PrimaryReferenceTag =
1850					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
1851				req->CDB.EEDP32.PrimaryApplicationTagMask =
1852				    0xFFFF;
1853				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1854				    0x20;
1855			} else {
1856				eedp_flags |=
1857				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1858				req->EEDPFlags = htole16(eedp_flags);
1859				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
1860				    0x1F) | 0x20;
1861			}
1862		}
1863	}
1864
1865	cm->cm_length = csio->dxfer_len;
1866	if (cm->cm_length != 0) {
1867		cm->cm_data = ccb;
1868		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
1869	} else {
1870		cm->cm_data = NULL;
1871	}
1872	cm->cm_sge = &req->SGL;
1873	cm->cm_sglsize = (32 - 24) * 4;
1874	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1875	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
1876	cm->cm_complete = mpssas_scsiio_complete;
1877	cm->cm_complete_data = ccb;
1878	cm->cm_targ = targ;
1879	cm->cm_lun = csio->ccb_h.target_lun;
1880	cm->cm_ccb = ccb;
1881
1882	/*
1883	 * If HBA is a WD and the command is not for a retry, try to build a
1884	 * direct I/O message. If failed, or the command is for a retry, send
1885	 * the I/O to the IR volume itself.
1886	 */
1887	if (sc->WD_valid_config) {
1888		if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
1889			mpssas_direct_drive_io(sassc, cm, ccb);
1890		} else {
1891			mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
1892		}
1893	}
1894
1895	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1896	    mpssas_scsiio_timeout, cm, 0);
1897
1898	targ->issued++;
1899	targ->outstanding++;
1900	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1901	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1902
1903	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
1904	    __func__, cm, ccb, targ->outstanding);
1905
1906	mps_map_command(sc, cm);
1907	return;
1908}
1909
1910static void
1911mps_response_code(struct mps_softc *sc, u8 response_code)
1912{
1913        char *desc;
1914
1915        switch (response_code) {
1916        case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1917                desc = "task management request completed";
1918                break;
1919        case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1920                desc = "invalid frame";
1921                break;
1922        case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1923                desc = "task management request not supported";
1924                break;
1925        case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1926                desc = "task management request failed";
1927                break;
1928        case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1929                desc = "task management request succeeded";
1930                break;
1931        case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1932                desc = "invalid lun";
1933                break;
1934        case 0xA:
1935                desc = "overlapped tag attempted";
1936                break;
1937        case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1938                desc = "task queued, however not sent to target";
1939                break;
1940        default:
1941                desc = "unknown";
1942                break;
1943        }
1944		mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
1945                response_code, desc);
1946}
1947/**
1948 * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1949 */
static void
mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
    Mpi2SCSIIOReply_t *mpi_reply)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	/* Flag names below are accumulated into the softc scratch buffer. */
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	char *desc_scsi_state = sc->tmp_string;
	u32 log_info = le32toh(mpi_reply->IOCLogInfo);

	/*
	 * NOTE(review): loginfo 0x31170000 is deliberately skipped here —
	 * presumably a known-noisy firmware loginfo code; confirm against
	 * the firmware loginfo documentation.
	 */
	if (log_info == 0x31170000)
		return;

	/* Translate the masked IOC status into a printable string. */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	/* Translate the SCSI status byte into a printable string. */
	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	/*
	 * Build a space-separated list of SCSI state flag names in
	 * sc->tmp_string (a single shared buffer; presumably safe because
	 * callers hold the driver mutex — see mtx_assert in the completion
	 * path).  If no flags are set, point at a literal instead.
	 */
	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
	/* We can add more detail about underflow data here
	 * TO-DO
	 * */
	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
	    desc_scsi_state, scsi_state);

	/* Dump decoded sense data when autosense succeeded and XINFO is on. */
	if (sc->mps_debug & MPS_XINFO &&
		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
		scsi_sense_print(csio);
		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
	}

	/* The first byte of the response info carries the TM response code. */
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32toh(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		mps_response_code(sc,response_bytes[0]);
	}
}
2100
/*
 * Completion handler for SCSI I/O commands.  Translates the MPI2 reply (if
 * any) into a CAM CCB status, copies back autosense data, handles WD
 * direct-drive retries and SIM-queue freeze/unfreeze bookkeeping, then
 * frees the command and completes the CCB via xpt_done().
 */
static void
mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mpssas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on;
	int dir = 0, i;
	u16 alloc_len;
	struct mpssas_target *target;
	target_id_t target_id;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	/* Cancel the timeout handler armed in mpssas_action_scsiio(). */
	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	target_id = csio->ccb_h.target_id;
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* Per-target accounting; the command leaves the active queue here. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

	/*
	 * Recovery bookkeeping: a command that timed out (and was queued on
	 * the target's recovery list by mpssas_scsiio_timeout()) must be
	 * removed from that list now that it has completed.
	 */
	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
		mpssas_log_command(cm, MPS_RECOVERY,
		    "reset completed cm %p ccb %p\n",
		    cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);

		/*
		 * Currently the only error included in the mask is
		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
			mps_dprint(sc, MPS_XINFO, "Error sending command, "
				   "freezing SIM queue\n");
		}
	}

	/*
	 * If this is a Start Stop Unit command and it was issued by the driver
	 * during shutdown, decrement the refcount to account for all of the
	 * commands that were sent.  All SSU commands should be completed before
	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
	 * is TRUE.
	 */
	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
		mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
		sc->SSU_refcount--;
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
				mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
			else {
				/* No reply means success by convention. */
				mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
				ccb->csio.scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
				mps_dprint(sc, MPS_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
		 */
		if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mps_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mpssas_log_command(cm, MPS_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/*
	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
	 * Volume if an error occurred (normal I/O retry).  Use the original
	 * CCB, but set a flag that this will be a retry so that it's sent to
	 * the original volume.  Free the command but reuse the CCB.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
		mps_free_command(sc, cm);
		ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
		mpssas_action_scsiio(sassc, ccb);
		return;
	} else
		ccb->ccb_h.sim_priv.entries[0].field = 0;

	/* Translate the masked IOC status of the reply into a CCB status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) &
		    MPI2_SCSI_RI_MASK_REASONCODE) ==
		    MPS_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		/* Copy autosense data from the command's sense buffer into
		 * the CCB, clamped to what the reply says was returned and
		 * what the CCB can hold.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
					returned_sense_len;
			else
				ccb->csio.sense_resid = 0;

			sense_len = min(returned_sense_len,
			    ccb->csio.sense_len - ccb->csio.sense_resid);
			bzero(&ccb->csio.sense_data,
			      sizeof(ccb->csio.sense_data));
			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
		    (sc->control_TLR) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			/* alloc_len bounds the valid portion of the VPD list
			 * (CDB allocation length minus residual).
			 */
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			alloc_len -= csio->resid;
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}

		/*
		 * If this is a SATA direct-access end device, mark it so that
		 * a SCSI StartStopUnit command will be sent to it when the
		 * driver is being shutdown.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
		    ((sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
			target = &sassc->targets[target_id];
			target->supports_SSU = TRUE;
			mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
			    target_id);
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mps_print_scsiio_cmd(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
			mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
		else
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * These can sometimes be transient transport-related
		 * errors, and sometimes persistent drive-related errors.
		 * We used to retry these without decrementing the retry
		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
		 * we hit a persistent drive problem that returns one of
		 * these error codes, we would retry indefinitely.  So,
		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
		 * count and avoid infinite retries.  We're taking the
		 * potential risk of flagging false failures in the event
		 * of a topology-related error (e.g. a SAS expander problem
		 * causes a command addressed to a drive to fail), but
		 * avoiding getting into an infinite retry loop.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		mpssas_log_command(cm, MPS_INFO,
		    "terminated ioc %x loginfo %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mpssas_log_command(cm, MPS_XINFO,
		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		break;
	}

	/* Log detailed decode of the failure (internally debug-gated). */
	mps_sc_failed_io_info(sc,csio,rep);

	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
		mps_dprint(sc, MPS_XINFO, "Command completed, "
		    "unfreezing SIM queue\n");
	}

	/* Freeze the devq on any error so recovery preserves ordering. */
	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2503
2504/* All Request reached here are Endian safe */
2505static void
2506mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2507    union ccb *ccb) {
2508	pMpi2SCSIIORequest_t	pIO_req;
2509	struct mps_softc	*sc = sassc->sc;
2510	uint64_t		virtLBA;
2511	uint32_t		physLBA, stripe_offset, stripe_unit;
2512	uint32_t		io_size, column;
2513	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2514
2515	/*
2516	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2517	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2518	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2519	 * bit different than the 10/16 CDBs, handle them separately.
2520	 */
2521	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2522	CDB = pIO_req->CDB.CDB32;
2523
2524	/*
2525	 * Handle 6 byte CDBs.
2526	 */
2527	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2528	    (CDB[0] == WRITE_6))) {
2529		/*
2530		 * Get the transfer size in blocks.
2531		 */
2532		io_size = (cm->cm_length >> sc->DD_block_exponent);
2533
2534		/*
2535		 * Get virtual LBA given in the CDB.
2536		 */
2537		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2538		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2539
2540		/*
2541		 * Check that LBA range for I/O does not exceed volume's
2542		 * MaxLBA.
2543		 */
2544		if ((virtLBA + (uint64_t)io_size - 1) <=
2545		    sc->DD_max_lba) {
2546			/*
2547			 * Check if the I/O crosses a stripe boundary.  If not,
2548			 * translate the virtual LBA to a physical LBA and set
2549			 * the DevHandle for the PhysDisk to be used.  If it
2550			 * does cross a boundry, do normal I/O.  To get the
2551			 * right DevHandle to use, get the map number for the
2552			 * column, then use that map number to look up the
2553			 * DevHandle of the PhysDisk.
2554			 */
2555			stripe_offset = (uint32_t)virtLBA &
2556			    (sc->DD_stripe_size - 1);
2557			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2558				physLBA = (uint32_t)virtLBA >>
2559				    sc->DD_stripe_exponent;
2560				stripe_unit = physLBA / sc->DD_num_phys_disks;
2561				column = physLBA % sc->DD_num_phys_disks;
2562				pIO_req->DevHandle =
2563				    htole16(sc->DD_column_map[column].dev_handle);
2564				/* ???? Is this endian safe*/
2565				cm->cm_desc.SCSIIO.DevHandle =
2566				    pIO_req->DevHandle;
2567
2568				physLBA = (stripe_unit <<
2569				    sc->DD_stripe_exponent) + stripe_offset;
2570				ptrLBA = &pIO_req->CDB.CDB32[1];
2571				physLBA_byte = (uint8_t)(physLBA >> 16);
2572				*ptrLBA = physLBA_byte;
2573				ptrLBA = &pIO_req->CDB.CDB32[2];
2574				physLBA_byte = (uint8_t)(physLBA >> 8);
2575				*ptrLBA = physLBA_byte;
2576				ptrLBA = &pIO_req->CDB.CDB32[3];
2577				physLBA_byte = (uint8_t)physLBA;
2578				*ptrLBA = physLBA_byte;
2579
2580				/*
2581				 * Set flag that Direct Drive I/O is
2582				 * being done.
2583				 */
2584				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2585			}
2586		}
2587		return;
2588	}
2589
2590	/*
2591	 * Handle 10, 12 or 16 byte CDBs.
2592	 */
2593	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2594	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2595	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2596	    (CDB[0] == WRITE_12))) {
2597		/*
2598		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2599		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2600		 * the else section.  10-byte and 12-byte CDB's are OK.
2601		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2602		 * ready to accept 12byte CDB for Direct IOs.
2603		 */
2604		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2605		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2606		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2607			/*
2608			 * Get the transfer size in blocks.
2609			 */
2610			io_size = (cm->cm_length >> sc->DD_block_exponent);
2611
2612			/*
2613			 * Get virtual LBA.  Point to correct lower 4 bytes of
2614			 * LBA in the CDB depending on command.
2615			 */
2616			lba_idx = ((CDB[0] == READ_12) ||
2617				(CDB[0] == WRITE_12) ||
2618				(CDB[0] == READ_10) ||
2619				(CDB[0] == WRITE_10))? 2 : 6;
2620			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2621			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2622			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2623			    (uint64_t)CDB[lba_idx + 3];
2624
2625			/*
2626			 * Check that LBA range for I/O does not exceed volume's
2627			 * MaxLBA.
2628			 */
2629			if ((virtLBA + (uint64_t)io_size - 1) <=
2630			    sc->DD_max_lba) {
2631				/*
2632				 * Check if the I/O crosses a stripe boundary.
2633				 * If not, translate the virtual LBA to a
2634				 * physical LBA and set the DevHandle for the
2635				 * PhysDisk to be used.  If it does cross a
2636				 * boundry, do normal I/O.  To get the right
2637				 * DevHandle to use, get the map number for the
2638				 * column, then use that map number to look up
2639				 * the DevHandle of the PhysDisk.
2640				 */
2641				stripe_offset = (uint32_t)virtLBA &
2642				    (sc->DD_stripe_size - 1);
2643				if ((stripe_offset + io_size) <=
2644				    sc->DD_stripe_size) {
2645					physLBA = (uint32_t)virtLBA >>
2646					    sc->DD_stripe_exponent;
2647					stripe_unit = physLBA /
2648					    sc->DD_num_phys_disks;
2649					column = physLBA %
2650					    sc->DD_num_phys_disks;
2651					pIO_req->DevHandle =
2652					    htole16(sc->DD_column_map[column].
2653					    dev_handle);
2654					cm->cm_desc.SCSIIO.DevHandle =
2655					    pIO_req->DevHandle;
2656
2657					physLBA = (stripe_unit <<
2658					    sc->DD_stripe_exponent) +
2659					    stripe_offset;
2660					ptrLBA =
2661					    &pIO_req->CDB.CDB32[lba_idx];
2662					physLBA_byte = (uint8_t)(physLBA >> 24);
2663					*ptrLBA = physLBA_byte;
2664					ptrLBA =
2665					    &pIO_req->CDB.CDB32[lba_idx + 1];
2666					physLBA_byte = (uint8_t)(physLBA >> 16);
2667					*ptrLBA = physLBA_byte;
2668					ptrLBA =
2669					    &pIO_req->CDB.CDB32[lba_idx + 2];
2670					physLBA_byte = (uint8_t)(physLBA >> 8);
2671					*ptrLBA = physLBA_byte;
2672					ptrLBA =
2673					    &pIO_req->CDB.CDB32[lba_idx + 3];
2674					physLBA_byte = (uint8_t)physLBA;
2675					*ptrLBA = physLBA_byte;
2676
2677					/*
2678					 * Set flag that Direct Drive I/O is
2679					 * being done.
2680					 */
2681					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2682				}
2683			}
2684		} else {
2685			/*
2686			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2687			 * 0.  Get the transfer size in blocks.
2688			 */
2689			io_size = (cm->cm_length >> sc->DD_block_exponent);
2690
2691			/*
2692			 * Get virtual LBA.
2693			 */
2694			virtLBA = ((uint64_t)CDB[2] << 54) |
2695			    ((uint64_t)CDB[3] << 48) |
2696			    ((uint64_t)CDB[4] << 40) |
2697			    ((uint64_t)CDB[5] << 32) |
2698			    ((uint64_t)CDB[6] << 24) |
2699			    ((uint64_t)CDB[7] << 16) |
2700			    ((uint64_t)CDB[8] << 8) |
2701			    (uint64_t)CDB[9];
2702
2703			/*
2704			 * Check that LBA range for I/O does not exceed volume's
2705			 * MaxLBA.
2706			 */
2707			if ((virtLBA + (uint64_t)io_size - 1) <=
2708			    sc->DD_max_lba) {
2709				/*
2710				 * Check if the I/O crosses a stripe boundary.
2711				 * If not, translate the virtual LBA to a
2712				 * physical LBA and set the DevHandle for the
2713				 * PhysDisk to be used.  If it does cross a
2714				 * boundry, do normal I/O.  To get the right
2715				 * DevHandle to use, get the map number for the
2716				 * column, then use that map number to look up
2717				 * the DevHandle of the PhysDisk.
2718				 */
2719				stripe_offset = (uint32_t)virtLBA &
2720				    (sc->DD_stripe_size - 1);
2721				if ((stripe_offset + io_size) <=
2722				    sc->DD_stripe_size) {
2723					physLBA = (uint32_t)(virtLBA >>
2724					    sc->DD_stripe_exponent);
2725					stripe_unit = physLBA /
2726					    sc->DD_num_phys_disks;
2727					column = physLBA %
2728					    sc->DD_num_phys_disks;
2729					pIO_req->DevHandle =
2730					    htole16(sc->DD_column_map[column].
2731					    dev_handle);
2732					cm->cm_desc.SCSIIO.DevHandle =
2733					    pIO_req->DevHandle;
2734
2735					physLBA = (stripe_unit <<
2736					    sc->DD_stripe_exponent) +
2737					    stripe_offset;
2738
2739					/*
2740					 * Set upper 4 bytes of LBA to 0.  We
2741					 * assume that the phys disks are less
2742					 * than 2 TB's in size.  Then, set the
2743					 * lower 4 bytes.
2744					 */
2745					pIO_req->CDB.CDB32[2] = 0;
2746					pIO_req->CDB.CDB32[3] = 0;
2747					pIO_req->CDB.CDB32[4] = 0;
2748					pIO_req->CDB.CDB32[5] = 0;
2749					ptrLBA = &pIO_req->CDB.CDB32[6];
2750					physLBA_byte = (uint8_t)(physLBA >> 24);
2751					*ptrLBA = physLBA_byte;
2752					ptrLBA = &pIO_req->CDB.CDB32[7];
2753					physLBA_byte = (uint8_t)(physLBA >> 16);
2754					*ptrLBA = physLBA_byte;
2755					ptrLBA = &pIO_req->CDB.CDB32[8];
2756					physLBA_byte = (uint8_t)(physLBA >> 8);
2757					*ptrLBA = physLBA_byte;
2758					ptrLBA = &pIO_req->CDB.CDB32[9];
2759					physLBA_byte = (uint8_t)physLBA;
2760					*ptrLBA = physLBA_byte;
2761
2762					/*
2763					 * Set flag that Direct Drive I/O is
2764					 * being done.
2765					 */
2766					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2767				}
2768			}
2769		}
2770	}
2771}
2772
2773#if __FreeBSD_version >= 900026
2774static void
2775mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2776{
2777	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2778	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2779	uint64_t sasaddr;
2780	union ccb *ccb;
2781
2782	ccb = cm->cm_complete_data;
2783
2784	/*
2785	 * Currently there should be no way we can hit this case.  It only
2786	 * happens when we have a failure to allocate chain frames, and SMP
2787	 * commands require two S/G elements only.  That should be handled
2788	 * in the standard request size.
2789	 */
2790	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2791		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2792			   __func__, cm->cm_flags);
2793		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2794		goto bailout;
2795        }
2796
2797	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2798	if (rpl == NULL) {
2799		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2800		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2801		goto bailout;
2802	}
2803
2804	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2805	sasaddr = le32toh(req->SASAddress.Low);
2806	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2807
2808	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2809	    MPI2_IOCSTATUS_SUCCESS ||
2810	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2811		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2812		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2813		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2814		goto bailout;
2815	}
2816
2817	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2818		   "%#jx completed successfully\n", __func__,
2819		   (uintmax_t)sasaddr);
2820
2821	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2822		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2823	else
2824		mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2825
2826bailout:
2827	/*
2828	 * We sync in both directions because we had DMAs in the S/G list
2829	 * in both directions.
2830	 */
2831	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2832			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2833	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2834	mps_free_command(sc, cm);
2835	xpt_done(ccb);
2836}
2837
/*
 * Build an MPI2 SMP passthrough request for an XPT_SMP_IO CCB addressed to
 * 'sasaddr' and hand it to mps_map_command() for DMA mapping and dispatch.
 * The request and response buffers are described via a two-entry uio/iovec
 * so a single mapping call covers both directions; completion is handled by
 * mpssas_smpio_complete().  Any setup failure completes the CCB immediately
 * with an error status.
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	/* Virtual addresses of the SMP request and response frames. */
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	int error;

	sc = sassc->sc;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		mps_dprint(sc, MPS_ERROR,
			   "%s: physical addresses not supported\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: multiple request or response "
				   "buffer segments not supported for SMP\n",
				   __func__);
			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cannot allocate command\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
	    "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the request frame, iovec 1 the response buffer. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: error %d returned from mps_map_command()\n",
			   __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mps_free_command(sc, cm);
	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;

}
3008
/*
 * Handle an XPT_SMP_IO CCB: resolve the SAS address of the SMP target to
 * talk to — the device itself if it embeds an SMP target, otherwise its
 * parent (presumably the attached expander) — and forward the CCB to
 * mpssas_send_smpcmd().  The CCB is completed with an error status when no
 * usable address can be determined.
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		/* Old method: look the parent up in the target array. */
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/* Current method: use the parent info cached on the target. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3144#endif //__FreeBSD_version >= 900026
3145
3146static void
3147mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3148{
3149	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3150	struct mps_softc *sc;
3151	struct mps_command *tm;
3152	struct mpssas_target *targ;
3153
3154	MPS_FUNCTRACE(sassc->sc);
3155	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3156
3157	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3158	    ("Target %d out of bounds in XPT_RESET_DEV\n",
3159	     ccb->ccb_h.target_id));
3160	sc = sassc->sc;
3161	tm = mps_alloc_command(sc);
3162	if (tm == NULL) {
3163		mps_dprint(sc, MPS_ERROR,
3164		    "command alloc failure in mpssas_action_resetdev\n");
3165		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3166		xpt_done(ccb);
3167		return;
3168	}
3169
3170	targ = &sassc->targets[ccb->ccb_h.target_id];
3171	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3172	req->DevHandle = htole16(targ->handle);
3173	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3174	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3175
3176	/* SAS Hard Link Reset / SATA Link Reset */
3177	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3178
3179	tm->cm_data = NULL;
3180	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3181	tm->cm_complete = mpssas_resetdev_complete;
3182	tm->cm_complete_data = ccb;
3183	tm->cm_targ = targ;
3184	targ->flags |= MPSSAS_TARGET_INRESET;
3185
3186	mps_map_command(sc, tm);
3187}
3188
/*
 * Completion handler for the target-reset task management request sent by
 * mpssas_action_resetdev().  Translates the firmware's TM response into a
 * CAM status on the original XPT_RESET_DEV CCB, announces the bus device
 * reset to CAM on success, and releases the TM command.
 */
static void
mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
	union ccb *ccb;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	ccb = tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		MPI2_SCSI_TASK_MANAGE_REQUEST *req;

		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;

		mps_dprint(sc, MPS_ERROR,
			   "%s: cm_flags = %#x for reset of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   req->DevHandle);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/*
	 * NOTE(review): ResponseCode looks like a single-byte field in the
	 * MPI2 reply, in which case the le32toh() calls on it below are
	 * no-ops kept for symmetry — confirm against the mpi2.h definition.
	 */
	mps_dprint(sc, MPS_XINFO,
	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));

	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		/* Tell CAM a bus device reset was delivered to this target. */
		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);
	}
	else
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);

bailout:

	mpssas_free_tm(sc, tm);
	xpt_done(ccb);
}
3236
3237static void
3238mpssas_poll(struct cam_sim *sim)
3239{
3240	struct mpssas_softc *sassc;
3241
3242	sassc = cam_sim_softc(sim);
3243
3244	if (sassc->sc->mps_debug & MPS_TRACE) {
3245		/* frequent debug messages during a panic just slow
3246		 * everything down too much.
3247		 */
3248		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3249		sassc->sc->mps_debug &= ~MPS_TRACE;
3250	}
3251
3252	mps_intr_locked(sassc->sc);
3253}
3254
/*
 * CAM asynchronous event callback (callback_arg is the softc).  On newer
 * FreeBSD versions it watches AC_ADVINFO_CHANGED for long read-capacity
 * updates and records per-LUN EEDP (protection information) state; on
 * older versions it reacts to AC_FOUND_DEVICE by probing the device via
 * mpssas_check_eedp().
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the LUN entry for this path; create one if missing. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read-capacity data via a synchronous
		 * XPT_DEV_ADVINFO request, releasing the devq if the call
		 * left it frozen.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/* Record whether the LUN is formatted with protection info. */
		if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#else
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		cgd = arg;
		/* Older FreeBSD: probe EEDP support ourselves. */
		mpssas_check_eedp(sc, path, cgd);
		break;
	}
#endif
	default:
		break;
	}
}
3359
3360#if (__FreeBSD_version < 901503) || \
3361    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3362static void
3363mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3364		  struct ccb_getdev *cgd)
3365{
3366	struct mpssas_softc *sassc = sc->sassc;
3367	struct ccb_scsiio *csio;
3368	struct scsi_read_capacity_16 *scsi_cmd;
3369	struct scsi_read_capacity_eedp *rcap_buf;
3370	path_id_t pathid;
3371	target_id_t targetid;
3372	lun_id_t lunid;
3373	union ccb *ccb;
3374	struct cam_path *local_path;
3375	struct mpssas_target *target;
3376	struct mpssas_lun *lun;
3377	uint8_t	found_lun;
3378	char path_str[64];
3379
3380	sassc = sc->sassc;
3381	pathid = cam_sim_path(sassc->sim);
3382	targetid = xpt_path_target_id(path);
3383	lunid = xpt_path_lun_id(path);
3384
3385	KASSERT(targetid < sassc->maxtargets,
3386	    ("Target %d out of bounds in mpssas_check_eedp\n",
3387	     targetid));
3388	target = &sassc->targets[targetid];
3389	if (target->handle == 0x0)
3390		return;
3391
3392	/*
3393	 * Determine if the device is EEDP capable.
3394	 *
3395	 * If this flag is set in the inquiry data,
3396	 * the device supports protection information,
3397	 * and must support the 16 byte read
3398	 * capacity command, otherwise continue without
3399	 * sending read cap 16
3400	 */
3401	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3402		return;
3403
3404	/*
3405	 * Issue a READ CAPACITY 16 command.  This info
3406	 * is used to determine if the LUN is formatted
3407	 * for EEDP support.
3408	 */
3409	ccb = xpt_alloc_ccb_nowait();
3410	if (ccb == NULL) {
3411		mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3412		    "for EEDP support.\n");
3413		return;
3414	}
3415
3416	if (xpt_create_path(&local_path, xpt_periph,
3417	    pathid, targetid, lunid) != CAM_REQ_CMP) {
3418		mps_dprint(sc, MPS_ERROR, "Unable to create "
3419		    "path for EEDP support\n");
3420		xpt_free_ccb(ccb);
3421		return;
3422	}
3423
3424	/*
3425	 * If LUN is already in list, don't create a new
3426	 * one.
3427	 */
3428	found_lun = FALSE;
3429	SLIST_FOREACH(lun, &target->luns, lun_link) {
3430		if (lun->lun_id == lunid) {
3431			found_lun = TRUE;
3432			break;
3433		}
3434	}
3435	if (!found_lun) {
3436		lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3437		    M_NOWAIT | M_ZERO);
3438		if (lun == NULL) {
3439			mps_dprint(sc, MPS_ERROR,
3440			    "Unable to alloc LUN for EEDP support.\n");
3441			xpt_free_path(local_path);
3442			xpt_free_ccb(ccb);
3443			return;
3444		}
3445		lun->lun_id = lunid;
3446		SLIST_INSERT_HEAD(&target->luns, lun,
3447		    lun_link);
3448	}
3449
3450	xpt_path_string(local_path, path_str, sizeof(path_str));
3451
3452	mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3453	    path_str, target->handle);
3454
3455	/*
3456	 * Issue a READ CAPACITY 16 command for the LUN.
3457	 * The mpssas_read_cap_done function will load
3458	 * the read cap info into the LUN struct.
3459	 */
3460	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3461	    M_MPT2, M_NOWAIT | M_ZERO);
3462	if (rcap_buf == NULL) {
3463		mps_dprint(sc, MPS_FAULT,
3464		    "Unable to alloc read capacity buffer for EEDP support.\n");
3465		xpt_free_path(ccb->ccb_h.path);
3466		xpt_free_ccb(ccb);
3467		return;
3468	}
3469	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3470	csio = &ccb->csio;
3471	csio->ccb_h.func_code = XPT_SCSI_IO;
3472	csio->ccb_h.flags = CAM_DIR_IN;
3473	csio->ccb_h.retry_count = 4;
3474	csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3475	csio->ccb_h.timeout = 60000;
3476	csio->data_ptr = (uint8_t *)rcap_buf;
3477	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3478	csio->sense_len = MPS_SENSE_LEN;
3479	csio->cdb_len = sizeof(*scsi_cmd);
3480	csio->tag_action = MSG_SIMPLE_Q_TAG;
3481
3482	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3483	bzero(scsi_cmd, sizeof(*scsi_cmd));
3484	scsi_cmd->opcode = 0x9E;
3485	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3486	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3487
3488	ccb->ccb_h.ppriv_ptr1 = sassc;
3489	xpt_action(ccb);
3490}
3491
/*
 * Completion callback for the internally generated READ CAPACITY 16 issued
 * by mpssas_check_eedp().  Records the EEDP formatting state and block size
 * in the matching LUN structure, then frees the data buffer, path, and CCB
 * allocated by the submitter.
 */
static void
mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver must release the devq itself when a SCSI command was
	 * generated internally, as this one was: the CCB never goes back
	 * through a cam_periph, so nothing else will unfreeze the queue.
	 * If the driver ever issues more internal SCSI commands, their
	 * completion paths need the same treatment.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in mpssas_read_cap_done\n",
	     done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in
		 * with EEDP info.  If the READ CAP 16 command had some
		 * SCSI error (common if command is not supported), mark
		 * the lun as not supporting EEDP and set the block size
		 * to 0.
		 */
		if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP)
		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* Low bit of the protect byte indicates EEDP formatting. */
		if (rcap_buf->protect & 0x01) {
			mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
 			    "target ID %d is formatted for EEDP "
 			    "support.\n", done_ccb->ccb_h.target_lun,
 			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	// Finished with this CCB and path.
	free(rcap_buf, M_MPT2);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3562#endif /* (__FreeBSD_version < 901503) || \
3563          ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3564
3565void
3566mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
3567    struct mpssas_target *target, lun_id_t lun_id)
3568{
3569	union ccb *ccb;
3570	path_id_t path_id;
3571
3572	/*
3573	 * Set the INRESET flag for this target so that no I/O will be sent to
3574	 * the target until the reset has completed.  If an I/O request does
3575	 * happen, the devq will be frozen.  The CCB holds the path which is
3576	 * used to release the devq.  The devq is released and the CCB is freed
3577	 * when the TM completes.
3578	 */
3579	ccb = xpt_alloc_ccb_nowait();
3580	if (ccb) {
3581		path_id = cam_sim_path(sc->sassc->sim);
3582		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3583		    target->tid, lun_id) != CAM_REQ_CMP) {
3584			xpt_free_ccb(ccb);
3585		} else {
3586			tm->cm_ccb = ccb;
3587			tm->cm_targ = target;
3588			target->flags |= MPSSAS_TARGET_INRESET;
3589		}
3590	}
3591}
3592
3593int
3594mpssas_startup(struct mps_softc *sc)
3595{
3596
3597	/*
3598	 * Send the port enable message and set the wait_for_port_enable flag.
3599	 * This flag helps to keep the simq frozen until all discovery events
3600	 * are processed.
3601	 */
3602	sc->wait_for_port_enable = 1;
3603	mpssas_send_portenable(sc);
3604	return (0);
3605}
3606
3607static int
3608mpssas_send_portenable(struct mps_softc *sc)
3609{
3610	MPI2_PORT_ENABLE_REQUEST *request;
3611	struct mps_command *cm;
3612
3613	MPS_FUNCTRACE(sc);
3614
3615	if ((cm = mps_alloc_command(sc)) == NULL)
3616		return (EBUSY);
3617	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3618	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3619	request->MsgFlags = 0;
3620	request->VP_ID = 0;
3621	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3622	cm->cm_complete = mpssas_portenable_complete;
3623	cm->cm_data = NULL;
3624	cm->cm_sge = NULL;
3625
3626	mps_map_command(sc, cm);
3627	mps_dprint(sc, MPS_XINFO,
3628	    "mps_send_portenable finished cm %p req %p complete %p\n",
3629	    cm, cm->cm_req, cm->cm_complete);
3630	return (0);
3631}
3632
3633static void
3634mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3635{
3636	MPI2_PORT_ENABLE_REPLY *reply;
3637	struct mpssas_softc *sassc;
3638
3639	MPS_FUNCTRACE(sc);
3640	sassc = sc->sassc;
3641
3642	/*
3643	 * Currently there should be no way we can hit this case.  It only
3644	 * happens when we have a failure to allocate chain frames, and
3645	 * port enable commands don't have S/G lists.
3646	 */
3647	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3648		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3649			   "This should not happen!\n", __func__, cm->cm_flags);
3650	}
3651
3652	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3653	if (reply == NULL)
3654		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3655	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3656	    MPI2_IOCSTATUS_SUCCESS)
3657		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3658
3659	mps_free_command(sc, cm);
3660	if (sc->mps_ich.ich_arg != NULL) {
3661		mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n");
3662		config_intrhook_disestablish(&sc->mps_ich);
3663		sc->mps_ich.ich_arg = NULL;
3664	}
3665
3666	/*
3667	 * Get WarpDrive info after discovery is complete but before the scan
3668	 * starts.  At this point, all devices are ready to be exposed to the
3669	 * OS.  If devices should be hidden instead, take them out of the
3670	 * 'targets' array before the scan.  The devinfo for a disk will have
3671	 * some info and a volume's will be 0.  Use that to remove disks.
3672	 */
3673	mps_wd_config_pages(sc);
3674
3675	/*
3676	 * Done waiting for port enable to complete.  Decrement the refcount.
3677	 * If refcount is 0, discovery is complete and a rescan of the bus can
3678	 * take place.  Since the simq was explicitly frozen before port
3679	 * enable, it must be explicitly released here to keep the
3680	 * freeze/release count in sync.
3681	 */
3682	sc->wait_for_port_enable = 0;
3683	sc->port_enable_complete = 1;
3684	wakeup(&sc->port_enable_complete);
3685	mpssas_startup_decrement(sassc);
3686}
3687
3688int
3689mpssas_check_id(struct mpssas_softc *sassc, int id)
3690{
3691	struct mps_softc *sc = sassc->sc;
3692	char *ids;
3693	char *name;
3694
3695	ids = &sc->exclude_ids[0];
3696	while((name = strsep(&ids, ",")) != NULL) {
3697		if (name[0] == '\0')
3698			continue;
3699		if (strtol(name, NULL, 0) == (long)id)
3700			return (1);
3701	}
3702
3703	return (0);
3704}
3705
3706void
3707mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3708{
3709	struct mpssas_softc *sassc;
3710	struct mpssas_lun *lun, *lun_tmp;
3711	struct mpssas_target *targ;
3712	int i;
3713
3714	sassc = sc->sassc;
3715	/*
3716	 * The number of targets is based on IOC Facts, so free all of
3717	 * the allocated LUNs for each target and then the target buffer
3718	 * itself.
3719	 */
3720	for (i=0; i< maxtargets; i++) {
3721		targ = &sassc->targets[i];
3722		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3723			free(lun, M_MPT2);
3724		}
3725	}
3726	free(sassc->targets, M_MPT2);
3727
3728	sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3729	    M_MPT2, M_WAITOK|M_ZERO);
3730	if (!sassc->targets) {
3731		panic("%s failed to alloc targets with error %d\n",
3732		    __func__, ENOMEM);
3733	}
3734}
3735