/* mpr_sas.c — FreeBSD stable/10, revision 279329 */
1/*-
2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2014 LSI Corp.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28#include <sys/cdefs.h>
29__FBSDID("$FreeBSD: stable/10/sys/dev/mpr/mpr_sas.c 279329 2015-02-26 20:46:16Z ken $");
30
31/* Communications core for LSI MPT2 */
32
33/* TODO Move headers to mprvar */
34#include <sys/types.h>
35#include <sys/param.h>
36#include <sys/systm.h>
37#include <sys/kernel.h>
38#include <sys/selinfo.h>
39#include <sys/module.h>
40#include <sys/bus.h>
41#include <sys/conf.h>
42#include <sys/bio.h>
43#include <sys/malloc.h>
44#include <sys/uio.h>
45#include <sys/sysctl.h>
46#include <sys/endian.h>
47#include <sys/queue.h>
48#include <sys/kthread.h>
49#include <sys/taskqueue.h>
50#include <sys/sbuf.h>
51
52#include <machine/bus.h>
53#include <machine/resource.h>
54#include <sys/rman.h>
55
56#include <machine/stdarg.h>
57
58#include <cam/cam.h>
59#include <cam/cam_ccb.h>
60#include <cam/cam_debug.h>
61#include <cam/cam_sim.h>
62#include <cam/cam_xpt_sim.h>
63#include <cam/cam_xpt_periph.h>
64#include <cam/cam_periph.h>
65#include <cam/scsi/scsi_all.h>
66#include <cam/scsi/scsi_message.h>
67#if __FreeBSD_version >= 900026
68#include <cam/scsi/smp_all.h>
69#endif
70
71#include <dev/mpr/mpi/mpi2_type.h>
72#include <dev/mpr/mpi/mpi2.h>
73#include <dev/mpr/mpi/mpi2_ioc.h>
74#include <dev/mpr/mpi/mpi2_sas.h>
75#include <dev/mpr/mpi/mpi2_cnfg.h>
76#include <dev/mpr/mpi/mpi2_init.h>
77#include <dev/mpr/mpi/mpi2_tool.h>
78#include <dev/mpr/mpr_ioctl.h>
79#include <dev/mpr/mprvar.h>
80#include <dev/mpr/mpr_table.h>
81#include <dev/mpr/mpr_sas.h>
82
83#define MPRSAS_DISCOVERY_TIMEOUT	20
84#define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
85
86/*
87 * static array to check SCSI OpCode for EEDP protection bits
88 */
89#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
90#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
91#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
92static uint8_t op_code_prot[256] = {
93	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
94	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
95	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
96	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
97	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
98	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
102	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
104	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
109};
110
111MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
112
113static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
114static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
115static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
116static void mprsas_poll(struct cam_sim *sim);
117static void mprsas_scsiio_timeout(void *data);
118static void mprsas_abort_complete(struct mpr_softc *sc,
119    struct mpr_command *cm);
120static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
121static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
122static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
123static void mprsas_resetdev_complete(struct mpr_softc *,
124    struct mpr_command *);
125static int  mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
126    struct mpr_command *cm);
127static int  mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm,
128    uint8_t type);
129static void mprsas_async(void *callback_arg, uint32_t code,
130    struct cam_path *path, void *arg);
131static void mprsas_prepare_ssu(struct mpr_softc *sc, struct cam_path *path,
132    struct ccb_getdev *cgd);
133#if (__FreeBSD_version < 901503) || \
134    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
135static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
136    struct ccb_getdev *cgd);
137static void mprsas_read_cap_done(struct cam_periph *periph,
138    union ccb *done_ccb);
139#endif
140static int mprsas_send_portenable(struct mpr_softc *sc);
141static void mprsas_portenable_complete(struct mpr_softc *sc,
142    struct mpr_command *cm);
143
144#if __FreeBSD_version >= 900026
145static void
146mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
147static void mprsas_send_smpcmd(struct mprsas_softc *sassc,
148	       	union ccb *ccb, uint64_t sasaddr);
149static void
150mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
151#endif
152
153struct mprsas_target *
154mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
155    uint16_t handle)
156{
157	struct mprsas_target *target;
158	int i;
159
160	for (i = start; i < sassc->maxtargets; i++) {
161		target = &sassc->targets[i];
162		if (target->handle == handle)
163			return (target);
164	}
165
166	return (NULL);
167}
168
169/* we need to freeze the simq during attach and diag reset, to avoid failing
170 * commands before device handles have been found by discovery.  Since
171 * discovery involves reading config pages and possibly sending commands,
172 * discovery actions may continue even after we receive the end of discovery
173 * event, so refcount discovery actions instead of assuming we can unfreeze
174 * the simq when we get the event.
175 */
176void
177mprsas_startup_increment(struct mprsas_softc *sassc)
178{
179	MPR_FUNCTRACE(sassc->sc);
180
181	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
182		if (sassc->startup_refcount++ == 0) {
183			/* just starting, freeze the simq */
184			mpr_dprint(sassc->sc, MPR_INIT,
185			    "%s freezing simq\n", __func__);
186#if (__FreeBSD_version >= 1000039) || \
187    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
188			xpt_hold_boot();
189#endif
190			xpt_freeze_simq(sassc->sim, 1);
191		}
192		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
193		    sassc->startup_refcount);
194	}
195}
196
197void
198mprsas_release_simq_reinit(struct mprsas_softc *sassc)
199{
200	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
201		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
202		xpt_release_simq(sassc->sim, 1);
203		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
204	}
205}
206
207void
208mprsas_startup_decrement(struct mprsas_softc *sassc)
209{
210	MPR_FUNCTRACE(sassc->sc);
211
212	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
213		if (--sassc->startup_refcount == 0) {
214			/* finished all discovery-related actions, release
215			 * the simq and rescan for the latest topology.
216			 */
217			mpr_dprint(sassc->sc, MPR_INIT,
218			    "%s releasing simq\n", __func__);
219			sassc->flags &= ~MPRSAS_IN_STARTUP;
220			xpt_release_simq(sassc->sim, 1);
221#if (__FreeBSD_version >= 1000039) || \
222    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
223			xpt_release_boot();
224#else
225			mprsas_rescan_target(sassc->sc, NULL);
226#endif
227		}
228		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
229		    sassc->startup_refcount);
230	}
231}
232
233/* LSI's firmware requires us to stop sending commands when we're doing task
234 * management, so refcount the TMs and keep the simq frozen when any are in
235 * use.
236 */
237struct mpr_command *
238mprsas_alloc_tm(struct mpr_softc *sc)
239{
240	struct mpr_command *tm;
241
242	MPR_FUNCTRACE(sc);
243	tm = mpr_alloc_high_priority_command(sc);
244	if (tm != NULL) {
245		if (sc->sassc->tm_count++ == 0) {
246			mpr_dprint(sc, MPR_RECOVERY,
247			    "%s freezing simq\n", __func__);
248			xpt_freeze_simq(sc->sassc->sim, 1);
249		}
250		mpr_dprint(sc, MPR_RECOVERY, "%s tm_count %u\n", __func__,
251		    sc->sassc->tm_count);
252	}
253	return tm;
254}
255
256void
257mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
258{
259	mpr_dprint(sc, MPR_TRACE, "%s", __func__);
260	if (tm == NULL)
261		return;
262
263	/* if there are no TMs in use, we can release the simq.  We use our
264	 * own refcount so that it's easier for a diag reset to cleanup and
265	 * release the simq.
266	 */
267	if (--sc->sassc->tm_count == 0) {
268		mpr_dprint(sc, MPR_RECOVERY, "%s releasing simq\n", __func__);
269		xpt_release_simq(sc->sassc->sim, 1);
270	}
271	mpr_dprint(sc, MPR_RECOVERY, "%s tm_count %u\n", __func__,
272	    sc->sassc->tm_count);
273
274	mpr_free_high_priority_command(sc, tm);
275}
276
277void
278mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
279{
280	struct mprsas_softc *sassc = sc->sassc;
281	path_id_t pathid;
282	target_id_t targetid;
283	union ccb *ccb;
284
285	MPR_FUNCTRACE(sc);
286	pathid = cam_sim_path(sassc->sim);
287	if (targ == NULL)
288		targetid = CAM_TARGET_WILDCARD;
289	else
290		targetid = targ - sassc->targets;
291
292	/*
293	 * Allocate a CCB and schedule a rescan.
294	 */
295	ccb = xpt_alloc_ccb_nowait();
296	if (ccb == NULL) {
297		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
298		return;
299	}
300
301	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
302	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
303		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
304		xpt_free_ccb(ccb);
305		return;
306	}
307
308	if (targetid == CAM_TARGET_WILDCARD)
309		ccb->ccb_h.func_code = XPT_SCAN_BUS;
310	else
311		ccb->ccb_h.func_code = XPT_SCAN_TGT;
312
313	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
314	xpt_rescan(ccb);
315}
316
317static void
318mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
319{
320	struct sbuf sb;
321	va_list ap;
322	char str[192];
323	char path_str[64];
324
325	if (cm == NULL)
326		return;
327
328	/* No need to be in here if debugging isn't enabled */
329	if ((cm->cm_sc->mpr_debug & level) == 0)
330		return;
331
332	sbuf_new(&sb, str, sizeof(str), 0);
333
334	va_start(ap, fmt);
335
336	if (cm->cm_ccb != NULL) {
337		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
338		    sizeof(path_str));
339		sbuf_cat(&sb, path_str);
340		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
341			scsi_command_string(&cm->cm_ccb->csio, &sb);
342			sbuf_printf(&sb, "length %d ",
343			    cm->cm_ccb->csio.dxfer_len);
344		}
345	} else {
346		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
347		    cam_sim_name(cm->cm_sc->sassc->sim),
348		    cam_sim_unit(cm->cm_sc->sassc->sim),
349		    cam_sim_bus(cm->cm_sc->sassc->sim),
350		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
351		    cm->cm_lun);
352	}
353
354	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
355	sbuf_vprintf(&sb, fmt, ap);
356	sbuf_finish(&sb);
357	mpr_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));
358
359	va_end(ap);
360}
361
362static void
363mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
364{
365	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
366	struct mprsas_target *targ;
367	uint16_t handle;
368
369	MPR_FUNCTRACE(sc);
370
371	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
372	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
373	targ = tm->cm_targ;
374
375	if (reply == NULL) {
376		/* XXX retry the remove after the diag reset completes? */
377		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
378		    "0x%04x\n", __func__, handle);
379		mprsas_free_tm(sc, tm);
380		return;
381	}
382
383	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
384		mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting "
385		    "device 0x%x\n", reply->IOCStatus, handle);
386		mprsas_free_tm(sc, tm);
387		return;
388	}
389
390	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
391	    reply->TerminationCount);
392	mpr_free_reply(sc, tm->cm_reply_data);
393	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
394
395	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
396	    targ->tid, handle);
397
398	/*
399	 * Don't clear target if remove fails because things will get confusing.
400	 * Leave the devname and sasaddr intact so that we know to avoid reusing
401	 * this target id if possible, and so we can assign the same target id
402	 * to this device if it comes back in the future.
403	 */
404	if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
405		targ = tm->cm_targ;
406		targ->handle = 0x0;
407		targ->encl_handle = 0x0;
408		targ->encl_level_valid = 0x0;
409		targ->encl_level = 0x0;
410		targ->connector_name[0] = ' ';
411		targ->connector_name[1] = ' ';
412		targ->connector_name[2] = ' ';
413		targ->connector_name[3] = ' ';
414		targ->encl_slot = 0x0;
415		targ->exp_dev_handle = 0x0;
416		targ->phy_num = 0x0;
417		targ->linkrate = 0x0;
418		targ->devinfo = 0x0;
419		targ->flags = 0x0;
420		targ->scsi_req_desc_type = 0;
421	}
422
423	mprsas_free_tm(sc, tm);
424}
425
426
427/*
428 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
429 * Otherwise Volume Delete is same as Bare Drive Removal.
430 */
431void
432mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
433{
434	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
435	struct mpr_softc *sc;
436	struct mpr_command *cm;
437	struct mprsas_target *targ = NULL;
438
439	MPR_FUNCTRACE(sassc->sc);
440	sc = sassc->sc;
441
442	targ = mprsas_find_target_by_handle(sassc, 0, handle);
443	if (targ == NULL) {
444		/* FIXME: what is the action? */
445		/* We don't know about this device? */
446		mpr_dprint(sc, MPR_ERROR,
447		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
448		return;
449	}
450
451	targ->flags |= MPRSAS_TARGET_INREMOVAL;
452
453	cm = mprsas_alloc_tm(sc);
454	if (cm == NULL) {
455		mpr_dprint(sc, MPR_ERROR,
456		    "%s: command alloc failure\n", __func__);
457		return;
458	}
459
460	mprsas_rescan_target(sc, targ);
461
462	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
463	req->DevHandle = targ->handle;
464	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
465	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
466
467	/* SAS Hard Link Reset / SATA Link Reset */
468	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
469
470	cm->cm_targ = targ;
471	cm->cm_data = NULL;
472	cm->cm_desc.HighPriority.RequestFlags =
473	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
474	cm->cm_complete = mprsas_remove_volume;
475	cm->cm_complete_data = (void *)(uintptr_t)handle;
476	mpr_map_command(sc, cm);
477}
478
479/*
480 * The MPT2 firmware performs debounce on the link to avoid transient link
481 * errors and false removals.  When it does decide that link has been lost
482 * and a device needs to go away, it expects that the host will perform a
483 * target reset and then an op remove.  The reset has the side-effect of
484 * aborting any outstanding requests for the device, which is required for
485 * the op-remove to succeed.  It's not clear if the host should check for
486 * the device coming back alive after the reset.
487 */
488void
489mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
490{
491	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
492	struct mpr_softc *sc;
493	struct mpr_command *cm;
494	struct mprsas_target *targ = NULL;
495
496	MPR_FUNCTRACE(sassc->sc);
497
498	sc = sassc->sc;
499
500	targ = mprsas_find_target_by_handle(sassc, 0, handle);
501	if (targ == NULL) {
502		/* FIXME: what is the action? */
503		/* We don't know about this device? */
504		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
505		    __func__, handle);
506		return;
507	}
508
509	targ->flags |= MPRSAS_TARGET_INREMOVAL;
510
511	cm = mprsas_alloc_tm(sc);
512	if (cm == NULL) {
513		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
514		    __func__);
515		return;
516	}
517
518	mprsas_rescan_target(sc, targ);
519
520	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
521	memset(req, 0, sizeof(*req));
522	req->DevHandle = htole16(targ->handle);
523	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
524	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
525
526	/* SAS Hard Link Reset / SATA Link Reset */
527	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
528
529	cm->cm_targ = targ;
530	cm->cm_data = NULL;
531	cm->cm_desc.HighPriority.RequestFlags =
532	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
533	cm->cm_complete = mprsas_remove_device;
534	cm->cm_complete_data = (void *)(uintptr_t)handle;
535	mpr_map_command(sc, cm);
536}
537
538static void
539mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
540{
541	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
542	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
543	struct mprsas_target *targ;
544	struct mpr_command *next_cm;
545	uint16_t handle;
546
547	MPR_FUNCTRACE(sc);
548
549	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
550	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
551	targ = tm->cm_targ;
552
553	/*
554	 * Currently there should be no way we can hit this case.  It only
555	 * happens when we have a failure to allocate chain frames, and
556	 * task management commands don't have S/G lists.
557	 */
558	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
559		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
560		    "handle %#04x! This should not happen!\n", __func__,
561		    tm->cm_flags, handle);
562		mprsas_free_tm(sc, tm);
563		return;
564	}
565
566	if (reply == NULL) {
567		/* XXX retry the remove after the diag reset completes? */
568		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
569		    "0x%04x\n", __func__, handle);
570		mprsas_free_tm(sc, tm);
571		return;
572	}
573
574	if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
575		mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting "
576		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
577		mprsas_free_tm(sc, tm);
578		return;
579	}
580
581	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
582	    le32toh(reply->TerminationCount));
583	mpr_free_reply(sc, tm->cm_reply_data);
584	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
585
586	/* Reuse the existing command */
587	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
588	memset(req, 0, sizeof(*req));
589	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
590	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
591	req->DevHandle = htole16(handle);
592	tm->cm_data = NULL;
593	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
594	tm->cm_complete = mprsas_remove_complete;
595	tm->cm_complete_data = (void *)(uintptr_t)handle;
596
597	mpr_map_command(sc, tm);
598
599	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
600	    targ->tid, handle);
601	if (targ->encl_level_valid) {
602		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
603		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
604		    targ->connector_name);
605	}
606	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
607		union ccb *ccb;
608
609		mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
610		ccb = tm->cm_complete_data;
611		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
612		mprsas_scsiio_complete(sc, tm);
613	}
614}
615
616static void
617mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
618{
619	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
620	uint16_t handle;
621	struct mprsas_target *targ;
622	struct mprsas_lun *lun;
623
624	MPR_FUNCTRACE(sc);
625
626	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
627	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
628
629	/*
630	 * Currently there should be no way we can hit this case.  It only
631	 * happens when we have a failure to allocate chain frames, and
632	 * task management commands don't have S/G lists.
633	 */
634	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
635		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
636		    "handle %#04x! This should not happen!\n", __func__,
637		    tm->cm_flags, handle);
638		mprsas_free_tm(sc, tm);
639		return;
640	}
641
642	if (reply == NULL) {
643		/* most likely a chip reset */
644		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
645		    "0x%04x\n", __func__, handle);
646		mprsas_free_tm(sc, tm);
647		return;
648	}
649
650	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
651	    __func__, handle, le16toh(reply->IOCStatus));
652
653	/*
654	 * Don't clear target if remove fails because things will get confusing.
655	 * Leave the devname and sasaddr intact so that we know to avoid reusing
656	 * this target id if possible, and so we can assign the same target id
657	 * to this device if it comes back in the future.
658	 */
659	if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
660		targ = tm->cm_targ;
661		targ->handle = 0x0;
662		targ->encl_handle = 0x0;
663		targ->encl_level_valid = 0x0;
664		targ->encl_level = 0x0;
665		targ->connector_name[0] = ' ';
666		targ->connector_name[1] = ' ';
667		targ->connector_name[2] = ' ';
668		targ->connector_name[3] = ' ';
669		targ->encl_slot = 0x0;
670		targ->exp_dev_handle = 0x0;
671		targ->phy_num = 0x0;
672		targ->linkrate = 0x0;
673		targ->devinfo = 0x0;
674		targ->flags = 0x0;
675		targ->scsi_req_desc_type = 0;
676
677		while (!SLIST_EMPTY(&targ->luns)) {
678			lun = SLIST_FIRST(&targ->luns);
679			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
680			free(lun, M_MPR);
681		}
682	}
683
684	mprsas_free_tm(sc, tm);
685}
686
687static int
688mprsas_register_events(struct mpr_softc *sc)
689{
690	uint8_t events[16];
691
692	bzero(events, 16);
693	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
694	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
695	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
696	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
697	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
698	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
699	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
700	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
701	setbit(events, MPI2_EVENT_IR_VOLUME);
702	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
703	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
704	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
705
706	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
707	    &sc->sassc->mprsas_eh);
708
709	return (0);
710}
711
712int
713mpr_attach_sas(struct mpr_softc *sc)
714{
715	struct mprsas_softc *sassc;
716	cam_status status;
717	int unit, error = 0;
718
719	MPR_FUNCTRACE(sc);
720
721	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
722	if (!sassc) {
723		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
724		    __func__, __LINE__);
725		return (ENOMEM);
726	}
727
728	/*
729	 * XXX MaxTargets could change during a reinit.  since we don't
730	 * resize the targets[] array during such an event, cache the value
731	 * of MaxTargets here so that we don't get into trouble later.  This
732	 * should move into the reinit logic.
733	 */
734	sassc->maxtargets = sc->facts->MaxTargets;
735	sassc->targets = malloc(sizeof(struct mprsas_target) *
736	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
737	if (!sassc->targets) {
738		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
739		    __func__, __LINE__);
740		free(sassc, M_MPR);
741		return (ENOMEM);
742	}
743	sc->sassc = sassc;
744	sassc->sc = sc;
745
746	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
747		mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIMQ\n");
748		error = ENOMEM;
749		goto out;
750	}
751
752	unit = device_get_unit(sc->mpr_dev);
753	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
754	    unit, &sc->mpr_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
755	if (sassc->sim == NULL) {
756		mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIM\n");
757		error = EINVAL;
758		goto out;
759	}
760
761	TAILQ_INIT(&sassc->ev_queue);
762
763	/* Initialize taskqueue for Event Handling */
764	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
765	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
766	    taskqueue_thread_enqueue, &sassc->ev_tq);
767
768	/* Run the task queue with lowest priority */
769	taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
770	    device_get_nameunit(sc->mpr_dev));
771
772	mpr_lock(sc);
773
774	/*
775	 * XXX There should be a bus for every port on the adapter, but since
776	 * we're just going to fake the topology for now, we'll pretend that
777	 * everything is just a target on a single bus.
778	 */
779	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
780		mpr_dprint(sc, MPR_ERROR, "Error %d registering SCSI bus\n",
781		    error);
782		mpr_unlock(sc);
783		goto out;
784	}
785
786	/*
787	 * Assume that discovery events will start right away.  Freezing
788	 *
789	 * Hold off boot until discovery is complete.
790	 */
791	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
792	sc->sassc->startup_refcount = 0;
793	mprsas_startup_increment(sassc);
794
795	callout_init(&sassc->discovery_callout, 1 /*mprafe*/);
796
797	sassc->tm_count = 0;
798
799	/*
800	 * Register for async events so we can determine the EEDP
801	 * capabilities of devices.
802	 */
803	status = xpt_create_path(&sassc->path, /*periph*/NULL,
804	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
805	    CAM_LUN_WILDCARD);
806	if (status != CAM_REQ_CMP) {
807		mpr_printf(sc, "Error %#x creating sim path\n", status);
808		sassc->path = NULL;
809	} else {
810		int event;
811
812#if (__FreeBSD_version >= 1000006) || \
813    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
814		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
815#else
816		event = AC_FOUND_DEVICE;
817#endif
818
819		/*
820		 * Prior to the CAM locking improvements, we can't call
821		 * xpt_register_async() with a particular path specified.
822		 *
823		 * If a path isn't specified, xpt_register_async() will
824		 * generate a wildcard path and acquire the XPT lock while
825		 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
826		 * It will then drop the XPT lock once that is done.
827		 *
828		 * If a path is specified for xpt_register_async(), it will
829		 * not acquire and drop the XPT lock around the call to
830		 * xpt_action().  xpt_action() asserts that the caller
831		 * holds the SIM lock, so the SIM lock has to be held when
832		 * calling xpt_register_async() when the path is specified.
833		 *
834		 * But xpt_register_async calls xpt_for_all_devices(),
835		 * which calls xptbustraverse(), which will acquire each
836		 * SIM lock.  When it traverses our particular bus, it will
837		 * necessarily acquire the SIM lock, which will lead to a
838		 * recursive lock acquisition.
839		 *
840		 * The CAM locking changes fix this problem by acquiring
841		 * the XPT topology lock around bus traversal in
842		 * xptbustraverse(), so the caller can hold the SIM lock
843		 * and it does not cause a recursive lock acquisition.
844		 *
845		 * These __FreeBSD_version values are approximate, especially
846		 * for stable/10, which is two months later than the actual
847		 * change.
848		 */
849
850#if (__FreeBSD_version < 1000703) || \
851    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
852		mpr_unlock(sc);
853		status = xpt_register_async(event, mprsas_async, sc,
854					    NULL);
855		mpr_lock(sc);
856#else
857		status = xpt_register_async(event, mprsas_async, sc,
858					    sassc->path);
859#endif
860
861		if (status != CAM_REQ_CMP) {
862			mpr_dprint(sc, MPR_ERROR,
863			    "Error %#x registering async handler for "
864			    "AC_ADVINFO_CHANGED events\n", status);
865			xpt_free_path(sassc->path);
866			sassc->path = NULL;
867		}
868	}
869	if (status != CAM_REQ_CMP) {
870		/*
871		 * EEDP use is the exception, not the rule.
872		 * Warn the user, but do not fail to attach.
873		 */
874		mpr_printf(sc, "EEDP capabilities disabled.\n");
875	}
876
877	mpr_unlock(sc);
878
879	mprsas_register_events(sc);
880out:
881	if (error)
882		mpr_detach_sas(sc);
883	return (error);
884}
885
886int
887mpr_detach_sas(struct mpr_softc *sc)
888{
889	struct mprsas_softc *sassc;
890	struct mprsas_lun *lun, *lun_tmp;
891	struct mprsas_target *targ;
892	int i;
893
894	MPR_FUNCTRACE(sc);
895
896	if (sc->sassc == NULL)
897		return (0);
898
899	sassc = sc->sassc;
900	mpr_deregister_events(sc, sassc->mprsas_eh);
901
902	/*
903	 * Drain and free the event handling taskqueue with the lock
904	 * unheld so that any parallel processing tasks drain properly
905	 * without deadlocking.
906	 */
907	if (sassc->ev_tq != NULL)
908		taskqueue_free(sassc->ev_tq);
909
910	/* Make sure CAM doesn't wedge if we had to bail out early. */
911	mpr_lock(sc);
912
913	/* Deregister our async handler */
914	if (sassc->path != NULL) {
915		xpt_register_async(0, mprsas_async, sc, sassc->path);
916		xpt_free_path(sassc->path);
917		sassc->path = NULL;
918	}
919
920	if (sassc->flags & MPRSAS_IN_STARTUP)
921		xpt_release_simq(sassc->sim, 1);
922
923	if (sassc->sim != NULL) {
924		xpt_bus_deregister(cam_sim_path(sassc->sim));
925		cam_sim_free(sassc->sim, FALSE);
926	}
927
928	sassc->flags |= MPRSAS_SHUTDOWN;
929	mpr_unlock(sc);
930
931	if (sassc->devq != NULL)
932		cam_simq_free(sassc->devq);
933
934	for (i = 0; i < sassc->maxtargets; i++) {
935		targ = &sassc->targets[i];
936		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
937			free(lun, M_MPR);
938		}
939	}
940	free(sassc->targets, M_MPR);
941	free(sassc, M_MPR);
942	sc->sassc = NULL;
943
944	return (0);
945}
946
947void
948mprsas_discovery_end(struct mprsas_softc *sassc)
949{
950	struct mpr_softc *sc = sassc->sc;
951
952	MPR_FUNCTRACE(sc);
953
954	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
955		callout_stop(&sassc->discovery_callout);
956
957}
958
/*
 * CAM action entry point for this SIM.  Dispatches an incoming CCB by its
 * function code.  Requests handled inline fall through to the xpt_done()
 * at the bottom; XPT_RESET_DEV, XPT_SCSI_IO and XPT_SMP_IO return early
 * because their helper routines complete the CCB themselves.
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "%s func 0x%x\n", __func__,
	    ccb->ccb_h.func_code);
	/* All CCB processing runs under the softc lock. */
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;
		/* The initiator claims the highest target ID on the bus. */
		cpi->initiator_id = sassc->maxtargets - 1;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
#if __FreeBSD_version >= 800001
		/*
		 * XXXSLM-probably need to base this number on max SGL's and
		 * page size.
		 */
		cpi->maxio = 256 * 1024;
#endif
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* A zero handle means no device is attached at this ID. */
		if (targ->handle == 0x0) {
			cts->ccb_h.status = CAM_DEV_NOT_THERE;
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/* Translate the firmware link-rate code into a bit rate. */
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			/* Unknown link rate; report the speed as invalid. */
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		cts->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_RESET_DEV:
		mpr_dprint(sassc->sc, MPR_XINFO,
		    "mprsas_action XPT_RESET_DEV\n");
		mprsas_action_resetdev(sassc, ccb);
		/* resetdev completes the CCB itself; skip xpt_done() below. */
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mpr_dprint(sassc->sc, MPR_XINFO,
		    "mprsas_action faking success for abort or reset\n");
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_SCSI_IO:
		mprsas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mprsas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	}
	xpt_done(ccb);

}
1093
1094static void
1095mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1096    target_id_t target_id, lun_id_t lun_id)
1097{
1098	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1099	struct cam_path *path;
1100
1101	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1102	    ac_code, target_id, (uintmax_t)lun_id);
1103
1104	if (xpt_create_path(&path, NULL,
1105		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1106		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1107			   "notification\n");
1108		return;
1109	}
1110
1111	xpt_async(ac_code, path, NULL);
1112	xpt_free_path(path);
1113}
1114
1115static void
1116mprsas_complete_all_commands(struct mpr_softc *sc)
1117{
1118	struct mpr_command *cm;
1119	int i;
1120	int completed;
1121
1122	MPR_FUNCTRACE(sc);
1123	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1124
1125	/* complete all commands with a NULL reply */
1126	for (i = 1; i < sc->num_reqs; i++) {
1127		cm = &sc->commands[i];
1128		cm->cm_reply = NULL;
1129		completed = 0;
1130
1131		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
1132			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
1133
1134		if (cm->cm_complete != NULL) {
1135			mprsas_log_command(cm, MPR_RECOVERY,
1136			    "completing cm %p state %x ccb %p for diag reset\n",
1137			    cm, cm->cm_state, cm->cm_ccb);
1138			cm->cm_complete(sc, cm);
1139			completed = 1;
1140		}
1141
1142		if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
1143			mprsas_log_command(cm, MPR_RECOVERY,
1144			    "waking up cm %p state %x ccb %p for diag reset\n",
1145			    cm, cm->cm_state, cm->cm_ccb);
1146			wakeup(cm);
1147			completed = 1;
1148		}
1149
1150		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
1151			/* this should never happen, but if it does, log */
1152			mprsas_log_command(cm, MPR_RECOVERY,
1153			    "cm %p state %x flags 0x%x ccb %p during diag "
1154			    "reset\n", cm, cm->cm_state, cm->cm_flags,
1155			    cm->cm_ccb);
1156		}
1157	}
1158}
1159
/*
 * Re-initialization hook called during a controller diag reset.  Puts the
 * driver back into startup/discovery mode, announces a bus reset to CAM,
 * forcibly completes all outstanding commands, and invalidates every
 * target's device handles so they are re-learned during rediscovery.
 * The ordering here matters: CAM is notified and commands are drained
 * before the handles are zeroed.
 */
void
mprsas_handle_reinit(struct mpr_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPRSAS_IN_STARTUP;
	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
	mprsas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mprsas_complete_all_commands(sc);

	mpr_dprint(sc, MPR_INIT, "%s startup %u tm %u after command "
	    "completion\n", __func__, sc->sassc->startup_refcount,
	    sc->sassc->tm_count);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		/* Outstanding I/O here indicates the drain above missed it. */
		if (sc->sassc->targets[i].outstanding != 0)
			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		/* note: assignment (not OR) clears all other target flags */
		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
	}
}
1203static void
1204mprsas_tm_timeout(void *data)
1205{
1206	struct mpr_command *tm = data;
1207	struct mpr_softc *sc = tm->cm_sc;
1208
1209	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1210
1211	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY,
1212	    "task mgmt %p timed out\n", tm);
1213	mpr_reinit(sc);
1214}
1215
1216static void
1217mprsas_logical_unit_reset_complete(struct mpr_softc *sc,
1218    struct mpr_command *tm)
1219{
1220	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1221	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1222	unsigned int cm_count = 0;
1223	struct mpr_command *cm;
1224	struct mprsas_target *targ;
1225
1226	callout_stop(&tm->cm_callout);
1227
1228	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1229	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1230	targ = tm->cm_targ;
1231
1232	/*
1233	 * Currently there should be no way we can hit this case.  It only
1234	 * happens when we have a failure to allocate chain frames, and
1235	 * task management commands don't have S/G lists.
1236	 */
1237	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1238		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for LUN reset! "
1239		    "This should not happen!\n", __func__, tm->cm_flags);
1240		mprsas_free_tm(sc, tm);
1241		return;
1242	}
1243
1244	if (reply == NULL) {
1245		mprsas_log_command(tm, MPR_RECOVERY,
1246		    "NULL reset reply for tm %p\n", tm);
1247		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1248			/* this completion was due to a reset, just cleanup */
1249			targ->flags &= ~MPRSAS_TARGET_INRESET;
1250			targ->tm = NULL;
1251			mprsas_free_tm(sc, tm);
1252		}
1253		else {
1254			/* we should have gotten a reply. */
1255			mpr_reinit(sc);
1256		}
1257		return;
1258	}
1259
1260	mprsas_log_command(tm, MPR_RECOVERY,
1261	    "logical unit reset status 0x%x code 0x%x count %u\n",
1262	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1263	    le32toh(reply->TerminationCount));
1264
1265	/* See if there are any outstanding commands for this LUN.
1266	 * This could be made more efficient by using a per-LU data
1267	 * structure of some sort.
1268	 */
1269	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1270		if (cm->cm_lun == tm->cm_lun)
1271			cm_count++;
1272	}
1273
1274	if (cm_count == 0) {
1275		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1276		    "logical unit %u finished recovery after reset\n",
1277		    tm->cm_lun, tm);
1278
1279		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1280		    tm->cm_lun);
1281
1282		/* we've finished recovery for this logical unit.  check and
1283		 * see if some other logical unit has a timedout command
1284		 * that needs to be processed.
1285		 */
1286		cm = TAILQ_FIRST(&targ->timedout_commands);
1287		if (cm) {
1288			mprsas_send_abort(sc, tm, cm);
1289		}
1290		else {
1291			targ->tm = NULL;
1292			mprsas_free_tm(sc, tm);
1293		}
1294	}
1295	else {
1296		/* if we still have commands for this LUN, the reset
1297		 * effectively failed, regardless of the status reported.
1298		 * Escalate to a target reset.
1299		 */
1300		mprsas_log_command(tm, MPR_RECOVERY,
1301		    "logical unit reset complete for tm %p, but still have %u "
1302		    "command(s)\n", tm, cm_count);
1303		mprsas_send_reset(sc, tm,
1304		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1305	}
1306}
1307
/*
 * Completion handler for a target reset task management command.
 *
 * Clears the target's INRESET flag, then: if the target has no outstanding
 * commands, recovery is complete and CAM is notified with AC_SENT_BDR on
 * the LUN wildcard; otherwise the reset is treated as failed and the driver
 * escalates to a full controller reinit.  A NULL reply outside of a diag
 * reset also forces a reinit.
 */
static void
mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed; cancel its pending timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR,"%s: cm_flags = %#x for target reset! "
		    "This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mprsas_log_command(tm, MPR_RECOVERY,
		    "NULL reset reply for tm %p\n", tm);
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->flags &= ~MPRSAS_TARGET_INRESET;
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_reinit(sc);
		}
		return;
	}

	mprsas_log_command(tm, MPR_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	targ->flags &= ~MPRSAS_TARGET_INRESET;

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "recovery finished after target reset\n");

		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "target reset complete for tm %p, but still have %u "
		    "command(s)\n", tm, targ->outstanding);
		mpr_reinit(sc);
	}
}
1380
1381#define MPR_RESET_TIMEOUT 30
1382
/*
 * Build and issue a SCSI task management reset request of the given type
 * (logical unit reset or target reset) using the TM frame "tm".
 *
 * Returns 0 on success, -1 for an invalid target handle or unknown reset
 * type, or the error from mpr_map_command().  Arms a MPR_RESET_TIMEOUT
 * second callout that escalates to a diag reset if no reply arrives.
 */
static int
mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *target;
	int err;

	target = tm->cm_targ;
	/* A zero handle means the target is gone; nothing to reset. */
	if (target->handle == 0) {
		mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
		    __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPR_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "sending logical unit reset\n");
		tm->cm_complete = mprsas_logical_unit_reset_complete;
	}
	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 *     SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		tm->cm_targ->flags |= MPRSAS_TARGET_INRESET;
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "sending target reset\n");
		tm->cm_complete = mprsas_target_reset_complete;
	}
	else {
		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	mpr_dprint(sc, MPR_XINFO, "to target %u handle 0x%04x\n", target->tid,
	    target->handle);
	if (target->encl_level_valid) {
		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", target->encl_level,
		    target->encl_slot, target->connector_name);
	}

	tm->cm_data = NULL;
	/* TM frames go out on the high-priority request queue. */
	tm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete_data = (void *)tm;

	callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
	    mprsas_tm_timeout, tm);

	err = mpr_map_command(sc, tm);
	if (err)
		mprsas_log_command(tm, MPR_RECOVERY,
		    "error %d sending reset type %u\n",
		    err, type);

	return err;
}
1451
1452
/*
 * Completion handler for an ABORT_TASK task management command.
 *
 * Inspects the target's timedout_commands list to decide what to do next:
 * if the list is empty, recovery for the target is done and the TM frame
 * is freed; if the head of the list is a different command than the one
 * just aborted, the next abort is issued; if the same command is still at
 * the head, the abort did not take effect and recovery escalates to a
 * logical unit reset.
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed; cancel its pending timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mprsas_log_command(tm, MPR_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mprsas_log_command(tm, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_reinit(sc);
		}
		return;
	}

	mprsas_log_command(tm, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mprsas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1534
1535#define MPR_ABORT_TIMEOUT 5
1536
/*
 * Build and issue an ABORT_TASK task management request for the timed-out
 * command "cm", using the TM frame "tm".
 *
 * Returns 0 on success, -1 if the target handle is invalid, or the error
 * from mpr_map_command().  Arms a MPR_ABORT_TIMEOUT second callout that
 * escalates to a diag reset if the abort itself never completes.
 */
static int
mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
    struct mpr_command *cm)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;
	int err;

	targ = cm->cm_targ;
	/* A zero handle means the target is gone; nothing to abort. */
	if (targ->handle == 0) {
		mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
		    __func__, cm->cm_ccb->ccb_h.target_id);
		return -1;
	}

	mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
	    "Aborting command %p\n", cm);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;

	/* XXX Need to handle invalid LUNs */
	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);

	/* TaskMID identifies the request to abort by its SMID. */
	req->TaskMID = htole16(cm->cm_desc.Default.SMID);

	tm->cm_data = NULL;
	/* TM frames go out on the high-priority request queue. */
	tm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete = mprsas_abort_complete;
	tm->cm_complete_data = (void *)tm;
	tm->cm_targ = cm->cm_targ;
	tm->cm_lun = cm->cm_lun;

	callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
	    mprsas_tm_timeout, tm);

	targ->aborts++;

	err = mpr_map_command(sc, tm);
	if (err)
		mprsas_log_command(tm, MPR_RECOVERY,
		    "error %d sending abort for cm %p SMID %u\n",
		    err, cm, req->TaskMID);
	return err;
}
1585
1586
/*
 * Callout handler fired when a SCSI I/O command exceeds its CCB timeout.
 *
 * First polls the interrupt handler in case the completion is merely
 * pending; if the command really is stuck, it is marked TIMEDOUT, queued on
 * the target's timedout_commands list, and recovery is started by sending
 * an abort — unless the target is already in recovery, in which case the
 * command just waits its turn.
 */
static void
mprsas_scsiio_timeout(void *data)
{
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_XINFO, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_state == MPR_CM_STATE_FREE) {
		/* Completion raced the timeout; nothing to recover. */
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	mprsas_log_command(cm, MPR_XINFO, "command timeout cm %p ccb %p "
	    "target %u, handle(0x%04x)\n", cm, cm->cm_ccb, targ->tid,
	    targ->handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */

	cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
	cm->cm_state = MPR_CM_STATE_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
		    "processing by tm %p\n", cm, targ->tm);
	}
	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);

		/* start recovery by aborting the first timedout command */
		mprsas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_RECOVERY,
		    "timedout cm %p failed to allocate a tm\n", cm);
	}
}
1667
/*
 * Handle an XPT_SCSI_IO CCB: validate the target, allocate a command
 * frame, translate the CCB into an MPI2 SCSI IO request (direction, tag,
 * LUN, CDB, optional EEDP protection fields), arm the per-command timeout,
 * and hand the request to the hardware.  The CCB is completed here only on
 * the early-error paths; otherwise mprsas_scsiio_complete() finishes it.
 */
static void
mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;
	struct mpr_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	csio = &ccb->csio;
	targ = &sassc->targets[csio->ccb_h.target_id];
	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A zero handle means no device is attached at this target ID. */
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		csio->ccb_h.status = CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}
	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
		mpr_dprint(sc, MPR_TRACE, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		csio->ccb_h.status = CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			csio->ccb_h.status = CAM_REQ_CMP;
		else
			csio->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
		mpr_dprint(sc, MPR_TRACE, "%s shutting down\n", __func__);
		csio->ccb_h.status = CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}

	cm = mpr_alloc_command(sc);
	/* No free command frames, or a diag reset is in flight: requeue. */
	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mpr_free_command(sc, cm);
		}
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPR_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* 32-byte CDBs need the additional-CDB-length field set (4 dwords). */
	if (csio->cdb_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);

	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mpr_free_command(sc, cm);
		ccb->ccb_h.status = CAM_LUN_INVALID;
		xpt_done(ccb);
		return;
	}

	/* The CDB is either referenced by pointer or stored inline. */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				/* LBA starts at byte 6 of 16-byte CDBs, byte 2
				 * otherwise. */
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
				    htole32(req->
				    CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	/* SGL occupies the request frame from dword 24 to dword 32. */
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_complete = mprsas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;
	/*
	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
	 * and set descriptor type.
	 */
	if (targ->scsi_req_desc_type ==
	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		cm->cm_desc.FastPathSCSIIO.RequestFlags =
		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
		cm->cm_desc.FastPathSCSIIO.DevHandle = htole16(targ->handle);
	} else {
		cm->cm_desc.SCSIIO.RequestFlags =
		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
		cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	}

	/* Arm the per-command timeout (CCB timeout is in milliseconds). */
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	   mprsas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mpr_map_command(sc, cm);
	return;
}
1922
1923static void
1924mpr_response_code(struct mpr_softc *sc, u8 response_code)
1925{
1926        char *desc;
1927
1928        switch (response_code) {
1929        case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1930                desc = "task management request completed";
1931                break;
1932        case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1933                desc = "invalid frame";
1934                break;
1935        case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1936                desc = "task management request not supported";
1937                break;
1938        case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1939                desc = "task management request failed";
1940                break;
1941        case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1942                desc = "task management request succeeded";
1943                break;
1944        case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1945                desc = "invalid lun";
1946                break;
1947        case 0xA:
1948                desc = "overlapped tag attempted";
1949                break;
1950        case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1951                desc = "task queued, however not sent to target";
1952                break;
1953        default:
1954                desc = "unknown";
1955                break;
1956        }
1957	mpr_dprint(sc, MPR_XINFO, "response_code(0x%01x): %s\n", response_code,
1958	    desc);
1959}
1960
1961/**
1962 * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
1963 */
1964static void
1965mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
1966    Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
1967{
1968	u32 response_info;
1969	u8 *response_bytes;
1970	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1971	    MPI2_IOCSTATUS_MASK;
1972	u8 scsi_state = mpi_reply->SCSIState;
1973	u8 scsi_status = mpi_reply->SCSIStatus;
1974	char *desc_ioc_state = NULL;
1975	char *desc_scsi_status = NULL;
1976	char *desc_scsi_state = sc->tmp_string;
1977	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1978
1979	if (log_info == 0x31170000)
1980		return;
1981
1982	switch (ioc_status) {
1983	case MPI2_IOCSTATUS_SUCCESS:
1984		desc_ioc_state = "success";
1985		break;
1986	case MPI2_IOCSTATUS_INVALID_FUNCTION:
1987		desc_ioc_state = "invalid function";
1988		break;
1989	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1990		desc_ioc_state = "scsi recovered error";
1991		break;
1992	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1993		desc_ioc_state = "scsi invalid dev handle";
1994		break;
1995	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1996		desc_ioc_state = "scsi device not there";
1997		break;
1998	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1999		desc_ioc_state = "scsi data overrun";
2000		break;
2001	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2002		desc_ioc_state = "scsi data underrun";
2003		break;
2004	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2005		desc_ioc_state = "scsi io data error";
2006		break;
2007	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2008		desc_ioc_state = "scsi protocol error";
2009		break;
2010	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2011		desc_ioc_state = "scsi task terminated";
2012		break;
2013	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2014		desc_ioc_state = "scsi residual mismatch";
2015		break;
2016	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2017		desc_ioc_state = "scsi task mgmt failed";
2018		break;
2019	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2020		desc_ioc_state = "scsi ioc terminated";
2021		break;
2022	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2023		desc_ioc_state = "scsi ext terminated";
2024		break;
2025	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2026		desc_ioc_state = "eedp guard error";
2027		break;
2028	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2029		desc_ioc_state = "eedp ref tag error";
2030		break;
2031	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2032		desc_ioc_state = "eedp app tag error";
2033		break;
2034	default:
2035		desc_ioc_state = "unknown";
2036		break;
2037	}
2038
2039	switch (scsi_status) {
2040	case MPI2_SCSI_STATUS_GOOD:
2041		desc_scsi_status = "good";
2042		break;
2043	case MPI2_SCSI_STATUS_CHECK_CONDITION:
2044		desc_scsi_status = "check condition";
2045		break;
2046	case MPI2_SCSI_STATUS_CONDITION_MET:
2047		desc_scsi_status = "condition met";
2048		break;
2049	case MPI2_SCSI_STATUS_BUSY:
2050		desc_scsi_status = "busy";
2051		break;
2052	case MPI2_SCSI_STATUS_INTERMEDIATE:
2053		desc_scsi_status = "intermediate";
2054		break;
2055	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2056		desc_scsi_status = "intermediate condmet";
2057		break;
2058	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2059		desc_scsi_status = "reservation conflict";
2060		break;
2061	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2062		desc_scsi_status = "command terminated";
2063		break;
2064	case MPI2_SCSI_STATUS_TASK_SET_FULL:
2065		desc_scsi_status = "task set full";
2066		break;
2067	case MPI2_SCSI_STATUS_ACA_ACTIVE:
2068		desc_scsi_status = "aca active";
2069		break;
2070	case MPI2_SCSI_STATUS_TASK_ABORTED:
2071		desc_scsi_status = "task aborted";
2072		break;
2073	default:
2074		desc_scsi_status = "unknown";
2075		break;
2076	}
2077
2078	desc_scsi_state[0] = '\0';
2079	if (!scsi_state)
2080		desc_scsi_state = " ";
2081	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2082		strcat(desc_scsi_state, "response info ");
2083	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2084		strcat(desc_scsi_state, "state terminated ");
2085	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2086		strcat(desc_scsi_state, "no status ");
2087	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2088		strcat(desc_scsi_state, "autosense failed ");
2089	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2090		strcat(desc_scsi_state, "autosense valid ");
2091
2092	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2093	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2094	if (targ->encl_level_valid) {
2095		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2096		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2097		    targ->connector_name);
2098	}
2099	/* We can add more detail about underflow data here
2100	 * TO-DO
2101	 * */
2102	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2103	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2104	    desc_scsi_state, scsi_state);
2105
2106	if (sc->mpr_debug & MPR_XINFO &&
2107		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2108		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2109		scsi_sense_print(csio);
2110		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2111	}
2112
2113	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2114		response_info = le32toh(mpi_reply->ResponseInfo);
2115		response_bytes = (u8 *)&response_info;
2116		mpr_response_code(sc,response_bytes[0]);
2117	}
2118}
2119
/*
 * Completion handler for XPT_SCSI_IO commands.  Tears down the data DMA
 * mapping, updates target accounting, translates the MPI2 SCSI IO reply
 * (IOCStatus/SCSIStatus/SCSIState) into a CAM status on the CCB, manages
 * SIM-queue freeze/unfreeze and device-queue freezing on error, then frees
 * the command and completes the CCB via xpt_done().
 */
static void
mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mprsas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on;
	int dir = 0, i;
	u16 alloc_len;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	/* rep may be NULL (no reply frame); the fast path below handles it. */
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* Per-target accounting: command is no longer outstanding. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

	/*
	 * Extra logging for commands that complete during timeout/TM
	 * recovery or after a diag reset.
	 */
	if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		if (cm->cm_reply != NULL)
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus,
			    rep->SCSIState, le32toh(rep->TransferCount));
		else
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
			    rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
		mprsas_log_command(cm, MPR_RECOVERY,
		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		ccb->ccb_h.status = CAM_REQUEUE_REQ;

		/*
		 * Currently the only error included in the mask is
		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
			mpr_dprint(sc, MPR_INFO, "Error sending command, "
				   "freezing SIM queue\n");
		}
	}

	/*
	 * If this is a Start Stop Unit command and it was issued by the driver
	 * during shutdown, decrement the refcount to account for all of the
	 * commands that were sent.  All SSU commands should be completed before
	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
	 * is TRUE.
	 */
	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
		sc->SSU_refcount--;
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
			else {
				/* No reply frame means a clean completion. */
				ccb->ccb_h.status = CAM_REQ_CMP;
				ccb->csio.scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
				mpr_dprint(sc, MPR_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
		 */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order with after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mpr_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mprsas_log_command(cm, MPR_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/* Translate the MPI2 IOCStatus into a CAM status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
		    == MPR_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			ccb->ccb_h.status = CAM_REQ_ABORTED;
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			ccb->ccb_h.status = CAM_REQ_CMP;
		else
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;

		/* Copy autosense data into the CCB, clamped to its buffer. */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < csio->sense_len)
				csio->sense_resid = csio->sense_len -
				    returned_sense_len;
			else
				csio->sense_resid = 0;

			sense_len = min(returned_sense_len,
			    csio->sense_len - csio->sense_resid);
			bzero(&csio->sense_data, sizeof(csio->sense_data));
			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
		    (sc->control_TLR) &&
		    (sc->mapping_table[csio->ccb_h.target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
			    TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			/* CDB bytes 3-4 hold the INQUIRY allocation length. */
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			alloc_len -= csio->resid;
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			ccb->ccb_h.status = CAM_REQ_CMP;
		else
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mpr_print_scsiio_cmd(sc, cm);
		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
		else
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		ccb->ccb_h.status = CAM_DATA_RUN_ERR;
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * Since these are generally external (i.e. hopefully
		 * transient transport-related) errors, retry these without
		 * decrementing the retry count.
		 */
		ccb->ccb_h.status = CAM_REQUEUE_REQ;
		mprsas_log_command(cm, MPR_INFO,
		    "terminated ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mprsas_log_command(cm, MPR_XINFO,
		    "completed ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		break;
	}

	/*
	 * Called unconditionally here; its output is debug-gated inside via
	 * mpr_dprint at the MPR_XINFO level, so it is quiet by default.
	 */
	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);

	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
		    "queue\n");
	}

	/* Freeze the device queue on any error so ordering is preserved. */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	mpr_free_command(sc, cm);
	xpt_done(ccb);
}
2473
2474#if __FreeBSD_version >= 900026
2475static void
2476mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2477{
2478	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2479	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2480	uint64_t sasaddr;
2481	union ccb *ccb;
2482
2483	ccb = cm->cm_complete_data;
2484
2485	/*
2486	 * Currently there should be no way we can hit this case.  It only
2487	 * happens when we have a failure to allocate chain frames, and SMP
2488	 * commands require two S/G elements only.  That should be handled
2489	 * in the standard request size.
2490	 */
2491	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2492		mpr_dprint(sc, MPR_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2493		    __func__, cm->cm_flags);
2494		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2495		goto bailout;
2496        }
2497
2498	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2499	if (rpl == NULL) {
2500		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2501		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2502		goto bailout;
2503	}
2504
2505	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2506	sasaddr = le32toh(req->SASAddress.Low);
2507	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2508
2509	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2510	    MPI2_IOCSTATUS_SUCCESS ||
2511	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2512		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2513		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2514		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2515		goto bailout;
2516	}
2517
2518	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address "
2519	    "%#jx completed successfully\n", __func__, (uintmax_t)sasaddr);
2520
2521	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2522		ccb->ccb_h.status = CAM_REQ_CMP;
2523	else
2524		ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
2525
2526bailout:
2527	/*
2528	 * We sync in both directions because we had DMAs in the S/G list
2529	 * in both directions.
2530	 */
2531	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2532			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2533	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2534	mpr_free_command(sc, cm);
2535	xpt_done(ccb);
2536}
2537
/*
 * Build and dispatch an SMP passthrough request for the given CCB to the
 * target at 'sasaddr'.  Validates the CCB's data addressing mode (only
 * single-segment virtual buffers are supported), sets up a two-element
 * uio (request + response) for a single bidirectional mapping, and hands
 * the command to mpr_map_command().  Completion is handled by
 * mprsas_smpio_complete().  On any setup failure, the CCB is completed
 * immediately with an error status.
 */
static void
mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
    uint64_t sasaddr)
{
	struct mpr_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mpr_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

	/*
	 * The two preprocessor branches below implement the same buffer
	 * validation/extraction for the newer CAM_DATA_MASK API and the
	 * older CAM_SCATTER_VALID flag API respectively.
	 */
#if (__FreeBSD_version >= 1000028) || \
    ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		/*
		 * XXX We don't yet support physical addresses here.
		 */
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR,
			    "%s: multiple request or response buffer segments "
			    "not supported for SMP\n", __func__);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}
#else /* __FreeBSD_version < 1000028 */
	/*
	 * XXX We don't yet support physical addresses here.
	 */
	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
		mpr_printf(sc, "%s: physical addresses not supported\n",
			   __func__);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}

	/*
	 * If the user wants to send an S/G list, check to make sure they
	 * have single buffers.
	 */
	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
	} else {
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
	}
#endif /* __FreeBSD_version < 1000028 */

	cm = mpr_alloc_command(sc);
	if (cm == NULL) {
		mpr_dprint(sc, MPR_ERROR,
		    "%s: cannot allocate command\n", __func__);
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}

	/* Build the MPI2 SMP passthrough request frame. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
	    "%#jx\n", __func__, (uintmax_t)sasaddr);

	mpr_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mpr_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mpr_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mprsas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mpr_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
			MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mpr_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
		    "mpr_map_command()\n", __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mpr_free_command(sc, cm);
	ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
	xpt_done(ccb);
	return;
}
2769
/*
 * Handle an XPT_SMP_IO CCB: resolve the SAS address of the SMP target
 * (either the device itself, if it embeds an SMP target, or its parent
 * expander) and forward the request via mprsas_send_smpcmd().  Completes
 * the CCB with an error status if no SMP-capable address can be found.
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		/* Old probe code: look the parent up by handle. */
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		/* Current probe code: parent info is cached on the target. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */

	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
2898#endif //__FreeBSD_version >= 900026
2899
2900static void
2901mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
2902{
2903	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2904	struct mpr_softc *sc;
2905	struct mpr_command *tm;
2906	struct mprsas_target *targ;
2907
2908	MPR_FUNCTRACE(sassc->sc);
2909	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
2910
2911	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
2912	    ("Target %d out of bounds in XPT_RESET_DEV\n",
2913	    ccb->ccb_h.target_id));
2914	sc = sassc->sc;
2915	tm = mpr_alloc_command(sc);
2916	if (tm == NULL) {
2917		mpr_dprint(sc, MPR_ERROR,
2918		    "command alloc failure in mprsas_action_resetdev\n");
2919		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2920		xpt_done(ccb);
2921		return;
2922	}
2923
2924	targ = &sassc->targets[ccb->ccb_h.target_id];
2925	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2926	req->DevHandle = htole16(targ->handle);
2927	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2928	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2929
2930	/* SAS Hard Link Reset / SATA Link Reset */
2931	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
2932
2933	tm->cm_data = NULL;
2934	tm->cm_desc.HighPriority.RequestFlags =
2935	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2936	tm->cm_complete = mprsas_resetdev_complete;
2937	tm->cm_complete_data = ccb;
2938	tm->cm_targ = targ;
2939	mpr_map_command(sc, tm);
2940}
2941
/*
 * Completion handler for the target reset issued by
 * mprsas_action_resetdev().  Translates the task management reply into
 * a CAM status on the original CCB, announces the reset to CAM on
 * success, then frees the task management command and completes the CCB.
 */
static void
mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
	union ccb *ccb;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	ccb = tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		MPI2_SCSI_TASK_MANAGE_REQUEST *req;

		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;

		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, req->DevHandle);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
	}

	/*
	 * NOTE(review): resp is dereferenced without a NULL check; this
	 * assumes a reply frame is always present when no cm_flags error is
	 * set — confirm against the reply-delivery path.  Also, le32toh()
	 * is applied to ResponseCode, which appears to be a single-byte
	 * field in the MPI2 reply; verify the field width in mpi2.h (a U8
	 * needs no byte swap and the swap would be wrong on big-endian).
	 */
	mpr_dprint(sc, MPR_XINFO,
	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));

	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
		ccb->ccb_h.status = CAM_REQ_CMP;
		/* Tell CAM a bus device reset was delivered to this target. */
		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);
	}
	else
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;

bailout:

	mprsas_free_tm(sc, tm);
	xpt_done(ccb);
}
2988
2989static void
2990mprsas_poll(struct cam_sim *sim)
2991{
2992	struct mprsas_softc *sassc;
2993
2994	sassc = cam_sim_softc(sim);
2995
2996	if (sassc->sc->mpr_debug & MPR_TRACE) {
2997		/* frequent debug messages during a panic just slow
2998		 * everything down too much.
2999		 */
3000		mpr_printf(sassc->sc, "%s clearing MPR_TRACE\n", __func__);
3001		sassc->sc->mpr_debug &= ~MPR_TRACE;
3002	}
3003
3004	mpr_intr_locked(sassc->sc);
3005}
3006
/*
 * CAM async event callback.  Handles two events:
 *
 *  - AC_ADVINFO_CHANGED (on FreeBSD versions that deliver it): refetch
 *    the long read-capacity data via XPT_DEV_ADVINFO and update the
 *    per-LUN EEDP (protection information) state.
 *  - AC_FOUND_DEVICE: prepare shutdown StartStopUnit handling for the
 *    new device, and on older FreeBSD versions probe EEDP support by
 *    sending a READ CAPACITY(16) ourselves.
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sassc->sim->path_id)
			break;
#endif

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the LUN this event refers to, creating it if new. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read-capacity data for this device
		 * from CAM via an XPT_DEV_ADVINFO CCB (stack-allocated; it
		 * completes synchronously).
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if __FreeBSD_version >= 1100061
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		/* PROT_EN set means the LUN is formatted with EEDP. */
		if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#endif
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
			break;
#endif

		cgd = arg;
		mprsas_prepare_ssu(sc, path, cgd);

#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
		mprsas_check_eedp(sc, path, cgd);
#endif
		break;
	}
	default:
		break;
	}
}
3137
3138static void
3139mprsas_prepare_ssu(struct mpr_softc *sc, struct cam_path *path,
3140    struct ccb_getdev *cgd)
3141{
3142	struct mprsas_softc *sassc = sc->sassc;
3143	path_id_t pathid;
3144	target_id_t targetid;
3145	lun_id_t lunid;
3146	struct mprsas_target *target;
3147	struct mprsas_lun *lun;
3148	uint8_t	found_lun;
3149
3150	sassc = sc->sassc;
3151	pathid = cam_sim_path(sassc->sim);
3152	targetid = xpt_path_target_id(path);
3153	lunid = xpt_path_lun_id(path);
3154
3155	KASSERT(targetid < sassc->maxtargets,
3156	    ("Target %d out of bounds in mprsas_prepare_ssu\n", targetid));
3157	target = &sassc->targets[targetid];
3158	if (target->handle == 0x0)
3159		return;
3160
3161	/*
3162	 * If LUN is already in list, don't create a new one.
3163	 */
3164	found_lun = FALSE;
3165	SLIST_FOREACH(lun, &target->luns, lun_link) {
3166		if (lun->lun_id == lunid) {
3167			found_lun = TRUE;
3168			break;
3169		}
3170	}
3171	if (!found_lun) {
3172		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3173		    M_NOWAIT | M_ZERO);
3174		if (lun == NULL) {
3175			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3176			    "preparing SSU.\n");
3177			return;
3178		}
3179		lun->lun_id = lunid;
3180		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3181	}
3182
3183	/*
3184	 * If this is a SATA direct-access end device, mark it so that a SCSI
3185	 * StartStopUnit command will be sent to it when the driver is being
3186	 * shutdown.
3187	 */
3188	if (((cgd->inq_data.device & 0x1F) == T_DIRECT) &&
3189	    (target->devinfo & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
3190	    ((target->devinfo & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
3191	    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
3192		lun->stop_at_shutdown = TRUE;
3193	}
3194}
3195
3196#if (__FreeBSD_version < 901503) || \
3197    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3198static void
3199mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3200    struct ccb_getdev *cgd)
3201{
3202	struct mprsas_softc *sassc = sc->sassc;
3203	struct ccb_scsiio *csio;
3204	struct scsi_read_capacity_16 *scsi_cmd;
3205	struct scsi_read_capacity_eedp *rcap_buf;
3206	path_id_t pathid;
3207	target_id_t targetid;
3208	lun_id_t lunid;
3209	union ccb *ccb;
3210	struct cam_path *local_path;
3211	struct mprsas_target *target;
3212	struct mprsas_lun *lun;
3213	uint8_t	found_lun;
3214	char path_str[64];
3215
3216	sassc = sc->sassc;
3217	pathid = cam_sim_path(sassc->sim);
3218	targetid = xpt_path_target_id(path);
3219	lunid = xpt_path_lun_id(path);
3220
3221	KASSERT(targetid < sassc->maxtargets,
3222	    ("Target %d out of bounds in mprsas_check_eedp\n", targetid));
3223	target = &sassc->targets[targetid];
3224	if (target->handle == 0x0)
3225		return;
3226
3227	/*
3228	 * Determine if the device is EEDP capable.
3229	 *
3230	 * If this flag is set in the inquiry data, the device supports
3231	 * protection information, and must support the 16 byte read capacity
3232	 * command, otherwise continue without sending read cap 16
3233	 */
3234	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3235		return;
3236
3237	/*
3238	 * Issue a READ CAPACITY 16 command.  This info is used to determine if
3239	 * the LUN is formatted for EEDP support.
3240	 */
3241	ccb = xpt_alloc_ccb_nowait();
3242	if (ccb == NULL) {
3243		mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3244		    "support.\n");
3245		return;
3246	}
3247
3248	if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid)
3249	    != CAM_REQ_CMP) {
3250		mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3251		    "support\n");
3252		xpt_free_ccb(ccb);
3253		return;
3254	}
3255
3256	/*
3257	 * If LUN is already in list, don't create a new one.
3258	 */
3259	found_lun = FALSE;
3260	SLIST_FOREACH(lun, &target->luns, lun_link) {
3261		if (lun->lun_id == lunid) {
3262			found_lun = TRUE;
3263			break;
3264		}
3265	}
3266	if (!found_lun) {
3267		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3268		    M_NOWAIT | M_ZERO);
3269		if (lun == NULL) {
3270			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3271			    "EEDP support.\n");
3272			xpt_free_path(local_path);
3273			xpt_free_ccb(ccb);
3274			return;
3275		}
3276		lun->lun_id = lunid;
3277		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3278	}
3279
3280	xpt_path_string(local_path, path_str, sizeof(path_str));
3281	mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3282	    path_str, target->handle);
3283
3284	/*
3285	 * Issue a READ CAPACITY 16 command for the LUN.  The
3286	 * mprsas_read_cap_done function will load the read cap info into the
3287	 * LUN struct.
3288	 */
3289	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3290	    M_NOWAIT | M_ZERO);
3291	if (rcap_buf == NULL) {
3292		mpr_dprint(sc, MPR_FAULT, "Unable to alloc read capacity "
3293		    "buffer for EEDP support.\n");
3294		xpt_free_path(ccb->ccb_h.path);
3295		xpt_free_ccb(ccb);
3296		return;
3297	}
3298	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3299	csio = &ccb->csio;
3300	csio->ccb_h.func_code = XPT_SCSI_IO;
3301	csio->ccb_h.flags = CAM_DIR_IN;
3302	csio->ccb_h.retry_count = 4;
3303	csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3304	csio->ccb_h.timeout = 60000;
3305	csio->data_ptr = (uint8_t *)rcap_buf;
3306	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3307	csio->sense_len = MPR_SENSE_LEN;
3308	csio->cdb_len = sizeof(*scsi_cmd);
3309	csio->tag_action = MSG_SIMPLE_Q_TAG;
3310
3311	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3312	bzero(scsi_cmd, sizeof(*scsi_cmd));
3313	scsi_cmd->opcode = 0x9E;
3314	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3315	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3316
3317	ccb->ccb_h.ppriv_ptr1 = sassc;
3318	xpt_action(ccb);
3319}
3320
/*
 * Completion handler for the internal READ CAPACITY(16) issued by
 * mprsas_check_eedp().  Records the LUN's EEDP (protection information)
 * formatting state and block size, then frees the data buffer, the
 * path, and the CCB.
 */
static void
mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mprsas_softc *sassc;
	struct mprsas_target *target;
	struct mprsas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver must release the devq itself here because this SCSI
	 * command was generated internally by the driver, not by a
	 * peripheral, so no cam_periph completion will do it for us.  This
	 * is currently the only place the driver issues a SCSI command
	 * internally; any future internal commands will likewise need to
	 * release the devq themselves.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.  The softc was stashed in ppriv_ptr1 by mprsas_check_eedp().
	 */
	sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in mprsas_read_cap_done\n",
	    done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in with EEDP
		 * info.  If the READ CAP 16 command had some SCSI error (common
		 * if command is not supported), mark the lun as not supporting
		 * EEDP and set the block size to 0.
		 */
		if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		    || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* Low bit of the protect byte is PROT_EN (EEDP formatted). */
		if (rcap_buf->protect & 0x01) {
			mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for "
			    "target ID %d is formatted for EEDP "
			    "support.\n", done_ccb->ccb_h.target_lun,
			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	/* Finished with this CCB, its path, and the data buffer. */
	free(rcap_buf, M_MPR);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3390#endif /* (__FreeBSD_version < 901503) || \
3391          ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3392
3393int
3394mprsas_startup(struct mpr_softc *sc)
3395{
3396	/*
3397	 * Send the port enable message and set the wait_for_port_enable flag.
3398	 * This flag helps to keep the simq frozen until all discovery events
3399	 * are processed.
3400	 */
3401	sc->wait_for_port_enable = 1;
3402	mprsas_send_portenable(sc);
3403	return (0);
3404}
3405
3406static int
3407mprsas_send_portenable(struct mpr_softc *sc)
3408{
3409	MPI2_PORT_ENABLE_REQUEST *request;
3410	struct mpr_command *cm;
3411
3412	MPR_FUNCTRACE(sc);
3413
3414	if ((cm = mpr_alloc_command(sc)) == NULL)
3415		return (EBUSY);
3416	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3417	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3418	request->MsgFlags = 0;
3419	request->VP_ID = 0;
3420	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3421	cm->cm_complete = mprsas_portenable_complete;
3422	cm->cm_data = NULL;
3423	cm->cm_sge = NULL;
3424
3425	mpr_map_command(sc, cm);
3426	mpr_dprint(sc, MPR_XINFO,
3427	    "mpr_send_portenable finished cm %p req %p complete %p\n",
3428	    cm, cm->cm_req, cm->cm_complete);
3429	return (0);
3430}
3431
3432static void
3433mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3434{
3435	MPI2_PORT_ENABLE_REPLY *reply;
3436	struct mprsas_softc *sassc;
3437
3438	MPR_FUNCTRACE(sc);
3439	sassc = sc->sassc;
3440
3441	/*
3442	 * Currently there should be no way we can hit this case.  It only
3443	 * happens when we have a failure to allocate chain frames, and
3444	 * port enable commands don't have S/G lists.
3445	 */
3446	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3447		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3448		    "This should not happen!\n", __func__, cm->cm_flags);
3449	}
3450
3451	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3452	if (reply == NULL)
3453		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3454	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3455	    MPI2_IOCSTATUS_SUCCESS)
3456		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3457
3458	mpr_free_command(sc, cm);
3459	if (sc->mpr_ich.ich_arg != NULL) {
3460		mpr_dprint(sc, MPR_XINFO, "disestablish config intrhook\n");
3461		config_intrhook_disestablish(&sc->mpr_ich);
3462		sc->mpr_ich.ich_arg = NULL;
3463	}
3464
3465	/*
3466	 * Done waiting for port enable to complete.  Decrement the refcount.
3467	 * If refcount is 0, discovery is complete and a rescan of the bus can
3468	 * take place.
3469	 */
3470	sc->wait_for_port_enable = 0;
3471	sc->port_enable_complete = 1;
3472	wakeup(&sc->port_enable_complete);
3473	mprsas_startup_decrement(sassc);
3474}
3475
3476int
3477mprsas_check_id(struct mprsas_softc *sassc, int id)
3478{
3479	struct mpr_softc *sc = sassc->sc;
3480	char *ids;
3481	char *name;
3482
3483	ids = &sc->exclude_ids[0];
3484	while((name = strsep(&ids, ",")) != NULL) {
3485		if (name[0] == '\0')
3486			continue;
3487		if (strtol(name, NULL, 0) == (long)id)
3488			return (1);
3489	}
3490
3491	return (0);
3492}
3493