1/*-
2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2016 Avago Technologies
5 * Copyright 2000-2020 Broadcom Inc.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD
30 *
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD$");
35
36/* Communications core for Avago Technologies (LSI) MPT3 */
37
38/* TODO Move headers to mprvar */
39#include <sys/types.h>
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/kernel.h>
43#include <sys/selinfo.h>
44#include <sys/module.h>
45#include <sys/bus.h>
46#include <sys/conf.h>
47#include <sys/bio.h>
48#include <sys/malloc.h>
49#include <sys/uio.h>
50#include <sys/sysctl.h>
51#include <sys/endian.h>
52#include <sys/queue.h>
53#include <sys/kthread.h>
54#include <sys/taskqueue.h>
55#include <sys/sbuf.h>
56
57#include <machine/bus.h>
58#include <machine/resource.h>
59#include <sys/rman.h>
60
61#include <machine/stdarg.h>
62
63#include <cam/cam.h>
64#include <cam/cam_ccb.h>
65#include <cam/cam_debug.h>
66#include <cam/cam_sim.h>
67#include <cam/cam_xpt_sim.h>
68#include <cam/cam_xpt_periph.h>
69#include <cam/cam_periph.h>
70#include <cam/scsi/scsi_all.h>
71#include <cam/scsi/scsi_message.h>
72#if __FreeBSD_version >= 900026
73#include <cam/scsi/smp_all.h>
74#endif
75
76#include <dev/nvme/nvme.h>
77
78#include <dev/mpr/mpi/mpi2_type.h>
79#include <dev/mpr/mpi/mpi2.h>
80#include <dev/mpr/mpi/mpi2_ioc.h>
81#include <dev/mpr/mpi/mpi2_sas.h>
82#include <dev/mpr/mpi/mpi2_pci.h>
83#include <dev/mpr/mpi/mpi2_cnfg.h>
84#include <dev/mpr/mpi/mpi2_init.h>
85#include <dev/mpr/mpi/mpi2_tool.h>
86#include <dev/mpr/mpr_ioctl.h>
87#include <dev/mpr/mprvar.h>
88#include <dev/mpr/mpr_table.h>
89#include <dev/mpr/mpr_sas.h>
90
#define MPRSAS_DISCOVERY_TIMEOUT	20
#define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */

/*
 * static array to check SCSI OpCode for EEDP protection bits
 *
 * Indexed by the CDB opcode byte; a non-zero entry gives the EEDPFlags
 * operation to request for that opcode:
 *   PRO_R - check and remove protection info (data-in / reads)
 *   PRO_W - insert protection info (data-out / writes)
 *   PRO_V - insert protection info (verify-with-compare opcodes)
 */
#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x28 READ(10), 0x2A WRITE(10), 0x2E WRITE&VERIFY(10), 0x2F VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x41 WRITE SAME(10) */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x88 READ(16), 0x8A WRITE(16), 0x8E WRITE&VERIFY(16), 0x8F VERIFY(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x93 WRITE SAME(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA8 READ(12), 0xAA WRITE(12), 0xAE WRITE&VERIFY(12), 0xAF VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
118
MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");

/* Forward declarations for the CAM action/poll entry points and the
 * per-request completion handlers used by this file. */
static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
static void mprsas_poll(struct cam_sim *sim);
static void mprsas_scsiio_timeout(void *data);
static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
    struct mpr_command *cm);
static void mprsas_async(void *callback_arg, uint32_t code,
    struct cam_path *path, void *arg);
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/* Older CAM lacks AC_ADVINFO_CHANGED, so EEDP capability is probed with an
 * explicit READ CAPACITY(16) instead. */
static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
    struct ccb_getdev *cgd);
static void mprsas_read_cap_done(struct cam_periph *periph,
    union ccb *done_ccb);
#endif
static int mprsas_send_portenable(struct mpr_softc *sc);
static void mprsas_portenable_complete(struct mpr_softc *sc,
    struct mpr_command *cm);

#if __FreeBSD_version >= 900026
static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
    uint64_t sasaddr);
static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
#endif //FreeBSD_version >= 900026
152
153struct mprsas_target *
154mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
155    uint16_t handle)
156{
157	struct mprsas_target *target;
158	int i;
159
160	for (i = start; i < sassc->maxtargets; i++) {
161		target = &sassc->targets[i];
162		if (target->handle == handle)
163			return (target);
164	}
165
166	return (NULL);
167}
168
169/* we need to freeze the simq during attach and diag reset, to avoid failing
170 * commands before device handles have been found by discovery.  Since
171 * discovery involves reading config pages and possibly sending commands,
172 * discovery actions may continue even after we receive the end of discovery
173 * event, so refcount discovery actions instead of assuming we can unfreeze
174 * the simq when we get the event.
175 */
/*
 * Take one discovery reference.  The first reference freezes the SIM queue
 * (and, on CAM versions that support it, holds boot) until
 * mprsas_startup_decrement() drops the count back to zero.  Only meaningful
 * while MPRSAS_IN_STARTUP is set.
 */
void
mprsas_startup_increment(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s freezing simq\n", __func__);
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
			xpt_hold_boot();
#endif
			xpt_freeze_simq(sassc->sim, 1);
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
196
197void
198mprsas_release_simq_reinit(struct mprsas_softc *sassc)
199{
200	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
201		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
202		xpt_release_simq(sassc->sim, 1);
203		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
204	}
205}
206
/*
 * Drop one discovery reference taken by mprsas_startup_increment().  When
 * the count reaches zero, startup is over: clear MPRSAS_IN_STARTUP, release
 * the SIM queue, and either release boot (newer CAM) or kick off a full
 * bus rescan (older CAM without xpt_release_boot()).
 */
void
mprsas_startup_decrement(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPRSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
			xpt_release_boot();
#else
			mprsas_rescan_target(sassc->sc, NULL);
#endif
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
232
233/*
234 * The firmware requires us to stop sending commands when we're doing task
235 * management.
236 * use.
237 * XXX The logic for serializing the device has been made lazy and moved to
238 * mprsas_prepare_for_tm().
239 */
240struct mpr_command *
241mprsas_alloc_tm(struct mpr_softc *sc)
242{
243	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
244	struct mpr_command *tm;
245
246	MPR_FUNCTRACE(sc);
247	tm = mpr_alloc_high_priority_command(sc);
248	if (tm == NULL)
249		return (NULL);
250
251	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
252	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
253	return tm;
254}
255
/*
 * Release a task management command allocated with mprsas_alloc_tm().
 * Undoes the TM-time devq freeze (release devq, free the freeze path/CCB)
 * and clears the target's INRESET flag so normal SCSI I/O can resume.
 * Safe to call with tm == NULL.
 */
void
mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
{
	int target_id = 0xFFFFFFFF;	/* prints as -1 when no target attached */

	MPR_FUNCTRACE(sc);
	if (tm == NULL)
		return;

	/*
	 * For TM's the devq is frozen for the device.  Unfreeze it here and
	 * free the resources used for freezing the devq.  Must clear the
	 * INRESET flag as well or scsi I/O will not work.
	 */
	if (tm->cm_targ != NULL) {
		tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
		target_id = tm->cm_targ->tid;
	}
	if (tm->cm_ccb) {
		mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
		    target_id);
		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
		xpt_free_path(tm->cm_ccb->ccb_h.path);
		xpt_free_ccb(tm->cm_ccb);
	}

	mpr_free_high_priority_command(sc, tm);
}
284
/*
 * Schedule an asynchronous CAM rescan of a single target, or of the whole
 * bus when targ == NULL.  Allocation failures are logged and the rescan is
 * silently dropped.
 */
void
mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
{
	struct mprsas_softc *sassc = sc->sassc;
	path_id_t pathid;
	target_id_t targetid;
	union ccb *ccb;

	MPR_FUNCTRACE(sc);
	pathid = cam_sim_path(sassc->sim);
	if (targ == NULL)
		targetid = CAM_TARGET_WILDCARD;
	else
		/* Target IDs are the index into the targets[] array. */
		targetid = targ - sassc->targets;

	/*
	 * Allocate a CCB and schedule a rescan.
	 */
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
		return;
	}

	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
		xpt_free_ccb(ccb);
		return;
	}

	if (targetid == CAM_TARGET_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else
		ccb->ccb_h.func_code = XPT_SCAN_TGT;

	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
	/* xpt_rescan() takes ownership of the CCB and frees it on completion. */
	xpt_rescan(ccb);
}
324
/*
 * printf-style debug logging for a command, prefixed with its CAM path (or
 * a synthesized "noperiph" tuple when no CCB is attached) and its SMID.
 * Output is emitted only when 'level' is enabled in the softc debug mask.
 */
static void
mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[224];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mpr_debug & level) == 0)
		return;

	/* Fixed-length sbuf backed by str[]; output is truncated if it overflows. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
		    sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
			    cm->cm_ccb->csio.dxfer_len);
		}
	} else {
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
369
370static void
371mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
372{
373	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
374	struct mprsas_target *targ;
375	uint16_t handle;
376
377	MPR_FUNCTRACE(sc);
378
379	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
380	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
381	targ = tm->cm_targ;
382
383	if (reply == NULL) {
384		/* XXX retry the remove after the diag reset completes? */
385		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
386		    "0x%04x\n", __func__, handle);
387		mprsas_free_tm(sc, tm);
388		return;
389	}
390
391	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
392	    MPI2_IOCSTATUS_SUCCESS) {
393		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
394		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
395	}
396
397	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
398	    le32toh(reply->TerminationCount));
399	mpr_free_reply(sc, tm->cm_reply_data);
400	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
401
402	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
403	    targ->tid, handle);
404
405	/*
406	 * Don't clear target if remove fails because things will get confusing.
407	 * Leave the devname and sasaddr intact so that we know to avoid reusing
408	 * this target id if possible, and so we can assign the same target id
409	 * to this device if it comes back in the future.
410	 */
411	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
412	    MPI2_IOCSTATUS_SUCCESS) {
413		targ = tm->cm_targ;
414		targ->handle = 0x0;
415		targ->encl_handle = 0x0;
416		targ->encl_level_valid = 0x0;
417		targ->encl_level = 0x0;
418		targ->connector_name[0] = ' ';
419		targ->connector_name[1] = ' ';
420		targ->connector_name[2] = ' ';
421		targ->connector_name[3] = ' ';
422		targ->encl_slot = 0x0;
423		targ->exp_dev_handle = 0x0;
424		targ->phy_num = 0x0;
425		targ->linkrate = 0x0;
426		targ->devinfo = 0x0;
427		targ->flags = 0x0;
428		targ->scsi_req_desc_type = 0;
429	}
430
431	mprsas_free_tm(sc, tm);
432}
433
434
435/*
436 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
437 * Otherwise Volume Delete is same as Bare Drive Removal.
438 */
439void
440mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
441{
442	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
443	struct mpr_softc *sc;
444	struct mpr_command *cm;
445	struct mprsas_target *targ = NULL;
446
447	MPR_FUNCTRACE(sassc->sc);
448	sc = sassc->sc;
449
450	targ = mprsas_find_target_by_handle(sassc, 0, handle);
451	if (targ == NULL) {
452		/* FIXME: what is the action? */
453		/* We don't know about this device? */
454		mpr_dprint(sc, MPR_ERROR,
455		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
456		return;
457	}
458
459	targ->flags |= MPRSAS_TARGET_INREMOVAL;
460
461	cm = mprsas_alloc_tm(sc);
462	if (cm == NULL) {
463		mpr_dprint(sc, MPR_ERROR,
464		    "%s: command alloc failure\n", __func__);
465		return;
466	}
467
468	mprsas_rescan_target(sc, targ);
469
470	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
471	req->DevHandle = targ->handle;
472	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
473
474	if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
475		/* SAS Hard Link Reset / SATA Link Reset */
476		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
477	} else {
478		/* PCIe Protocol Level Reset*/
479		req->MsgFlags =
480		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
481	}
482
483	cm->cm_targ = targ;
484	cm->cm_data = NULL;
485	cm->cm_complete = mprsas_remove_volume;
486	cm->cm_complete_data = (void *)(uintptr_t)handle;
487
488	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
489	    __func__, targ->tid);
490	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
491
492	mpr_map_command(sc, cm);
493}
494
495/*
496 * The firmware performs debounce on the link to avoid transient link errors
497 * and false removals.  When it does decide that link has been lost and a
498 * device needs to go away, it expects that the host will perform a target reset
499 * and then an op remove.  The reset has the side-effect of aborting any
500 * outstanding requests for the device, which is required for the op-remove to
501 * succeed.  It's not clear if the host should check for the device coming back
502 * alive after the reset.
503 */
/*
 * Start removal of a bare device: mark the target as in-removal, schedule
 * a rescan, and issue a target-reset TM.  The completion handler
 * (mprsas_remove_device) then follows up with SAS_OP_REMOVE_DEVICE once
 * outstanding commands have drained.
 */
void
mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *tm;
	struct mprsas_target *targ = NULL;

	MPR_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mprsas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
		    __func__, handle);
		return;
	}

	targ->flags |= MPRSAS_TARGET_INREMOVAL;

	tm = mprsas_alloc_tm(sc);
	if (tm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
		    __func__);
		return;
	}

	mprsas_rescan_target(sc, targ);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	tm->cm_targ = targ;
	tm->cm_data = NULL;
	tm->cm_complete = mprsas_remove_device;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);

	mpr_map_command(sc, tm);
}
554
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_remove().  Reuses the same command to send the
 * SAS_IO_UNIT_CONTROL REMOVE_DEVICE operation, either immediately (when
 * the target has no outstanding commands) or deferred via
 * targ->pending_remove_tm until the in-flight commands drain.
 */
static void
mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mprsas_target *targ;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mprsas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	/*
	 * Wait to send the REMOVE_DEVICE until all the commands have cleared.
	 * They should be aborted or time out and we'll kick this off there
	 * if so.
	 */
	if (TAILQ_FIRST(&targ->commands) == NULL) {
		mpr_dprint(sc, MPR_INFO, "No pending commands: starting remove_device\n");
		mpr_map_command(sc, tm);
		targ->pending_remove_tm = NULL;
	} else {
		targ->pending_remove_tm = tm;
	}

	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
}
631
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE sent by
 * mprsas_remove_device().  On success, clears the target's cached state
 * (leaving devname/sasaddr intact for possible target-id reuse) and frees
 * any per-LUN records.  The TM command is freed in all paths.
 */
static void
mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	targ = tm->cm_targ;

	/*
	 * At this point, we should have no pending commands for the target.
	 * The remove target has just completed.
	 */
	KASSERT(TAILQ_FIRST(&targ->commands) == NULL,
	    ("%s: no commands should be pending\n", __func__));

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
	    __func__, handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;

		while (!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPR);
		}
	}

	mprsas_free_tm(sc, tm);
}
711
712static int
713mprsas_register_events(struct mpr_softc *sc)
714{
715	uint8_t events[16];
716
717	bzero(events, 16);
718	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
719	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
720	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
721	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
722	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
723	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
724	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
725	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
726	setbit(events, MPI2_EVENT_IR_VOLUME);
727	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
728	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
729	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
730	setbit(events, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
731	if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
732		setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
733		if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
734			setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
735			setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
736			setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
737		}
738	}
739
740	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
741	    &sc->sassc->mprsas_eh);
742
743	return (0);
744}
745
/*
 * Attach the SAS/CAM layer: allocate the SAS softc and target array,
 * create the SIM queue and SIM, start the event taskqueue, register the
 * CAM bus, freeze the SIM queue for discovery, and hook up async EEDP
 * notifications.  Returns 0 on success or an errno; on failure the
 * partially-built state is torn down via mpr_detach_sas().
 */
int
mpr_attach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	cam_status status;
	int unit, error = 0, reqs;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mprsas_target) *
	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
	sc->sassc = sassc;
	sassc->sc = sc;

	/* Reserve the high-priority requests (plus one) from the SIMQ depth. */
	reqs = sc->num_reqs - sc->num_prireqs - 1;
	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mpr_dev);
	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
	    unit, &sc->mpr_mtx, reqs, reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mpr_dev));

	mpr_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %d registering SCSI bus\n", error);
		mpr_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mprsas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
#else
		event = AC_FOUND_DEVICE;
#endif

		/*
		 * Prior to the CAM locking improvements, we can't call
		 * xpt_register_async() with a particular path specified.
		 *
		 * If a path isn't specified, xpt_register_async() will
		 * generate a wildcard path and acquire the XPT lock while
		 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
		 * It will then drop the XPT lock once that is done.
		 *
		 * If a path is specified for xpt_register_async(), it will
		 * not acquire and drop the XPT lock around the call to
		 * xpt_action().  xpt_action() asserts that the caller
		 * holds the SIM lock, so the SIM lock has to be held when
		 * calling xpt_register_async() when the path is specified.
		 *
		 * But xpt_register_async calls xpt_for_all_devices(),
		 * which calls xptbustraverse(), which will acquire each
		 * SIM lock.  When it traverses our particular bus, it will
		 * necessarily acquire the SIM lock, which will lead to a
		 * recursive lock acquisition.
		 *
		 * The CAM locking changes fix this problem by acquiring
		 * the XPT topology lock around bus traversal in
		 * xptbustraverse(), so the caller can hold the SIM lock
		 * and it does not cause a recursive lock acquisition.
		 *
		 * These __FreeBSD_version values are approximate, especially
		 * for stable/10, which is two months later than the actual
		 * change.
		 */

#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		mpr_unlock(sc);
		status = xpt_register_async(event, mprsas_async, sc,
					    NULL);
		mpr_lock(sc);
#else
		status = xpt_register_async(event, mprsas_async, sc,
					    sassc->path);
#endif

		if (status != CAM_REQ_CMP) {
			mpr_dprint(sc, MPR_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mpr_printf(sc, "EEDP capabilities disabled.\n");
	}

	mpr_unlock(sc);

	mprsas_register_events(sc);
out:
	if (error)
		mpr_detach_sas(sc);

	mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
	return (error);
}
909
/*
 * Tear down everything mpr_attach_sas() built: deregister events, drain
 * and free the taskqueue, unwind the startup refcount, remove the async
 * handler, deregister and free the SIM/SIMQ, and release the per-target
 * LUN lists and the softc.  Also used to clean up a partial attach, so
 * every pointer is checked before use.  Always returns 0.
 */
int
mpr_detach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	struct mprsas_lun *lun, *lun_tmp;
	struct mprsas_target *targ;
	int i;

	MPR_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mpr_deregister_events(sc, sassc->mprsas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mpr_lock(sc);

	while (sassc->startup_refcount != 0)
		mprsas_startup_decrement(sassc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		/* Registering for zero events removes the handler. */
		xpt_register_async(0, mprsas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	if (sassc->flags & MPRSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mpr_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	for (i = 0; i < sassc->maxtargets; i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPR);
		}
	}
	free(sassc->targets, M_MPR);
	free(sassc, M_MPR);
	sc->sassc = NULL;

	return (0);
}
972
/*
 * Called when firmware discovery finishes: cancel any pending discovery
 * timeout and, on first initialization, arm the mapping-table
 * missing-device check callout.
 */
void
mprsas_discovery_end(struct mprsas_softc *sassc)
{
	struct mpr_softc *sc = sassc->sc;

	MPR_FUNCTRACE(sc);

	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
		callout_stop(&sassc->discovery_callout);

	/*
	 * After discovery has completed, check the mapping table for any
	 * missing devices and update their missing counts. Only do this once
	 * whenever the driver is initialized so that missing counts aren't
	 * updated unnecessarily. Note that just because discovery has
	 * completed doesn't mean that events have been processed yet. The
	 * check_devices function is a callout timer that checks if ALL devices
	 * are missing. If so, it will wait a little longer for events to
	 * complete and keep resetting itself until some device in the mapping
	 * table is not missing, meaning that event processing has started.
	 */
	if (sc->track_mapping_events) {
		mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
		    "completed. Check for missing devices in the mapping "
		    "table.\n");
		callout_reset(&sc->device_check_callout,
		    MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
		    sc);
	}
}
1003
/*
 * CAM action entry point for this SIM.  Must be called with the
 * controller lock held.  Synchronously-handled CCBs fall through to the
 * xpt_done() at the bottom; asynchronous ones (SCSI I/O, SMP I/O,
 * device reset) return early and are completed from their own paths.
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mpr_softc *sc = sassc->sc;

		/* Report HBA capabilities and limits to CAM. */
		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
		/* PIM_NOSCAN: the driver drives rescans itself. */
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		cpi->maxio = sc->maxio;
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* A zero handle means the target is not present. */
		if (targ->handle == 0x0) {
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Map the firmware-reported link rate to a bit rate. */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		/* Completed asynchronously by the reset TM path. */
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
		    "XPT_RESET_DEV\n");
		mprsas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* Not supported by the hardware; report success to CAM. */
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
		    "for abort or reset\n");
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* Completed asynchronously by mprsas_scsiio_complete(). */
		mprsas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mprsas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1138
1139static void
1140mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1141    target_id_t target_id, lun_id_t lun_id)
1142{
1143	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1144	struct cam_path *path;
1145
1146	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1147	    ac_code, target_id, (uintmax_t)lun_id);
1148
1149	if (xpt_create_path(&path, NULL,
1150		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1151		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1152		    "notification\n");
1153		return;
1154	}
1155
1156	xpt_async(ac_code, path, NULL);
1157	xpt_free_path(path);
1158}
1159
/*
 * Complete every non-free command with a forced-NULL reply after a diag
 * reset.  Called with the controller lock held.  Commands with a
 * completion callback are completed; sleeping submitters are woken;
 * anything still unaccounted for is logged.
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;		/* set when this cm was handed back somehow */

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		if (cm->cm_state == MPR_CM_STATE_FREE)
			continue;

		/* NULL reply tells completions this was a reset, not I/O. */
		cm->cm_state = MPR_CM_STATE_BUSY;
		cm->cm_reply = NULL;
		completed = 0;

		/*
		 * Free the data buffer of a timed-out SATA identify
		 * command; its normal completion will never run.
		 */
		if (cm->cm_flags & MPR_CM_FLAGS_SATA_ID_TIMEOUT) {
			MPASS(cm->cm_data);
			free(cm->cm_data, M_MPR);
			cm->cm_data = NULL;
		}

		/* Polled submitters spin on the COMPLETE flag. */
		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		} else if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}

	sc->io_cmds_active = 0;
}
1214
/*
 * Re-initialization hook called as part of a controller (diag) reset.
 * Puts the SAS layer back into startup mode, announces a bus reset to
 * CAM, flushes all outstanding commands, and clears every target's
 * handles so they get rediscovered with fresh values.
 */
void
mprsas_handle_reinit(struct mpr_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPRSAS_IN_STARTUP;
	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
	mprsas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mprsas_complete_all_commands(sc);

	mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
	    __func__, sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		/* A non-zero count here means commands were not flushed. */
		if (sc->sassc->targets[i].outstanding != 0)
			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
	}
}
1257static void
1258mprsas_tm_timeout(void *data)
1259{
1260	struct mpr_command *tm = data;
1261	struct mpr_softc *sc = tm->cm_sc;
1262
1263	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1264
1265	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
1266	    "out\n", tm);
1267
1268	KASSERT(tm->cm_state == MPR_CM_STATE_INQUEUE,
1269	    ("command not inqueue\n"));
1270
1271	tm->cm_state = MPR_CM_STATE_BUSY;
1272	mpr_reinit(sc);
1273}
1274
1275static void
1276mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1277{
1278	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1279	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1280	unsigned int cm_count = 0;
1281	struct mpr_command *cm;
1282	struct mprsas_target *targ;
1283
1284	callout_stop(&tm->cm_callout);
1285
1286	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1287	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1288	targ = tm->cm_targ;
1289
1290	/*
1291	 * Currently there should be no way we can hit this case.  It only
1292	 * happens when we have a failure to allocate chain frames, and
1293	 * task management commands don't have S/G lists.
1294	 */
1295	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1296		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
1297		    "%s: cm_flags = %#x for LUN reset! "
1298		    "This should not happen!\n", __func__, tm->cm_flags);
1299		mprsas_free_tm(sc, tm);
1300		return;
1301	}
1302
1303	if (reply == NULL) {
1304		mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
1305		    tm);
1306		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1307			/* this completion was due to a reset, just cleanup */
1308			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1309			    "reset, ignoring NULL LUN reset reply\n");
1310			targ->tm = NULL;
1311			mprsas_free_tm(sc, tm);
1312		}
1313		else {
1314			/* we should have gotten a reply. */
1315			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1316			    "LUN reset attempt, resetting controller\n");
1317			mpr_reinit(sc);
1318		}
1319		return;
1320	}
1321
1322	mpr_dprint(sc, MPR_RECOVERY,
1323	    "logical unit reset status 0x%x code 0x%x count %u\n",
1324	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1325	    le32toh(reply->TerminationCount));
1326
1327	/*
1328	 * See if there are any outstanding commands for this LUN.
1329	 * This could be made more efficient by using a per-LU data
1330	 * structure of some sort.
1331	 */
1332	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1333		if (cm->cm_lun == tm->cm_lun)
1334			cm_count++;
1335	}
1336
1337	if (cm_count == 0) {
1338		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1339		    "Finished recovery after LUN reset for target %u\n",
1340		    targ->tid);
1341
1342		mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
1343		    tm->cm_lun);
1344
1345		/*
1346		 * We've finished recovery for this logical unit.  check and
1347		 * see if some other logical unit has a timedout command
1348		 * that needs to be processed.
1349		 */
1350		cm = TAILQ_FIRST(&targ->timedout_commands);
1351		if (cm) {
1352			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1353			   "More commands to abort for target %u\n", targ->tid);
1354			mprsas_send_abort(sc, tm, cm);
1355		} else {
1356			targ->tm = NULL;
1357			mprsas_free_tm(sc, tm);
1358		}
1359	} else {
1360		/* if we still have commands for this LUN, the reset
1361		 * effectively failed, regardless of the status reported.
1362		 * Escalate to a target reset.
1363		 */
1364		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1365		    "logical unit reset complete for target %u, but still "
1366		    "have %u command(s), sending target reset\n", targ->tid,
1367		    cm_count);
1368		if (!targ->is_nvme || sc->custom_nvme_tm_handling)
1369			mprsas_send_reset(sc, tm,
1370			    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1371		else
1372			mpr_reinit(sc);
1373	}
1374}
1375
1376static void
1377mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1378{
1379	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1380	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1381	struct mprsas_target *targ;
1382
1383	callout_stop(&tm->cm_callout);
1384
1385	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1386	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1387	targ = tm->cm_targ;
1388
1389	/*
1390	 * Currently there should be no way we can hit this case.  It only
1391	 * happens when we have a failure to allocate chain frames, and
1392	 * task management commands don't have S/G lists.
1393	 */
1394	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1395		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
1396		    "reset! This should not happen!\n", __func__, tm->cm_flags);
1397		mprsas_free_tm(sc, tm);
1398		return;
1399	}
1400
1401	if (reply == NULL) {
1402		mpr_dprint(sc, MPR_RECOVERY,
1403		    "NULL target reset reply for tm %p TaskMID %u\n",
1404		    tm, le16toh(req->TaskMID));
1405		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1406			/* this completion was due to a reset, just cleanup */
1407			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1408			    "reset, ignoring NULL target reset reply\n");
1409			targ->tm = NULL;
1410			mprsas_free_tm(sc, tm);
1411		}
1412		else {
1413			/* we should have gotten a reply. */
1414			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1415			    "target reset attempt, resetting controller\n");
1416			mpr_reinit(sc);
1417		}
1418		return;
1419	}
1420
1421	mpr_dprint(sc, MPR_RECOVERY,
1422	    "target reset status 0x%x code 0x%x count %u\n",
1423	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1424	    le32toh(reply->TerminationCount));
1425
1426	if (targ->outstanding == 0) {
1427		/*
1428		 * We've finished recovery for this target and all
1429		 * of its logical units.
1430		 */
1431		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1432		    "Finished reset recovery for target %u\n", targ->tid);
1433
1434		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1435		    CAM_LUN_WILDCARD);
1436
1437		targ->tm = NULL;
1438		mprsas_free_tm(sc, tm);
1439	} else {
1440		/*
1441		 * After a target reset, if this target still has
1442		 * outstanding commands, the reset effectively failed,
1443		 * regardless of the status reported.  escalate.
1444		 */
1445		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1446		    "Target reset complete for target %u, but still have %u "
1447		    "command(s), resetting controller\n", targ->tid,
1448		    targ->outstanding);
1449		mpr_reinit(sc);
1450	}
1451}
1452
1453#define MPR_RESET_TIMEOUT 30
1454
/*
 * Build and send a task management reset (logical unit or target reset)
 * to the target associated with 'tm'.  Arms a timeout callout that will
 * reinit the controller if the TM never completes.  Returns 0 on
 * successful submission, -1 for a missing device handle or an
 * unsupported reset type, or the error from mpr_map_command().
 */
int
mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *target;
	int err, timeout;

	target = tm->cm_targ;
	if (target->handle == 0) {
		mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
		    "%d\n", __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->TaskType = type;

	if (!target->is_nvme || sc->custom_nvme_tm_handling) {
		timeout = MPR_RESET_TIMEOUT;
		/*
		 * Target reset method =
		 *     SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	} else {
		/* NVMe devices may specify their own reset timeout. */
		timeout = (target->controller_reset_timeout) ? (
		    target->controller_reset_timeout) : (MPR_RESET_TIMEOUT);
		/* PCIe Protocol Level Reset*/
		req->MsgFlags =
		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	}

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPR_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending logical unit reset to target %u lun %d\n",
		    target->tid, tm->cm_lun);
		tm->cm_complete = mprsas_logical_unit_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		tm->cm_targ->target_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending target reset to target %u\n", target->tid);
		tm->cm_complete = mprsas_target_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	/* Extra context for drives in enclosures. */
	if (target->encl_level_valid) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    target->encl_level, target->encl_slot,
		    target->connector_name);
	}

	tm->cm_data = NULL;
	tm->cm_complete_data = (void *)tm;

	/* Watchdog: escalate to a controller reinit if the TM stalls. */
	callout_reset(&tm->cm_callout, timeout * hz,
	    mprsas_tm_timeout, tm);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending reset type %u\n", err, type);

	return err;
}
1529
1530
/*
 * Completion handler for an ABORT_TASK task management command.  If no
 * timed-out commands remain for the target, recovery is done.  If the
 * aborted command completed but others are still timed out, continue
 * aborting.  If the aborted command is still at the head of the
 * timed-out list, the abort failed: escalate to a LUN reset.
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed, so its timeout watchdog is no longer needed. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);
		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);
		mprsas_send_abort(sc, tm, cm);
	} else {
		/*
		 * we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1611
1612#define MPR_ABORT_TIMEOUT 5
1613
/*
 * Build and send an ABORT_TASK task management command for the
 * timed-out command 'cm', using 'tm' as the TM frame.  Arms a timeout
 * callout that reinits the controller if the abort itself stalls.
 * Returns 0 on successful submission, -1 for a missing device handle,
 * or the error from mpr_map_command().
 */
static int
mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
    struct mpr_command *cm)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;
	int err, timeout;

	targ = cm->cm_targ;
	if (targ->handle == 0) {
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		   "%s null devhandle for target_id %d\n",
		    __func__, cm->cm_ccb->ccb_h.target_id);
		return -1;
	}

	mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
	    "Aborting command %p\n", cm);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;

	/* XXX Need to handle invalid LUNs */
	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);

	/* TaskMID identifies which outstanding request to abort. */
	req->TaskMID = htole16(cm->cm_desc.Default.SMID);

	tm->cm_data = NULL;
	tm->cm_complete = mprsas_abort_complete;
	tm->cm_complete_data = (void *)tm;
	tm->cm_targ = cm->cm_targ;
	tm->cm_lun = cm->cm_lun;

	/* NVMe devices without custom TM handling get their own timeout. */
	if (!targ->is_nvme || sc->custom_nvme_tm_handling)
		timeout	= MPR_ABORT_TIMEOUT;
	else
		timeout = sc->nvme_abort_timeout;

	/* Watchdog: escalate to a controller reinit if the abort stalls. */
	callout_reset(&tm->cm_callout, timeout * hz,
	    mprsas_tm_timeout, tm);

	targ->aborts++;

	mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending abort for cm %p SMID %u\n",
		    err, cm, req->TaskMID);
	return err;
}
1667
/*
 * Callout handler for a timed-out SCSI I/O command.  Marks the command
 * with CAM_CMD_TIMEOUT, puts it on the target's timed-out list, and
 * starts recovery by sending an abort — unless the target is already in
 * recovery, in which case the command just queues behind the active TM.
 */
static void
mprsas_scsiio_timeout(void *data)
{
	sbintime_t elapsed, now;
	union ccb *ccb;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;
	ccb = cm->cm_ccb;
	now = sbinuptime();

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	/* qos.sim_data holds the submission timestamp (see scsiio paths). */
	elapsed = now - ccb->ccb_h.qos.sim_data;
	mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
	    "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
	    targ->tid, targ->handle, ccb->ccb_h.timeout,
	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    targ->encl_level, targ->encl_slot, targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_flags |= MPR_CM_FLAGS_ON_RECOVERY | MPR_CM_FLAGS_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
		    "processing by tm %p\n", cm, targ->tm);
	}
	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {

		/* start recovery by aborting the first timedout command */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending abort to target %u for SMID %d\n", targ->tid,
		    cm->cm_desc.Default.SMID);
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);
		mprsas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "timedout cm %p failed to allocate a tm\n", cm);
	}
}
1756
1757/**
1758 * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1759 *			     to SCSI Unmap.
1760 * Return 0 - for success,
1761 *	  1 - to immediately return back the command with success status to CAM
1762 *	  negative value - to fallback to firmware path i.e. issue scsi unmap
1763 *			   to FW without any translation.
1764 */
1765static int
1766mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1767    union ccb *ccb, struct mprsas_target *targ)
1768{
1769	Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1770	struct ccb_scsiio *csio;
1771	struct unmap_parm_list *plist;
1772	struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1773	struct nvme_command *c;
1774	int i, res;
1775	uint16_t ndesc, list_len, data_length;
1776	struct mpr_prp_page *prp_page_info;
1777	uint64_t nvme_dsm_ranges_dma_handle;
1778
1779	csio = &ccb->csio;
1780#if __FreeBSD_version >= 1100103
1781	list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1782#else
1783	if (csio->ccb_h.flags & CAM_CDB_POINTER) {
1784		list_len = (ccb->csio.cdb_io.cdb_ptr[7] << 8 |
1785		    ccb->csio.cdb_io.cdb_ptr[8]);
1786	} else {
1787		list_len = (ccb->csio.cdb_io.cdb_bytes[7] << 8 |
1788		    ccb->csio.cdb_io.cdb_bytes[8]);
1789	}
1790#endif
1791	if (!list_len) {
1792		mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
1793		return -EINVAL;
1794	}
1795
1796	plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1797	if (!plist) {
1798		mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1799		    "save UNMAP data\n");
1800		return -ENOMEM;
1801	}
1802
1803	/* Copy SCSI unmap data to a local buffer */
1804	bcopy(csio->data_ptr, plist, csio->dxfer_len);
1805
1806	/* return back the unmap command to CAM with success status,
1807	 * if number of descripts is zero.
1808	 */
1809	ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1810	if (!ndesc) {
1811		mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1812		    "UNMAP cmd is Zero\n");
1813		res = 1;
1814		goto out;
1815	}
1816
1817	data_length = ndesc * sizeof(struct nvme_dsm_range);
1818	if (data_length > targ->MDTS) {
1819		mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1820		    "Device's MDTS: %d\n", data_length, targ->MDTS);
1821		res = -EINVAL;
1822		goto out;
1823	}
1824
1825	prp_page_info = mpr_alloc_prp_page(sc);
1826	KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1827	    "UNMAP command.\n", __func__));
1828
1829	/*
1830	 * Insert the allocated PRP page into the command's PRP page list. This
1831	 * will be freed when the command is freed.
1832	 */
1833	TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1834
1835	nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1836	nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1837
1838	bzero(nvme_dsm_ranges, data_length);
1839
1840	/* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1841	 * for each descriptors contained in SCSI UNMAP data.
1842	 */
1843	for (i = 0; i < ndesc; i++) {
1844		nvme_dsm_ranges[i].length =
1845		    htole32(be32toh(plist->desc[i].nlb));
1846		nvme_dsm_ranges[i].starting_lba =
1847		    htole64(be64toh(plist->desc[i].slba));
1848		nvme_dsm_ranges[i].attributes = 0;
1849	}
1850
1851	/* Build MPI2.6's NVMe Encapsulated Request Message */
1852	req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1853	bzero(req, sizeof(*req));
1854	req->DevHandle = htole16(targ->handle);
1855	req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1856	req->Flags = MPI26_NVME_FLAGS_WRITE;
1857	req->ErrorResponseBaseAddress.High =
1858	    htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1859	req->ErrorResponseBaseAddress.Low =
1860	    htole32(cm->cm_sense_busaddr);
1861	req->ErrorResponseAllocationLength =
1862	    htole16(sizeof(struct nvme_completion));
1863	req->EncapsulatedCommandLength =
1864	    htole16(sizeof(struct nvme_command));
1865	req->DataLength = htole32(data_length);
1866
1867	/* Build NVMe DSM command */
1868	c = (struct nvme_command *) req->NVMe_Command;
1869	c->opc = NVME_OPC_DATASET_MANAGEMENT;
1870	c->nsid = htole32(csio->ccb_h.target_lun + 1);
1871	c->cdw10 = htole32(ndesc - 1);
1872	c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1873
1874	cm->cm_length = data_length;
1875	cm->cm_data = NULL;
1876
1877	cm->cm_complete = mprsas_scsiio_complete;
1878	cm->cm_complete_data = ccb;
1879	cm->cm_targ = targ;
1880	cm->cm_lun = csio->ccb_h.target_lun;
1881	cm->cm_ccb = ccb;
1882
1883	cm->cm_desc.Default.RequestFlags =
1884	    MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
1885
1886	csio->ccb_h.qos.sim_data = sbinuptime();
1887#if __FreeBSD_version >= 1000029
1888	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1889	    mprsas_scsiio_timeout, cm, 0);
1890#else //__FreeBSD_version < 1000029
1891	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1892	    mprsas_scsiio_timeout, cm);
1893#endif //__FreeBSD_version >= 1000029
1894
1895	targ->issued++;
1896	targ->outstanding++;
1897	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1898	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1899
1900	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1901	    __func__, cm, ccb, targ->outstanding);
1902
1903	mpr_build_nvme_prp(sc, cm, req,
1904	    (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1905	mpr_map_command(sc, cm);
1906
1907out:
1908	free(plist, M_MPR);
1909	return 0;
1910}
1911
/*
 * mprsas_action_scsiio - handle an XPT_SCSI_IO CCB from CAM.
 *
 * Validates target state, allocates a driver command, optionally diverts
 * NVMe UNMAPs to a native DataSetManagement encapsulation, then builds an
 * MPI2 SCSI IO request (direction, queue tag, LUN, CDB, optional EEDP
 * protection fields), arms the per-command timeout and posts the command
 * to the controller via mpr_map_command().
 *
 * Called with the softc mutex held (asserted below).  On any early
 * failure the CCB is completed back to CAM with an appropriate status.
 */
static void
mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;
	struct mpr_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
	uint16_t eedp_flags;
	uint32_t mpi_control;
	int rc;

	sc = sassc->sc;
	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A zero handle means no device is currently mapped at this ID. */
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* Direct SCSI I/O to a bare RAID component drive is not allowed. */
	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
		mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed. In either event,
	 * we're removing the target due to a firmware event telling us
	 * the device is now gone (as opposed to some transient event). Since
	 * we're opting to remove failed devices from the OS's view, we need
	 * to propagate that status up the stack.
	 */
	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/* Driver shutdown in progress: fail new I/O immediately. */
	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
		mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPRSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/*
	 * No free command frames, or a diag reset is pending: freeze the SIM
	 * queue and ask CAM to requeue this CCB once the queue is released.
	 */
	cm = mpr_alloc_command(sc);
	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mpr_free_command(sc, cm);
		}
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* For NVME device's issue UNMAP command directly to NVME drives by
	 * constructing equivalent native NVMe DataSetManagement command.
	 */
#if __FreeBSD_version >= 1100103
	scsi_opcode = scsiio_cdb_ptr(csio)[0];
#else
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		scsi_opcode = csio->cdb_io.cdb_ptr[0];
	else
		scsi_opcode = csio->cdb_io.cdb_bytes[0];
#endif
	if (scsi_opcode == UNMAP &&
	    targ->is_nvme &&
	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
		rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
		if (rc == 1) { /* return command to CAM with success status */
			mpr_free_command(sc, cm);
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
			xpt_done(ccb);
			return;
		} else if (!rc) /* Issued NVMe Encapsulated Request Message */
			return;
		/* Any other rc falls through: issue the UNMAP as SCSI IO. */
	}

	/* Build the MPI2 SCSI IO request in the command's request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPR_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* Advertise the additional CDB length for 32-byte CDBs. */
	if (csio->cdb_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	/* Fold in CAM's command priority and the per-target TLR setting. */
	mpi_control |= (csio->priority << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT) &
	    MPI2_SCSIIO_CONTROL_CMDPRI_MASK;
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);

	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mpr_free_command(sc, cm);
		mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	/* Copy the CDB from wherever CAM placed it (pointer vs. inline). */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else {
		KASSERT(csio->cdb_len <= IOCDBLEN,
		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
		    "is not set", csio->cdb_len));
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	}
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* Find the LUN record; lun is NULL if no entry matches. */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
				eedp_flags |=
				    MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
			}
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
				    htole32(req->
				    CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] =
				    (req->CDB.CDB32[1] & 0x1F) | 0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	/*
	 * Attach the data buffer.  With MPR_CM_FLAGS_USE_CCB set, cm_data
	 * carries the CCB itself rather than a plain buffer pointer.
	 */
	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_complete = mprsas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;
	/*
	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
	 * and set descriptor type.
	 */
	if (targ->scsi_req_desc_type ==
	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		cm->cm_desc.FastPathSCSIIO.RequestFlags =
		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
		if (!sc->atomic_desc_capable) {
			cm->cm_desc.FastPathSCSIIO.DevHandle =
			    htole16(targ->handle);
		}
	} else {
		cm->cm_desc.SCSIIO.RequestFlags =
		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
		if (!sc->atomic_desc_capable)
			cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	}

	/* Arm the per-command timeout; stopped in the completion handler. */
	csio->ccb_h.qos.sim_data = sbinuptime();
#if __FreeBSD_version >= 1000029
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mprsas_scsiio_timeout, cm, 0);
#else //__FreeBSD_version < 1000029
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	    mprsas_scsiio_timeout, cm);
#endif //__FreeBSD_version >= 1000029

	/* Account for the command and mark the CCB as queued to the SIM. */
	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	/* Map data buffers and post the request to the controller. */
	mpr_map_command(sc, cm);
	return;
}
2231
/**
 * mpr_sc_failed_io_info - log details of an unsuccessful SCSI_IO request
 */
2235static void
2236mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2237    Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2238{
2239	u32 response_info;
2240	u8 *response_bytes;
2241	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2242	    MPI2_IOCSTATUS_MASK;
2243	u8 scsi_state = mpi_reply->SCSIState;
2244	u8 scsi_status = mpi_reply->SCSIStatus;
2245	char *desc_ioc_state = NULL;
2246	char *desc_scsi_status = NULL;
2247	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
2248
2249	if (log_info == 0x31170000)
2250		return;
2251
2252	desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2253	     ioc_status);
2254	desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2255	    scsi_status);
2256
2257	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2258	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2259	if (targ->encl_level_valid) {
2260		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2261		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2262		    targ->connector_name);
2263	}
2264
2265	/*
2266	 * We can add more detail about underflow data here
2267	 * TO-DO
2268	 */
2269	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2270	    "scsi_state %b\n", desc_scsi_status, scsi_status,
2271	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
2272	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
2273
2274	if (sc->mpr_debug & MPR_XINFO &&
2275	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2276		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2277		scsi_sense_print(csio);
2278		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2279	}
2280
2281	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2282		response_info = le32toh(mpi_reply->ResponseInfo);
2283		response_bytes = (u8 *)&response_info;
2284		mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2285		    response_bytes[0],
2286		    mpr_describe_table(mpr_scsi_taskmgmt_string,
2287		    response_bytes[0]));
2288	}
2289}
2290
2291/** mprsas_nvme_trans_status_code
2292 *
2293 * Convert Native NVMe command error status to
2294 * equivalent SCSI error status.
2295 *
2296 * Returns appropriate scsi_status
2297 */
2298static u8
2299mprsas_nvme_trans_status_code(uint16_t nvme_status,
2300    struct mpr_command *cm)
2301{
2302	u8 status = MPI2_SCSI_STATUS_GOOD;
2303	int skey, asc, ascq;
2304	union ccb *ccb = cm->cm_complete_data;
2305	int returned_sense_len;
2306	uint8_t sct, sc;
2307
2308	sct = NVME_STATUS_GET_SCT(nvme_status);
2309	sc = NVME_STATUS_GET_SC(nvme_status);
2310
2311	status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2312	skey = SSD_KEY_ILLEGAL_REQUEST;
2313	asc = SCSI_ASC_NO_SENSE;
2314	ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2315
2316	switch (sct) {
2317	case NVME_SCT_GENERIC:
2318		switch (sc) {
2319		case NVME_SC_SUCCESS:
2320			status = MPI2_SCSI_STATUS_GOOD;
2321			skey = SSD_KEY_NO_SENSE;
2322			asc = SCSI_ASC_NO_SENSE;
2323			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2324			break;
2325		case NVME_SC_INVALID_OPCODE:
2326			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2327			skey = SSD_KEY_ILLEGAL_REQUEST;
2328			asc = SCSI_ASC_ILLEGAL_COMMAND;
2329			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2330			break;
2331		case NVME_SC_INVALID_FIELD:
2332			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2333			skey = SSD_KEY_ILLEGAL_REQUEST;
2334			asc = SCSI_ASC_INVALID_CDB;
2335			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2336			break;
2337		case NVME_SC_DATA_TRANSFER_ERROR:
2338			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2339			skey = SSD_KEY_MEDIUM_ERROR;
2340			asc = SCSI_ASC_NO_SENSE;
2341			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2342			break;
2343		case NVME_SC_ABORTED_POWER_LOSS:
2344			status = MPI2_SCSI_STATUS_TASK_ABORTED;
2345			skey = SSD_KEY_ABORTED_COMMAND;
2346			asc = SCSI_ASC_WARNING;
2347			ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
2348			break;
2349		case NVME_SC_INTERNAL_DEVICE_ERROR:
2350			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2351			skey = SSD_KEY_HARDWARE_ERROR;
2352			asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
2353			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2354			break;
2355		case NVME_SC_ABORTED_BY_REQUEST:
2356		case NVME_SC_ABORTED_SQ_DELETION:
2357		case NVME_SC_ABORTED_FAILED_FUSED:
2358		case NVME_SC_ABORTED_MISSING_FUSED:
2359			status = MPI2_SCSI_STATUS_TASK_ABORTED;
2360			skey = SSD_KEY_ABORTED_COMMAND;
2361			asc = SCSI_ASC_NO_SENSE;
2362			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2363			break;
2364		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
2365			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2366			skey = SSD_KEY_ILLEGAL_REQUEST;
2367			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2368			ascq = SCSI_ASCQ_INVALID_LUN_ID;
2369			break;
2370		case NVME_SC_LBA_OUT_OF_RANGE:
2371			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2372			skey = SSD_KEY_ILLEGAL_REQUEST;
2373			asc = SCSI_ASC_ILLEGAL_BLOCK;
2374			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2375			break;
2376		case NVME_SC_CAPACITY_EXCEEDED:
2377			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2378			skey = SSD_KEY_MEDIUM_ERROR;
2379			asc = SCSI_ASC_NO_SENSE;
2380			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2381			break;
2382		case NVME_SC_NAMESPACE_NOT_READY:
2383			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2384			skey = SSD_KEY_NOT_READY;
2385			asc = SCSI_ASC_LUN_NOT_READY;
2386			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2387			break;
2388		}
2389		break;
2390	case NVME_SCT_COMMAND_SPECIFIC:
2391		switch (sc) {
2392		case NVME_SC_INVALID_FORMAT:
2393			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2394			skey = SSD_KEY_ILLEGAL_REQUEST;
2395			asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
2396			ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
2397			break;
2398		case NVME_SC_CONFLICTING_ATTRIBUTES:
2399			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2400			skey = SSD_KEY_ILLEGAL_REQUEST;
2401			asc = SCSI_ASC_INVALID_CDB;
2402			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2403			break;
2404		}
2405		break;
2406	case NVME_SCT_MEDIA_ERROR:
2407		switch (sc) {
2408		case NVME_SC_WRITE_FAULTS:
2409			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2410			skey = SSD_KEY_MEDIUM_ERROR;
2411			asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
2412			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2413			break;
2414		case NVME_SC_UNRECOVERED_READ_ERROR:
2415			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2416			skey = SSD_KEY_MEDIUM_ERROR;
2417			asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
2418			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2419			break;
2420		case NVME_SC_GUARD_CHECK_ERROR:
2421			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2422			skey = SSD_KEY_MEDIUM_ERROR;
2423			asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
2424			ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
2425			break;
2426		case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
2427			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2428			skey = SSD_KEY_MEDIUM_ERROR;
2429			asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
2430			ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
2431			break;
2432		case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
2433			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2434			skey = SSD_KEY_MEDIUM_ERROR;
2435			asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
2436			ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
2437			break;
2438		case NVME_SC_COMPARE_FAILURE:
2439			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2440			skey = SSD_KEY_MISCOMPARE;
2441			asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
2442			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2443			break;
2444		case NVME_SC_ACCESS_DENIED:
2445			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2446			skey = SSD_KEY_ILLEGAL_REQUEST;
2447			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2448			ascq = SCSI_ASCQ_INVALID_LUN_ID;
2449			break;
2450		}
2451		break;
2452	}
2453
2454	returned_sense_len = sizeof(struct scsi_sense_data);
2455	if (returned_sense_len < ccb->csio.sense_len)
2456		ccb->csio.sense_resid = ccb->csio.sense_len -
2457		    returned_sense_len;
2458	else
2459		ccb->csio.sense_resid = 0;
2460
2461	scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
2462	    1, skey, asc, ascq, SSD_ELEM_NONE);
2463	ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2464
2465	return status;
2466}
2467
2468/** mprsas_complete_nvme_unmap
2469 *
2470 * Complete native NVMe command issued using NVMe Encapsulated
2471 * Request Message.
2472 */
2473static u8
2474mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2475{
2476	Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2477	struct nvme_completion *nvme_completion = NULL;
2478	u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2479
2480	mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
2481	if (le16toh(mpi_reply->ErrorResponseCount)){
2482		nvme_completion = (struct nvme_completion *)cm->cm_sense;
2483		scsi_status = mprsas_nvme_trans_status_code(
2484		    nvme_completion->status, cm);
2485	}
2486	return scsi_status;
2487}
2488
2489static void
2490mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2491{
2492	MPI2_SCSI_IO_REPLY *rep;
2493	union ccb *ccb;
2494	struct ccb_scsiio *csio;
2495	struct mprsas_softc *sassc;
2496	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2497	u8 *TLR_bits, TLR_on, *scsi_cdb;
2498	int dir = 0, i;
2499	u16 alloc_len;
2500	struct mprsas_target *target;
2501	target_id_t target_id;
2502
2503	MPR_FUNCTRACE(sc);
2504	mpr_dprint(sc, MPR_TRACE,
2505	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2506	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2507	    cm->cm_targ->outstanding);
2508
2509	callout_stop(&cm->cm_callout);
2510	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2511
2512	sassc = sc->sassc;
2513	ccb = cm->cm_complete_data;
2514	csio = &ccb->csio;
2515	target_id = csio->ccb_h.target_id;
2516	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2517	/*
2518	 * XXX KDM if the chain allocation fails, does it matter if we do
2519	 * the sync and unload here?  It is simpler to do it in every case,
2520	 * assuming it doesn't cause problems.
2521	 */
2522	if (cm->cm_data != NULL) {
2523		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2524			dir = BUS_DMASYNC_POSTREAD;
2525		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2526			dir = BUS_DMASYNC_POSTWRITE;
2527		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2528		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2529	}
2530
2531	cm->cm_targ->completed++;
2532	cm->cm_targ->outstanding--;
2533	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2534	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2535
2536	if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
2537		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2538		KASSERT(cm->cm_state == MPR_CM_STATE_BUSY,
2539		    ("Not busy for CM_FLAGS_TIMEDOUT: %d\n", cm->cm_state));
2540		cm->cm_flags &= ~MPR_CM_FLAGS_ON_RECOVERY;
2541		if (cm->cm_reply != NULL)
2542			mprsas_log_command(cm, MPR_RECOVERY,
2543			    "completed timedout cm %p ccb %p during recovery "
2544			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2545			    le16toh(rep->IOCStatus), rep->SCSIStatus,
2546			    rep->SCSIState, le32toh(rep->TransferCount));
2547		else
2548			mprsas_log_command(cm, MPR_RECOVERY,
2549			    "completed timedout cm %p ccb %p during recovery\n",
2550			    cm, cm->cm_ccb);
2551	} else if (cm->cm_targ->tm != NULL) {
2552		if (cm->cm_reply != NULL)
2553			mprsas_log_command(cm, MPR_RECOVERY,
2554			    "completed cm %p ccb %p during recovery "
2555			    "ioc %x scsi %x state %x xfer %u\n",
2556			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2557			    rep->SCSIStatus, rep->SCSIState,
2558			    le32toh(rep->TransferCount));
2559		else
2560			mprsas_log_command(cm, MPR_RECOVERY,
2561			    "completed cm %p ccb %p during recovery\n",
2562			    cm, cm->cm_ccb);
2563	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2564		mprsas_log_command(cm, MPR_RECOVERY,
2565		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2566	}
2567
2568	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2569		/*
2570		 * We ran into an error after we tried to map the command,
2571		 * so we're getting a callback without queueing the command
2572		 * to the hardware.  So we set the status here, and it will
2573		 * be retained below.  We'll go through the "fast path",
2574		 * because there can be no reply when we haven't actually
2575		 * gone out to the hardware.
2576		 */
2577		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2578
2579		/*
2580		 * Currently the only error included in the mask is
2581		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2582		 * chain frames.  We need to freeze the queue until we get
2583		 * a command that completed without this error, which will
2584		 * hopefully have some chain frames attached that we can
2585		 * use.  If we wanted to get smarter about it, we would
2586		 * only unfreeze the queue in this condition when we're
2587		 * sure that we're getting some chain frames back.  That's
2588		 * probably unnecessary.
2589		 */
2590		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2591			xpt_freeze_simq(sassc->sim, 1);
2592			sassc->flags |= MPRSAS_QUEUE_FROZEN;
2593			mpr_dprint(sc, MPR_XINFO, "Error sending command, "
2594			    "freezing SIM queue\n");
2595		}
2596	}
2597
2598	/*
2599	 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2600	 * flag, and use it in a few places in the rest of this function for
2601	 * convenience. Use the macro if available.
2602	 */
2603#if __FreeBSD_version >= 1100103
2604	scsi_cdb = scsiio_cdb_ptr(csio);
2605#else
2606	if (csio->ccb_h.flags & CAM_CDB_POINTER)
2607		scsi_cdb = csio->cdb_io.cdb_ptr;
2608	else
2609		scsi_cdb = csio->cdb_io.cdb_bytes;
2610#endif
2611
2612	/*
2613	 * If this is a Start Stop Unit command and it was issued by the driver
2614	 * during shutdown, decrement the refcount to account for all of the
2615	 * commands that were sent.  All SSU commands should be completed before
2616	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2617	 * is TRUE.
2618	 */
2619	if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2620		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2621		sc->SSU_refcount--;
2622	}
2623
2624	/* Take the fast path to completion */
2625	if (cm->cm_reply == NULL) {
2626		if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2627			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2628				mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2629			else {
2630				mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2631				csio->scsi_status = SCSI_STATUS_OK;
2632			}
2633			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2634				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2635				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2636				mpr_dprint(sc, MPR_XINFO,
2637				    "Unfreezing SIM queue\n");
2638			}
2639		}
2640
2641		/*
2642		 * There are two scenarios where the status won't be
2643		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
2644		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2645		 */
2646		if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2647			/*
2648			 * Freeze the dev queue so that commands are
2649			 * executed in the correct order after error
2650			 * recovery.
2651			 */
2652			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2653			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2654		}
2655		mpr_free_command(sc, cm);
2656		xpt_done(ccb);
2657		return;
2658	}
2659
2660	target = &sassc->targets[target_id];
2661	if (scsi_cdb[0] == UNMAP &&
2662	    target->is_nvme &&
2663	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2664		rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2665		csio->scsi_status = rep->SCSIStatus;
2666	}
2667
2668	mprsas_log_command(cm, MPR_XINFO,
2669	    "ioc %x scsi %x state %x xfer %u\n",
2670	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2671	    le32toh(rep->TransferCount));
2672
2673	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2674	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2675		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2676		/* FALLTHROUGH */
2677	case MPI2_IOCSTATUS_SUCCESS:
2678	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2679		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2680		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2681			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2682
2683		/* Completion failed at the transport level. */
2684		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2685		    MPI2_SCSI_STATE_TERMINATED)) {
2686			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2687			break;
2688		}
2689
2690		/* In a modern packetized environment, an autosense failure
2691		 * implies that there's not much else that can be done to
2692		 * recover the command.
2693		 */
2694		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2695			mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2696			break;
2697		}
2698
2699		/*
2700		 * CAM doesn't care about SAS Response Info data, but if this is
2701		 * the state check if TLR should be done.  If not, clear the
2702		 * TLR_bits for the target.
2703		 */
2704		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2705		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2706		    == MPR_SCSI_RI_INVALID_FRAME)) {
2707			sc->mapping_table[target_id].TLR_bits =
2708			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2709		}
2710
2711		/*
2712		 * Intentionally override the normal SCSI status reporting
2713		 * for these two cases.  These are likely to happen in a
2714		 * multi-initiator environment, and we want to make sure that
2715		 * CAM retries these commands rather than fail them.
2716		 */
2717		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2718		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2719			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2720			break;
2721		}
2722
2723		/* Handle normal status and sense */
2724		csio->scsi_status = rep->SCSIStatus;
2725		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2726			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2727		else
2728			mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2729
2730		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2731			int sense_len, returned_sense_len;
2732
2733			returned_sense_len = min(le32toh(rep->SenseCount),
2734			    sizeof(struct scsi_sense_data));
2735			if (returned_sense_len < csio->sense_len)
2736				csio->sense_resid = csio->sense_len -
2737				    returned_sense_len;
2738			else
2739				csio->sense_resid = 0;
2740
2741			sense_len = min(returned_sense_len,
2742			    csio->sense_len - csio->sense_resid);
2743			bzero(&csio->sense_data, sizeof(csio->sense_data));
2744			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2745			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2746		}
2747
2748		/*
2749		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2750		 * and it's page code 0 (Supported Page List), and there is
2751		 * inquiry data, and this is for a sequential access device, and
2752		 * the device is an SSP target, and TLR is supported by the
2753		 * controller, turn the TLR_bits value ON if page 0x90 is
2754		 * supported.
2755		 */
2756		if ((scsi_cdb[0] == INQUIRY) &&
2757		    (scsi_cdb[1] & SI_EVPD) &&
2758		    (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2759		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2760		    (csio->data_ptr != NULL) &&
2761		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2762		    (sc->control_TLR) &&
2763		    (sc->mapping_table[target_id].device_info &
2764		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2765			vpd_list = (struct scsi_vpd_supported_page_list *)
2766			    csio->data_ptr;
2767			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2768			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2769			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2770			alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2771			alloc_len -= csio->resid;
2772			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2773				if (vpd_list->list[i] == 0x90) {
2774					*TLR_bits = TLR_on;
2775					break;
2776				}
2777			}
2778		}
2779
2780		/*
2781		 * If this is a SATA direct-access end device, mark it so that
2782		 * a SCSI StartStopUnit command will be sent to it when the
2783		 * driver is being shutdown.
2784		 */
2785		if ((scsi_cdb[0] == INQUIRY) &&
2786		    (csio->data_ptr != NULL) &&
2787		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2788		    (sc->mapping_table[target_id].device_info &
2789		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2790		    ((sc->mapping_table[target_id].device_info &
2791		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2792		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2793			target = &sassc->targets[target_id];
2794			target->supports_SSU = TRUE;
2795			mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2796			    target_id);
2797		}
2798		break;
2799	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2800	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2801		/*
2802		 * If devinfo is 0 this will be a volume.  In that case don't
2803		 * tell CAM that the volume is not there.  We want volumes to
2804		 * be enumerated until they are deleted/removed, not just
2805		 * failed.
2806		 */
2807		if (cm->cm_targ->devinfo == 0)
2808			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2809		else
2810			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2811		break;
2812	case MPI2_IOCSTATUS_INVALID_SGL:
2813		mpr_print_scsiio_cmd(sc, cm);
2814		mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2815		break;
2816	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2817		/*
2818		 * This is one of the responses that comes back when an I/O
2819		 * has been aborted.  If it is because of a timeout that we
2820		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2821		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2822		 * command is the same (it gets retried, subject to the
2823		 * retry counter), the only difference is what gets printed
2824		 * on the console.
2825		 */
2826		if (cm->cm_flags & MPR_CM_FLAGS_TIMEDOUT)
2827			mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2828		else
2829			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2830		break;
2831	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2832		/* resid is ignored for this condition */
2833		csio->resid = 0;
2834		mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2835		break;
2836	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2837	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2838		/*
2839		 * These can sometimes be transient transport-related
2840		 * errors, and sometimes persistent drive-related errors.
2841		 * We used to retry these without decrementing the retry
2842		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2843		 * we hit a persistent drive problem that returns one of
2844		 * these error codes, we would retry indefinitely.  So,
2845		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2846		 * count and avoid infinite retries.  We're taking the
2847		 * potential risk of flagging false failures in the event
2848		 * of a topology-related error (e.g. a SAS expander problem
2849		 * causes a command addressed to a drive to fail), but
2850		 * avoiding getting into an infinite retry loop. However,
2851		 * if we get them while were moving a device, we should
2852		 * fail the request as 'not there' because the device
2853		 * is effectively gone.
2854		 */
2855		if (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL)
2856			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2857		else
2858			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2859		mpr_dprint(sc, MPR_INFO,
2860		    "Controller reported %s tgt %u SMID %u loginfo %x%s\n",
2861		    mpr_describe_table(mpr_iocstatus_string,
2862		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2863		    target_id, cm->cm_desc.Default.SMID,
2864		    le32toh(rep->IOCLogInfo),
2865		    (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) ? " departing" : "");
2866		mpr_dprint(sc, MPR_XINFO,
2867		    "SCSIStatus %x SCSIState %x xfercount %u\n",
2868		    rep->SCSIStatus, rep->SCSIState,
2869		    le32toh(rep->TransferCount));
2870		break;
2871	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2872	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2873	case MPI2_IOCSTATUS_INVALID_VPID:
2874	case MPI2_IOCSTATUS_INVALID_FIELD:
2875	case MPI2_IOCSTATUS_INVALID_STATE:
2876	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2877	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2878	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2879	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2880	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2881	default:
2882		mprsas_log_command(cm, MPR_XINFO,
2883		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2884		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2885		    rep->SCSIStatus, rep->SCSIState,
2886		    le32toh(rep->TransferCount));
2887		csio->resid = cm->cm_length;
2888
2889		if (scsi_cdb[0] == UNMAP &&
2890		    target->is_nvme &&
2891		    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2892			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2893		else
2894			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2895
2896		break;
2897	}
2898
2899	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2900
2901	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2902		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2903		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2904		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2905		    "queue\n");
2906	}
2907
2908	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2909		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2910		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2911	}
2912
2913	/*
2914	 * Check to see if we're removing the device. If so, and this is the
2915	 * last command on the queue, proceed with the deferred removal of the
2916	 * device.  Note, for removing a volume, this won't trigger because
2917	 * pending_remove_tm will be NULL.
2918	 */
2919	if (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) {
2920		if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL &&
2921		    cm->cm_targ->pending_remove_tm != NULL) {
2922			mpr_dprint(sc, MPR_INFO, "Last pending command complete: starting remove_device\n");
2923			mpr_map_command(sc, cm->cm_targ->pending_remove_tm);
2924			cm->cm_targ->pending_remove_tm = NULL;
2925		}
2926	}
2927
2928	mpr_free_command(sc, cm);
2929	xpt_done(ccb);
2930}
2931
2932#if __FreeBSD_version >= 900026
2933static void
2934mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2935{
2936	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2937	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2938	uint64_t sasaddr;
2939	union ccb *ccb;
2940
2941	ccb = cm->cm_complete_data;
2942
2943	/*
2944	 * Currently there should be no way we can hit this case.  It only
2945	 * happens when we have a failure to allocate chain frames, and SMP
2946	 * commands require two S/G elements only.  That should be handled
2947	 * in the standard request size.
2948	 */
2949	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2950		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
2951		    "request!\n", __func__, cm->cm_flags);
2952		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2953		goto bailout;
2954        }
2955
2956	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2957	if (rpl == NULL) {
2958		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2959		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2960		goto bailout;
2961	}
2962
2963	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2964	sasaddr = le32toh(req->SASAddress.Low);
2965	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2966
2967	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2968	    MPI2_IOCSTATUS_SUCCESS ||
2969	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2970		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2971		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2972		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2973		goto bailout;
2974	}
2975
2976	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
2977	    "completed successfully\n", __func__, (uintmax_t)sasaddr);
2978
2979	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2980		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2981	else
2982		mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2983
2984bailout:
2985	/*
2986	 * We sync in both directions because we had DMAs in the S/G list
2987	 * in both directions.
2988	 */
2989	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2990			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2991	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2992	mpr_free_command(sc, cm);
2993	xpt_done(ccb);
2994}
2995
/*
 * Send an SMP passthrough request on behalf of an XPT_SMP_IO CCB to the
 * SMP target at 'sasaddr'.  The CCB's request and response buffers are
 * mapped together via a two-element uio so that a single
 * mpr_map_command() call covers the bidirectional transfer;
 * mprsas_smpio_complete() runs when the firmware completes the command.
 * Only single-segment, virtually-addressed buffers are supported.
 */
static void
mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mpr_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mpr_softc *sc;
	struct sglist *sg;	/* NOTE(review): set but never used below */
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

	/*
	 * Locate the virtual addresses of the request and response
	 * buffers.  The newer CAM_DATA_MASK API and the older
	 * CAM_SCATTER_VALID API are handled in separate preprocessor
	 * branches, but the logic is identical.
	 */
#if (__FreeBSD_version >= 1000028) || \
    ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		/*
		 * XXX We don't yet support physical addresses here.
		 */
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}
#else /* __FreeBSD_version < 1000028 */
	/*
	 * XXX We don't yet support physical addresses here.
	 */
	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	/*
	 * If the user wants to send an S/G list, check to make sure they
	 * have single buffers.
	 */
	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
	} else {
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
	}
#endif /* __FreeBSD_version < 1000028 */

	cm = mpr_alloc_command(sc);
	if (cm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
		    __func__);
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	/* Build the MPI SMP passthrough request frame. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
	    "%#jx\n", __func__, (uintmax_t)sasaddr);

	mpr_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mpr_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the outbound request, iovec 1 the inbound reply. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mpr_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mprsas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mpr_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
			MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mpr_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
		    "mpr_map_command()\n", __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mpr_free_command(sc, cm);
	mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;
}
3226
/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address of the SMP target
 * (either the addressed device itself or its parent expander) and hand
 * the request off to mprsas_send_smpcmd().  On any failure the CCB is
 * completed here with an appropriate error status.
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly; otherwise we need to figure out what the expander's
	 * address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		/*
		 * Old probe: resolve the parent through the target table
		 * and validate that it really is an SMP target.
		 */
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		/*
		 * New probe: the parent's devinfo and SAS address are
		 * cached directly on the target.
		 */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */

	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3354#endif //__FreeBSD_version >= 900026
3355
3356static void
3357mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
3358{
3359	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3360	struct mpr_softc *sc;
3361	struct mpr_command *tm;
3362	struct mprsas_target *targ;
3363
3364	MPR_FUNCTRACE(sassc->sc);
3365	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
3366
3367	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
3368	    "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
3369	sc = sassc->sc;
3370	tm = mprsas_alloc_tm(sc);
3371	if (tm == NULL) {
3372		mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
3373		    "mprsas_action_resetdev\n");
3374		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3375		xpt_done(ccb);
3376		return;
3377	}
3378
3379	targ = &sassc->targets[ccb->ccb_h.target_id];
3380	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3381	req->DevHandle = htole16(targ->handle);
3382	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3383
3384	if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
3385		/* SAS Hard Link Reset / SATA Link Reset */
3386		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3387	} else {
3388		/* PCIe Protocol Level Reset*/
3389		req->MsgFlags =
3390		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3391	}
3392
3393	tm->cm_data = NULL;
3394	tm->cm_complete = mprsas_resetdev_complete;
3395	tm->cm_complete_data = ccb;
3396
3397	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
3398	    __func__, targ->tid);
3399	tm->cm_targ = targ;
3400
3401	mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
3402	mpr_map_command(sc, tm);
3403}
3404
3405static void
3406mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3407{
3408	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3409	union ccb *ccb;
3410
3411	MPR_FUNCTRACE(sc);
3412	mtx_assert(&sc->mpr_mtx, MA_OWNED);
3413
3414	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3415	ccb = tm->cm_complete_data;
3416
3417	/*
3418	 * Currently there should be no way we can hit this case.  It only
3419	 * happens when we have a failure to allocate chain frames, and
3420	 * task management commands don't have S/G lists.
3421	 */
3422	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3423		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3424
3425		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3426
3427		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3428		    "handle %#04x! This should not happen!\n", __func__,
3429		    tm->cm_flags, req->DevHandle);
3430		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3431		goto bailout;
3432	}
3433
3434	mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3435	    __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3436
3437	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3438		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3439		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3440		    CAM_LUN_WILDCARD);
3441	}
3442	else
3443		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3444
3445bailout:
3446
3447	mprsas_free_tm(sc, tm);
3448	xpt_done(ccb);
3449}
3450
3451static void
3452mprsas_poll(struct cam_sim *sim)
3453{
3454	struct mprsas_softc *sassc;
3455
3456	sassc = cam_sim_softc(sim);
3457
3458	if (sassc->sc->mpr_debug & MPR_TRACE) {
3459		/* frequent debug messages during a panic just slow
3460		 * everything down too much.
3461		 */
3462		mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3463		    __func__);
3464		sassc->sc->mpr_debug &= ~MPR_TRACE;
3465	}
3466
3467	mpr_intr_locked(sassc->sc);
3468}
3469
/*
 * CAM async event callback.  Handles AC_ADVINFO_CHANGED by refreshing the
 * cached per-LUN EEDP state from the read-capacity-long advinfo data, and
 * AC_FOUND_DEVICE by probing EEDP support on FreeBSD versions that lack
 * AC_ADVINFO_CHANGED.  All other event codes are ignored.
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sassc->sim->path_id)
			break;
#endif

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Look for an existing LUN record for this path's LUN. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* Not seen before: create and link a new LUN record. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached read-capacity-long data via
		 * XPT_DEV_ADVINFO.  The status is examined immediately
		 * after xpt_action(), so this CCB completes inline.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		/*
		 * Record EEDP state: protection types 1 and 3 are
		 * supported; type 2 (and anything unknown) is treated as
		 * not formatted for EEDP.
		 */
		if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			switch (rcap_buf.prot & SRC16_P_TYPE) {
			case SRC16_PTYPE_1:
			case SRC16_PTYPE_3:
				lun->eedp_formatted = TRUE;
				lun->eedp_block_size =
				    scsi_4btoul(rcap_buf.length);
				break;
			case SRC16_PTYPE_2:
			default:
				lun->eedp_formatted = FALSE;
				lun->eedp_block_size = 0;
				break;
			}
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#endif
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
			break;
#endif

		cgd = arg;
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
		mprsas_check_eedp(sc, path, cgd);
#endif
		break;
	}
	default:
		break;
	}
}
3610
3611#if (__FreeBSD_version < 901503) || \
3612    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3613static void
3614mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3615    struct ccb_getdev *cgd)
3616{
3617	struct mprsas_softc *sassc = sc->sassc;
3618	struct ccb_scsiio *csio;
3619	struct scsi_read_capacity_16 *scsi_cmd;
3620	struct scsi_read_capacity_eedp *rcap_buf;
3621	path_id_t pathid;
3622	target_id_t targetid;
3623	lun_id_t lunid;
3624	union ccb *ccb;
3625	struct cam_path *local_path;
3626	struct mprsas_target *target;
3627	struct mprsas_lun *lun;
3628	uint8_t	found_lun;
3629	char path_str[64];
3630
3631	pathid = cam_sim_path(sassc->sim);
3632	targetid = xpt_path_target_id(path);
3633	lunid = xpt_path_lun_id(path);
3634
3635	KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in "
3636	    "mprsas_check_eedp\n", targetid));
3637	target = &sassc->targets[targetid];
3638	if (target->handle == 0x0)
3639		return;
3640
3641	/*
3642	 * Determine if the device is EEDP capable.
3643	 *
3644	 * If this flag is set in the inquiry data, the device supports
3645	 * protection information, and must support the 16 byte read capacity
3646	 * command, otherwise continue without sending read cap 16.
3647	 */
3648	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3649		return;
3650
3651	/*
3652	 * Issue a READ CAPACITY 16 command.  This info is used to determine if
3653	 * the LUN is formatted for EEDP support.
3654	 */
3655	ccb = xpt_alloc_ccb_nowait();
3656	if (ccb == NULL) {
3657		mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3658		    "support.\n");
3659		return;
3660	}
3661
3662	if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) !=
3663	    CAM_REQ_CMP) {
3664		mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3665		    "support.\n");
3666		xpt_free_ccb(ccb);
3667		return;
3668	}
3669
3670	/*
3671	 * If LUN is already in list, don't create a new one.
3672	 */
3673	found_lun = FALSE;
3674	SLIST_FOREACH(lun, &target->luns, lun_link) {
3675		if (lun->lun_id == lunid) {
3676			found_lun = TRUE;
3677			break;
3678		}
3679	}
3680	if (!found_lun) {
3681		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3682		    M_NOWAIT | M_ZERO);
3683		if (lun == NULL) {
3684			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3685			    "EEDP support.\n");
3686			xpt_free_path(local_path);
3687			xpt_free_ccb(ccb);
3688			return;
3689		}
3690		lun->lun_id = lunid;
3691		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3692	}
3693
3694	xpt_path_string(local_path, path_str, sizeof(path_str));
3695	mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3696	    path_str, target->handle);
3697
3698	/*
3699	 * Issue a READ CAPACITY 16 command for the LUN.  The
3700	 * mprsas_read_cap_done function will load the read cap info into the
3701	 * LUN struct.
3702	 */
3703	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3704	    M_NOWAIT | M_ZERO);
3705	if (rcap_buf == NULL) {
3706		mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3707		    "buffer for EEDP support.\n");
3708		xpt_free_path(ccb->ccb_h.path);
3709		xpt_free_ccb(ccb);
3710		return;
3711	}
3712	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3713	csio = &ccb->csio;
3714	csio->ccb_h.func_code = XPT_SCSI_IO;
3715	csio->ccb_h.flags = CAM_DIR_IN;
3716	csio->ccb_h.retry_count = 4;
3717	csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3718	csio->ccb_h.timeout = 60000;
3719	csio->data_ptr = (uint8_t *)rcap_buf;
3720	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3721	csio->sense_len = MPR_SENSE_LEN;
3722	csio->cdb_len = sizeof(*scsi_cmd);
3723	csio->tag_action = MSG_SIMPLE_Q_TAG;
3724
3725	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3726	bzero(scsi_cmd, sizeof(*scsi_cmd));
3727	scsi_cmd->opcode = 0x9E;
3728	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3729	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3730
3731	ccb->ccb_h.ppriv_ptr1 = sassc;
3732	xpt_action(ccb);
3733}
3734
/*
 * Completion handler for the internally-generated READ CAPACITY 16
 * issued by mprsas_check_eedp().  Records whether the LUN is formatted
 * with protection information (EEDP) and its block size, then frees the
 * data buffer, path, and CCB that mprsas_check_eedp() allocated.
 */
static void
mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mprsas_softc *sassc;
	struct mprsas_target *target;
	struct mprsas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver must release the devq here because this SCSI command
	 * was generated internally by the driver rather than by a CAM
	 * periph, so no periph will release it.  This is currently the
	 * only internally-issued SCSI command; any future ones must
	 * release the devq the same way, since their completions will not
	 * go back through a cam_periph either.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out "
	    "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in with EEDP
		 * info. If the READ CAP 16 command had some SCSI error (common
		 * if command is not supported), mark the lun as not supporting
		 * EEDP and set the block size to 0.
		 */
		if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
		    (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* Bit 0 of the PROT byte is PROT_EN: formatted for EEDP. */
		if (rcap_buf->protect & 0x01) {
			/*
			 * NOTE(review): target_lun is printed with %d;
			 * lun_id_t may be wider than int on newer CAM
			 * versions — confirm the format specifier.
			 */
			mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
			    "%d is formatted for EEDP support.\n",
			    done_ccb->ccb_h.target_lun,
			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	// Finished with this CCB and path.
	free(rcap_buf, M_MPR);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3803#endif /* (__FreeBSD_version < 901503) || \
3804          ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3805
3806/*
3807 * Set the INRESET flag for this target so that no I/O will be sent to
3808 * the target until the reset has completed.  If an I/O request does
3809 * happen, the devq will be frozen.  The CCB holds the path which is
3810 * used to release the devq.  The devq is released and the CCB is freed
3811 * when the TM completes.
3812 */
3813void
3814mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3815    struct mprsas_target *target, lun_id_t lun_id)
3816{
3817	union ccb *ccb;
3818	path_id_t path_id;
3819
3820	ccb = xpt_alloc_ccb_nowait();
3821	if (ccb) {
3822		path_id = cam_sim_path(sc->sassc->sim);
3823		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3824		    target->tid, lun_id) != CAM_REQ_CMP) {
3825			xpt_free_ccb(ccb);
3826		} else {
3827			tm->cm_ccb = ccb;
3828			tm->cm_targ = target;
3829			target->flags |= MPRSAS_TARGET_INRESET;
3830		}
3831	}
3832}
3833
3834int
3835mprsas_startup(struct mpr_softc *sc)
3836{
3837	/*
3838	 * Send the port enable message and set the wait_for_port_enable flag.
3839	 * This flag helps to keep the simq frozen until all discovery events
3840	 * are processed.
3841	 */
3842	sc->wait_for_port_enable = 1;
3843	mprsas_send_portenable(sc);
3844	return (0);
3845}
3846
3847static int
3848mprsas_send_portenable(struct mpr_softc *sc)
3849{
3850	MPI2_PORT_ENABLE_REQUEST *request;
3851	struct mpr_command *cm;
3852
3853	MPR_FUNCTRACE(sc);
3854
3855	if ((cm = mpr_alloc_command(sc)) == NULL)
3856		return (EBUSY);
3857	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3858	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3859	request->MsgFlags = 0;
3860	request->VP_ID = 0;
3861	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3862	cm->cm_complete = mprsas_portenable_complete;
3863	cm->cm_data = NULL;
3864	cm->cm_sge = NULL;
3865
3866	mpr_map_command(sc, cm);
3867	mpr_dprint(sc, MPR_XINFO,
3868	    "mpr_send_portenable finished cm %p req %p complete %p\n",
3869	    cm, cm->cm_req, cm->cm_complete);
3870	return (0);
3871}
3872
3873static void
3874mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3875{
3876	MPI2_PORT_ENABLE_REPLY *reply;
3877	struct mprsas_softc *sassc;
3878
3879	MPR_FUNCTRACE(sc);
3880	sassc = sc->sassc;
3881
3882	/*
3883	 * Currently there should be no way we can hit this case.  It only
3884	 * happens when we have a failure to allocate chain frames, and
3885	 * port enable commands don't have S/G lists.
3886	 */
3887	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3888		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3889		    "This should not happen!\n", __func__, cm->cm_flags);
3890	}
3891
3892	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3893	if (reply == NULL)
3894		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3895	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3896	    MPI2_IOCSTATUS_SUCCESS)
3897		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3898
3899	mpr_free_command(sc, cm);
3900	/*
3901	 * Done waiting for port enable to complete.  Decrement the refcount.
3902	 * If refcount is 0, discovery is complete and a rescan of the bus can
3903	 * take place.
3904	 */
3905	sc->wait_for_port_enable = 0;
3906	sc->port_enable_complete = 1;
3907	wakeup(&sc->port_enable_complete);
3908	mprsas_startup_decrement(sassc);
3909}
3910
3911int
3912mprsas_check_id(struct mprsas_softc *sassc, int id)
3913{
3914	struct mpr_softc *sc = sassc->sc;
3915	char *ids;
3916	char *name;
3917
3918	ids = &sc->exclude_ids[0];
3919	while((name = strsep(&ids, ",")) != NULL) {
3920		if (name[0] == '\0')
3921			continue;
3922		if (strtol(name, NULL, 0) == (long)id)
3923			return (1);
3924	}
3925
3926	return (0);
3927}
3928
3929void
3930mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3931{
3932	struct mprsas_softc *sassc;
3933	struct mprsas_lun *lun, *lun_tmp;
3934	struct mprsas_target *targ;
3935	int i;
3936
3937	sassc = sc->sassc;
3938	/*
3939	 * The number of targets is based on IOC Facts, so free all of
3940	 * the allocated LUNs for each target and then the target buffer
3941	 * itself.
3942	 */
3943	for (i=0; i< maxtargets; i++) {
3944		targ = &sassc->targets[i];
3945		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3946			free(lun, M_MPR);
3947		}
3948	}
3949	free(sassc->targets, M_MPR);
3950
3951	sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3952	    M_MPR, M_WAITOK|M_ZERO);
3953}
3954