1/*-
2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2016 Avago Technologies
5 * Copyright 2000-2020 Broadcom Inc.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD
30 *
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD$");
35
36/* Communications core for Avago Technologies (LSI) MPT3 */
37
38/* TODO Move headers to mprvar */
39#include <sys/types.h>
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/kernel.h>
43#include <sys/selinfo.h>
44#include <sys/module.h>
45#include <sys/bus.h>
46#include <sys/conf.h>
47#include <sys/bio.h>
48#include <sys/malloc.h>
49#include <sys/uio.h>
50#include <sys/sysctl.h>
51#include <sys/endian.h>
52#include <sys/queue.h>
53#include <sys/kthread.h>
54#include <sys/taskqueue.h>
55#include <sys/sbuf.h>
56
57#include <machine/bus.h>
58#include <machine/resource.h>
59#include <sys/rman.h>
60
61#include <machine/stdarg.h>
62
63#include <cam/cam.h>
64#include <cam/cam_ccb.h>
65#include <cam/cam_debug.h>
66#include <cam/cam_sim.h>
67#include <cam/cam_xpt_sim.h>
68#include <cam/cam_xpt_periph.h>
69#include <cam/cam_periph.h>
70#include <cam/scsi/scsi_all.h>
71#include <cam/scsi/scsi_message.h>
72#include <cam/scsi/smp_all.h>
73
74#include <dev/nvme/nvme.h>
75
76#include <dev/mpr/mpi/mpi2_type.h>
77#include <dev/mpr/mpi/mpi2.h>
78#include <dev/mpr/mpi/mpi2_ioc.h>
79#include <dev/mpr/mpi/mpi2_sas.h>
80#include <dev/mpr/mpi/mpi2_pci.h>
81#include <dev/mpr/mpi/mpi2_cnfg.h>
82#include <dev/mpr/mpi/mpi2_init.h>
83#include <dev/mpr/mpi/mpi2_tool.h>
84#include <dev/mpr/mpr_ioctl.h>
85#include <dev/mpr/mprvar.h>
86#include <dev/mpr/mpr_table.h>
87#include <dev/mpr/mpr_sas.h>
88
89#define MPRSAS_DISCOVERY_TIMEOUT	20
90#define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
91
92/*
93 * static array to check SCSI OpCode for EEDP protection bits
94 */
95#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
96#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
97#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	/* 0x00 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x10 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x20: 0x28 READ(10), 0x2A WRITE(10), 0x2E WRITE AND VERIFY(10),
	 *	 0x2F VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x30 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x40: 0x41 WRITE SAME(10) */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x50 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x60 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x70 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x80: 0x88 READ(16), 0x8A WRITE(16), 0x8E WRITE AND VERIFY(16),
	 *	 0x8F VERIFY(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x90: 0x93 WRITE SAME(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xa0: 0xA8 READ(12), 0xAA WRITE(12), 0xAE WRITE AND VERIFY(12),
	 *	 0xAF VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0xb0 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xc0 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xd0 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xe0 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xf0 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
116
117MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
118
119static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
120static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
121static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
122static void mprsas_poll(struct cam_sim *sim);
123static void mprsas_scsiio_timeout(void *data);
124static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
125static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
126static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
127static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
128static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
129static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
130    struct mpr_command *cm);
131static void mprsas_async(void *callback_arg, uint32_t code,
132    struct cam_path *path, void *arg);
133static int mprsas_send_portenable(struct mpr_softc *sc);
134static void mprsas_portenable_complete(struct mpr_softc *sc,
135    struct mpr_command *cm);
136
137static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
138static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
139    uint64_t sasaddr);
140static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
141
142struct mprsas_target *
143mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
144    uint16_t handle)
145{
146	struct mprsas_target *target;
147	int i;
148
149	for (i = start; i < sassc->maxtargets; i++) {
150		target = &sassc->targets[i];
151		if (target->handle == handle)
152			return (target);
153	}
154
155	return (NULL);
156}
157
158/* we need to freeze the simq during attach and diag reset, to avoid failing
159 * commands before device handles have been found by discovery.  Since
160 * discovery involves reading config pages and possibly sending commands,
161 * discovery actions may continue even after we receive the end of discovery
162 * event, so refcount discovery actions instead of assuming we can unfreeze
163 * the simq when we get the event.
164 */
165void
166mprsas_startup_increment(struct mprsas_softc *sassc)
167{
168	MPR_FUNCTRACE(sassc->sc);
169
170	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
171		if (sassc->startup_refcount++ == 0) {
172			/* just starting, freeze the simq */
173			mpr_dprint(sassc->sc, MPR_INIT,
174			    "%s freezing simq\n", __func__);
175			xpt_hold_boot();
176			xpt_freeze_simq(sassc->sim, 1);
177		}
178		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
179		    sassc->startup_refcount);
180	}
181}
182
183void
184mprsas_release_simq_reinit(struct mprsas_softc *sassc)
185{
186	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
187		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
188		xpt_release_simq(sassc->sim, 1);
189		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
190	}
191}
192
193void
194mprsas_startup_decrement(struct mprsas_softc *sassc)
195{
196	MPR_FUNCTRACE(sassc->sc);
197
198	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
199		if (--sassc->startup_refcount == 0) {
200			/* finished all discovery-related actions, release
201			 * the simq and rescan for the latest topology.
202			 */
203			mpr_dprint(sassc->sc, MPR_INIT,
204			    "%s releasing simq\n", __func__);
205			sassc->flags &= ~MPRSAS_IN_STARTUP;
206			xpt_release_simq(sassc->sim, 1);
207			xpt_release_boot();
208		}
209		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
210		    sassc->startup_refcount);
211	}
212}
213
/*
 * The firmware requires us to stop sending commands when we're doing task
 * management.
 * XXX The logic for serializing the device has been made lazy and moved to
 * mprsas_prepare_for_tm().
 */
221struct mpr_command *
222mprsas_alloc_tm(struct mpr_softc *sc)
223{
224	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
225	struct mpr_command *tm;
226
227	MPR_FUNCTRACE(sc);
228	tm = mpr_alloc_high_priority_command(sc);
229	if (tm == NULL)
230		return (NULL);
231
232	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
233	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
234	return tm;
235}
236
237void
238mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
239{
240	int target_id = 0xFFFFFFFF;
241
242	MPR_FUNCTRACE(sc);
243	if (tm == NULL)
244		return;
245
246	/*
247	 * For TM's the devq is frozen for the device.  Unfreeze it here and
248	 * free the resources used for freezing the devq.  Must clear the
249	 * INRESET flag as well or scsi I/O will not work.
250	 */
251	if (tm->cm_targ != NULL) {
252		tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
253		target_id = tm->cm_targ->tid;
254	}
255	if (tm->cm_ccb) {
256		mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
257		    target_id);
258		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
259		xpt_free_path(tm->cm_ccb->ccb_h.path);
260		xpt_free_ccb(tm->cm_ccb);
261	}
262
263	mpr_free_high_priority_command(sc, tm);
264}
265
266void
267mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
268{
269	struct mprsas_softc *sassc = sc->sassc;
270	path_id_t pathid;
271	target_id_t targetid;
272	union ccb *ccb;
273
274	MPR_FUNCTRACE(sc);
275	pathid = cam_sim_path(sassc->sim);
276	if (targ == NULL)
277		targetid = CAM_TARGET_WILDCARD;
278	else
279		targetid = targ - sassc->targets;
280
281	/*
282	 * Allocate a CCB and schedule a rescan.
283	 */
284	ccb = xpt_alloc_ccb_nowait();
285	if (ccb == NULL) {
286		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
287		return;
288	}
289
290	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
291	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
292		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
293		xpt_free_ccb(ccb);
294		return;
295	}
296
297	if (targetid == CAM_TARGET_WILDCARD)
298		ccb->ccb_h.func_code = XPT_SCAN_BUS;
299	else
300		ccb->ccb_h.func_code = XPT_SCAN_TGT;
301
302	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
303	xpt_rescan(ccb);
304}
305
/*
 * Log a printf-style message for the given command at the given debug
 * level, prefixed with the CAM path of the command's CCB (or a
 * "noperiph" sim:bus:target:lun tuple when no CCB is attached) and the
 * command's SMID.  For SCSI I/O CCBs the CDB string and transfer length
 * are included.  No-op when 'level' is not enabled in mpr_debug or when
 * cm is NULL.
 */
static void
mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[224];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mpr_debug & level) == 0)
		return;

	/* Fixed-length sbuf backed by the on-stack buffer 'str'. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
		    sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
			    cm->cm_ccb->csio.dxfer_len);
		}
	} else {
		/* No CCB attached: synthesize a path-like prefix instead. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
350
351static void
352mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
353{
354	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
355	struct mprsas_target *targ;
356	uint16_t handle;
357
358	MPR_FUNCTRACE(sc);
359
360	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
361	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
362	targ = tm->cm_targ;
363
364	if (reply == NULL) {
365		/* XXX retry the remove after the diag reset completes? */
366		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
367		    "0x%04x\n", __func__, handle);
368		mprsas_free_tm(sc, tm);
369		return;
370	}
371
372	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
373	    MPI2_IOCSTATUS_SUCCESS) {
374		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
375		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
376	}
377
378	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
379	    le32toh(reply->TerminationCount));
380	mpr_free_reply(sc, tm->cm_reply_data);
381	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
382
383	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
384	    targ->tid, handle);
385
386	/*
387	 * Don't clear target if remove fails because things will get confusing.
388	 * Leave the devname and sasaddr intact so that we know to avoid reusing
389	 * this target id if possible, and so we can assign the same target id
390	 * to this device if it comes back in the future.
391	 */
392	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
393	    MPI2_IOCSTATUS_SUCCESS) {
394		targ = tm->cm_targ;
395		targ->handle = 0x0;
396		targ->encl_handle = 0x0;
397		targ->encl_level_valid = 0x0;
398		targ->encl_level = 0x0;
399		targ->connector_name[0] = ' ';
400		targ->connector_name[1] = ' ';
401		targ->connector_name[2] = ' ';
402		targ->connector_name[3] = ' ';
403		targ->encl_slot = 0x0;
404		targ->exp_dev_handle = 0x0;
405		targ->phy_num = 0x0;
406		targ->linkrate = 0x0;
407		targ->devinfo = 0x0;
408		targ->flags = 0x0;
409		targ->scsi_req_desc_type = 0;
410	}
411
412	mprsas_free_tm(sc, tm);
413}
414
415/*
416 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
417 * Otherwise Volume Delete is same as Bare Drive Removal.
418 */
419void
420mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
421{
422	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
423	struct mpr_softc *sc;
424	struct mpr_command *cm;
425	struct mprsas_target *targ = NULL;
426
427	MPR_FUNCTRACE(sassc->sc);
428	sc = sassc->sc;
429
430	targ = mprsas_find_target_by_handle(sassc, 0, handle);
431	if (targ == NULL) {
432		/* FIXME: what is the action? */
433		/* We don't know about this device? */
434		mpr_dprint(sc, MPR_ERROR,
435		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
436		return;
437	}
438
439	targ->flags |= MPRSAS_TARGET_INREMOVAL;
440
441	cm = mprsas_alloc_tm(sc);
442	if (cm == NULL) {
443		mpr_dprint(sc, MPR_ERROR,
444		    "%s: command alloc failure\n", __func__);
445		return;
446	}
447
448	mprsas_rescan_target(sc, targ);
449
450	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
451	req->DevHandle = targ->handle;
452	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
453
454	if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
455		/* SAS Hard Link Reset / SATA Link Reset */
456		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
457	} else {
458		/* PCIe Protocol Level Reset*/
459		req->MsgFlags =
460		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
461	}
462
463	cm->cm_targ = targ;
464	cm->cm_data = NULL;
465	cm->cm_complete = mprsas_remove_volume;
466	cm->cm_complete_data = (void *)(uintptr_t)handle;
467
468	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
469	    __func__, targ->tid);
470	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
471
472	mpr_map_command(sc, cm);
473}
474
475/*
476 * The firmware performs debounce on the link to avoid transient link errors
477 * and false removals.  When it does decide that link has been lost and a
478 * device needs to go away, it expects that the host will perform a target reset
479 * and then an op remove.  The reset has the side-effect of aborting any
480 * outstanding requests for the device, which is required for the op-remove to
481 * succeed.  It's not clear if the host should check for the device coming back
482 * alive after the reset.
483 */
void
mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *tm;
	struct mprsas_target *targ = NULL;

	MPR_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mprsas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
		    __func__, handle);
		return;
	}

	/* Mark the target as being in the process of removal. */
	targ->flags |= MPRSAS_TARGET_INREMOVAL;

	tm = mprsas_alloc_tm(sc);
	if (tm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
		    __func__);
		return;
	}

	/*
	 * Kick off a CAM rescan; NOTE(review): presumably so CAM notices
	 * the device going away -- confirm against mprsas_rescan_target()
	 * usage elsewhere.
	 */
	mprsas_rescan_target(sc, targ);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	tm->cm_targ = targ;
	tm->cm_data = NULL;
	/* The completion routine issues the follow-up OP_REMOVE_DEVICE. */
	tm->cm_complete = mprsas_remove_device;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);

	mpr_map_command(sc, tm);
}
534
/*
 * Completion handler for the target-reset TM sent by
 * mprsas_prepare_remove().  The reset has aborted outstanding I/O to the
 * device; this routine reuses the same command to send the SAS IO unit
 * control OP_REMOVE_DEVICE, deferring it until the target's command list
 * has drained if commands are still pending.
 */
static void
mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mprsas_target *targ;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command for the OP_REMOVE_DEVICE request. */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mprsas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	/*
	 * Wait to send the REMOVE_DEVICE until all the commands have cleared.
	 * They should be aborted or time out and we'll kick this off there
	 * if so.
	 */
	if (TAILQ_FIRST(&targ->commands) == NULL) {
		mpr_dprint(sc, MPR_INFO, "No pending commands: starting remove_device\n");
		mpr_map_command(sc, tm);
		targ->pending_remove_tm = NULL;
	} else {
		/* Deferred: sent once the target's command list drains. */
		targ->pending_remove_tm = tm;
	}

	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
}
611
/*
 * Completion handler for the OP_REMOVE_DEVICE request issued by
 * mprsas_remove_device().  On success, clears the target's bookkeeping
 * (handle, enclosure info, link/device info, flags) and frees its LUN
 * list so the slot can be matched again if the device returns.
 */
static void
mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	targ = tm->cm_targ;

	/*
	 * At this point, we should have no pending commands for the target.
	 * The remove target has just completed.
	 */
	KASSERT(TAILQ_FIRST(&targ->commands) == NULL,
	    ("%s: no commands should be pending\n", __func__));

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
	    __func__, handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;

		/* Free the per-LUN records accumulated for this target. */
		while (!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPR);
		}
	}

	mprsas_free_tm(sc, tm);
}
691
692static int
693mprsas_register_events(struct mpr_softc *sc)
694{
695	uint8_t events[16];
696
697	bzero(events, 16);
698	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
699	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
700	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
701	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
702	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
703	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
704	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
705	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
706	setbit(events, MPI2_EVENT_IR_VOLUME);
707	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
708	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
709	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
710	setbit(events, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
711	if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
712		setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
713		if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
714			setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
715			setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
716			setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
717		}
718	}
719
720	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
721	    &sc->sassc->mprsas_eh);
722
723	return (0);
724}
725
/*
 * Attach the SAS/CAM layer for the adapter: allocate the per-SAS softc
 * and target table, create and register the CAM SIM/bus, start the
 * firmware-event taskqueue, freeze the simq until discovery completes,
 * and register for the CAM async events used to learn device EEDP
 * capabilities.  Returns 0 on success or an errno; on failure,
 * mpr_detach_sas() is called to unwind whatever was set up.
 */
int
mpr_attach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	cam_status status;
	int unit, error = 0, reqs;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mprsas_target) *
	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
	sc->sassc = sassc;
	sassc->sc = sc;

	/*
	 * SIMQ depth: total request frames minus those reserved for
	 * high-priority commands, minus one (NOTE(review): presumably a
	 * frame held back for internal use -- confirm against mpr.c).
	 */
	reqs = sc->num_reqs - sc->num_prireqs - 1;
	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mpr_dev);
	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
	    unit, &sc->mpr_mtx, reqs, reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mpr_dev));

	mpr_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %d registering SCSI bus\n", error);
		mpr_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	/* First reference freezes the simq and holds boot. */
	mprsas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
		status = xpt_register_async(event, mprsas_async, sc,
					    sassc->path);

		if (status != CAM_REQ_CMP) {
			mpr_dprint(sc, MPR_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mpr_printf(sc, "EEDP capabilities disabled.\n");
	}

	mpr_unlock(sc);

	mprsas_register_events(sc);
out:
	if (error)
		mpr_detach_sas(sc);

	mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
	return (error);
}
844
/*
 * Tear down everything mpr_attach_sas() created: deregister firmware
 * events, free the event taskqueue, release any outstanding startup simq
 * freezes, remove the async handler and CAM bus/SIM, and free per-target
 * LUN lists, the target table, and the SAS softc.  Safe to call when
 * attach failed partway through (all steps are guarded).  Returns 0.
 */
int
mpr_detach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	struct mprsas_lun *lun, *lun_tmp;
	struct mprsas_target *targ;
	int i;

	MPR_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mpr_deregister_events(sc, sassc->mprsas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mpr_lock(sc);

	/* Drop any startup references still held so the simq is released. */
	while (sassc->startup_refcount != 0)
		mprsas_startup_decrement(sassc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mprsas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	if (sassc->flags & MPRSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mpr_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free any LUN records hanging off each target. */
	for (i = 0; i < sassc->maxtargets; i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPR);
		}
	}
	free(sassc->targets, M_MPR);
	free(sassc, M_MPR);
	sc->sassc = NULL;

	return (0);
}
907
/*
 * Called when SAS discovery completes.  Cancels any pending discovery
 * timeout and, when mapping-event tracking is enabled, schedules the
 * mapping-table missing-device check.
 */
void
mprsas_discovery_end(struct mprsas_softc *sassc)
{
	struct mpr_softc *sc = sassc->sc;

	MPR_FUNCTRACE(sc);

	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
		callout_stop(&sassc->discovery_callout);

	/*
	 * After discovery has completed, check the mapping table for any
	 * missing devices and update their missing counts. Only do this once
	 * whenever the driver is initialized so that missing counts aren't
	 * updated unnecessarily. Note that just because discovery has
	 * completed doesn't mean that events have been processed yet. The
	 * check_devices function is a callout timer that checks if ALL devices
	 * are missing. If so, it will wait a little longer for events to
	 * complete and keep resetting itself until some device in the mapping
	 * table is not missing, meaning that event processing has started.
	 */
	if (sc->track_mapping_events) {
		mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
		    "completed. Check for missing devices in the mapping "
		    "table.\n");
		callout_reset(&sc->device_check_callout,
		    MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
		    sc);
	}
}
938
/*
 * CAM action entry point for this SIM.  Dispatches the incoming CCB by
 * function code.  Cases that end in "break" are completed synchronously
 * here and finished by the single xpt_done() call at the bottom; cases
 * that "return" (reset-dev, scsi-io, smp-io) hand the CCB to a helper
 * that completes it asynchronously and calls xpt_done() itself.
 * Must be called with the softc mutex held (asserted below).
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report static HBA/bus capabilities to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mpr_softc *sc = sassc->sc;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		cpi->maxio = sc->maxio;
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report per-target transport settings (SAS link speed). */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		/* A zero device handle means the target is not present. */
		targ = &sassc->targets[cts->ccb_h.target_id];
		if (targ->handle == 0x0) {
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Translate the MPI negotiated link rate code to kb/s. */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			/* unknown/unreported rate; mark speed invalid */
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
		    "XPT_RESET_DEV\n");
		/* Completed asynchronously; helper calls xpt_done(). */
		mprsas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/*
		 * Not supported by the firmware interface; report success so
		 * CAM's recovery machinery can proceed.
		 */
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
		    "for abort or reset\n");
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* Completed asynchronously; helper calls xpt_done(). */
		mprsas_action_scsiio(sassc, ccb);
		return;
	case XPT_SMP_IO:
		/* Completed asynchronously; helper calls xpt_done(). */
		mprsas_action_smpio(sassc, ccb);
		return;
	default:
		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1066
1067static void
1068mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1069    target_id_t target_id, lun_id_t lun_id)
1070{
1071	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1072	struct cam_path *path;
1073
1074	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1075	    ac_code, target_id, (uintmax_t)lun_id);
1076
1077	if (xpt_create_path(&path, NULL,
1078		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1079		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1080		    "notification\n");
1081		return;
1082	}
1083
1084	xpt_async(ac_code, path, NULL);
1085	xpt_free_path(path);
1086}
1087
/*
 * Force-complete every in-flight command after a diag reset.  The
 * hardware will never reply, so each active command is completed with a
 * NULL cm_reply: via its completion callback if it has one, or by waking
 * a sleeping waiter.  Commands with neither are logged as stragglers.
 * Must be called with the softc mutex held.
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		if (cm->cm_state == MPR_CM_STATE_FREE)
			continue;

		cm->cm_state = MPR_CM_STATE_BUSY;
		cm->cm_reply = NULL;
		completed = 0;

		/*
		 * Free the SATA-identify buffer the driver allocated on
		 * behalf of a timed-out request; no one else will.
		 */
		if (cm->cm_flags & MPR_CM_FLAGS_SATA_ID_TIMEOUT) {
			MPASS(cm->cm_data);
			free(cm->cm_data, M_MPR);
			cm->cm_data = NULL;
		}

		/* Let pollers see the command as done. */
		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		} else if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}

	sc->io_cmds_active = 0;
}
1142
/*
 * Post-diag-reset recovery: put the driver back into startup/discovery
 * mode, notify CAM of the bus reset, flush all outstanding commands,
 * and invalidate every target's firmware handles so they are re-learned
 * during rediscovery.  The ordering below (freeze simq, announce,
 * complete commands, then clear handles) is deliberate.
 */
void
mprsas_handle_reinit(struct mpr_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPRSAS_IN_STARTUP;
	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
	mprsas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mprsas_complete_all_commands(sc);

	mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
	    __func__, sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
	}
}
1185static void
1186mprsas_tm_timeout(void *data)
1187{
1188	struct mpr_command *tm = data;
1189	struct mpr_softc *sc = tm->cm_sc;
1190
1191	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1192
1193	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
1194	    "out\n", tm);
1195
1196	KASSERT(tm->cm_state == MPR_CM_STATE_INQUEUE,
1197	    ("command not inqueue\n"));
1198
1199	tm->cm_state = MPR_CM_STATE_BUSY;
1200	mpr_reinit(sc);
1201}
1202
1203static void
1204mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1205{
1206	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1207	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1208	unsigned int cm_count = 0;
1209	struct mpr_command *cm;
1210	struct mprsas_target *targ;
1211
1212	callout_stop(&tm->cm_callout);
1213
1214	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1215	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1216	targ = tm->cm_targ;
1217
1218	/*
1219	 * Currently there should be no way we can hit this case.  It only
1220	 * happens when we have a failure to allocate chain frames, and
1221	 * task management commands don't have S/G lists.
1222	 */
1223	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1224		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
1225		    "%s: cm_flags = %#x for LUN reset! "
1226		    "This should not happen!\n", __func__, tm->cm_flags);
1227		mprsas_free_tm(sc, tm);
1228		return;
1229	}
1230
1231	if (reply == NULL) {
1232		mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
1233		    tm);
1234		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1235			/* this completion was due to a reset, just cleanup */
1236			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1237			    "reset, ignoring NULL LUN reset reply\n");
1238			targ->tm = NULL;
1239			mprsas_free_tm(sc, tm);
1240		}
1241		else {
1242			/* we should have gotten a reply. */
1243			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1244			    "LUN reset attempt, resetting controller\n");
1245			mpr_reinit(sc);
1246		}
1247		return;
1248	}
1249
1250	mpr_dprint(sc, MPR_RECOVERY,
1251	    "logical unit reset status 0x%x code 0x%x count %u\n",
1252	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1253	    le32toh(reply->TerminationCount));
1254
1255	/*
1256	 * See if there are any outstanding commands for this LUN.
1257	 * This could be made more efficient by using a per-LU data
1258	 * structure of some sort.
1259	 */
1260	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1261		if (cm->cm_lun == tm->cm_lun)
1262			cm_count++;
1263	}
1264
1265	if (cm_count == 0) {
1266		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1267		    "Finished recovery after LUN reset for target %u\n",
1268		    targ->tid);
1269
1270		mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
1271		    tm->cm_lun);
1272
1273		/*
1274		 * We've finished recovery for this logical unit.  check and
1275		 * see if some other logical unit has a timedout command
1276		 * that needs to be processed.
1277		 */
1278		cm = TAILQ_FIRST(&targ->timedout_commands);
1279		if (cm) {
1280			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1281			   "More commands to abort for target %u\n", targ->tid);
1282			mprsas_send_abort(sc, tm, cm);
1283		} else {
1284			targ->tm = NULL;
1285			mprsas_free_tm(sc, tm);
1286		}
1287	} else {
1288		/* if we still have commands for this LUN, the reset
1289		 * effectively failed, regardless of the status reported.
1290		 * Escalate to a target reset.
1291		 */
1292		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1293		    "logical unit reset complete for target %u, but still "
1294		    "have %u command(s), sending target reset\n", targ->tid,
1295		    cm_count);
1296		if (!targ->is_nvme || sc->custom_nvme_tm_handling)
1297			mprsas_send_reset(sc, tm,
1298			    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1299		else
1300			mpr_reinit(sc);
1301	}
1302}
1303
/*
 * Completion handler for a TARGET RESET task-management request.
 *
 * If the target has no commands outstanding, recovery is complete:
 * announce AC_SENT_BDR for all LUNs on the target and free the TM.
 * If commands remain, the target reset effectively failed — this was
 * already the escalation path, so the only step left is a full
 * controller reinit.  A NULL reply outside of a diag reset likewise
 * escalates to a controller reinit.
 */
static void
mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed; cancel its escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
		    "reset! This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL target reset reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL target reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "target reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/*
		 * We've finished recovery for this target and all
		 * of its logical units.
		 */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished reset recovery for target %u\n", targ->tid);

		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else {
		/*
		 * After a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Target reset complete for target %u, but still have %u "
		    "command(s), resetting controller\n", targ->tid,
		    targ->outstanding);
		mpr_reinit(sc);
	}
}
1380
/* Default reset TM timeout in seconds before escalating to reinit. */
#define MPR_RESET_TIMEOUT 30

/*
 * Build and issue a reset task-management request (logical unit reset
 * or target reset, selected by 'type') using the pre-allocated TM
 * command 'tm'.  Arms mprsas_tm_timeout() to escalate if the firmware
 * never answers.  Returns 0 on successful submission, -1 if the target
 * has no device handle or 'type' is not a supported reset type, or the
 * mpr_map_command() error otherwise.
 */
int
mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *target;
	int err, timeout;

	target = tm->cm_targ;
	if (target->handle == 0) {
		mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
		    "%d\n", __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->TaskType = type;

	if (!target->is_nvme || sc->custom_nvme_tm_handling) {
		timeout = MPR_RESET_TIMEOUT;
		/*
		 * Target reset method =
		 *     SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	} else {
		/* NVMe may need a longer, device-specific reset timeout. */
		timeout = (target->controller_reset_timeout) ? (
		    target->controller_reset_timeout) : (MPR_RESET_TIMEOUT);
		/* PCIe Protocol Level Reset*/
		req->MsgFlags =
		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	}

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPR_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending logical unit reset to target %u lun %d\n",
		    target->tid, tm->cm_lun);
		tm->cm_complete = mprsas_logical_unit_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		tm->cm_targ->target_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending target reset to target %u\n", target->tid);
		tm->cm_complete = mprsas_target_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	/* Log physical location when the firmware reported one. */
	if (target->encl_level_valid) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    target->encl_level, target->encl_slot,
		    target->connector_name);
	}

	tm->cm_data = NULL;
	tm->cm_complete_data = (void *)tm;

	/* Escalate via mprsas_tm_timeout() if no completion arrives. */
	callout_reset(&tm->cm_callout, timeout * hz,
	    mprsas_tm_timeout, tm);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending reset type %u\n", err, type);

	return err;
}
1457
/*
 * Completion handler for an ABORT TASK task-management request.
 *
 * Inspects the target's timed-out command list to decide the next step:
 * list empty -> recovery finished, free the TM; head of list differs
 * from the TaskMID just aborted -> abort succeeded, continue with the
 * next timed-out command; head is still the same command -> the abort
 * failed, escalate to a logical unit reset.  A NULL reply outside of a
 * diag reset escalates straight to a controller reinit.
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed; cancel its escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);
		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);
		mprsas_send_abort(sc, tm, cm);
	} else {
		/*
		 * we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1538
/* Default abort TM timeout in seconds before escalating to reinit. */
#define MPR_ABORT_TIMEOUT 5

/*
 * Build and issue an ABORT TASK task-management request for timed-out
 * command 'cm', using the pre-allocated TM command 'tm'.  Arms
 * mprsas_tm_timeout() to escalate if the firmware never answers.
 * Returns 0 on successful submission, -1 if the target has no device
 * handle, or the mpr_map_command() error otherwise.
 */
static int
mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
    struct mpr_command *cm)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;
	int err, timeout;

	targ = cm->cm_targ;
	if (targ->handle == 0) {
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		   "%s null devhandle for target_id %d\n",
		    __func__, cm->cm_ccb->ccb_h.target_id);
		return -1;
	}

	mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
	    "Aborting command %p\n", cm);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;

	/* XXX Need to handle invalid LUNs */
	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);

	/* TaskMID identifies the command to abort by its SMID. */
	req->TaskMID = htole16(cm->cm_desc.Default.SMID);

	tm->cm_data = NULL;
	tm->cm_complete = mprsas_abort_complete;
	tm->cm_complete_data = (void *)tm;
	tm->cm_targ = cm->cm_targ;
	tm->cm_lun = cm->cm_lun;

	/* NVMe devices (without custom TM) use their own abort timeout. */
	if (!targ->is_nvme || sc->custom_nvme_tm_handling)
		timeout	= MPR_ABORT_TIMEOUT;
	else
		timeout = sc->nvme_abort_timeout;

	/* Escalate via mprsas_tm_timeout() if no completion arrives. */
	callout_reset(&tm->cm_callout, timeout * hz,
	    mprsas_tm_timeout, tm);

	targ->aborts++;

	mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending abort for cm %p SMID %u\n",
		    err, cm, req->TaskMID);
	return err;
}
1594
/*
 * Callout handler for a timed-out SCSI I/O command.  First drains any
 * pending interrupts in case the completion is merely sitting in the
 * reply queue; if the command really is stuck, it is marked timed out,
 * queued on the target's timedout_commands list, and error recovery is
 * started (or joined, if a TM is already running for this target) by
 * sending an abort.
 */
static void
mprsas_scsiio_timeout(void *data)
{
	sbintime_t elapsed, now;
	union ccb *ccb;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;
	ccb = cm->cm_ccb;
	now = sbinuptime();

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
		/* completed (or queued for recovery) just in time */
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	/* ccb_h.qos.sim_data holds the submission timestamp (sbinuptime). */
	elapsed = now - ccb->ccb_h.qos.sim_data;
	mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
	    "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
	    targ->tid, targ->handle, ccb->ccb_h.timeout,
	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    targ->encl_level, targ->encl_slot, targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_flags |= MPR_CM_FLAGS_ON_RECOVERY | MPR_CM_FLAGS_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
		    "processing by tm %p\n", cm, targ->tm);
	}
	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
		/* start recovery by aborting the first timedout command */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending abort to target %u for SMID %d\n", targ->tid,
		    cm->cm_desc.Default.SMID);
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);
		mprsas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "timedout cm %p failed to allocate a tm\n", cm);
	}
}
1682
1683/**
1684 * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1685 *			     to SCSI Unmap.
1686 * Return 0 - for success,
1687 *	  1 - to immediately return back the command with success status to CAM
1688 *	  negative value - to fallback to firmware path i.e. issue scsi unmap
1689 *			   to FW without any translation.
1690 */
1691static int
1692mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1693    union ccb *ccb, struct mprsas_target *targ)
1694{
1695	Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1696	struct ccb_scsiio *csio;
1697	struct unmap_parm_list *plist;
1698	struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1699	struct nvme_command *c;
1700	int i, res;
1701	uint16_t ndesc, list_len, data_length;
1702	struct mpr_prp_page *prp_page_info;
1703	uint64_t nvme_dsm_ranges_dma_handle;
1704
1705	csio = &ccb->csio;
1706	list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1707	if (!list_len) {
1708		mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
1709		return -EINVAL;
1710	}
1711
1712	plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1713	if (!plist) {
1714		mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1715		    "save UNMAP data\n");
1716		return -ENOMEM;
1717	}
1718
1719	/* Copy SCSI unmap data to a local buffer */
1720	bcopy(csio->data_ptr, plist, csio->dxfer_len);
1721
1722	/* return back the unmap command to CAM with success status,
1723	 * if number of descripts is zero.
1724	 */
1725	ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1726	if (!ndesc) {
1727		mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1728		    "UNMAP cmd is Zero\n");
1729		res = 1;
1730		goto out;
1731	}
1732
1733	data_length = ndesc * sizeof(struct nvme_dsm_range);
1734	if (data_length > targ->MDTS) {
1735		mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1736		    "Device's MDTS: %d\n", data_length, targ->MDTS);
1737		res = -EINVAL;
1738		goto out;
1739	}
1740
1741	prp_page_info = mpr_alloc_prp_page(sc);
1742	KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1743	    "UNMAP command.\n", __func__));
1744
1745	/*
1746	 * Insert the allocated PRP page into the command's PRP page list. This
1747	 * will be freed when the command is freed.
1748	 */
1749	TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1750
1751	nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1752	nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1753
1754	bzero(nvme_dsm_ranges, data_length);
1755
1756	/* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1757	 * for each descriptors contained in SCSI UNMAP data.
1758	 */
1759	for (i = 0; i < ndesc; i++) {
1760		nvme_dsm_ranges[i].length =
1761		    htole32(be32toh(plist->desc[i].nlb));
1762		nvme_dsm_ranges[i].starting_lba =
1763		    htole64(be64toh(plist->desc[i].slba));
1764		nvme_dsm_ranges[i].attributes = 0;
1765	}
1766
1767	/* Build MPI2.6's NVMe Encapsulated Request Message */
1768	req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1769	bzero(req, sizeof(*req));
1770	req->DevHandle = htole16(targ->handle);
1771	req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1772	req->Flags = MPI26_NVME_FLAGS_WRITE;
1773	req->ErrorResponseBaseAddress.High =
1774	    htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1775	req->ErrorResponseBaseAddress.Low =
1776	    htole32(cm->cm_sense_busaddr);
1777	req->ErrorResponseAllocationLength =
1778	    htole16(sizeof(struct nvme_completion));
1779	req->EncapsulatedCommandLength =
1780	    htole16(sizeof(struct nvme_command));
1781	req->DataLength = htole32(data_length);
1782
1783	/* Build NVMe DSM command */
1784	c = (struct nvme_command *) req->NVMe_Command;
1785	c->opc = NVME_OPC_DATASET_MANAGEMENT;
1786	c->nsid = htole32(csio->ccb_h.target_lun + 1);
1787	c->cdw10 = htole32(ndesc - 1);
1788	c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1789
1790	cm->cm_length = data_length;
1791	cm->cm_data = NULL;
1792
1793	cm->cm_complete = mprsas_scsiio_complete;
1794	cm->cm_complete_data = ccb;
1795	cm->cm_targ = targ;
1796	cm->cm_lun = csio->ccb_h.target_lun;
1797	cm->cm_ccb = ccb;
1798
1799	cm->cm_desc.Default.RequestFlags =
1800	    MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
1801
1802	csio->ccb_h.qos.sim_data = sbinuptime();
1803	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1804	    mprsas_scsiio_timeout, cm, 0);
1805
1806	targ->issued++;
1807	targ->outstanding++;
1808	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1809	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1810
1811	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1812	    __func__, cm, ccb, targ->outstanding);
1813
1814	mpr_build_nvme_prp(sc, cm, req,
1815	    (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1816	mpr_map_command(sc, cm);
1817
1818out:
1819	free(plist, M_MPR);
1820	return 0;
1821}
1822
1823static void
1824mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
1825{
1826	MPI2_SCSI_IO_REQUEST *req;
1827	struct ccb_scsiio *csio;
1828	struct mpr_softc *sc;
1829	struct mprsas_target *targ;
1830	struct mprsas_lun *lun;
1831	struct mpr_command *cm;
1832	uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
1833	uint16_t eedp_flags;
1834	uint32_t mpi_control;
1835	int rc;
1836
1837	sc = sassc->sc;
1838	MPR_FUNCTRACE(sc);
1839	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1840
1841	csio = &ccb->csio;
1842	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1843	    ("Target %d out of bounds in XPT_SCSI_IO\n",
1844	     csio->ccb_h.target_id));
1845	targ = &sassc->targets[csio->ccb_h.target_id];
1846	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
1847	if (targ->handle == 0x0) {
1848		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
1849		    __func__, csio->ccb_h.target_id);
1850		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1851		xpt_done(ccb);
1852		return;
1853	}
1854	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
1855		mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
1856		    "supported %u\n", __func__, csio->ccb_h.target_id);
1857		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1858		xpt_done(ccb);
1859		return;
1860	}
1861	/*
1862	 * Sometimes, it is possible to get a command that is not "In
1863	 * Progress" and was actually aborted by the upper layer.  Check for
1864	 * this here and complete the command without error.
1865	 */
1866	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1867		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
1868		    "target %u\n", __func__, csio->ccb_h.target_id);
1869		xpt_done(ccb);
1870		return;
1871	}
1872	/*
1873	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
1874	 * that the volume has timed out.  We want volumes to be enumerated
1875	 * until they are deleted/removed, not just failed. In either event,
1876	 * we're removing the target due to a firmware event telling us
1877	 * the device is now gone (as opposed to some transient event). Since
1878	 * we're opting to remove failed devices from the OS's view, we need
1879	 * to propagate that status up the stack.
1880	 */
1881	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
1882		if (targ->devinfo == 0)
1883			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1884		else
1885			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1886		xpt_done(ccb);
1887		return;
1888	}
1889
1890	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
1891		mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
1892		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1893		xpt_done(ccb);
1894		return;
1895	}
1896
1897	/*
1898	 * If target has a reset in progress, freeze the devq and return.  The
1899	 * devq will be released when the TM reset is finished.
1900	 */
1901	if (targ->flags & MPRSAS_TARGET_INRESET) {
1902		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1903		mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
1904		    __func__, targ->tid);
1905		xpt_freeze_devq(ccb->ccb_h.path, 1);
1906		xpt_done(ccb);
1907		return;
1908	}
1909
1910	cm = mpr_alloc_command(sc);
1911	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
1912		if (cm != NULL) {
1913			mpr_free_command(sc, cm);
1914		}
1915		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
1916			xpt_freeze_simq(sassc->sim, 1);
1917			sassc->flags |= MPRSAS_QUEUE_FROZEN;
1918		}
1919		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1920		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1921		xpt_done(ccb);
1922		return;
1923	}
1924
1925	/* For NVME device's issue UNMAP command directly to NVME drives by
1926	 * constructing equivalent native NVMe DataSetManagement command.
1927	 */
1928	scsi_opcode = scsiio_cdb_ptr(csio)[0];
1929	if (scsi_opcode == UNMAP &&
1930	    targ->is_nvme &&
1931	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
1932		rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
1933		if (rc == 1) { /* return command to CAM with success status */
1934			mpr_free_command(sc, cm);
1935			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1936			xpt_done(ccb);
1937			return;
1938		} else if (!rc) /* Issued NVMe Encapsulated Request Message */
1939			return;
1940	}
1941
1942	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1943	bzero(req, sizeof(*req));
1944	req->DevHandle = htole16(targ->handle);
1945	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1946	req->MsgFlags = 0;
1947	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1948	req->SenseBufferLength = MPR_SENSE_LEN;
1949	req->SGLFlags = 0;
1950	req->ChainOffset = 0;
1951	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
1952	req->SGLOffset1= 0;
1953	req->SGLOffset2= 0;
1954	req->SGLOffset3= 0;
1955	req->SkipCount = 0;
1956	req->DataLength = htole32(csio->dxfer_len);
1957	req->BidirectionalDataLength = 0;
1958	req->IoFlags = htole16(csio->cdb_len);
1959	req->EEDPFlags = 0;
1960
1961	/* Note: BiDirectional transfers are not supported */
1962	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1963	case CAM_DIR_IN:
1964		mpi_control = MPI2_SCSIIO_CONTROL_READ;
1965		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
1966		break;
1967	case CAM_DIR_OUT:
1968		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1969		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
1970		break;
1971	case CAM_DIR_NONE:
1972	default:
1973		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1974		break;
1975	}
1976
1977	if (csio->cdb_len == 32)
1978		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1979	/*
1980	 * It looks like the hardware doesn't require an explicit tag
1981	 * number for each transaction.  SAM Task Management not supported
1982	 * at the moment.
1983	 */
1984	switch (csio->tag_action) {
1985	case MSG_HEAD_OF_Q_TAG:
1986		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1987		break;
1988	case MSG_ORDERED_Q_TAG:
1989		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1990		break;
1991	case MSG_ACA_TASK:
1992		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1993		break;
1994	case CAM_TAG_ACTION_NONE:
1995	case MSG_SIMPLE_Q_TAG:
1996	default:
1997		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
1998		break;
1999	}
2000	mpi_control |= (csio->priority << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT) &
2001	    MPI2_SCSIIO_CONTROL_CMDPRI_MASK;
2002	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
2003	req->Control = htole32(mpi_control);
2004
2005	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
2006		mpr_free_command(sc, cm);
2007		mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
2008		xpt_done(ccb);
2009		return;
2010	}
2011
2012	if (csio->ccb_h.flags & CAM_CDB_POINTER)
2013		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
2014	else {
2015		KASSERT(csio->cdb_len <= IOCDBLEN,
2016		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
2017		    "is not set", csio->cdb_len));
2018		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
2019	}
2020	req->IoFlags = htole16(csio->cdb_len);
2021
2022	/*
2023	 * Check if EEDP is supported and enabled.  If it is then check if the
2024	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
2025	 * is formatted for EEDP support.  If all of this is true, set CDB up
2026	 * for EEDP transfer.
2027	 */
2028	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
2029	if (sc->eedp_enabled && eedp_flags) {
2030		SLIST_FOREACH(lun, &targ->luns, lun_link) {
2031			if (lun->lun_id == csio->ccb_h.target_lun) {
2032				break;
2033			}
2034		}
2035
2036		if ((lun != NULL) && (lun->eedp_formatted)) {
2037			req->EEDPBlockSize = htole32(lun->eedp_block_size);
2038			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2039			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2040			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
2041			if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
2042				eedp_flags |=
2043				    MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
2044			}
2045			req->EEDPFlags = htole16(eedp_flags);
2046
2047			/*
2048			 * If CDB less than 32, fill in Primary Ref Tag with
2049			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
2050			 * already there.  Also, set protection bit.  FreeBSD
2051			 * currently does not support CDBs bigger than 16, but
2052			 * the code doesn't hurt, and will be here for the
2053			 * future.
2054			 */
2055			if (csio->cdb_len != 32) {
2056				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
2057				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
2058				    PrimaryReferenceTag;
2059				for (i = 0; i < 4; i++) {
2060					*ref_tag_addr =
2061					    req->CDB.CDB32[lba_byte + i];
2062					ref_tag_addr++;
2063				}
2064				req->CDB.EEDP32.PrimaryReferenceTag =
2065				    htole32(req->
2066				    CDB.EEDP32.PrimaryReferenceTag);
2067				req->CDB.EEDP32.PrimaryApplicationTagMask =
2068				    0xFFFF;
2069				req->CDB.CDB32[1] =
2070				    (req->CDB.CDB32[1] & 0x1F) | 0x20;
2071			} else {
2072				eedp_flags |=
2073				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
2074				req->EEDPFlags = htole16(eedp_flags);
2075				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
2076				    0x1F) | 0x20;
2077			}
2078		}
2079	}
2080
2081	cm->cm_length = csio->dxfer_len;
2082	if (cm->cm_length != 0) {
2083		cm->cm_data = ccb;
2084		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
2085	} else {
2086		cm->cm_data = NULL;
2087	}
2088	cm->cm_sge = &req->SGL;
2089	cm->cm_sglsize = (32 - 24) * 4;
2090	cm->cm_complete = mprsas_scsiio_complete;
2091	cm->cm_complete_data = ccb;
2092	cm->cm_targ = targ;
2093	cm->cm_lun = csio->ccb_h.target_lun;
2094	cm->cm_ccb = ccb;
2095	/*
2096	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
2097	 * and set descriptor type.
2098	 */
2099	if (targ->scsi_req_desc_type ==
2100	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
2101		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
2102		cm->cm_desc.FastPathSCSIIO.RequestFlags =
2103		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
2104		if (!sc->atomic_desc_capable) {
2105			cm->cm_desc.FastPathSCSIIO.DevHandle =
2106			    htole16(targ->handle);
2107		}
2108	} else {
2109		cm->cm_desc.SCSIIO.RequestFlags =
2110		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
2111		if (!sc->atomic_desc_capable)
2112			cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
2113	}
2114
2115	csio->ccb_h.qos.sim_data = sbinuptime();
2116	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
2117	    mprsas_scsiio_timeout, cm, 0);
2118
2119	targ->issued++;
2120	targ->outstanding++;
2121	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
2122	ccb->ccb_h.status |= CAM_SIM_QUEUED;
2123
2124	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
2125	    __func__, cm, ccb, targ->outstanding);
2126
2127	mpr_map_command(sc, cm);
2128	return;
2129}
2130
2131/**
2132 * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
2133 */
2134static void
2135mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2136    Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2137{
2138	u32 response_info;
2139	u8 *response_bytes;
2140	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2141	    MPI2_IOCSTATUS_MASK;
2142	u8 scsi_state = mpi_reply->SCSIState;
2143	u8 scsi_status = mpi_reply->SCSIStatus;
2144	char *desc_ioc_state = NULL;
2145	char *desc_scsi_status = NULL;
2146	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
2147
2148	if (log_info == 0x31170000)
2149		return;
2150
2151	desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2152	     ioc_status);
2153	desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2154	    scsi_status);
2155
2156	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2157	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2158	if (targ->encl_level_valid) {
2159		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2160		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2161		    targ->connector_name);
2162	}
2163
2164	/*
2165	 * We can add more detail about underflow data here
2166	 * TO-DO
2167	 */
2168	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2169	    "scsi_state %b\n", desc_scsi_status, scsi_status,
2170	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
2171	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
2172
2173	if (sc->mpr_debug & MPR_XINFO &&
2174	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2175		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2176		scsi_sense_print(csio);
2177		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2178	}
2179
2180	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2181		response_info = le32toh(mpi_reply->ResponseInfo);
2182		response_bytes = (u8 *)&response_info;
2183		mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2184		    response_bytes[0],
2185		    mpr_describe_table(mpr_scsi_taskmgmt_string,
2186		    response_bytes[0]));
2187	}
2188}
2189
/** mprsas_nvme_trans_status_code
 *
 * Convert a native NVMe command error status (SCT + SC fields from the
 * NVMe completion) to an equivalent SCSI error status, and build
 * fixed-format autosense data (sense key / ASC / ASCQ) in the CCB so
 * that CAM can perform normal SCSI error recovery.
 *
 * @nvme_status: 16-bit NVMe status field from the completion entry
 * @cm:          completed command; cm_complete_data is the CCB to fill
 *
 * Returns the equivalent MPI2_SCSI_STATUS_* value (scsi_status).
 */
static u8
mprsas_nvme_trans_status_code(uint16_t nvme_status,
    struct mpr_command *cm)
{
	u8 status = MPI2_SCSI_STATUS_GOOD;
	int skey, asc, ascq;
	union ccb *ccb = cm->cm_complete_data;
	int returned_sense_len;
	uint8_t sct, sc;

	/* Split the NVMe status into Status Code Type and Status Code. */
	sct = NVME_STATUS_GET_SCT(nvme_status);
	sc = NVME_STATUS_GET_SC(nvme_status);

	/*
	 * Default mapping for any status code not matched below:
	 * CHECK CONDITION / ILLEGAL REQUEST with no additional sense.
	 */
	status = MPI2_SCSI_STATUS_CHECK_CONDITION;
	skey = SSD_KEY_ILLEGAL_REQUEST;
	asc = SCSI_ASC_NO_SENSE;
	ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;

	switch (sct) {
	case NVME_SCT_GENERIC:
		switch (sc) {
		case NVME_SC_SUCCESS:
			status = MPI2_SCSI_STATUS_GOOD;
			skey = SSD_KEY_NO_SENSE;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_OPCODE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ILLEGAL_COMMAND;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_FIELD:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_INVALID_CDB;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_DATA_TRANSFER_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_ABORTED_POWER_LOSS:
			status = MPI2_SCSI_STATUS_TASK_ABORTED;
			skey = SSD_KEY_ABORTED_COMMAND;
			asc = SCSI_ASC_WARNING;
			ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
			break;
		case NVME_SC_INTERNAL_DEVICE_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_HARDWARE_ERROR;
			asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		/* All abort variants map to the same TASK ABORTED status. */
		case NVME_SC_ABORTED_BY_REQUEST:
		case NVME_SC_ABORTED_SQ_DELETION:
		case NVME_SC_ABORTED_FAILED_FUSED:
		case NVME_SC_ABORTED_MISSING_FUSED:
			status = MPI2_SCSI_STATUS_TASK_ABORTED;
			skey = SSD_KEY_ABORTED_COMMAND;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
			ascq = SCSI_ASCQ_INVALID_LUN_ID;
			break;
		case NVME_SC_LBA_OUT_OF_RANGE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ILLEGAL_BLOCK;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_CAPACITY_EXCEEDED:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_NAMESPACE_NOT_READY:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_NOT_READY;
			asc = SCSI_ASC_LUN_NOT_READY;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		}
		break;
	case NVME_SCT_COMMAND_SPECIFIC:
		switch (sc) {
		case NVME_SC_INVALID_FORMAT:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
			ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
			break;
		case NVME_SC_CONFLICTING_ATTRIBUTES:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_INVALID_CDB;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		}
		break;
	case NVME_SCT_MEDIA_ERROR:
		switch (sc) {
		case NVME_SC_WRITE_FAULTS:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_UNRECOVERED_READ_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		/* End-to-end protection (PI) check failures. */
		case NVME_SC_GUARD_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
			break;
		case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
			break;
		case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
			break;
		case NVME_SC_COMPARE_FAILURE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MISCOMPARE;
			asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_ACCESS_DENIED:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
			ascq = SCSI_ASCQ_INVALID_LUN_ID;
			break;
		}
		break;
	}

	/* Fixed-format sense is always fully populated by this routine. */
	returned_sense_len = sizeof(struct scsi_sense_data);
	if (returned_sense_len < ccb->csio.sense_len)
		ccb->csio.sense_resid = ccb->csio.sense_len -
		    returned_sense_len;
	else
		ccb->csio.sense_resid = 0;

	/* Build the sense data and mark the CCB's autosense as valid. */
	scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
	    1, skey, asc, ascq, SSD_ELEM_NONE);
	ccb->ccb_h.status |= CAM_AUTOSNS_VALID;

	return status;
}
2366
2367/** mprsas_complete_nvme_unmap
2368 *
2369 * Complete native NVMe command issued using NVMe Encapsulated
2370 * Request Message.
2371 */
2372static u8
2373mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2374{
2375	Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2376	struct nvme_completion *nvme_completion = NULL;
2377	u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2378
2379	mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
2380	if (le16toh(mpi_reply->ErrorResponseCount)){
2381		nvme_completion = (struct nvme_completion *)cm->cm_sense;
2382		scsi_status = mprsas_nvme_trans_status_code(
2383		    nvme_completion->status, cm);
2384	}
2385	return scsi_status;
2386}
2387
2388static void
2389mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2390{
2391	MPI2_SCSI_IO_REPLY *rep;
2392	union ccb *ccb;
2393	struct ccb_scsiio *csio;
2394	struct mprsas_softc *sassc;
2395	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2396	u8 *TLR_bits, TLR_on, *scsi_cdb;
2397	int dir = 0, i;
2398	u16 alloc_len;
2399	struct mprsas_target *target;
2400	target_id_t target_id;
2401
2402	MPR_FUNCTRACE(sc);
2403
2404	callout_stop(&cm->cm_callout);
2405	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2406
2407	sassc = sc->sassc;
2408	ccb = cm->cm_complete_data;
2409	csio = &ccb->csio;
2410	target_id = csio->ccb_h.target_id;
2411	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2412	mpr_dprint(sc, MPR_TRACE,
2413	    "cm %p SMID %u ccb %p reply %p outstanding %u csio->scsi_status 0x%x,"
2414	    "csio->dxfer_len 0x%x, csio->msg_le 0x%xn\n", cm,
2415	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2416	    cm->cm_targ->outstanding, csio->scsi_status,
2417	    csio->dxfer_len, csio->msg_len);
2418	/*
2419	 * XXX KDM if the chain allocation fails, does it matter if we do
2420	 * the sync and unload here?  It is simpler to do it in every case,
2421	 * assuming it doesn't cause problems.
2422	 */
2423	if (cm->cm_data != NULL) {
2424		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2425			dir = BUS_DMASYNC_POSTREAD;
2426		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2427			dir = BUS_DMASYNC_POSTWRITE;
2428		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2429		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2430	}
2431
2432	cm->cm_targ->completed++;
2433	cm->cm_targ->outstanding--;
2434	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2435	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2436
2437	if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
2438		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2439		KASSERT(cm->cm_state == MPR_CM_STATE_BUSY,
2440		    ("Not busy for CM_FLAGS_TIMEDOUT: %d\n", cm->cm_state));
2441		cm->cm_flags &= ~MPR_CM_FLAGS_ON_RECOVERY;
2442		if (cm->cm_reply != NULL)
2443			mprsas_log_command(cm, MPR_RECOVERY,
2444			    "completed timedout cm %p ccb %p during recovery "
2445			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2446			    le16toh(rep->IOCStatus), rep->SCSIStatus,
2447			    rep->SCSIState, le32toh(rep->TransferCount));
2448		else
2449			mprsas_log_command(cm, MPR_RECOVERY,
2450			    "completed timedout cm %p ccb %p during recovery\n",
2451			    cm, cm->cm_ccb);
2452	} else if (cm->cm_targ->tm != NULL) {
2453		if (cm->cm_reply != NULL)
2454			mprsas_log_command(cm, MPR_RECOVERY,
2455			    "completed cm %p ccb %p during recovery "
2456			    "ioc %x scsi %x state %x xfer %u\n",
2457			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2458			    rep->SCSIStatus, rep->SCSIState,
2459			    le32toh(rep->TransferCount));
2460		else
2461			mprsas_log_command(cm, MPR_RECOVERY,
2462			    "completed cm %p ccb %p during recovery\n",
2463			    cm, cm->cm_ccb);
2464	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2465		mprsas_log_command(cm, MPR_RECOVERY,
2466		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2467	}
2468
2469	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2470		/*
2471		 * We ran into an error after we tried to map the command,
2472		 * so we're getting a callback without queueing the command
2473		 * to the hardware.  So we set the status here, and it will
2474		 * be retained below.  We'll go through the "fast path",
2475		 * because there can be no reply when we haven't actually
2476		 * gone out to the hardware.
2477		 */
2478		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2479
2480		/*
2481		 * Currently the only error included in the mask is
2482		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2483		 * chain frames.  We need to freeze the queue until we get
2484		 * a command that completed without this error, which will
2485		 * hopefully have some chain frames attached that we can
2486		 * use.  If we wanted to get smarter about it, we would
2487		 * only unfreeze the queue in this condition when we're
2488		 * sure that we're getting some chain frames back.  That's
2489		 * probably unnecessary.
2490		 */
2491		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2492			xpt_freeze_simq(sassc->sim, 1);
2493			sassc->flags |= MPRSAS_QUEUE_FROZEN;
2494			mpr_dprint(sc, MPR_XINFO, "Error sending command, "
2495			    "freezing SIM queue\n");
2496		}
2497	}
2498
2499	/*
2500	 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2501	 * flag, and use it in a few places in the rest of this function for
2502	 * convenience. Use the macro if available.
2503	 */
2504	scsi_cdb = scsiio_cdb_ptr(csio);
2505
2506	/*
2507	 * If this is a Start Stop Unit command and it was issued by the driver
2508	 * during shutdown, decrement the refcount to account for all of the
2509	 * commands that were sent.  All SSU commands should be completed before
2510	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2511	 * is TRUE.
2512	 */
2513	if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2514		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2515		sc->SSU_refcount--;
2516	}
2517
2518	/* Take the fast path to completion */
2519	if (cm->cm_reply == NULL) {
2520		if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2521			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2522				mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2523			else {
2524				mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2525				csio->scsi_status = SCSI_STATUS_OK;
2526			}
2527			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2528				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2529				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2530				mpr_dprint(sc, MPR_XINFO,
2531				    "Unfreezing SIM queue\n");
2532			}
2533		}
2534
2535		/*
2536		 * There are two scenarios where the status won't be
2537		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
2538		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2539		 */
2540		if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2541			/*
2542			 * Freeze the dev queue so that commands are
2543			 * executed in the correct order after error
2544			 * recovery.
2545			 */
2546			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2547			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2548		}
2549		mpr_free_command(sc, cm);
2550		xpt_done(ccb);
2551		return;
2552	}
2553
2554	target = &sassc->targets[target_id];
2555	if (scsi_cdb[0] == UNMAP &&
2556	    target->is_nvme &&
2557	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2558		rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2559		csio->scsi_status = rep->SCSIStatus;
2560	}
2561
2562	mprsas_log_command(cm, MPR_XINFO,
2563	    "ioc %x scsi %x state %x xfer %u\n",
2564	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2565	    le32toh(rep->TransferCount));
2566
2567	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2568	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2569		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2570		/* FALLTHROUGH */
2571	case MPI2_IOCSTATUS_SUCCESS:
2572	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2573		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2574		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2575			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2576
2577		/* Completion failed at the transport level. */
2578		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2579		    MPI2_SCSI_STATE_TERMINATED)) {
2580			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2581			break;
2582		}
2583
2584		/* In a modern packetized environment, an autosense failure
2585		 * implies that there's not much else that can be done to
2586		 * recover the command.
2587		 */
2588		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2589			mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2590			break;
2591		}
2592
2593		/*
2594		 * CAM doesn't care about SAS Response Info data, but if this is
2595		 * the state check if TLR should be done.  If not, clear the
2596		 * TLR_bits for the target.
2597		 */
2598		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2599		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2600		    == MPR_SCSI_RI_INVALID_FRAME)) {
2601			sc->mapping_table[target_id].TLR_bits =
2602			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2603		}
2604
2605		/*
2606		 * Intentionally override the normal SCSI status reporting
2607		 * for these two cases.  These are likely to happen in a
2608		 * multi-initiator environment, and we want to make sure that
2609		 * CAM retries these commands rather than fail them.
2610		 */
2611		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2612		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2613			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2614			break;
2615		}
2616
2617		/* Handle normal status and sense */
2618		csio->scsi_status = rep->SCSIStatus;
2619		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2620			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2621		else
2622			mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2623
2624		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2625			int sense_len, returned_sense_len;
2626
2627			returned_sense_len = min(le32toh(rep->SenseCount),
2628			    sizeof(struct scsi_sense_data));
2629			if (returned_sense_len < csio->sense_len)
2630				csio->sense_resid = csio->sense_len -
2631				    returned_sense_len;
2632			else
2633				csio->sense_resid = 0;
2634
2635			sense_len = min(returned_sense_len,
2636			    csio->sense_len - csio->sense_resid);
2637			bzero(&csio->sense_data, sizeof(csio->sense_data));
2638			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2639			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2640		}
2641
2642		/*
2643		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2644		 * and it's page code 0 (Supported Page List), and there is
2645		 * inquiry data, and this is for a sequential access device, and
2646		 * the device is an SSP target, and TLR is supported by the
2647		 * controller, turn the TLR_bits value ON if page 0x90 is
2648		 * supported.
2649		 */
2650		if ((scsi_cdb[0] == INQUIRY) &&
2651		    (scsi_cdb[1] & SI_EVPD) &&
2652		    (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2653		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2654		    (csio->data_ptr != NULL) &&
2655		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2656		    (sc->control_TLR) &&
2657		    (sc->mapping_table[target_id].device_info &
2658		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2659			vpd_list = (struct scsi_vpd_supported_page_list *)
2660			    csio->data_ptr;
2661			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2662			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2663			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2664			alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2665			alloc_len -= csio->resid;
2666			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2667				if (vpd_list->list[i] == 0x90) {
2668					*TLR_bits = TLR_on;
2669					break;
2670				}
2671			}
2672		}
2673
2674		/*
2675		 * If this is a SATA direct-access end device, mark it so that
2676		 * a SCSI StartStopUnit command will be sent to it when the
2677		 * driver is being shutdown.
2678		 */
2679		if ((scsi_cdb[0] == INQUIRY) &&
2680		    (csio->data_ptr != NULL) &&
2681		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2682		    (sc->mapping_table[target_id].device_info &
2683		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2684		    ((sc->mapping_table[target_id].device_info &
2685		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2686		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2687			target = &sassc->targets[target_id];
2688			target->supports_SSU = TRUE;
2689			mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2690			    target_id);
2691		}
2692		break;
2693	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2694	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2695		/*
2696		 * If devinfo is 0 this will be a volume.  In that case don't
2697		 * tell CAM that the volume is not there.  We want volumes to
2698		 * be enumerated until they are deleted/removed, not just
2699		 * failed.
2700		 */
2701		if (cm->cm_targ->devinfo == 0)
2702			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2703		else
2704			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2705		break;
2706	case MPI2_IOCSTATUS_INVALID_SGL:
2707		mpr_print_scsiio_cmd(sc, cm);
2708		mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2709		break;
2710	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2711		/*
2712		 * This is one of the responses that comes back when an I/O
2713		 * has been aborted.  If it is because of a timeout that we
2714		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2715		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2716		 * command is the same (it gets retried, subject to the
2717		 * retry counter), the only difference is what gets printed
2718		 * on the console.
2719		 */
2720		if (cm->cm_flags & MPR_CM_FLAGS_TIMEDOUT)
2721			mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2722		else
2723			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2724		break;
2725	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2726		/* resid is ignored for this condition */
2727		csio->resid = 0;
2728		mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2729		break;
2730	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2731	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2732		/*
2733		 * These can sometimes be transient transport-related
2734		 * errors, and sometimes persistent drive-related errors.
2735		 * We used to retry these without decrementing the retry
2736		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2737		 * we hit a persistent drive problem that returns one of
2738		 * these error codes, we would retry indefinitely.  So,
2739		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2740		 * count and avoid infinite retries.  We're taking the
2741		 * potential risk of flagging false failures in the event
2742		 * of a topology-related error (e.g. a SAS expander problem
2743		 * causes a command addressed to a drive to fail), but
2744		 * avoiding getting into an infinite retry loop. However,
2745		 * if we get them while were moving a device, we should
2746		 * fail the request as 'not there' because the device
2747		 * is effectively gone.
2748		 */
2749		if (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL)
2750			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2751		else
2752			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2753		mpr_dprint(sc, MPR_INFO,
2754		    "Controller reported %s tgt %u SMID %u loginfo %x%s\n",
2755		    mpr_describe_table(mpr_iocstatus_string,
2756		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2757		    target_id, cm->cm_desc.Default.SMID,
2758		    le32toh(rep->IOCLogInfo),
2759		    (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) ? " departing" : "");
2760		mpr_dprint(sc, MPR_XINFO,
2761		    "SCSIStatus %x SCSIState %x xfercount %u\n",
2762		    rep->SCSIStatus, rep->SCSIState,
2763		    le32toh(rep->TransferCount));
2764		break;
2765	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2766	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2767	case MPI2_IOCSTATUS_INVALID_VPID:
2768	case MPI2_IOCSTATUS_INVALID_FIELD:
2769	case MPI2_IOCSTATUS_INVALID_STATE:
2770	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2771	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2772	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2773	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2774	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2775	default:
2776		mprsas_log_command(cm, MPR_XINFO,
2777		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2778		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2779		    rep->SCSIStatus, rep->SCSIState,
2780		    le32toh(rep->TransferCount));
2781		csio->resid = cm->cm_length;
2782
2783		if (scsi_cdb[0] == UNMAP &&
2784		    target->is_nvme &&
2785		    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2786			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2787		else
2788			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2789
2790		break;
2791	}
2792
2793	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2794
2795	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2796		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2797		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2798		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2799		    "queue\n");
2800	}
2801
2802	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2803		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2804		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2805	}
2806
2807	/*
2808	 * Check to see if we're removing the device. If so, and this is the
2809	 * last command on the queue, proceed with the deferred removal of the
2810	 * device.  Note, for removing a volume, this won't trigger because
2811	 * pending_remove_tm will be NULL.
2812	 */
2813	if (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) {
2814		if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL &&
2815		    cm->cm_targ->pending_remove_tm != NULL) {
2816			mpr_dprint(sc, MPR_INFO, "Last pending command complete: starting remove_device\n");
2817			mpr_map_command(sc, cm->cm_targ->pending_remove_tm);
2818			cm->cm_targ->pending_remove_tm = NULL;
2819		}
2820	}
2821
2822	mpr_free_command(sc, cm);
2823	xpt_done(ccb);
2824}
2825
/*
 * Completion handler for SMP passthrough commands issued by
 * mprsas_send_smpcmd().  Translates the firmware reply into a CAM status
 * on the CCB, then unconditionally tears down the DMA mapping, frees the
 * command, and completes the CCB.
 */
static void
mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
		    "request!\n", __func__, cm->cm_flags);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
        }

	/* No reply frame from the firmware: fail the CCB. */
	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/*
	 * Reassemble the 64-bit target SAS address from the original
	 * request frame (stored little-endian as two 32-bit halves).
	 * Used only for the debug message below.
	 */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	/* Fail the CCB if either the IOC or the SAS transport reported an error. */
	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
	    "completed successfully\n", __func__, (uintmax_t)sasaddr);

	/*
	 * Byte 2 of the SMP response frame carries the function result;
	 * anything other than ACCEPTED is surfaced as an SMP-level error.
	 */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
	else
		mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mpr_free_command(sc, cm);
	xpt_done(ccb);
}
2888
/*
 * Build and dispatch an XPT_SMP_IO CCB to the firmware as an MPI SMP
 * passthrough request addressed to 'sasaddr'.  The request and response
 * buffers are described by a two-element uio so that one mpr_map_command()
 * call maps both directions; completion is handled asynchronously in
 * mprsas_smpio_complete().  On any setup failure the CCB is completed
 * immediately with an error status.
 */
static void
mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mpr_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mpr_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

	/*
	 * Resolve virtual addresses for the request/response buffers from
	 * whatever data representation the CCB uses.
	 */
	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		/*
		 * XXX We don't yet support physical addresses here.
		 */
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	cm = mpr_alloc_command(sc);
	if (cm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
		    __func__);
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
	    "%#jx\n", __func__, (uintmax_t)sasaddr);

	mpr_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mpr_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the outgoing request, iovec 1 the response buffer. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mpr_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mprsas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mpr_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
			MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mpr_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
		    "mpr_map_command()\n", __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mpr_free_command(sc, cm);
	mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;
}
3058
/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address of the SMP target
 * (either the addressed device itself, when it contains an embedded SMP
 * target, or its parent — typically the expander it hangs off) and hand
 * the CCB to mprsas_send_smpcmd().  If no usable SAS address can be
 * found, the CCB is completed with CAM_DEV_NOT_THERE.
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		/* Old probe path: look the parent up by its device handle. */
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		/* Current path: parent info is cached on the target itself. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */
	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3184
/*
 * Handle an XPT_RESET_DEV CCB by sending a target-reset task management
 * request to the device.  For NVMe devices without custom TM handling, a
 * PCIe protocol-level reset is requested instead of a SAS/SATA link
 * reset.  The reply is processed in mprsas_resetdev_complete().
 */
static void
mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *tm;
	struct mprsas_target *targ;

	MPR_FUNCTRACE(sassc->sc);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
	    "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
	sc = sassc->sc;
	/* TM commands come from a dedicated pool; fail the CCB if exhausted. */
	tm = mprsas_alloc_tm(sc);
	if (tm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
		    "mprsas_action_resetdev\n");
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
		/* SAS Hard Link Reset / SATA Link Reset */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	} else {
		/* PCIe Protocol Level Reset*/
		req->MsgFlags =
		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	}

	tm->cm_data = NULL;
	tm->cm_complete = mprsas_resetdev_complete;
	tm->cm_complete_data = ccb;

	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	tm->cm_targ = targ;

	/* Block further I/O to the target until the TM completes. */
	mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
	mpr_map_command(sc, tm);
}
3233
3234static void
3235mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3236{
3237	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3238	union ccb *ccb;
3239
3240	MPR_FUNCTRACE(sc);
3241	mtx_assert(&sc->mpr_mtx, MA_OWNED);
3242
3243	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3244	ccb = tm->cm_complete_data;
3245
3246	/*
3247	 * Currently there should be no way we can hit this case.  It only
3248	 * happens when we have a failure to allocate chain frames, and
3249	 * task management commands don't have S/G lists.
3250	 */
3251	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3252		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3253
3254		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3255
3256		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3257		    "handle %#04x! This should not happen!\n", __func__,
3258		    tm->cm_flags, req->DevHandle);
3259		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3260		goto bailout;
3261	}
3262
3263	mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3264	    __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3265
3266	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3267		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3268		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3269		    CAM_LUN_WILDCARD);
3270	}
3271	else
3272		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3273
3274bailout:
3275
3276	mprsas_free_tm(sc, tm);
3277	xpt_done(ccb);
3278}
3279
3280static void
3281mprsas_poll(struct cam_sim *sim)
3282{
3283	struct mprsas_softc *sassc;
3284
3285	sassc = cam_sim_softc(sim);
3286
3287	if (sassc->sc->mpr_debug & MPR_TRACE) {
3288		/* frequent debug messages during a panic just slow
3289		 * everything down too much.
3290		 */
3291		mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3292		    __func__);
3293		sassc->sc->mpr_debug &= ~MPR_TRACE;
3294	}
3295
3296	mpr_intr_locked(sassc->sc);
3297}
3298
/*
 * CAM async event callback.  Only AC_ADVINFO_CHANGED with buftype
 * CDAI_TYPE_RCAPLONG is acted upon: the long read-capacity data for the
 * affected LUN is fetched via XPT_DEV_ADVINFO and the LUN's EEDP
 * (protection) state is cached on the per-target LUN list, allocating a
 * LUN entry on first sight.
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	switch (code) {
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Look for an existing LUN entry for this LUN id. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* First time we've seen this LUN: allocate an entry for it. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Issue a synchronous XPT_DEV_ADVINFO to fetch the cached
		 * long read-capacity data for this LUN.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = CDAI_FLAG_NONE;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		/*
		 * Cache the EEDP state: only protection types 1 and 3 are
		 * treated as EEDP-formatted; type 2 (and anything unknown)
		 * is not.
		 */
		if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			switch (rcap_buf.prot & SRC16_P_TYPE) {
			case SRC16_PTYPE_1:
			case SRC16_PTYPE_3:
				lun->eedp_formatted = TRUE;
				lun->eedp_block_size =
				    scsi_4btoul(rcap_buf.length);
				break;
			case SRC16_PTYPE_2:
			default:
				lun->eedp_formatted = FALSE;
				lun->eedp_block_size = 0;
				break;
			}
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
	case AC_FOUND_DEVICE:
	default:
		break;
	}
}
3398
3399/*
3400 * Set the INRESET flag for this target so that no I/O will be sent to
3401 * the target until the reset has completed.  If an I/O request does
3402 * happen, the devq will be frozen.  The CCB holds the path which is
3403 * used to release the devq.  The devq is released and the CCB is freed
3404 * when the TM completes.
3405 */
3406void
3407mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3408    struct mprsas_target *target, lun_id_t lun_id)
3409{
3410	union ccb *ccb;
3411	path_id_t path_id;
3412
3413	ccb = xpt_alloc_ccb_nowait();
3414	if (ccb) {
3415		path_id = cam_sim_path(sc->sassc->sim);
3416		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3417		    target->tid, lun_id) != CAM_REQ_CMP) {
3418			xpt_free_ccb(ccb);
3419		} else {
3420			tm->cm_ccb = ccb;
3421			tm->cm_targ = target;
3422			target->flags |= MPRSAS_TARGET_INRESET;
3423		}
3424	}
3425}
3426
3427int
3428mprsas_startup(struct mpr_softc *sc)
3429{
3430	/*
3431	 * Send the port enable message and set the wait_for_port_enable flag.
3432	 * This flag helps to keep the simq frozen until all discovery events
3433	 * are processed.
3434	 */
3435	sc->wait_for_port_enable = 1;
3436	mprsas_send_portenable(sc);
3437	return (0);
3438}
3439
3440static int
3441mprsas_send_portenable(struct mpr_softc *sc)
3442{
3443	MPI2_PORT_ENABLE_REQUEST *request;
3444	struct mpr_command *cm;
3445
3446	MPR_FUNCTRACE(sc);
3447
3448	if ((cm = mpr_alloc_command(sc)) == NULL)
3449		return (EBUSY);
3450	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3451	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3452	request->MsgFlags = 0;
3453	request->VP_ID = 0;
3454	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3455	cm->cm_complete = mprsas_portenable_complete;
3456	cm->cm_data = NULL;
3457	cm->cm_sge = NULL;
3458
3459	mpr_map_command(sc, cm);
3460	mpr_dprint(sc, MPR_XINFO,
3461	    "mpr_send_portenable finished cm %p req %p complete %p\n",
3462	    cm, cm->cm_req, cm->cm_complete);
3463	return (0);
3464}
3465
/*
 * Completion handler for the Port Enable request sent by
 * mprsas_send_portenable().  Logs failures, then clears the
 * wait_for_port_enable gate, wakes any waiter on port_enable_complete,
 * and drops the startup refcount so the bus rescan can proceed once
 * discovery is done.
 */
static void
mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_PORT_ENABLE_REPLY *reply;
	struct mprsas_softc *sassc;

	MPR_FUNCTRACE(sc);
	sassc = sc->sassc;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * port enable commands don't have S/G lists.
	 */
	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
		    "This should not happen!\n", __func__, cm->cm_flags);
	}

	/* Failures are logged only; startup continues regardless. */
	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
	if (reply == NULL)
		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
	else if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS)
		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");

	mpr_free_command(sc, cm);
	/*
	 * Done waiting for port enable to complete.  Decrement the refcount.
	 * If refcount is 0, discovery is complete and a rescan of the bus can
	 * take place.
	 */
	sc->wait_for_port_enable = 0;
	sc->port_enable_complete = 1;
	wakeup(&sc->port_enable_complete);
	mprsas_startup_decrement(sassc);
}
3503
3504int
3505mprsas_check_id(struct mprsas_softc *sassc, int id)
3506{
3507	struct mpr_softc *sc = sassc->sc;
3508	char *ids;
3509	char *name;
3510
3511	ids = &sc->exclude_ids[0];
3512	while((name = strsep(&ids, ",")) != NULL) {
3513		if (name[0] == '\0')
3514			continue;
3515		if (strtol(name, NULL, 0) == (long)id)
3516			return (1);
3517	}
3518
3519	return (0);
3520}
3521
3522void
3523mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3524{
3525	struct mprsas_softc *sassc;
3526	struct mprsas_lun *lun, *lun_tmp;
3527	struct mprsas_target *targ;
3528	int i;
3529
3530	sassc = sc->sassc;
3531	/*
3532	 * The number of targets is based on IOC Facts, so free all of
3533	 * the allocated LUNs for each target and then the target buffer
3534	 * itself.
3535	 */
3536	for (i=0; i< maxtargets; i++) {
3537		targ = &sassc->targets[i];
3538		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3539			free(lun, M_MPR);
3540		}
3541	}
3542	free(sassc->targets, M_MPR);
3543
3544	sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3545	    M_MPR, M_WAITOK|M_ZERO);
3546}
3547