1/*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2009 Yahoo! Inc.
5 * Copyright (c) 2011-2015 LSI Corp.
6 * Copyright (c) 2013-2015 Avago Technologies
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
31 *
32 * $FreeBSD$
33 */
34
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD$");
37
38/* Communications core for Avago Technologies (LSI) MPT2 */
39
40/* TODO Move headers to mpsvar */
41#include <sys/types.h>
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/kernel.h>
45#include <sys/selinfo.h>
46#include <sys/module.h>
47#include <sys/bus.h>
48#include <sys/conf.h>
49#include <sys/bio.h>
50#include <sys/malloc.h>
51#include <sys/uio.h>
52#include <sys/sysctl.h>
53#include <sys/endian.h>
54#include <sys/queue.h>
55#include <sys/kthread.h>
56#include <sys/taskqueue.h>
57#include <sys/sbuf.h>
58
59#include <machine/bus.h>
60#include <machine/resource.h>
61#include <sys/rman.h>
62
63#include <machine/stdarg.h>
64
65#include <cam/cam.h>
66#include <cam/cam_ccb.h>
67#include <cam/cam_xpt.h>
68#include <cam/cam_debug.h>
69#include <cam/cam_sim.h>
70#include <cam/cam_xpt_sim.h>
71#include <cam/cam_xpt_periph.h>
72#include <cam/cam_periph.h>
73#include <cam/scsi/scsi_all.h>
74#include <cam/scsi/scsi_message.h>
75#if __FreeBSD_version >= 900026
76#include <cam/scsi/smp_all.h>
77#endif
78
79#include <dev/mps/mpi/mpi2_type.h>
80#include <dev/mps/mpi/mpi2.h>
81#include <dev/mps/mpi/mpi2_ioc.h>
82#include <dev/mps/mpi/mpi2_sas.h>
83#include <dev/mps/mpi/mpi2_cnfg.h>
84#include <dev/mps/mpi/mpi2_init.h>
85#include <dev/mps/mpi/mpi2_tool.h>
86#include <dev/mps/mps_ioctl.h>
87#include <dev/mps/mpsvar.h>
88#include <dev/mps/mps_table.h>
89#include <dev/mps/mps_sas.h>
90
91#define MPSSAS_DISCOVERY_TIMEOUT	20
92#define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
93
/*
 * Static lookup table, indexed by SCSI CDB opcode (0x00-0xFF), giving the
 * EEDP flags to use for that opcode when the target supports protection
 * information.  A zero entry means no EEDP action for that opcode.
 */
#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x00 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x10 */
	/* 0x28 READ(10), 0x2A WRITE(10), 0x2E WRITE+VERIFY(10), 0x2F VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x30 */
	/* 0x41 WRITE SAME(10) */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x50 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x60 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x70 */
	/* 0x88 READ(16), 0x8A WRITE(16), 0x8E WRITE+VERIFY(16), 0x8F VERIFY(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x93 WRITE SAME(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA8 READ(12), 0xAA WRITE(12), 0xAE WRITE+VERIFY(12), 0xAF VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xB0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xC0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xD0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xE0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* 0xF0 */
};
118
119MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
120
121static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
122static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
123static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
124static void mpssas_poll(struct cam_sim *sim);
125static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
126    struct mps_command *cm);
127static void mpssas_scsiio_timeout(void *data);
128static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
129static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
130    struct mps_command *cm, union ccb *ccb);
131static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
132static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
133static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
134#if __FreeBSD_version >= 900026
135static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
136static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
137			       uint64_t sasaddr);
138static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
139#endif //FreeBSD_version >= 900026
140static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
141static void mpssas_async(void *callback_arg, uint32_t code,
142			 struct cam_path *path, void *arg);
143#if (__FreeBSD_version < 901503) || \
144    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
145static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
146			      struct ccb_getdev *cgd);
147static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
148#endif
149static int mpssas_send_portenable(struct mps_softc *sc);
150static void mpssas_portenable_complete(struct mps_softc *sc,
151    struct mps_command *cm);
152
153struct mpssas_target *
154mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
155{
156	struct mpssas_target *target;
157	int i;
158
159	for (i = start; i < sassc->maxtargets; i++) {
160		target = &sassc->targets[i];
161		if (target->handle == handle)
162			return (target);
163	}
164
165	return (NULL);
166}
167
168/* we need to freeze the simq during attach and diag reset, to avoid failing
169 * commands before device handles have been found by discovery.  Since
170 * discovery involves reading config pages and possibly sending commands,
171 * discovery actions may continue even after we receive the end of discovery
172 * event, so refcount discovery actions instead of assuming we can unfreeze
173 * the simq when we get the event.
174 */
/*
 * Take a reference on in-progress discovery work.  The first reference
 * freezes the SIM queue (and holds boot on newer CAM) so that no I/O is
 * issued before discovery has populated the device handles; see the
 * block comment above for why this is refcounted.  Only meaningful while
 * MPSSAS_IN_STARTUP is set.  Called with the driver lock held.
 */
void
mpssas_startup_increment(struct mpssas_softc *sassc)
{
	MPS_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mps_dprint(sassc->sc, MPS_INIT,
			    "%s freezing simq\n", __func__);
#if __FreeBSD_version >= 1000039
			xpt_hold_boot();
#endif
			xpt_freeze_simq(sassc->sim, 1);
		}
		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
194
195void
196mpssas_release_simq_reinit(struct mpssas_softc *sassc)
197{
198	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
199		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
200		xpt_release_simq(sassc->sim, 1);
201		mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
202	}
203}
204
/*
 * Drop a reference taken by mpssas_startup_increment().  When the last
 * reference is released, startup is declared finished: the SIM queue is
 * released and either boot is released (newer CAM, which rescans via
 * PIM_NOSCAN policy) or an explicit full-bus rescan is scheduled.
 * Called with the driver lock held.
 */
void
mpssas_startup_decrement(struct mpssas_softc *sassc)
{
	MPS_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mps_dprint(sassc->sc, MPS_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPSSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
#if __FreeBSD_version >= 1000039
			xpt_release_boot();
#else
			mpssas_rescan_target(sassc->sc, NULL);
#endif
		}
		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
229
230/*
231 * The firmware requires us to stop sending commands when we're doing task
232 * management.
233 * XXX The logic for serializing the device has been made lazy and moved to
234 * mpssas_prepare_for_tm().
235 */
236struct mps_command *
237mpssas_alloc_tm(struct mps_softc *sc)
238{
239	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
240	struct mps_command *tm;
241
242	tm = mps_alloc_high_priority_command(sc);
243	if (tm == NULL)
244		return (NULL);
245
246	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
247	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
248	return tm;
249}
250
/*
 * Return a task-management command allocated by mpssas_alloc_tm() to the
 * high-priority pool, undoing the per-device freeze state set up by
 * mpssas_prepare_for_tm(): clear the target's INRESET flag, release the
 * devq, and free the path/CCB used to freeze it.  NULL is a no-op.
 */
void
mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
{
	/* Sentinel for "no target"; printed via %d below (shows as -1). */
	int target_id = 0xFFFFFFFF;

	if (tm == NULL)
		return;

	/*
	 * For TM's the devq is frozen for the device.  Unfreeze it here and
	 * free the resources used for freezing the devq.  Must clear the
	 * INRESET flag as well or scsi I/O will not work.
	 */
	if (tm->cm_targ != NULL) {
		tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
		target_id = tm->cm_targ->tid;
	}
	if (tm->cm_ccb) {
		mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n",
		    target_id);
		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
		xpt_free_path(tm->cm_ccb->ccb_h.path);
		xpt_free_ccb(tm->cm_ccb);
	}

	mps_free_high_priority_command(sc, tm);
}
278
279void
280mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
281{
282	struct mpssas_softc *sassc = sc->sassc;
283	path_id_t pathid;
284	target_id_t targetid;
285	union ccb *ccb;
286
287	MPS_FUNCTRACE(sc);
288	pathid = cam_sim_path(sassc->sim);
289	if (targ == NULL)
290		targetid = CAM_TARGET_WILDCARD;
291	else
292		targetid = targ - sassc->targets;
293
294	/*
295	 * Allocate a CCB and schedule a rescan.
296	 */
297	ccb = xpt_alloc_ccb_nowait();
298	if (ccb == NULL) {
299		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
300		return;
301	}
302
303	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
304	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
305		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
306		xpt_free_ccb(ccb);
307		return;
308	}
309
310	if (targetid == CAM_TARGET_WILDCARD)
311		ccb->ccb_h.func_code = XPT_SCAN_BUS;
312	else
313		ccb->ccb_h.func_code = XPT_SCAN_TGT;
314
315	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
316	xpt_rescan(ccb);
317}
318
/*
 * printf-style logging helper for a command.  Emits nothing unless one of
 * the bits in "level" is enabled in the softc's debug mask.  The line is
 * prefixed with the CAM path (plus CDB and transfer length for SCSI I/O)
 * when a CCB is attached, or a synthesized "noperiph" sim/bus/target/lun
 * tuple otherwise, followed by the request SMID, then the caller's
 * formatted message.  Output is assembled in a fixed on-stack sbuf, so
 * overlong messages are truncated rather than overflowing.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[224];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mps_debug & level) == 0)
		return;

	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB: fall back to sim identity and target/lun ids. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
364
365
366static void
367mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
368{
369	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
370	struct mpssas_target *targ;
371	uint16_t handle;
372
373	MPS_FUNCTRACE(sc);
374
375	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
376	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
377	targ = tm->cm_targ;
378
379	if (reply == NULL) {
380		/* XXX retry the remove after the diag reset completes? */
381		mps_dprint(sc, MPS_FAULT,
382		    "%s NULL reply resetting device 0x%04x\n", __func__,
383		    handle);
384		mpssas_free_tm(sc, tm);
385		return;
386	}
387
388	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
389	    MPI2_IOCSTATUS_SUCCESS) {
390		mps_dprint(sc, MPS_ERROR,
391		   "IOCStatus = 0x%x while resetting device 0x%x\n",
392		   le16toh(reply->IOCStatus), handle);
393	}
394
395	mps_dprint(sc, MPS_XINFO,
396	    "Reset aborted %u commands\n", reply->TerminationCount);
397	mps_free_reply(sc, tm->cm_reply_data);
398	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
399
400	mps_dprint(sc, MPS_XINFO,
401	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
402
403	/*
404	 * Don't clear target if remove fails because things will get confusing.
405	 * Leave the devname and sasaddr intact so that we know to avoid reusing
406	 * this target id if possible, and so we can assign the same target id
407	 * to this device if it comes back in the future.
408	 */
409	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
410	    MPI2_IOCSTATUS_SUCCESS) {
411		targ = tm->cm_targ;
412		targ->handle = 0x0;
413		targ->encl_handle = 0x0;
414		targ->encl_slot = 0x0;
415		targ->exp_dev_handle = 0x0;
416		targ->phy_num = 0x0;
417		targ->linkrate = 0x0;
418		targ->devinfo = 0x0;
419		targ->flags = 0x0;
420	}
421
422	mpssas_free_tm(sc, tm);
423}
424
425
426/*
427 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
428 * Otherwise Volume Delete is same as Bare Drive Removal.
429 */
430void
431mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
432{
433	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
434	struct mps_softc *sc;
435	struct mps_command *tm;
436	struct mpssas_target *targ = NULL;
437
438	MPS_FUNCTRACE(sassc->sc);
439	sc = sassc->sc;
440
441#ifdef WD_SUPPORT
442	/*
443	 * If this is a WD controller, determine if the disk should be exposed
444	 * to the OS or not.  If disk should be exposed, return from this
445	 * function without doing anything.
446	 */
447	if (sc->WD_available && (sc->WD_hide_expose ==
448	    MPS_WD_EXPOSE_ALWAYS)) {
449		return;
450	}
451#endif //WD_SUPPORT
452
453	targ = mpssas_find_target_by_handle(sassc, 0, handle);
454	if (targ == NULL) {
455		/* FIXME: what is the action? */
456		/* We don't know about this device? */
457		mps_dprint(sc, MPS_ERROR,
458		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
459		return;
460	}
461
462	targ->flags |= MPSSAS_TARGET_INREMOVAL;
463
464	tm = mpssas_alloc_tm(sc);
465	if (tm == NULL) {
466		mps_dprint(sc, MPS_ERROR,
467		    "%s: command alloc failure\n", __func__);
468		return;
469	}
470
471	mpssas_rescan_target(sc, targ);
472
473	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
474	req->DevHandle = targ->handle;
475	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
476
477	/* SAS Hard Link Reset / SATA Link Reset */
478	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
479
480	tm->cm_targ = targ;
481	tm->cm_data = NULL;
482	tm->cm_complete = mpssas_remove_volume;
483	tm->cm_complete_data = (void *)(uintptr_t)handle;
484
485	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
486	    __func__, targ->tid);
487	mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
488
489	mps_map_command(sc, tm);
490}
491
492/*
493 * The MPT2 firmware performs debounce on the link to avoid transient link
494 * errors and false removals.  When it does decide that link has been lost
495 * and a device need to go away, it expects that the host will perform a
496 * target reset and then an op remove.  The reset has the side-effect of
497 * aborting any outstanding requests for the device, which is required for
498 * the op-remove to succeed.  It's not clear if the host should check for
499 * the device coming back alive after the reset.
500 */
501void
502mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
503{
504	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
505	struct mps_softc *sc;
506	struct mps_command *cm;
507	struct mpssas_target *targ = NULL;
508
509	MPS_FUNCTRACE(sassc->sc);
510
511	sc = sassc->sc;
512
513	targ = mpssas_find_target_by_handle(sassc, 0, handle);
514	if (targ == NULL) {
515		/* FIXME: what is the action? */
516		/* We don't know about this device? */
517		mps_dprint(sc, MPS_ERROR,
518		    "%s : invalid handle 0x%x \n", __func__, handle);
519		return;
520	}
521
522	targ->flags |= MPSSAS_TARGET_INREMOVAL;
523
524	cm = mpssas_alloc_tm(sc);
525	if (cm == NULL) {
526		mps_dprint(sc, MPS_ERROR,
527		    "%s: command alloc failure\n", __func__);
528		return;
529	}
530
531	mpssas_rescan_target(sc, targ);
532
533	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
534	req->DevHandle = htole16(targ->handle);
535	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
536
537	/* SAS Hard Link Reset / SATA Link Reset */
538	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
539
540	cm->cm_targ = targ;
541	cm->cm_data = NULL;
542	cm->cm_complete = mpssas_remove_device;
543	cm->cm_complete_data = (void *)(uintptr_t)handle;
544
545	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
546	    __func__, targ->tid);
547	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
548
549	mps_map_command(sc, cm);
550}
551
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_remove().  On completion of the reset, the same command
 * is re-purposed as a SAS_IO_UNIT_CONTROL / REMOVE_DEVICE request, which
 * is either mapped immediately (no commands pending on the target) or
 * parked in targ->pending_remove_tm to be kicked off once the target's
 * command list drains.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply resetting device 0x%04x\n", __func__,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_ERROR,
		   "IOCStatus = 0x%x while resetting device 0x%x\n",
		   le16toh(reply->IOCStatus), handle);
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	/*
	 * Wait to send the REMOVE_DEVICE until all the commands have cleared.
	 * They should be aborted or time out and we'll kick this off there
	 * if so.
	 */
	if (TAILQ_FIRST(&targ->commands) == NULL) {
		mps_dprint(sc, MPS_INFO, "No pending commands: starting remove_device\n");
		mps_map_command(sc, tm);
		targ->pending_remove_tm = NULL;
	} else {
		targ->pending_remove_tm = tm;
	}


	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
}
627
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE step.  On firmware
 * success the target slot is cleared and its LUN list is freed; on
 * failure the slot (devname/sasaddr) is deliberately left intact so the
 * target id is not reused and can be re-assigned if the device returns.
 * The TM command is freed in all cases.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * At this point, we should have no pending commands for the target.
	 * The remove target has just completed.
	 */
	KASSERT(TAILQ_FIRST(&targ->commands) == NULL,
	    ("%s: no commands should be pending\n", __func__));


	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
			   "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Free the LUN list attached to this target. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}


	mpssas_free_tm(sc, tm);
}
703
704static int
705mpssas_register_events(struct mps_softc *sc)
706{
707	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
708
709	bzero(events, 16);
710	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
711	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
712	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
713	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
714	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
715	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
716	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
717	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
718	setbit(events, MPI2_EVENT_IR_VOLUME);
719	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
720	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
721	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
722
723	mps_register_events(sc, events, mpssas_evt_handler, NULL,
724	    &sc->sassc->mpssas_eh);
725
726	return (0);
727}
728
/*
 * Attach the SAS/CAM side of the driver: allocate the sassc and target
 * array, create the SIMQ/SIM, register the bus with CAM, freeze the
 * queue for discovery, set up the event taskqueue, and register async
 * and firmware event handlers.  On any error, mps_detach_sas() unwinds
 * whatever was set up.  Returns 0 on success or an errno.
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	cam_status status;
	int unit, error = 0, reqs;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);

	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mpssas_target) *
	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
	sc->sassc = sassc;
	sassc->sc = sc;

	/* Reserve the high-priority commands and one internal command. */
	reqs = sc->num_reqs - sc->num_prireqs - 1;
	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_mtx, reqs, reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_INIT|MPS_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	mps_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
		mps_dprint(sc, MPS_INIT|MPS_ERROR,
		    "Error %d registering SCSI bus\n", error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mpssas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mps_dprint(sc, MPS_ERROR|MPS_INIT,
		    "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED;
#else
		event = AC_FOUND_DEVICE;
#endif
		status = xpt_register_async(event, mpssas_async, sc,
					    sassc->path);
		if (status != CAM_REQ_CMP) {
			mps_dprint(sc, MPS_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mps_printf(sc, "EEDP capabilities disabled.\n");
	}

	mps_unlock(sc);

	mpssas_register_events(sc);
out:
	if (error)
		mps_detach_sas(sc);

	mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error);
	return (error);
}
851
/*
 * Tear down the SAS/CAM side: deregister firmware events, drain and free
 * the event taskqueue (without the lock, to avoid deadlocking against
 * in-flight tasks), unwind any startup freeze, remove the async handler,
 * deregister the bus and free the SIM/SIMQ, then free the per-target LUN
 * lists and the sassc itself.  Safe to call on a partially-attached
 * softc (used as the error path of mps_attach_sas()).  Returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	while (sassc->startup_refcount != 0)
		mpssas_startup_decrement(sassc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		/* An empty event mask deregisters the callback. */
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Release the simq frozen at attach if startup never completed. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
914
915void
916mpssas_discovery_end(struct mpssas_softc *sassc)
917{
918	struct mps_softc *sc = sassc->sc;
919
920	MPS_FUNCTRACE(sc);
921
922	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
923		callout_stop(&sassc->discovery_callout);
924
925	/*
926	 * After discovery has completed, check the mapping table for any
927	 * missing devices and update their missing counts. Only do this once
928	 * whenever the driver is initialized so that missing counts aren't
929	 * updated unnecessarily. Note that just because discovery has
930	 * completed doesn't mean that events have been processed yet. The
931	 * check_devices function is a callout timer that checks if ALL devices
932	 * are missing. If so, it will wait a little longer for events to
933	 * complete and keep resetting itself until some device in the mapping
934	 * table is not missing, meaning that event processing has started.
935	 */
936	if (sc->track_mapping_events) {
937		mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has "
938		    "completed. Check for missing devices in the mapping "
939		    "table.\n");
940		callout_reset(&sc->device_check_callout,
941		    MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
942		    sc);
943	}
944}
945
/*
 * CAM action entry point for this SIM.  Dispatches an incoming CCB by
 * function code.  Must be called with the softc mutex held (asserted
 * below).  CCBs handled synchronously fall through to the xpt_done()
 * at the bottom; the SCSI I/O, SMP I/O, and device-reset cases return
 * early because they complete asynchronously in their own paths.
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report HBA capabilities and limits to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mps_softc *sc = sassc->sc;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if __FreeBSD_version >= 1000039
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/* Baseline 1.5 Gbps; per-target rates come from
		 * XPT_GET_TRAN_SETTINGS below. */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		cpi->maxio = sc->maxio;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* handle == 0 means the firmware knows no such device. */
		if (targ->handle == 0x0) {
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Translate the firmware link-rate code into a bitrate
		 * (0x08/0x09/0x0a -> 1.5/3/6 Gbps). */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			/* Unknown rate: report the speed as not valid. */
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* These are not supported by the hardware; report success
		 * so upper layers can proceed. */
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		mpssas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1071
1072static void
1073mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1074    target_id_t target_id, lun_id_t lun_id)
1075{
1076	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1077	struct cam_path *path;
1078
1079	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1080	    ac_code, target_id, (uintmax_t)lun_id);
1081
1082	if (xpt_create_path(&path, NULL,
1083		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1084		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1085			   "notification\n");
1086		return;
1087	}
1088
1089	xpt_async(ac_code, path, NULL);
1090	xpt_free_path(path);
1091}
1092
/*
 * Force-complete every command the driver believes is outstanding, with
 * a NULL reply, as part of a diag reset.  Called with the softc mutex
 * held.  POLLED commands are flagged COMPLETE so their pollers stop
 * spinning; commands with a completion callback are completed; sleepers
 * are woken.  Anything that matched none of those is logged.
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;		/* did we complete/wake this command? */

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	/* NOTE(review): index 0 is skipped — presumably SMID 0 is reserved;
	 * confirm against the command-array setup. */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		if (cm->cm_state == MPS_CM_STATE_FREE)
			continue;

		cm->cm_state = MPS_CM_STATE_BUSY;
		cm->cm_reply = NULL;
		completed = 0;

		/* Free the data buffer of a SATA ID command that timed out. */
		if (cm->cm_flags & MPS_CM_FLAGS_SATA_ID_TIMEOUT) {
			MPASS(cm->cm_data);
			free(cm->cm_data, M_MPT2);
			cm->cm_data = NULL;
		}

		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
		} else if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm, MPS_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}

	/* Everything outstanding has been dealt with one way or another. */
	sc->io_cmds_active = 0;
}
1148
1149void
1150mpssas_handle_reinit(struct mps_softc *sc)
1151{
1152	int i;
1153
1154	/* Go back into startup mode and freeze the simq, so that CAM
1155	 * doesn't send any commands until after we've rediscovered all
1156	 * targets and found the proper device handles for them.
1157	 *
1158	 * After the reset, portenable will trigger discovery, and after all
1159	 * discovery-related activities have finished, the simq will be
1160	 * released.
1161	 */
1162	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
1163	sc->sassc->flags |= MPSSAS_IN_STARTUP;
1164	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
1165	mpssas_startup_increment(sc->sassc);
1166
1167	/* notify CAM of a bus reset */
1168	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1169	    CAM_LUN_WILDCARD);
1170
1171	/* complete and cleanup after all outstanding commands */
1172	mpssas_complete_all_commands(sc);
1173
1174	mps_dprint(sc, MPS_INIT,
1175	    "%s startup %u after command completion\n", __func__,
1176	    sc->sassc->startup_refcount);
1177
1178	/* zero all the target handles, since they may change after the
1179	 * reset, and we have to rediscover all the targets and use the new
1180	 * handles.
1181	 */
1182	for (i = 0; i < sc->sassc->maxtargets; i++) {
1183		if (sc->sassc->targets[i].outstanding != 0)
1184			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
1185			    i, sc->sassc->targets[i].outstanding);
1186		sc->sassc->targets[i].handle = 0x0;
1187		sc->sassc->targets[i].exp_dev_handle = 0x0;
1188		sc->sassc->targets[i].outstanding = 0;
1189		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
1190	}
1191}
1192
1193static void
1194mpssas_tm_timeout(void *data)
1195{
1196	struct mps_command *tm = data;
1197	struct mps_softc *sc = tm->cm_sc;
1198
1199	mtx_assert(&sc->mps_mtx, MA_OWNED);
1200
1201	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1202	    "task mgmt %p timed out\n", tm);
1203
1204	KASSERT(tm->cm_state == MPS_CM_STATE_INQUEUE,
1205	    ("command not inqueue\n"));
1206
1207	tm->cm_state = MPS_CM_STATE_BUSY;
1208	mps_reinit(sc);
1209}
1210
/*
 * Completion handler for a logical unit reset task-management command.
 * If no commands remain outstanding on the LUN, recovery for it is
 * done: announce the reset to CAM and either continue with the next
 * timed-out command on the target or free the TM.  If commands do
 * remain, the LUN reset effectively failed and recovery escalates to a
 * target reset.  A NULL reply during a diag reset is cleaned up
 * quietly; a NULL reply otherwise forces a controller reinit.
 */
static void
mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	unsigned int cm_count = 0;
	struct mps_command *cm;
	struct mpssas_target *targ;

	/* The TM completed; its escalation timeout is no longer needed. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 * XXXSL So should it be an assertion?
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_RECOVERY|MPS_ERROR,
		    "%s: cm_flags = %#x for LUN reset! "
		   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mps_dprint(sc, MPS_RECOVERY, "NULL reset reply for tm %p\n",
		    tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL LUN reset reply\n");
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
			    "LUN reset attempt, resetting controller\n");
			mps_reinit(sc);
		}
		return;
	}

	mps_dprint(sc, MPS_RECOVERY,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/*
	 * See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Finished recovery after LUN reset for target %u\n",
		    targ->tid);

		mpssas_announce_reset(sc, AC_SENT_BDR, targ->tid, tm->cm_lun);

		/*
		 * We've finished recovery for this logical unit.  check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
			    "More commands to abort for target %u\n",
			    targ->tid);
			/* Reuse this TM to abort the next timed-out command. */
			mpssas_send_abort(sc, tm, cm);
		} else {
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
	} else {
		/*
		 * If we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "logical unit reset complete for target %u, but still "
		    "have %u command(s), sending target reset\n", targ->tid,
		    cm_count);
		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
	}
}
1310
1311static void
1312mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1313{
1314	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1315	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1316	struct mpssas_target *targ;
1317
1318	callout_stop(&tm->cm_callout);
1319
1320	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1321	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1322	targ = tm->cm_targ;
1323
1324	/*
1325	 * Currently there should be no way we can hit this case.  It only
1326	 * happens when we have a failure to allocate chain frames, and
1327	 * task management commands don't have S/G lists.
1328	 */
1329	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1330		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1331			   "This should not happen!\n", __func__, tm->cm_flags);
1332		mpssas_free_tm(sc, tm);
1333		return;
1334	}
1335
1336	if (reply == NULL) {
1337		mps_dprint(sc, MPS_RECOVERY,
1338		    "NULL target reset reply for tm %pi TaskMID %u\n",
1339		    tm, le16toh(req->TaskMID));
1340		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1341			/* this completion was due to a reset, just cleanup */
1342			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1343			    "reset, ignoring NULL target reset reply\n");
1344			targ->tm = NULL;
1345			mpssas_free_tm(sc, tm);
1346		} else {
1347			/* we should have gotten a reply. */
1348			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1349			    "target reset attempt, resetting controller\n");
1350			mps_reinit(sc);
1351		}
1352		return;
1353	}
1354
1355	mps_dprint(sc, MPS_RECOVERY,
1356	    "target reset status 0x%x code 0x%x count %u\n",
1357	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1358	    le32toh(reply->TerminationCount));
1359
1360	if (targ->outstanding == 0) {
1361		/* we've finished recovery for this target and all
1362		 * of its logical units.
1363		 */
1364		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1365		    "Finished reset recovery for target %u\n", targ->tid);
1366
1367		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1368		    CAM_LUN_WILDCARD);
1369
1370		targ->tm = NULL;
1371		mpssas_free_tm(sc, tm);
1372	} else {
1373		/*
1374		 * After a target reset, if this target still has
1375		 * outstanding commands, the reset effectively failed,
1376		 * regardless of the status reported.  escalate.
1377		 */
1378		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1379		    "Target reset complete for target %u, but still have %u "
1380		    "command(s), resetting controller\n", targ->tid,
1381		    targ->outstanding);
1382		mps_reinit(sc);
1383	}
1384}
1385
1386#define MPS_RESET_TIMEOUT 30
1387
1388int
1389mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1390{
1391	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1392	struct mpssas_target *target;
1393	int err;
1394
1395	target = tm->cm_targ;
1396	if (target->handle == 0) {
1397		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1398		    __func__, target->tid);
1399		return -1;
1400	}
1401
1402	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1403	req->DevHandle = htole16(target->handle);
1404	req->TaskType = type;
1405
1406	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1407		/* XXX Need to handle invalid LUNs */
1408		MPS_SET_LUN(req->LUN, tm->cm_lun);
1409		tm->cm_targ->logical_unit_resets++;
1410		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1411		    "Sending logical unit reset to target %u lun %d\n",
1412		    target->tid, tm->cm_lun);
1413		tm->cm_complete = mpssas_logical_unit_reset_complete;
1414		mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1415	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1416		/*
1417		 * Target reset method =
1418		 * 	SAS Hard Link Reset / SATA Link Reset
1419		 */
1420		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1421		tm->cm_targ->target_resets++;
1422		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1423		    "Sending target reset to target %u\n", target->tid);
1424		tm->cm_complete = mpssas_target_reset_complete;
1425		mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1426	} else {
1427		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
1428		return -1;
1429	}
1430
1431	tm->cm_data = NULL;
1432	tm->cm_complete_data = (void *)tm;
1433
1434	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1435	    mpssas_tm_timeout, tm);
1436
1437	err = mps_map_command(sc, tm);
1438	if (err)
1439		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1440		    "error %d sending reset type %u\n",
1441		    err, type);
1442
1443	return err;
1444}
1445
1446
/*
 * Completion handler for an ABORT_TASK task-management command.  Walks
 * the target's timed-out queue: if it is empty, recovery for the
 * target is done and the TM is freed; if the head of the queue is a
 * different command than the one just aborted, the TM is reused to
 * abort it; if the aborted command is still at the head, the abort
 * failed and recovery escalates to a logical unit reset.  A NULL reply
 * during a diag reset is cleaned up quietly; otherwise it forces a
 * controller reinit.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed; its escalation timeout is no longer needed. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mps_dprint(sc, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mps_reinit(sc);
		}
		return;
	}

	mps_dprint(sc, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * If there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);

		mpssas_send_abort(sc, tm, cm);
	} else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mps_dprint(sc, MPS_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1528
1529#define MPS_ABORT_TIMEOUT 5
1530
1531static int
1532mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1533{
1534	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1535	struct mpssas_target *targ;
1536	int err;
1537
1538	targ = cm->cm_targ;
1539	if (targ->handle == 0) {
1540		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1541		    "%s null devhandle for target_id %d\n",
1542		    __func__, cm->cm_ccb->ccb_h.target_id);
1543		return -1;
1544	}
1545
1546	mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
1547	    "Aborting command %p\n", cm);
1548
1549	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1550	req->DevHandle = htole16(targ->handle);
1551	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1552
1553	/* XXX Need to handle invalid LUNs */
1554	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1555
1556	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1557
1558	tm->cm_data = NULL;
1559	tm->cm_complete = mpssas_abort_complete;
1560	tm->cm_complete_data = (void *)tm;
1561	tm->cm_targ = cm->cm_targ;
1562	tm->cm_lun = cm->cm_lun;
1563
1564	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1565	    mpssas_tm_timeout, tm);
1566
1567	targ->aborts++;
1568
1569	mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1570
1571	err = mps_map_command(sc, tm);
1572	if (err)
1573		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1574		    "error %d sending abort for cm %p SMID %u\n",
1575		    err, cm, req->TaskMID);
1576	return err;
1577}
1578
1579static void
1580mpssas_scsiio_timeout(void *data)
1581{
1582	sbintime_t elapsed, now;
1583	union ccb *ccb;
1584	struct mps_softc *sc;
1585	struct mps_command *cm;
1586	struct mpssas_target *targ;
1587
1588	cm = (struct mps_command *)data;
1589	sc = cm->cm_sc;
1590	ccb = cm->cm_ccb;
1591	now = sbinuptime();
1592
1593	MPS_FUNCTRACE(sc);
1594	mtx_assert(&sc->mps_mtx, MA_OWNED);
1595
1596	mps_dprint(sc, MPS_XINFO|MPS_RECOVERY, "Timeout checking cm %p\n", sc);
1597
1598	/*
1599	 * Run the interrupt handler to make sure it's not pending.  This
1600	 * isn't perfect because the command could have already completed
1601	 * and been re-used, though this is unlikely.
1602	 */
1603	mps_intr_locked(sc);
1604	if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
1605		mpssas_log_command(cm, MPS_XINFO,
1606		    "SCSI command %p almost timed out\n", cm);
1607		return;
1608	}
1609
1610	if (cm->cm_ccb == NULL) {
1611		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1612		return;
1613	}
1614
1615	targ = cm->cm_targ;
1616	targ->timeouts++;
1617
1618	elapsed = now - ccb->ccb_h.qos.sim_data;
1619	mpssas_log_command(cm, MPS_INFO|MPS_RECOVERY,
1620	    "Command timeout on target %u(0x%04x) %d set, %d.%d elapsed\n",
1621	    targ->tid, targ->handle, ccb->ccb_h.timeout,
1622	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
1623
1624	/* XXX first, check the firmware state, to see if it's still
1625	 * operational.  if not, do a diag reset.
1626	 */
1627	mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1628	cm->cm_flags |= MPS_CM_FLAGS_ON_RECOVERY | MPS_CM_FLAGS_TIMEDOUT;
1629	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1630
1631	if (targ->tm != NULL) {
1632		/* target already in recovery, just queue up another
1633		 * timedout command to be processed later.
1634		 */
1635		mps_dprint(sc, MPS_RECOVERY,
1636		    "queued timedout cm %p for processing by tm %p\n",
1637		    cm, targ->tm);
1638	} else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1639		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1640		    "Sending abort to target %u for SMID %d\n", targ->tid,
1641		    cm->cm_desc.Default.SMID);
1642		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1643		    cm, targ->tm);
1644
1645		/* start recovery by aborting the first timedout command */
1646		mpssas_send_abort(sc, targ->tm, cm);
1647	} else {
1648		/* XXX queue this target up for recovery once a TM becomes
1649		 * available.  The firmware only has a limited number of
1650		 * HighPriority credits for the high priority requests used
1651		 * for task management, and we ran out.
1652		 *
1653		 * Isilon: don't worry about this for now, since we have
1654		 * more credits than disks in an enclosure, and limit
1655		 * ourselves to one TM per target for recovery.
1656		 */
1657		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1658		    "timedout cm %p failed to allocate a tm\n", cm);
1659	}
1660
1661}
1662
/*
 * Handle an XPT_SCSI_IO CCB: validate the target, allocate a driver
 * command, translate the CCB into an MPI2 SCSI_IO request (including
 * optional EEDP/DIF setup and WD direct-drive translation), arm the
 * per-command timeout, and hand the request to the hardware.  Error
 * paths complete the CCB immediately with an appropriate status.
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* No device handle means the firmware knows no such device. */
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID component members may not receive direct SCSI I/O. */
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed. In either event,
	 * we're removing the target due to a firmware event telling us
	 * the device is now gone (as opposed to some transient event). Since
	 * we're opting to remove failed devices from the OS's view, we need
	 * to propagate that status up the stack.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPSSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/* Out of commands (or mid diag-reset): freeze the simq and ask CAM
	 * to requeue this CCB later. */
	cm = mps_alloc_command(sc);
	if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mps_free_command(sc, cm);
		}
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Fill in the MPI2 SCSI I/O request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* 32-byte CDBs carry 4 extra dwords beyond the base 16. */
	if (csio->cdb_len == 32)
                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= (csio->priority << MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT) &
	    MPI2_SCSIIO_CONTROL_TASKPRI_MASK;
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);
	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	/* Copy the CDB in, from either a pointer or the inline bytes. */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	/* NOTE(review): IoFlags was already set to the same value above —
	 * this assignment looks redundant; confirm before removing. */
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		/* DMA mapping pulls the data pointers out of the CCB. */
		cm->cm_data = ccb;
		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	/* NOTE(review): the test below calls mpssas_direct_drive_io when the
	 * sim_priv field EQUALS MPS_WD_RETRY, which reads as the opposite of
	 * the comment above — verify the intended sense of MPS_WD_RETRY. */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
		}
	}

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (csio->bio != NULL)
		biotrack(csio->bio, __func__);
#endif
	/* Record submit time for the timeout handler's elapsed report. */
	csio->ccb_h.qos.sim_data = sbinuptime();
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mpssas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1942
1943/**
1944 * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1945 */
1946static void
1947mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1948    Mpi2SCSIIOReply_t *mpi_reply)
1949{
1950	u32 response_info;
1951	u8 *response_bytes;
1952	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1953	    MPI2_IOCSTATUS_MASK;
1954	u8 scsi_state = mpi_reply->SCSIState;
1955	u8 scsi_status = mpi_reply->SCSIStatus;
1956	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1957	const char *desc_ioc_state, *desc_scsi_status;
1958
1959	if (log_info == 0x31170000)
1960		return;
1961
1962	desc_ioc_state = mps_describe_table(mps_iocstatus_string,
1963	    ioc_status);
1964	desc_scsi_status = mps_describe_table(mps_scsi_status_string,
1965	    scsi_status);
1966
1967	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
1968	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
1969
1970	/*
1971	 *We can add more detail about underflow data here
1972	 * TO-DO
1973	 */
1974	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
1975	    "scsi_state %b\n", desc_scsi_status, scsi_status,
1976	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
1977	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
1978
1979	if (sc->mps_debug & MPS_XINFO &&
1980		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1981		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
1982		scsi_sense_print(csio);
1983		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
1984	}
1985
1986	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
1987		response_info = le32toh(mpi_reply->ResponseInfo);
1988		response_bytes = (u8 *)&response_info;
1989		mps_dprint(sc, MPS_XINFO, "response code(0x%1x): %s\n",
1990		    response_bytes[0],
1991		    mps_describe_table(mps_scsi_taskmgmt_string,
1992		    response_bytes[0]));
1993	}
1994}
1995
/*
 * Completion handler for SCSI I/O commands (installed as cm_complete when the
 * command is queued).  Cancels the timeout callout, syncs and unloads the data
 * DMA map, updates per-target accounting, translates the MPI reply (or its
 * absence) into a CAM status, manages the SIMQ freeze/release protocol used
 * for chain-frame exhaustion, and finally frees the command and completes the
 * CCB via xpt_done().  Called with the controller mutex held (asserted below).
 */
static void
mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mpssas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on;
	int dir = 0, i;
	u16 alloc_len;
	struct mpssas_target *target;
	target_id_t target_id;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	/* The command is done one way or another; its timeout no longer
	 * applies. */
	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	target_id = csio->ccb_h.target_id;
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* Per-target accounting; the command leaves the active queue here. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (ccb->csio.bio != NULL)
		biotrack(ccb->csio.bio, __func__);
#endif

	/*
	 * Log recovery-related completions: a command that had timed out, a
	 * command completing while a task management request is in flight for
	 * its target, or a completion flushed by a diag reset.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		KASSERT(cm->cm_state == MPS_CM_STATE_BUSY,
		    ("Not busy for CM_FLAGS_TIMEDOUT: %d\n", cm->cm_state));
		cm->cm_flags &= ~MPS_CM_FLAGS_ON_RECOVERY;
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
			    rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
			    rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
		mpssas_log_command(cm, MPS_RECOVERY,
		    "reset completed cm %p ccb %p\n",
		    cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);

		/*
		 * Currently the only error included in the mask is
		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
			mps_dprint(sc, MPS_XINFO, "Error sending command, "
				   "freezing SIM queue\n");
		}
	}

	/*
	 * If this is a Start Stop Unit command and it was issued by the driver
	 * during shutdown, decrement the refcount to account for all of the
	 * commands that were sent.  All SSU commands should be completed before
	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
	 * is TRUE.
	 */
	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
		mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
		sc->SSU_refcount--;
	}

	/*
	 * Take the fast path to completion: no reply frame means the firmware
	 * completed the command without error (or we never reached hardware;
	 * see the ERROR_MASK handling above).
	 */
	if (cm->cm_reply == NULL) {
		if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
				mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
			else {
				mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
				ccb->csio.scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
				mps_dprint(sc, MPS_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
		 */
		if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mps_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mpssas_log_command(cm, MPS_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/*
	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
	 * Volume if an error occurred (normal I/O retry).  Use the original
	 * CCB, but set a flag that this will be a retry so that it's sent to
	 * the original volume.  Free the command but reuse the CCB.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
		mps_free_command(sc, cm);
		ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
		mpssas_action_scsiio(sassc, ccb);
		return;
	} else
		ccb->ccb_h.sim_priv.entries[0].field = 0;

	/* Translate the firmware's IOCStatus into a CAM status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) &
		    MPI2_SCSI_RI_MASK_REASONCODE) ==
		    MPS_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		/* Copy autosense data into the CCB, clamped to both the
		 * firmware's reported count and the CCB's sense buffer size. */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
					returned_sense_len;
			else
				ccb->csio.sense_resid = 0;

			sense_len = min(returned_sense_len,
			    ccb->csio.sense_len - ccb->csio.sense_resid);
			bzero(&ccb->csio.sense_data,
			      sizeof(ccb->csio.sense_data));
			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
		    (sc->control_TLR) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			/* CDB bytes 3-4 are the INQUIRY allocation length. */
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			alloc_len -= csio->resid;
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}

		/*
		 * If this is a SATA direct-access end device, mark it so that
		 * a SCSI StartStopUnit command will be sent to it when the
		 * driver is being shutdown.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
		    ((sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
			target = &sassc->targets[target_id];
			target->supports_SSU = TRUE;
			mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
			    target_id);
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP)
;
		else
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mps_print_scsiio_cmd(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_flags & MPS_CM_FLAGS_TIMEDOUT)
			mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
		else
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * These can sometimes be transient transport-related
		 * errors, and sometimes persistent drive-related errors.
		 * We used to retry these without decrementing the retry
		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
		 * we hit a persistent drive problem that returns one of
		 * these error codes, we would retry indefinitely.  So,
		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
		 * count and avoid infinite retries.  We're taking the
		 * potential risk of flagging false failures in the event
		 * of a topology-related error (e.g. a SAS expander problem
		 * causes a command addressed to a drive to fail), but
		 * avoiding getting into an infinite retry loop. However,
		 * if we get them while were moving a device, we should
		 * fail the request as 'not there' because the device
		 * is effectively gone.
		 */
		if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL)
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		else
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		mps_dprint(sc, MPS_INFO,
		    "Controller reported %s tgt %u SMID %u loginfo %x%s\n",
		    mps_describe_table(mps_iocstatus_string,
		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
		    target_id, cm->cm_desc.Default.SMID,
		    le32toh(rep->IOCLogInfo),
		    (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) ? " departing" : "");
		mps_dprint(sc, MPS_XINFO,
		    "SCSIStatus %x SCSIState %x xfercount %u\n",
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mpssas_log_command(cm, MPS_XINFO,
		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		break;
	}

	/* Debug logging only; does not alter the CCB or driver state. */
	mps_sc_failed_io_info(sc,csio,rep);

	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
		mps_dprint(sc, MPS_XINFO, "Command completed, "
		    "unfreezing SIM queue\n");
	}

	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	/*
	 * Check to see if we're removing the device. If so, and this is the
	 * last command on the queue, proceed with the deferred removal of the
	 * device.  Note, for removing a volume, this won't trigger because
	 * pending_remove_tm will be NULL.
	 */
	if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL &&
		    cm->cm_targ->pending_remove_tm != NULL) {
			mps_dprint(sc, MPS_INFO, "Last pending command complete: starting remove_device\n");
			mps_map_command(sc, cm->cm_targ->pending_remove_tm);
			cm->cm_targ->pending_remove_tm = NULL;
		}
	}

	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2433
2434/* All Request reached here are Endian safe */
2435static void
2436mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2437    union ccb *ccb) {
2438	pMpi2SCSIIORequest_t	pIO_req;
2439	struct mps_softc	*sc = sassc->sc;
2440	uint64_t		virtLBA;
2441	uint32_t		physLBA, stripe_offset, stripe_unit;
2442	uint32_t		io_size, column;
2443	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2444
2445	/*
2446	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2447	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2448	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2449	 * bit different than the 10/16 CDBs, handle them separately.
2450	 */
2451	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2452	CDB = pIO_req->CDB.CDB32;
2453
2454	/*
2455	 * Handle 6 byte CDBs.
2456	 */
2457	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2458	    (CDB[0] == WRITE_6))) {
2459		/*
2460		 * Get the transfer size in blocks.
2461		 */
2462		io_size = (cm->cm_length >> sc->DD_block_exponent);
2463
2464		/*
2465		 * Get virtual LBA given in the CDB.
2466		 */
2467		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2468		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2469
2470		/*
2471		 * Check that LBA range for I/O does not exceed volume's
2472		 * MaxLBA.
2473		 */
2474		if ((virtLBA + (uint64_t)io_size - 1) <=
2475		    sc->DD_max_lba) {
2476			/*
2477			 * Check if the I/O crosses a stripe boundary.  If not,
2478			 * translate the virtual LBA to a physical LBA and set
2479			 * the DevHandle for the PhysDisk to be used.  If it
2480			 * does cross a boundary, do normal I/O.  To get the
2481			 * right DevHandle to use, get the map number for the
2482			 * column, then use that map number to look up the
2483			 * DevHandle of the PhysDisk.
2484			 */
2485			stripe_offset = (uint32_t)virtLBA &
2486			    (sc->DD_stripe_size - 1);
2487			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2488				physLBA = (uint32_t)virtLBA >>
2489				    sc->DD_stripe_exponent;
2490				stripe_unit = physLBA / sc->DD_num_phys_disks;
2491				column = physLBA % sc->DD_num_phys_disks;
2492				pIO_req->DevHandle =
2493				    htole16(sc->DD_column_map[column].dev_handle);
2494				/* ???? Is this endian safe*/
2495				cm->cm_desc.SCSIIO.DevHandle =
2496				    pIO_req->DevHandle;
2497
2498				physLBA = (stripe_unit <<
2499				    sc->DD_stripe_exponent) + stripe_offset;
2500				ptrLBA = &pIO_req->CDB.CDB32[1];
2501				physLBA_byte = (uint8_t)(physLBA >> 16);
2502				*ptrLBA = physLBA_byte;
2503				ptrLBA = &pIO_req->CDB.CDB32[2];
2504				physLBA_byte = (uint8_t)(physLBA >> 8);
2505				*ptrLBA = physLBA_byte;
2506				ptrLBA = &pIO_req->CDB.CDB32[3];
2507				physLBA_byte = (uint8_t)physLBA;
2508				*ptrLBA = physLBA_byte;
2509
2510				/*
2511				 * Set flag that Direct Drive I/O is
2512				 * being done.
2513				 */
2514				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2515			}
2516		}
2517		return;
2518	}
2519
2520	/*
2521	 * Handle 10, 12 or 16 byte CDBs.
2522	 */
2523	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2524	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2525	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2526	    (CDB[0] == WRITE_12))) {
2527		/*
2528		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2529		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2530		 * the else section.  10-byte and 12-byte CDB's are OK.
2531		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2532		 * ready to accept 12byte CDB for Direct IOs.
2533		 */
2534		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2535		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2536		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2537			/*
2538			 * Get the transfer size in blocks.
2539			 */
2540			io_size = (cm->cm_length >> sc->DD_block_exponent);
2541
2542			/*
2543			 * Get virtual LBA.  Point to correct lower 4 bytes of
2544			 * LBA in the CDB depending on command.
2545			 */
2546			lba_idx = ((CDB[0] == READ_12) ||
2547				(CDB[0] == WRITE_12) ||
2548				(CDB[0] == READ_10) ||
2549				(CDB[0] == WRITE_10))? 2 : 6;
2550			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2551			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2552			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2553			    (uint64_t)CDB[lba_idx + 3];
2554
2555			/*
2556			 * Check that LBA range for I/O does not exceed volume's
2557			 * MaxLBA.
2558			 */
2559			if ((virtLBA + (uint64_t)io_size - 1) <=
2560			    sc->DD_max_lba) {
2561				/*
2562				 * Check if the I/O crosses a stripe boundary.
2563				 * If not, translate the virtual LBA to a
2564				 * physical LBA and set the DevHandle for the
2565				 * PhysDisk to be used.  If it does cross a
2566				 * boundary, do normal I/O.  To get the right
2567				 * DevHandle to use, get the map number for the
2568				 * column, then use that map number to look up
2569				 * the DevHandle of the PhysDisk.
2570				 */
2571				stripe_offset = (uint32_t)virtLBA &
2572				    (sc->DD_stripe_size - 1);
2573				if ((stripe_offset + io_size) <=
2574				    sc->DD_stripe_size) {
2575					physLBA = (uint32_t)virtLBA >>
2576					    sc->DD_stripe_exponent;
2577					stripe_unit = physLBA /
2578					    sc->DD_num_phys_disks;
2579					column = physLBA %
2580					    sc->DD_num_phys_disks;
2581					pIO_req->DevHandle =
2582					    htole16(sc->DD_column_map[column].
2583					    dev_handle);
2584					cm->cm_desc.SCSIIO.DevHandle =
2585					    pIO_req->DevHandle;
2586
2587					physLBA = (stripe_unit <<
2588					    sc->DD_stripe_exponent) +
2589					    stripe_offset;
2590					ptrLBA =
2591					    &pIO_req->CDB.CDB32[lba_idx];
2592					physLBA_byte = (uint8_t)(physLBA >> 24);
2593					*ptrLBA = physLBA_byte;
2594					ptrLBA =
2595					    &pIO_req->CDB.CDB32[lba_idx + 1];
2596					physLBA_byte = (uint8_t)(physLBA >> 16);
2597					*ptrLBA = physLBA_byte;
2598					ptrLBA =
2599					    &pIO_req->CDB.CDB32[lba_idx + 2];
2600					physLBA_byte = (uint8_t)(physLBA >> 8);
2601					*ptrLBA = physLBA_byte;
2602					ptrLBA =
2603					    &pIO_req->CDB.CDB32[lba_idx + 3];
2604					physLBA_byte = (uint8_t)physLBA;
2605					*ptrLBA = physLBA_byte;
2606
2607					/*
2608					 * Set flag that Direct Drive I/O is
2609					 * being done.
2610					 */
2611					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2612				}
2613			}
2614		} else {
2615			/*
2616			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2617			 * 0.  Get the transfer size in blocks.
2618			 */
2619			io_size = (cm->cm_length >> sc->DD_block_exponent);
2620
2621			/*
2622			 * Get virtual LBA.
2623			 */
2624			virtLBA = ((uint64_t)CDB[2] << 54) |
2625			    ((uint64_t)CDB[3] << 48) |
2626			    ((uint64_t)CDB[4] << 40) |
2627			    ((uint64_t)CDB[5] << 32) |
2628			    ((uint64_t)CDB[6] << 24) |
2629			    ((uint64_t)CDB[7] << 16) |
2630			    ((uint64_t)CDB[8] << 8) |
2631			    (uint64_t)CDB[9];
2632
2633			/*
2634			 * Check that LBA range for I/O does not exceed volume's
2635			 * MaxLBA.
2636			 */
2637			if ((virtLBA + (uint64_t)io_size - 1) <=
2638			    sc->DD_max_lba) {
2639				/*
2640				 * Check if the I/O crosses a stripe boundary.
2641				 * If not, translate the virtual LBA to a
2642				 * physical LBA and set the DevHandle for the
2643				 * PhysDisk to be used.  If it does cross a
2644				 * boundary, do normal I/O.  To get the right
2645				 * DevHandle to use, get the map number for the
2646				 * column, then use that map number to look up
2647				 * the DevHandle of the PhysDisk.
2648				 */
2649				stripe_offset = (uint32_t)virtLBA &
2650				    (sc->DD_stripe_size - 1);
2651				if ((stripe_offset + io_size) <=
2652				    sc->DD_stripe_size) {
2653					physLBA = (uint32_t)(virtLBA >>
2654					    sc->DD_stripe_exponent);
2655					stripe_unit = physLBA /
2656					    sc->DD_num_phys_disks;
2657					column = physLBA %
2658					    sc->DD_num_phys_disks;
2659					pIO_req->DevHandle =
2660					    htole16(sc->DD_column_map[column].
2661					    dev_handle);
2662					cm->cm_desc.SCSIIO.DevHandle =
2663					    pIO_req->DevHandle;
2664
2665					physLBA = (stripe_unit <<
2666					    sc->DD_stripe_exponent) +
2667					    stripe_offset;
2668
2669					/*
2670					 * Set upper 4 bytes of LBA to 0.  We
2671					 * assume that the phys disks are less
2672					 * than 2 TB's in size.  Then, set the
2673					 * lower 4 bytes.
2674					 */
2675					pIO_req->CDB.CDB32[2] = 0;
2676					pIO_req->CDB.CDB32[3] = 0;
2677					pIO_req->CDB.CDB32[4] = 0;
2678					pIO_req->CDB.CDB32[5] = 0;
2679					ptrLBA = &pIO_req->CDB.CDB32[6];
2680					physLBA_byte = (uint8_t)(physLBA >> 24);
2681					*ptrLBA = physLBA_byte;
2682					ptrLBA = &pIO_req->CDB.CDB32[7];
2683					physLBA_byte = (uint8_t)(physLBA >> 16);
2684					*ptrLBA = physLBA_byte;
2685					ptrLBA = &pIO_req->CDB.CDB32[8];
2686					physLBA_byte = (uint8_t)(physLBA >> 8);
2687					*ptrLBA = physLBA_byte;
2688					ptrLBA = &pIO_req->CDB.CDB32[9];
2689					physLBA_byte = (uint8_t)physLBA;
2690					*ptrLBA = physLBA_byte;
2691
2692					/*
2693					 * Set flag that Direct Drive I/O is
2694					 * being done.
2695					 */
2696					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2697				}
2698			}
2699		}
2700	}
2701}
2702
2703#if __FreeBSD_version >= 900026
2704static void
2705mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2706{
2707	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2708	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2709	uint64_t sasaddr;
2710	union ccb *ccb;
2711
2712	ccb = cm->cm_complete_data;
2713
2714	/*
2715	 * Currently there should be no way we can hit this case.  It only
2716	 * happens when we have a failure to allocate chain frames, and SMP
2717	 * commands require two S/G elements only.  That should be handled
2718	 * in the standard request size.
2719	 */
2720	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2721		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2722			   __func__, cm->cm_flags);
2723		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2724		goto bailout;
2725        }
2726
2727	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2728	if (rpl == NULL) {
2729		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2730		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2731		goto bailout;
2732	}
2733
2734	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2735	sasaddr = le32toh(req->SASAddress.Low);
2736	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2737
2738	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2739	    MPI2_IOCSTATUS_SUCCESS ||
2740	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2741		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2742		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2743		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2744		goto bailout;
2745	}
2746
2747	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2748		   "%#jx completed successfully\n", __func__,
2749		   (uintmax_t)sasaddr);
2750
2751	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2752		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2753	else
2754		mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2755
2756bailout:
2757	/*
2758	 * We sync in both directions because we had DMAs in the S/G list
2759	 * in both directions.
2760	 */
2761	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2762			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2763	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2764	mps_free_command(sc, cm);
2765	xpt_done(ccb);
2766}
2767
2768static void
2769mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2770{
2771	struct mps_command *cm;
2772	uint8_t *request, *response;
2773	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2774	struct mps_softc *sc;
2775	int error;
2776
2777	sc = sassc->sc;
2778	error = 0;
2779
2780	/*
2781	 * XXX We don't yet support physical addresses here.
2782	 */
2783	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
2784	case CAM_DATA_PADDR:
2785	case CAM_DATA_SG_PADDR:
2786		mps_dprint(sc, MPS_ERROR,
2787			   "%s: physical addresses not supported\n", __func__);
2788		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2789		xpt_done(ccb);
2790		return;
2791	case CAM_DATA_SG:
2792		/*
2793		 * The chip does not support more than one buffer for the
2794		 * request or response.
2795		 */
2796	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
2797		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2798			mps_dprint(sc, MPS_ERROR,
2799				   "%s: multiple request or response "
2800				   "buffer segments not supported for SMP\n",
2801				   __func__);
2802			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2803			xpt_done(ccb);
2804			return;
2805		}
2806
2807		/*
2808		 * The CAM_SCATTER_VALID flag was originally implemented
2809		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2810		 * We have two.  So, just take that flag to mean that we
2811		 * might have S/G lists, and look at the S/G segment count
2812		 * to figure out whether that is the case for each individual
2813		 * buffer.
2814		 */
2815		if (ccb->smpio.smp_request_sglist_cnt != 0) {
2816			bus_dma_segment_t *req_sg;
2817
2818			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2819			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2820		} else
2821			request = ccb->smpio.smp_request;
2822
2823		if (ccb->smpio.smp_response_sglist_cnt != 0) {
2824			bus_dma_segment_t *rsp_sg;
2825
2826			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2827			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2828		} else
2829			response = ccb->smpio.smp_response;
2830		break;
2831	case CAM_DATA_VADDR:
2832		request = ccb->smpio.smp_request;
2833		response = ccb->smpio.smp_response;
2834		break;
2835	default:
2836		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2837		xpt_done(ccb);
2838		return;
2839	}
2840
2841	cm = mps_alloc_command(sc);
2842	if (cm == NULL) {
2843		mps_dprint(sc, MPS_ERROR,
2844		    "%s: cannot allocate command\n", __func__);
2845		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2846		xpt_done(ccb);
2847		return;
2848	}
2849
2850	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2851	bzero(req, sizeof(*req));
2852	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2853
2854	/* Allow the chip to use any route to this SAS address. */
2855	req->PhysicalPort = 0xff;
2856
2857	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2858	req->SGLFlags =
2859	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2860
2861	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
2862	    "address %#jx\n", __func__, (uintmax_t)sasaddr);
2863
2864	mpi_init_sge(cm, req, &req->SGL);
2865
2866	/*
2867	 * Set up a uio to pass into mps_map_command().  This allows us to
2868	 * do one map command, and one busdma call in there.
2869	 */
2870	cm->cm_uio.uio_iov = cm->cm_iovec;
2871	cm->cm_uio.uio_iovcnt = 2;
2872	cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2873
2874	/*
2875	 * The read/write flag isn't used by busdma, but set it just in
2876	 * case.  This isn't exactly accurate, either, since we're going in
2877	 * both directions.
2878	 */
2879	cm->cm_uio.uio_rw = UIO_WRITE;
2880
2881	cm->cm_iovec[0].iov_base = request;
2882	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2883	cm->cm_iovec[1].iov_base = response;
2884	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2885
2886	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2887			       cm->cm_iovec[1].iov_len;
2888
2889	/*
2890	 * Trigger a warning message in mps_data_cb() for the user if we
2891	 * wind up exceeding two S/G segments.  The chip expects one
2892	 * segment for the request and another for the response.
2893	 */
2894	cm->cm_max_segs = 2;
2895
2896	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2897	cm->cm_complete = mpssas_smpio_complete;
2898	cm->cm_complete_data = ccb;
2899
2900	/*
2901	 * Tell the mapping code that we're using a uio, and that this is
2902	 * an SMP passthrough request.  There is a little special-case
2903	 * logic there (in mps_data_cb()) to handle the bidirectional
2904	 * transfer.
2905	 */
2906	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2907			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2908
2909	/* The chip data format is little endian. */
2910	req->SASAddress.High = htole32(sasaddr >> 32);
2911	req->SASAddress.Low = htole32(sasaddr);
2912
2913	/*
2914	 * XXX Note that we don't have a timeout/abort mechanism here.
2915	 * From the manual, it looks like task management requests only
2916	 * work for SCSI IO and SATA passthrough requests.  We may need to
2917	 * have a mechanism to retry requests in the event of a chip reset
2918	 * at least.  Hopefully the chip will insure that any errors short
2919	 * of that are relayed back to the driver.
2920	 */
2921	error = mps_map_command(sc, cm);
2922	if ((error != 0) && (error != EINPROGRESS)) {
2923		mps_dprint(sc, MPS_ERROR,
2924			   "%s: error %d returned from mps_map_command()\n",
2925			   __func__, error);
2926		goto bailout_error;
2927	}
2928
2929	return;
2930
2931bailout_error:
2932	mps_free_command(sc, cm);
2933	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2934	xpt_done(ccb);
2935	return;
2936
2937}
2938
/*
 * Handle an XPT_SMP_IO CCB: determine which SAS address the SMP request
 * should be sent to and hand the CCB to mpssas_send_smpcmd().  If the
 * target itself is not an SMP target, the request is routed to the SAS
 * address of the target's parent, which is normally the expander the
 * device is attached to.
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		/* A zero device handle means the slot is unused. */
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.  Otherwise we have to find the expander the device
	 * is attached to and figure out what that expander's SAS
	 * address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		/* Old probe style: look the parent up in the target array. */
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/* New probe style: parent info is cached in the target. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3074#endif //__FreeBSD_version >= 900026
3075
/*
 * Handle an XPT_RESET_DEV CCB by issuing a SCSI task management
 * "target reset" for the device's handle.  The request completes
 * asynchronously in mpssas_resetdev_complete(), which also finishes
 * the CCB.
 */
static void
mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *tm;
	struct mpssas_target *targ;

	MPS_FUNCTRACE(sassc->sc);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_RESET_DEV\n",
	     ccb->ccb_h.target_id));
	sc = sassc->sc;
	tm = mpssas_alloc_tm(sc);
	if (tm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "command alloc failure in mpssas_action_resetdev\n");
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	/* The chip expects the handle in little-endian byte order. */
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	/* Task management requests carry no data; completion gets the CCB. */
	tm->cm_data = NULL;
	tm->cm_complete = mpssas_resetdev_complete;
	tm->cm_complete_data = ccb;
	tm->cm_targ = targ;

	/* Block further I/O to the target until the reset completes. */
	mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
	mps_map_command(sc, tm);
}
3116
3117static void
3118mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3119{
3120	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3121	union ccb *ccb;
3122
3123	MPS_FUNCTRACE(sc);
3124	mtx_assert(&sc->mps_mtx, MA_OWNED);
3125
3126	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3127	ccb = tm->cm_complete_data;
3128
3129	/*
3130	 * Currently there should be no way we can hit this case.  It only
3131	 * happens when we have a failure to allocate chain frames, and
3132	 * task management commands don't have S/G lists.
3133	 */
3134	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3135		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3136
3137		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3138
3139		mps_dprint(sc, MPS_ERROR,
3140			   "%s: cm_flags = %#x for reset of handle %#04x! "
3141			   "This should not happen!\n", __func__, tm->cm_flags,
3142			   req->DevHandle);
3143		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3144		goto bailout;
3145	}
3146
3147	mps_dprint(sc, MPS_XINFO,
3148	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3149	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3150
3151	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3152		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
3153		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3154		    CAM_LUN_WILDCARD);
3155	}
3156	else
3157		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3158
3159bailout:
3160
3161	mpssas_free_tm(sc, tm);
3162	xpt_done(ccb);
3163}
3164
3165static void
3166mpssas_poll(struct cam_sim *sim)
3167{
3168	struct mpssas_softc *sassc;
3169
3170	sassc = cam_sim_softc(sim);
3171
3172	if (sassc->sc->mps_debug & MPS_TRACE) {
3173		/* frequent debug messages during a panic just slow
3174		 * everything down too much.
3175		 */
3176		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3177		sassc->sc->mps_debug &= ~MPS_TRACE;
3178	}
3179
3180	mps_intr_locked(sassc->sc);
3181}
3182
/*
 * CAM asynchronous event callback.  Depending on the FreeBSD version
 * this reacts either to advanced-info (long read capacity) changes or
 * to newly found devices; in both cases the goal is to cache per-LUN
 * EEDP (protection information) state for later I/O setup.
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the LUN entry for this path, if one exists already. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* First time we see this LUN: allocate a tracking entry. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read capacity data via an
		 * XPT_DEV_ADVINFO CCB issued in place on the stack.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/*
		 * Protection is enabled: record the protection type and
		 * the logical block length for EEDP-capable types 1 and 3.
		 */
		if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			switch (rcap_buf.prot & SRC16_P_TYPE) {
			case SRC16_PTYPE_1:
			case SRC16_PTYPE_3:
				lun->eedp_formatted = TRUE;
				lun->eedp_block_size =
				    scsi_4btoul(rcap_buf.length);
				break;
			case SRC16_PTYPE_2:
			default:
				lun->eedp_formatted = FALSE;
				lun->eedp_block_size = 0;
				break;
			}
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#else
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		/* Older CAM: probe EEDP state with our own READ CAP(16). */
		cgd = arg;
		mpssas_check_eedp(sc, path, cgd);
		break;
	}
#endif
	default:
		break;
	}
}
3298
3299#if (__FreeBSD_version < 901503) || \
3300    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3301static void
3302mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3303		  struct ccb_getdev *cgd)
3304{
3305	struct mpssas_softc *sassc = sc->sassc;
3306	struct ccb_scsiio *csio;
3307	struct scsi_read_capacity_16 *scsi_cmd;
3308	struct scsi_read_capacity_eedp *rcap_buf;
3309	path_id_t pathid;
3310	target_id_t targetid;
3311	lun_id_t lunid;
3312	union ccb *ccb;
3313	struct cam_path *local_path;
3314	struct mpssas_target *target;
3315	struct mpssas_lun *lun;
3316	uint8_t	found_lun;
3317	char path_str[64];
3318
3319	sassc = sc->sassc;
3320	pathid = cam_sim_path(sassc->sim);
3321	targetid = xpt_path_target_id(path);
3322	lunid = xpt_path_lun_id(path);
3323
3324	KASSERT(targetid < sassc->maxtargets,
3325	    ("Target %d out of bounds in mpssas_check_eedp\n",
3326	     targetid));
3327	target = &sassc->targets[targetid];
3328	if (target->handle == 0x0)
3329		return;
3330
3331	/*
3332	 * Determine if the device is EEDP capable.
3333	 *
3334	 * If this flag is set in the inquiry data,
3335	 * the device supports protection information,
3336	 * and must support the 16 byte read
3337	 * capacity command, otherwise continue without
3338	 * sending read cap 16
3339	 */
3340	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3341		return;
3342
3343	/*
3344	 * Issue a READ CAPACITY 16 command.  This info
3345	 * is used to determine if the LUN is formatted
3346	 * for EEDP support.
3347	 */
3348	ccb = xpt_alloc_ccb_nowait();
3349	if (ccb == NULL) {
3350		mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3351		    "for EEDP support.\n");
3352		return;
3353	}
3354
3355	if (xpt_create_path(&local_path, xpt_periph,
3356	    pathid, targetid, lunid) != CAM_REQ_CMP) {
3357		mps_dprint(sc, MPS_ERROR, "Unable to create "
3358		    "path for EEDP support\n");
3359		xpt_free_ccb(ccb);
3360		return;
3361	}
3362
3363	/*
3364	 * If LUN is already in list, don't create a new
3365	 * one.
3366	 */
3367	found_lun = FALSE;
3368	SLIST_FOREACH(lun, &target->luns, lun_link) {
3369		if (lun->lun_id == lunid) {
3370			found_lun = TRUE;
3371			break;
3372		}
3373	}
3374	if (!found_lun) {
3375		lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3376		    M_NOWAIT | M_ZERO);
3377		if (lun == NULL) {
3378			mps_dprint(sc, MPS_ERROR,
3379			    "Unable to alloc LUN for EEDP support.\n");
3380			xpt_free_path(local_path);
3381			xpt_free_ccb(ccb);
3382			return;
3383		}
3384		lun->lun_id = lunid;
3385		SLIST_INSERT_HEAD(&target->luns, lun,
3386		    lun_link);
3387	}
3388
3389	xpt_path_string(local_path, path_str, sizeof(path_str));
3390
3391	mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3392	    path_str, target->handle);
3393
3394	/*
3395	 * Issue a READ CAPACITY 16 command for the LUN.
3396	 * The mpssas_read_cap_done function will load
3397	 * the read cap info into the LUN struct.
3398	 */
3399	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3400	    M_MPT2, M_NOWAIT | M_ZERO);
3401	if (rcap_buf == NULL) {
3402		mps_dprint(sc, MPS_FAULT,
3403		    "Unable to alloc read capacity buffer for EEDP support.\n");
3404		xpt_free_path(ccb->ccb_h.path);
3405		xpt_free_ccb(ccb);
3406		return;
3407	}
3408	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3409	csio = &ccb->csio;
3410	csio->ccb_h.func_code = XPT_SCSI_IO;
3411	csio->ccb_h.flags = CAM_DIR_IN;
3412	csio->ccb_h.retry_count = 4;
3413	csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3414	csio->ccb_h.timeout = 60000;
3415	csio->data_ptr = (uint8_t *)rcap_buf;
3416	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3417	csio->sense_len = MPS_SENSE_LEN;
3418	csio->cdb_len = sizeof(*scsi_cmd);
3419	csio->tag_action = MSG_SIMPLE_Q_TAG;
3420
3421	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3422	bzero(scsi_cmd, sizeof(*scsi_cmd));
3423	scsi_cmd->opcode = 0x9E;
3424	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3425	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3426
3427	ccb->ccb_h.ppriv_ptr1 = sassc;
3428	xpt_action(ccb);
3429}
3430
/*
 * Completion callback for the internal READ CAPACITY(16) issued by
 * mpssas_check_eedp().  Records the LUN's EEDP formatting state and
 * block size, then frees the data buffer, path and CCB allocated for
 * the command.
 */
static void
mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver must release the devq itself when the SCSI command
	 * was generated internally by the driver.  Currently there is a
	 * single place where the driver issues a SCSI command internally.
	 * If more are added in the future, they will need to release the
	 * devq the same way, since those commands will not go back to a
	 * cam_periph.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in mpssas_read_cap_done\n",
	     done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in
		 * with EEDP info.  If the READ CAP 16 command had some
		 * SCSI error (common if command is not supported), mark
		 * the lun as not supporting EEDP and set the block size
		 * to 0.
		 */
		if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP)
		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* PROT_EN bit: the LUN was formatted with protection info. */
		/* NOTE(review): %d with target_lun may truncate on builds
		 * where lun_id_t is 64-bit — confirm against this branch. */
		if (rcap_buf->protect & 0x01) {
			mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
 			    "target ID %d is formatted for EEDP "
 			    "support.\n", done_ccb->ccb_h.target_lun,
 			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	/* Finished with this CCB and path. */
	free(rcap_buf, M_MPT2);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3501#endif /* (__FreeBSD_version < 901503) || \
3502          ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3503
3504/*
3505 * Set the INRESET flag for this target so that no I/O will be sent to
3506 * the target until the reset has completed.  If an I/O request does
3507 * happen, the devq will be frozen.  The CCB holds the path which is
3508 * used to release the devq.  The devq is released and the CCB is freed
3509 * when the TM completes.
3510 */
3511void
3512mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
3513    struct mpssas_target *target, lun_id_t lun_id)
3514{
3515	union ccb *ccb;
3516	path_id_t path_id;
3517
3518	ccb = xpt_alloc_ccb_nowait();
3519	if (ccb) {
3520		path_id = cam_sim_path(sc->sassc->sim);
3521		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3522		    target->tid, lun_id) != CAM_REQ_CMP) {
3523			xpt_free_ccb(ccb);
3524		} else {
3525			tm->cm_ccb = ccb;
3526			tm->cm_targ = target;
3527			target->flags |= MPSSAS_TARGET_INRESET;
3528		}
3529	}
3530}
3531
3532int
3533mpssas_startup(struct mps_softc *sc)
3534{
3535
3536	/*
3537	 * Send the port enable message and set the wait_for_port_enable flag.
3538	 * This flag helps to keep the simq frozen until all discovery events
3539	 * are processed.
3540	 */
3541	sc->wait_for_port_enable = 1;
3542	mpssas_send_portenable(sc);
3543	return (0);
3544}
3545
3546static int
3547mpssas_send_portenable(struct mps_softc *sc)
3548{
3549	MPI2_PORT_ENABLE_REQUEST *request;
3550	struct mps_command *cm;
3551
3552	MPS_FUNCTRACE(sc);
3553
3554	if ((cm = mps_alloc_command(sc)) == NULL)
3555		return (EBUSY);
3556	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3557	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3558	request->MsgFlags = 0;
3559	request->VP_ID = 0;
3560	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3561	cm->cm_complete = mpssas_portenable_complete;
3562	cm->cm_data = NULL;
3563	cm->cm_sge = NULL;
3564
3565	mps_map_command(sc, cm);
3566	mps_dprint(sc, MPS_XINFO,
3567	    "mps_send_portenable finished cm %p req %p complete %p\n",
3568	    cm, cm->cm_req, cm->cm_complete);
3569	return (0);
3570}
3571
3572static void
3573mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3574{
3575	MPI2_PORT_ENABLE_REPLY *reply;
3576	struct mpssas_softc *sassc;
3577
3578	MPS_FUNCTRACE(sc);
3579	sassc = sc->sassc;
3580
3581	/*
3582	 * Currently there should be no way we can hit this case.  It only
3583	 * happens when we have a failure to allocate chain frames, and
3584	 * port enable commands don't have S/G lists.
3585	 */
3586	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3587		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3588			   "This should not happen!\n", __func__, cm->cm_flags);
3589	}
3590
3591	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3592	if (reply == NULL)
3593		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3594	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3595	    MPI2_IOCSTATUS_SUCCESS)
3596		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3597
3598	mps_free_command(sc, cm);
3599
3600	/*
3601	 * Get WarpDrive info after discovery is complete but before the scan
3602	 * starts.  At this point, all devices are ready to be exposed to the
3603	 * OS.  If devices should be hidden instead, take them out of the
3604	 * 'targets' array before the scan.  The devinfo for a disk will have
3605	 * some info and a volume's will be 0.  Use that to remove disks.
3606	 */
3607	mps_wd_config_pages(sc);
3608
3609	/*
3610	 * Done waiting for port enable to complete.  Decrement the refcount.
3611	 * If refcount is 0, discovery is complete and a rescan of the bus can
3612	 * take place.  Since the simq was explicitly frozen before port
3613	 * enable, it must be explicitly released here to keep the
3614	 * freeze/release count in sync.
3615	 */
3616	sc->wait_for_port_enable = 0;
3617	sc->port_enable_complete = 1;
3618	wakeup(&sc->port_enable_complete);
3619	mpssas_startup_decrement(sassc);
3620}
3621
3622int
3623mpssas_check_id(struct mpssas_softc *sassc, int id)
3624{
3625	struct mps_softc *sc = sassc->sc;
3626	char *ids;
3627	char *name;
3628
3629	ids = &sc->exclude_ids[0];
3630	while((name = strsep(&ids, ",")) != NULL) {
3631		if (name[0] == '\0')
3632			continue;
3633		if (strtol(name, NULL, 0) == (long)id)
3634			return (1);
3635	}
3636
3637	return (0);
3638}
3639
3640void
3641mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3642{
3643	struct mpssas_softc *sassc;
3644	struct mpssas_lun *lun, *lun_tmp;
3645	struct mpssas_target *targ;
3646	int i;
3647
3648	sassc = sc->sassc;
3649	/*
3650	 * The number of targets is based on IOC Facts, so free all of
3651	 * the allocated LUNs for each target and then the target buffer
3652	 * itself.
3653	 */
3654	for (i=0; i< maxtargets; i++) {
3655		targ = &sassc->targets[i];
3656		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3657			free(lun, M_MPT2);
3658		}
3659	}
3660	free(sassc->targets, M_MPT2);
3661
3662	sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3663	    M_MPT2, M_WAITOK|M_ZERO);
3664}
3665