1/*-
2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2015 Avago Technologies
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
29 *
30 * $FreeBSD: releng/10.2/sys/dev/mps/mps_sas.c 281564 2015-04-15 21:47:15Z slm $
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: releng/10.2/sys/dev/mps/mps_sas.c 281564 2015-04-15 21:47:15Z slm $");
35
36/* Communications core for Avago Technologies (LSI) MPT2 */
37
38/* TODO Move headers to mpsvar */
39#include <sys/types.h>
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/kernel.h>
43#include <sys/selinfo.h>
44#include <sys/module.h>
45#include <sys/bus.h>
46#include <sys/conf.h>
47#include <sys/bio.h>
48#include <sys/malloc.h>
49#include <sys/uio.h>
50#include <sys/sysctl.h>
51#include <sys/endian.h>
52#include <sys/queue.h>
53#include <sys/kthread.h>
54#include <sys/taskqueue.h>
55#include <sys/sbuf.h>
56
57#include <machine/bus.h>
58#include <machine/resource.h>
59#include <sys/rman.h>
60
61#include <machine/stdarg.h>
62
63#include <cam/cam.h>
64#include <cam/cam_ccb.h>
65#include <cam/cam_xpt.h>
66#include <cam/cam_debug.h>
67#include <cam/cam_sim.h>
68#include <cam/cam_xpt_sim.h>
69#include <cam/cam_xpt_periph.h>
70#include <cam/cam_periph.h>
71#include <cam/scsi/scsi_all.h>
72#include <cam/scsi/scsi_message.h>
73#if __FreeBSD_version >= 900026
74#include <cam/scsi/smp_all.h>
75#endif
76
77#include <dev/mps/mpi/mpi2_type.h>
78#include <dev/mps/mpi/mpi2.h>
79#include <dev/mps/mpi/mpi2_ioc.h>
80#include <dev/mps/mpi/mpi2_sas.h>
81#include <dev/mps/mpi/mpi2_cnfg.h>
82#include <dev/mps/mpi/mpi2_init.h>
83#include <dev/mps/mpi/mpi2_tool.h>
84#include <dev/mps/mps_ioctl.h>
85#include <dev/mps/mpsvar.h>
86#include <dev/mps/mps_table.h>
87#include <dev/mps/mps_sas.h>
88
89#define MPSSAS_DISCOVERY_TIMEOUT	20
90#define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
91
92/*
93 * static array to check SCSI OpCode for EEDP protection bits
94 */
95#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
96#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
97#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Table indexed by SCSI CDB opcode.  Non-zero entries select the EEDP
 * operation for that opcode: PRO_R (check/remove) for reads, PRO_W
 * (insert) for writes, PRO_V (insert) for verify-class opcodes.  All
 * other opcodes receive no EEDP handling.
 */
static uint8_t op_code_prot[256] = {
	/* 0x00 - 0x0F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x10 - 0x1F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x20 - 0x2F: READ(10), WRITE(10), WRITE&VERIFY(10), VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x30 - 0x3F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x40 - 0x4F: 0x41 = WRITE SAME(10) */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x50 - 0x5F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x60 - 0x6F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x70 - 0x7F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x80 - 0x8F: READ(16), WRITE(16), WRITE&VERIFY(16), VERIFY(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x90 - 0x9F: 0x93 = WRITE SAME(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA0 - 0xAF: READ(12), WRITE(12), WRITE&VERIFY(12), VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0xB0 - 0xBF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xC0 - 0xCF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xD0 - 0xDF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xE0 - 0xEF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xF0 - 0xFF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
116
117MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
118
119static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122static void mpssas_poll(struct cam_sim *sim);
123static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
124    struct mps_command *cm);
125static void mpssas_scsiio_timeout(void *data);
126static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
127static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
128    struct mps_command *cm, union ccb *ccb);
129static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
130static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
131static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
132#if __FreeBSD_version >= 900026
133static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
134static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
135			       uint64_t sasaddr);
136static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
137#endif //FreeBSD_version >= 900026
138static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
139static void mpssas_async(void *callback_arg, uint32_t code,
140			 struct cam_path *path, void *arg);
141#if (__FreeBSD_version < 901503) || \
142    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
143static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
144			      struct ccb_getdev *cgd);
145static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
146#endif
147static int mpssas_send_portenable(struct mps_softc *sc);
148static void mpssas_portenable_complete(struct mps_softc *sc,
149    struct mps_command *cm);
150
151struct mpssas_target *
152mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
153{
154	struct mpssas_target *target;
155	int i;
156
157	for (i = start; i < sassc->maxtargets; i++) {
158		target = &sassc->targets[i];
159		if (target->handle == handle)
160			return (target);
161	}
162
163	return (NULL);
164}
165
166/* we need to freeze the simq during attach and diag reset, to avoid failing
167 * commands before device handles have been found by discovery.  Since
168 * discovery involves reading config pages and possibly sending commands,
169 * discovery actions may continue even after we receive the end of discovery
170 * event, so refcount discovery actions instead of assuming we can unfreeze
171 * the simq when we get the event.
172 */
173void
174mpssas_startup_increment(struct mpssas_softc *sassc)
175{
176	MPS_FUNCTRACE(sassc->sc);
177
178	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
179		if (sassc->startup_refcount++ == 0) {
180			/* just starting, freeze the simq */
181			mps_dprint(sassc->sc, MPS_INIT,
182			    "%s freezing simq\n", __func__);
183#if __FreeBSD_version >= 1000039
184			xpt_hold_boot();
185#endif
186			xpt_freeze_simq(sassc->sim, 1);
187		}
188		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
189		    sassc->startup_refcount);
190	}
191}
192
193void
194mpssas_release_simq_reinit(struct mpssas_softc *sassc)
195{
196	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
197		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
198		xpt_release_simq(sassc->sim, 1);
199		mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
200	}
201}
202
203void
204mpssas_startup_decrement(struct mpssas_softc *sassc)
205{
206	MPS_FUNCTRACE(sassc->sc);
207
208	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
209		if (--sassc->startup_refcount == 0) {
210			/* finished all discovery-related actions, release
211			 * the simq and rescan for the latest topology.
212			 */
213			mps_dprint(sassc->sc, MPS_INIT,
214			    "%s releasing simq\n", __func__);
215			sassc->flags &= ~MPSSAS_IN_STARTUP;
216			xpt_release_simq(sassc->sim, 1);
217#if __FreeBSD_version >= 1000039
218			xpt_release_boot();
219#else
220			mpssas_rescan_target(sassc->sc, NULL);
221#endif
222		}
223		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
224		    sassc->startup_refcount);
225	}
226}
227
228/* The firmware requires us to stop sending commands when we're doing task
229 * management, so refcount the TMs and keep the simq frozen when any are in
230 * use.
231 */
/*
 * Allocate a command for task management.  TMs always come from the
 * high-priority pool; returns NULL when that pool is exhausted.
 */
struct mps_command *
mpssas_alloc_tm(struct mps_softc *sc)
{
	return (mps_alloc_high_priority_command(sc));
}
240
241void
242mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
243{
244	if (tm == NULL)
245		return;
246
247	/*
248	 * For TM's the devq is frozen for the device.  Unfreeze it here and
249	 * free the resources used for freezing the devq.  Must clear the
250	 * INRESET flag as well or scsi I/O will not work.
251	 */
252	if (tm->cm_targ != NULL) {
253		tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
254	}
255	if (tm->cm_ccb) {
256		mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n",
257		    tm->cm_targ->tid);
258		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
259		xpt_free_path(tm->cm_ccb->ccb_h.path);
260		xpt_free_ccb(tm->cm_ccb);
261	}
262
263	mps_free_high_priority_command(sc, tm);
264}
265
266void
267mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
268{
269	struct mpssas_softc *sassc = sc->sassc;
270	path_id_t pathid;
271	target_id_t targetid;
272	union ccb *ccb;
273
274	MPS_FUNCTRACE(sc);
275	pathid = cam_sim_path(sassc->sim);
276	if (targ == NULL)
277		targetid = CAM_TARGET_WILDCARD;
278	else
279		targetid = targ - sassc->targets;
280
281	/*
282	 * Allocate a CCB and schedule a rescan.
283	 */
284	ccb = xpt_alloc_ccb_nowait();
285	if (ccb == NULL) {
286		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
287		return;
288	}
289
290	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
291	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
292		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
293		xpt_free_ccb(ccb);
294		return;
295	}
296
297	if (targetid == CAM_TARGET_WILDCARD)
298		ccb->ccb_h.func_code = XPT_SCAN_BUS;
299	else
300		ccb->ccb_h.func_code = XPT_SCAN_TGT;
301
302	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
303	xpt_rescan(ccb);
304}
305
/*
 * Emit a printf-style debug message for a command, prefixed with the
 * command's CAM path (or a "noperiph" sim/bus/target/lun tuple when no
 * CCB is attached) and its SMID.  Nothing is printed unless 'level' is
 * enabled in the adapter's mps_debug mask.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mps_debug & level) == 0)
		return;

	/* Fixed-length sbuf backed by the on-stack buffer. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		/* Prefix with the CCB's path; for SCSI I/O also log the
		 * CDB and transfer length. */
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB: identify the command by sim unit/bus/target/lun. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mps_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));

	va_end(ap);
}
351
352
353static void
354mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
355{
356	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
357	struct mpssas_target *targ;
358	uint16_t handle;
359
360	MPS_FUNCTRACE(sc);
361
362	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
363	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
364	targ = tm->cm_targ;
365
366	if (reply == NULL) {
367		/* XXX retry the remove after the diag reset completes? */
368		mps_dprint(sc, MPS_FAULT,
369		    "%s NULL reply resetting device 0x%04x\n", __func__,
370		    handle);
371		mpssas_free_tm(sc, tm);
372		return;
373	}
374
375	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
376		mps_dprint(sc, MPS_FAULT,
377		   "IOCStatus = 0x%x while resetting device 0x%x\n",
378		   reply->IOCStatus, handle);
379		mpssas_free_tm(sc, tm);
380		return;
381	}
382
383	mps_dprint(sc, MPS_XINFO,
384	    "Reset aborted %u commands\n", reply->TerminationCount);
385	mps_free_reply(sc, tm->cm_reply_data);
386	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
387
388	mps_dprint(sc, MPS_XINFO,
389	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
390
391	/*
392	 * Don't clear target if remove fails because things will get confusing.
393	 * Leave the devname and sasaddr intact so that we know to avoid reusing
394	 * this target id if possible, and so we can assign the same target id
395	 * to this device if it comes back in the future.
396	 */
397	if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
398		targ = tm->cm_targ;
399		targ->handle = 0x0;
400		targ->encl_handle = 0x0;
401		targ->encl_slot = 0x0;
402		targ->exp_dev_handle = 0x0;
403		targ->phy_num = 0x0;
404		targ->linkrate = 0x0;
405		targ->devinfo = 0x0;
406		targ->flags = 0x0;
407	}
408
409	mpssas_free_tm(sc, tm);
410}
411
412
413/*
414 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
415 * Otherwise Volume Delete is same as Bare Drive Removal.
416 */
417void
418mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
419{
420	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
421	struct mps_softc *sc;
422	struct mps_command *cm;
423	struct mpssas_target *targ = NULL;
424
425	MPS_FUNCTRACE(sassc->sc);
426	sc = sassc->sc;
427
428#ifdef WD_SUPPORT
429	/*
430	 * If this is a WD controller, determine if the disk should be exposed
431	 * to the OS or not.  If disk should be exposed, return from this
432	 * function without doing anything.
433	 */
434	if (sc->WD_available && (sc->WD_hide_expose ==
435	    MPS_WD_EXPOSE_ALWAYS)) {
436		return;
437	}
438#endif //WD_SUPPORT
439
440	targ = mpssas_find_target_by_handle(sassc, 0, handle);
441	if (targ == NULL) {
442		/* FIXME: what is the action? */
443		/* We don't know about this device? */
444		mps_dprint(sc, MPS_ERROR,
445		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
446		return;
447	}
448
449	targ->flags |= MPSSAS_TARGET_INREMOVAL;
450
451	cm = mpssas_alloc_tm(sc);
452	if (cm == NULL) {
453		mps_dprint(sc, MPS_ERROR,
454		    "%s: command alloc failure\n", __func__);
455		return;
456	}
457
458	mpssas_rescan_target(sc, targ);
459
460	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
461	req->DevHandle = targ->handle;
462	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
463	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
464
465	/* SAS Hard Link Reset / SATA Link Reset */
466	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
467
468	cm->cm_targ = targ;
469	cm->cm_data = NULL;
470	cm->cm_desc.HighPriority.RequestFlags =
471	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
472	cm->cm_complete = mpssas_remove_volume;
473	cm->cm_complete_data = (void *)(uintptr_t)handle;
474
475	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
476	    __func__, targ->tid);
477	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
478
479	mps_map_command(sc, cm);
480}
481
482/*
483 * The MPT2 firmware performs debounce on the link to avoid transient link
484 * errors and false removals.  When it does decide that link has been lost
485 * and a device need to go away, it expects that the host will perform a
486 * target reset and then an op remove.  The reset has the side-effect of
487 * aborting any outstanding requests for the device, which is required for
488 * the op-remove to succeed.  It's not clear if the host should check for
489 * the device coming back alive after the reset.
490 */
491void
492mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
493{
494	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
495	struct mps_softc *sc;
496	struct mps_command *cm;
497	struct mpssas_target *targ = NULL;
498
499	MPS_FUNCTRACE(sassc->sc);
500
501	sc = sassc->sc;
502
503	targ = mpssas_find_target_by_handle(sassc, 0, handle);
504	if (targ == NULL) {
505		/* FIXME: what is the action? */
506		/* We don't know about this device? */
507		mps_dprint(sc, MPS_ERROR,
508		    "%s : invalid handle 0x%x \n", __func__, handle);
509		return;
510	}
511
512	targ->flags |= MPSSAS_TARGET_INREMOVAL;
513
514	cm = mpssas_alloc_tm(sc);
515	if (cm == NULL) {
516		mps_dprint(sc, MPS_ERROR,
517		    "%s: command alloc failure\n", __func__);
518		return;
519	}
520
521	mpssas_rescan_target(sc, targ);
522
523	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
524	memset(req, 0, sizeof(*req));
525	req->DevHandle = htole16(targ->handle);
526	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
527	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
528
529	/* SAS Hard Link Reset / SATA Link Reset */
530	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
531
532	cm->cm_targ = targ;
533	cm->cm_data = NULL;
534	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
535	cm->cm_complete = mpssas_remove_device;
536	cm->cm_complete_data = (void *)(uintptr_t)handle;
537
538	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
539	    __func__, targ->tid);
540	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
541
542	mps_map_command(sc, cm);
543}
544
/*
 * Completion handler for the target-reset TM sent by
 * mpssas_prepare_remove().  If the reset succeeded, the same command is
 * re-used to send the SAS_OP_REMOVE_DEVICE IO-unit control request
 * (completing in mpssas_remove_complete()), and any commands still
 * queued on the target are completed back to CAM as CAM_DEV_NOT_THERE.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply reseting device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	/* Reply fields are little-endian; convert before comparing. */
	if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_FAULT,
		   "IOCStatus = 0x%x while resetting device 0x%x\n",
		   le16toh(reply->IOCStatus), handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	/* Free the reply now so re-using the command can't double-free it. */
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
	/*
	 * Complete any commands still queued on the target; the reset
	 * aborted them in the firmware, so finish them back to CAM.
	 * (SAFE variant: mpssas_scsiio_complete removes entries.)
	 */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mpssas_scsiio_complete(sc, tm);
	}
}
619
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE request issued by
 * mpssas_remove_device().  On success, clears the target's identity
 * fields and frees its LUN list; on failure, leaves the target alone so
 * state stays consistent.  Always frees the command.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
			   "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Free the per-LUN records accumulated for this target. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}


	mpssas_free_tm(sc, tm);
}
686
687static int
688mpssas_register_events(struct mps_softc *sc)
689{
690	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
691
692	bzero(events, 16);
693	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
694	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
695	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
696	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
697	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
698	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
699	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
700	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
701	setbit(events, MPI2_EVENT_IR_VOLUME);
702	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
703	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
704	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
705
706	mps_register_events(sc, events, mpssas_evt_handler, NULL,
707	    &sc->sassc->mpssas_eh);
708
709	return (0);
710}
711
/*
 * Attach the CAM/SAS layer for an adapter: allocate the per-SAS softc
 * and target array, create the SIM and its queue, spin up the event
 * taskqueue, register the (single, faked) bus with CAM, freeze the simq
 * until discovery completes, register for async events used for EEDP
 * detection, and finally enable firmware event notifications.
 *
 * Returns 0 on success or an errno; on failure mps_detach_sas() is
 * called to unwind whatever was set up.
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	cam_status status;
	int unit, error = 0;

	MPS_FUNCTRACE(sc);

	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
	/* NOTE(review): M_WAITOK malloc cannot return NULL, so this check
	 * appears to be dead code — confirm against malloc(9). */
	if(!sassc) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		__func__, __LINE__);
		return (ENOMEM);
	}

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets;
	sassc->targets = malloc(sizeof(struct mpssas_target) *
	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
	if(!sassc->targets) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		__func__, __LINE__);
		free(sassc, M_MPT2);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	/* One simq slot per firmware request frame. */
	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	mps_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
		mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
		    error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	/* Takes the first discovery reference and freezes the simq. */
	mpssas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mps_printf(sc, "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

		/* Older CAM lacks AC_ADVINFO_CHANGED; fall back to
		 * AC_FOUND_DEVICE there. */
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED;
#else
		event = AC_FOUND_DEVICE;
#endif
		status = xpt_register_async(event, mpssas_async, sc,
					    sassc->path);
		if (status != CAM_REQ_CMP) {
			mps_dprint(sc, MPS_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mps_printf(sc, "EEDP capabilities disabled.\n");
	}

	mps_unlock(sc);

	mpssas_register_events(sc);
out:
	if (error)
		mps_detach_sas(sc);
	return (error);
}
840
/*
 * Tear down the CAM/SAS layer: deregister firmware events, drain and
 * free the event taskqueue (unlocked, to let in-flight tasks finish),
 * then under the lock deregister the async handler, release the simq if
 * attach bailed during startup, deregister and free the SIM, and
 * finally free the devq, per-target LUN lists, and the softc itself.
 * Safe to call on a partially-attached adapter; always returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Release the simq frozen at attach if discovery never finished. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	sassc->flags |= MPSSAS_SHUTDOWN;
	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free any LUN records still hanging off the targets. */
	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
901
902void
903mpssas_discovery_end(struct mpssas_softc *sassc)
904{
905	struct mps_softc *sc = sassc->sc;
906
907	MPS_FUNCTRACE(sc);
908
909	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
910		callout_stop(&sassc->discovery_callout);
911
912}
913
/*
 * CAM action entry point for the SIM.  Dispatches on the CCB function
 * code: answers path inquiry and transfer-settings queries inline,
 * fakes success for bus reset/abort, and hands SCSI I/O, device reset,
 * and SMP I/O to their dedicated handlers (which complete the CCB
 * themselves — note the bare 'return's for those cases).  Called with
 * the adapter mutex held.
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Describe the (faked, single-bus) HBA to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if __FreeBSD_version >= 1000039
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;
		/* Initiator id is placed at the top of the target range. */
		cpi->initiator_id = sassc->maxtargets - 1;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Avago Tech (LSI)", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
#if __FreeBSD_version >= 800001
		/*
		 * XXX KDM where does this number come from?
		 */
		cpi->maxio = 256 * 1024;
#endif
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report per-target link rate and tagged-queueing caps. */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* Handle 0 means no device currently at this target id. */
		if (targ->handle == 0x0) {
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/* Map the firmware link-rate code to a bitrate in kb/s. */
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		/* Handler completes the CCB asynchronously. */
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		mpssas_action_scsiio(sassc, ccb);
		/* Handler completes the CCB asynchronously. */
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		/* Handler completes the CCB asynchronously. */
		return;
#endif
	default:
		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1038
1039static void
1040mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1041    target_id_t target_id, lun_id_t lun_id)
1042{
1043	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1044	struct cam_path *path;
1045
1046	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1047	    ac_code, target_id, (uintmax_t)lun_id);
1048
1049	if (xpt_create_path(&path, NULL,
1050		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1051		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1052			   "notification\n");
1053		return;
1054	}
1055
1056	xpt_async(ac_code, path, NULL);
1057	xpt_free_path(path);
1058}
1059
/*
 * Force-complete every command slot after a diag reset.  No reply frame
 * will ever arrive for in-flight commands, so each one is completed (or
 * its sleeper woken) with cm_reply = NULL so callers can observe the
 * failure and clean up.  Called with the mps mutex held.
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;		/* set if this cm was handed back to its owner */

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* polled commands only need the COMPLETE flag set */
		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
		}

		/* wake any thread sleeping on this command */
		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		/* drop the active-I/O count for this slot, clamping at zero */
		if (cm->cm_sc->io_cmds_active != 0) {
			cm->cm_sc->io_cmds_active--;
		} else {
			mps_dprint(cm->cm_sc, MPS_INFO, "Warning: "
			    "io_cmds_active is out of sync - resynching to "
			    "0\n");
		}

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm, MPS_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
1113
/*
 * Restore the SAS subsystem to startup state after a controller
 * reinitialization (diag reset): re-enter discovery mode, tell CAM the
 * bus was reset, flush all outstanding commands, and invalidate every
 * target's firmware handle so rediscovery assigns fresh ones.
 */
void
mpssas_handle_reinit(struct mps_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPSSAS_IN_STARTUP;
	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
	mpssas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mpssas_complete_all_commands(sc);

	mps_dprint(sc, MPS_INIT,
	    "%s startup %u after command completion\n", __func__,
	    sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
	}
}
1157
1158static void
1159mpssas_tm_timeout(void *data)
1160{
1161	struct mps_command *tm = data;
1162	struct mps_softc *sc = tm->cm_sc;
1163
1164	mtx_assert(&sc->mps_mtx, MA_OWNED);
1165
1166	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1167	    "task mgmt %p timed out\n", tm);
1168	mps_reinit(sc);
1169}
1170
1171static void
1172mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1173{
1174	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1175	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1176	unsigned int cm_count = 0;
1177	struct mps_command *cm;
1178	struct mpssas_target *targ;
1179
1180	callout_stop(&tm->cm_callout);
1181
1182	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1183	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1184	targ = tm->cm_targ;
1185
1186	/*
1187	 * Currently there should be no way we can hit this case.  It only
1188	 * happens when we have a failure to allocate chain frames, and
1189	 * task management commands don't have S/G lists.
1190	 * XXXSL So should it be an assertion?
1191	 */
1192	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1193		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! "
1194			   "This should not happen!\n", __func__, tm->cm_flags);
1195		mpssas_free_tm(sc, tm);
1196		return;
1197	}
1198
1199	if (reply == NULL) {
1200		mpssas_log_command(tm, MPS_RECOVERY,
1201		    "NULL reset reply for tm %p\n", tm);
1202		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1203			/* this completion was due to a reset, just cleanup */
1204			targ->tm = NULL;
1205			mpssas_free_tm(sc, tm);
1206		}
1207		else {
1208			/* we should have gotten a reply. */
1209			mps_reinit(sc);
1210		}
1211		return;
1212	}
1213
1214	mpssas_log_command(tm, MPS_RECOVERY,
1215	    "logical unit reset status 0x%x code 0x%x count %u\n",
1216	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1217	    le32toh(reply->TerminationCount));
1218
1219	/* See if there are any outstanding commands for this LUN.
1220	 * This could be made more efficient by using a per-LU data
1221	 * structure of some sort.
1222	 */
1223	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1224		if (cm->cm_lun == tm->cm_lun)
1225			cm_count++;
1226	}
1227
1228	if (cm_count == 0) {
1229		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1230		    "logical unit %u finished recovery after reset\n",
1231		    tm->cm_lun, tm);
1232
1233		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1234		    tm->cm_lun);
1235
1236		/* we've finished recovery for this logical unit.  check and
1237		 * see if some other logical unit has a timedout command
1238		 * that needs to be processed.
1239		 */
1240		cm = TAILQ_FIRST(&targ->timedout_commands);
1241		if (cm) {
1242			mpssas_send_abort(sc, tm, cm);
1243		}
1244		else {
1245			targ->tm = NULL;
1246			mpssas_free_tm(sc, tm);
1247		}
1248	}
1249	else {
1250		/* if we still have commands for this LUN, the reset
1251		 * effectively failed, regardless of the status reported.
1252		 * Escalate to a target reset.
1253		 */
1254		mpssas_log_command(tm, MPS_RECOVERY,
1255		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
1256		    tm, cm_count);
1257		mpssas_send_reset(sc, tm,
1258		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1259	}
1260}
1261
/*
 * Completion handler for a TARGET RESET task management request.  If the
 * target has no outstanding commands left, recovery is complete and the
 * TM is freed; otherwise the reset effectively failed and the only
 * remaining escalation is a full controller reinit.
 */
static void
mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
			   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	/* a NULL reply means the command was terminated by a diag reset */
	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL reset reply for tm %p\n", tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "recovery finished after target reset\n");

		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "target reset complete for tm %p, but still have %u command(s)\n",
		    tm, targ->outstanding);
		mps_reinit(sc);
	}
}
1331
1332#define MPS_RESET_TIMEOUT 30
1333
/*
 * Build and send a SCSI task management reset of the requested type on
 * the pre-allocated task-management command 'tm':
 *
 *   MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET - resets tm->cm_lun
 *   MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET       - SAS hard link reset
 *
 * Arms an MPS_RESET_TIMEOUT-second callout that escalates to a full
 * controller reinit if no reply arrives.  Returns the result of
 * mps_map_command(), or -1 if the target has no device handle or the
 * reset type is unknown.
 */
int
mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *target;
	int err;

	target = tm->cm_targ;
	if (target->handle == 0) {
		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
		    __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPS_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "sending logical unit reset\n");
		tm->cm_complete = mpssas_logical_unit_reset_complete;
		mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	}
	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 * 	SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "sending target reset\n");
		tm->cm_complete = mpssas_target_reset_complete;
		mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	/* TM requests carry no data and go out on the high-priority queue */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete_data = (void *)tm;

	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
	    mpssas_tm_timeout, tm);

	err = mps_map_command(sc, tm);
	if (err)
		mpssas_log_command(tm, MPS_RECOVERY,
		    "error %d sending reset type %u\n",
		    err, type);

	return err;
}
1394
1395
/*
 * Completion handler for an ABORT_TASK task management request.  On a
 * normal reply, either finish recovery for the target (no more timed-out
 * commands), abort the next timed-out command, or — if the command named
 * by TaskMID is still at the head of the timed-out queue, meaning the
 * abort did not complete it — escalate to a logical unit reset.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	/* a NULL reply means the command was terminated by a diag reset */
	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mpssas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1477
1478#define MPS_ABORT_TIMEOUT 5
1479
1480static int
1481mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1482{
1483	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1484	struct mpssas_target *targ;
1485	int err;
1486
1487	targ = cm->cm_targ;
1488	if (targ->handle == 0) {
1489		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1490		    __func__, cm->cm_ccb->ccb_h.target_id);
1491		return -1;
1492	}
1493
1494	mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1495	    "Aborting command %p\n", cm);
1496
1497	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1498	req->DevHandle = htole16(targ->handle);
1499	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1500	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1501
1502	/* XXX Need to handle invalid LUNs */
1503	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1504
1505	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1506
1507	tm->cm_data = NULL;
1508	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1509	tm->cm_complete = mpssas_abort_complete;
1510	tm->cm_complete_data = (void *)tm;
1511	tm->cm_targ = cm->cm_targ;
1512	tm->cm_lun = cm->cm_lun;
1513
1514	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1515	    mpssas_tm_timeout, tm);
1516
1517	targ->aborts++;
1518
1519	mps_dprint(sc, MPS_INFO, "Sending reset from %s for target ID %d\n",
1520	    __func__, targ->tid);
1521	mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1522
1523	err = mps_map_command(sc, tm);
1524	if (err)
1525		mpssas_log_command(tm, MPS_RECOVERY,
1526		    "error %d sending abort for cm %p SMID %u\n",
1527		    err, cm, req->TaskMID);
1528	return err;
1529}
1530
1531static void
1532mpssas_scsiio_timeout(void *data)
1533{
1534	struct mps_softc *sc;
1535	struct mps_command *cm;
1536	struct mpssas_target *targ;
1537
1538	cm = (struct mps_command *)data;
1539	sc = cm->cm_sc;
1540
1541	MPS_FUNCTRACE(sc);
1542	mtx_assert(&sc->mps_mtx, MA_OWNED);
1543
1544	mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc);
1545
1546	/*
1547	 * Run the interrupt handler to make sure it's not pending.  This
1548	 * isn't perfect because the command could have already completed
1549	 * and been re-used, though this is unlikely.
1550	 */
1551	mps_intr_locked(sc);
1552	if (cm->cm_state == MPS_CM_STATE_FREE) {
1553		mpssas_log_command(cm, MPS_XINFO,
1554		    "SCSI command %p almost timed out\n", cm);
1555		return;
1556	}
1557
1558	if (cm->cm_ccb == NULL) {
1559		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1560		return;
1561	}
1562
1563	mpssas_log_command(cm, MPS_INFO, "command timeout cm %p ccb %p\n",
1564	    cm, cm->cm_ccb);
1565
1566	targ = cm->cm_targ;
1567	targ->timeouts++;
1568
1569	/* XXX first, check the firmware state, to see if it's still
1570	 * operational.  if not, do a diag reset.
1571	 */
1572	mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1573	cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1574	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1575
1576	if (targ->tm != NULL) {
1577		/* target already in recovery, just queue up another
1578		 * timedout command to be processed later.
1579		 */
1580		mps_dprint(sc, MPS_RECOVERY,
1581		    "queued timedout cm %p for processing by tm %p\n",
1582		    cm, targ->tm);
1583	}
1584	else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1585		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1586		    cm, targ->tm);
1587
1588		/* start recovery by aborting the first timedout command */
1589		mpssas_send_abort(sc, targ->tm, cm);
1590	}
1591	else {
1592		/* XXX queue this target up for recovery once a TM becomes
1593		 * available.  The firmware only has a limited number of
1594		 * HighPriority credits for the high priority requests used
1595		 * for task management, and we ran out.
1596		 *
1597		 * Isilon: don't worry about this for now, since we have
1598		 * more credits than disks in an enclosure, and limit
1599		 * ourselves to one TM per target for recovery.
1600		 */
1601		mps_dprint(sc, MPS_RECOVERY,
1602		    "timedout cm %p failed to allocate a tm\n", cm);
1603	}
1604
1605}
1606
/*
 * Handle an XPT_SCSI_IO CCB: validate the target state, build an MPI2
 * SCSI IO request (including optional EEDP protection-information setup
 * in the CDB), arm the per-command timeout, and hand the request to the
 * hardware.  Completion is reported through mpssas_scsiio_complete().
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* a zero handle means the target has no firmware device handle */
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID component members must not receive direct SCSI I/O */
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPSSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/*
	 * No free command slots, or a diag reset is in flight: freeze the
	 * simq and ask CAM to requeue this CCB later.
	 */
	cm = mps_alloc_command(sc);
	if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mps_free_command(sc, cm);
		}
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* fill in the MPI2 SCSI IO request frame */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	if (csio->cdb_len == 32)
                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);
	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	/* copy the CDB from wherever CAM put it */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* find the LUN object matching this CCB's LUN, if any */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
		}
	}

	/* arm the per-command timeout before issuing */
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mpssas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1875
1876static void
1877mps_response_code(struct mps_softc *sc, u8 response_code)
1878{
1879        char *desc;
1880
1881        switch (response_code) {
1882        case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1883                desc = "task management request completed";
1884                break;
1885        case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1886                desc = "invalid frame";
1887                break;
1888        case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1889                desc = "task management request not supported";
1890                break;
1891        case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1892                desc = "task management request failed";
1893                break;
1894        case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1895                desc = "task management request succeeded";
1896                break;
1897        case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1898                desc = "invalid lun";
1899                break;
1900        case 0xA:
1901                desc = "overlapped tag attempted";
1902                break;
1903        case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1904                desc = "task queued, however not sent to target";
1905                break;
1906        default:
1907                desc = "unknown";
1908                break;
1909        }
1910		mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
1911                response_code, desc);
1912}
1913/**
1914 * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1915 */
1916static void
1917mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1918    Mpi2SCSIIOReply_t *mpi_reply)
1919{
1920	u32 response_info;
1921	u8 *response_bytes;
1922	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1923	    MPI2_IOCSTATUS_MASK;
1924	u8 scsi_state = mpi_reply->SCSIState;
1925	u8 scsi_status = mpi_reply->SCSIStatus;
1926	char *desc_ioc_state = NULL;
1927	char *desc_scsi_status = NULL;
1928	char *desc_scsi_state = sc->tmp_string;
1929	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1930
1931	if (log_info == 0x31170000)
1932		return;
1933
1934	switch (ioc_status) {
1935	case MPI2_IOCSTATUS_SUCCESS:
1936		desc_ioc_state = "success";
1937		break;
1938	case MPI2_IOCSTATUS_INVALID_FUNCTION:
1939		desc_ioc_state = "invalid function";
1940		break;
1941	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1942		desc_ioc_state = "scsi recovered error";
1943		break;
1944	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1945		desc_ioc_state = "scsi invalid dev handle";
1946		break;
1947	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1948		desc_ioc_state = "scsi device not there";
1949		break;
1950	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1951		desc_ioc_state = "scsi data overrun";
1952		break;
1953	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1954		desc_ioc_state = "scsi data underrun";
1955		break;
1956	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1957		desc_ioc_state = "scsi io data error";
1958		break;
1959	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1960		desc_ioc_state = "scsi protocol error";
1961		break;
1962	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1963		desc_ioc_state = "scsi task terminated";
1964		break;
1965	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
1966		desc_ioc_state = "scsi residual mismatch";
1967		break;
1968	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
1969		desc_ioc_state = "scsi task mgmt failed";
1970		break;
1971	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1972		desc_ioc_state = "scsi ioc terminated";
1973		break;
1974	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1975		desc_ioc_state = "scsi ext terminated";
1976		break;
1977	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
1978		desc_ioc_state = "eedp guard error";
1979		break;
1980	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
1981		desc_ioc_state = "eedp ref tag error";
1982		break;
1983	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
1984		desc_ioc_state = "eedp app tag error";
1985		break;
1986	default:
1987		desc_ioc_state = "unknown";
1988		break;
1989	}
1990
1991	switch (scsi_status) {
1992	case MPI2_SCSI_STATUS_GOOD:
1993		desc_scsi_status = "good";
1994		break;
1995	case MPI2_SCSI_STATUS_CHECK_CONDITION:
1996		desc_scsi_status = "check condition";
1997		break;
1998	case MPI2_SCSI_STATUS_CONDITION_MET:
1999		desc_scsi_status = "condition met";
2000		break;
2001	case MPI2_SCSI_STATUS_BUSY:
2002		desc_scsi_status = "busy";
2003		break;
2004	case MPI2_SCSI_STATUS_INTERMEDIATE:
2005		desc_scsi_status = "intermediate";
2006		break;
2007	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2008		desc_scsi_status = "intermediate condmet";
2009		break;
2010	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2011		desc_scsi_status = "reservation conflict";
2012		break;
2013	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2014		desc_scsi_status = "command terminated";
2015		break;
2016	case MPI2_SCSI_STATUS_TASK_SET_FULL:
2017		desc_scsi_status = "task set full";
2018		break;
2019	case MPI2_SCSI_STATUS_ACA_ACTIVE:
2020		desc_scsi_status = "aca active";
2021		break;
2022	case MPI2_SCSI_STATUS_TASK_ABORTED:
2023		desc_scsi_status = "task aborted";
2024		break;
2025	default:
2026		desc_scsi_status = "unknown";
2027		break;
2028	}
2029
2030	desc_scsi_state[0] = '\0';
2031	if (!scsi_state)
2032		desc_scsi_state = " ";
2033	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2034		strcat(desc_scsi_state, "response info ");
2035	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2036		strcat(desc_scsi_state, "state terminated ");
2037	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2038		strcat(desc_scsi_state, "no status ");
2039	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2040		strcat(desc_scsi_state, "autosense failed ");
2041	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2042		strcat(desc_scsi_state, "autosense valid ");
2043
2044	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2045	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2046	/* We can add more detail about underflow data here
2047	 * TO-DO
2048	 * */
2049	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
2050	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2051	    desc_scsi_state, scsi_state);
2052
2053	if (sc->mps_debug & MPS_XINFO &&
2054		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2055		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
2056		scsi_sense_print(csio);
2057		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
2058	}
2059
2060	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2061		response_info = le32toh(mpi_reply->ResponseInfo);
2062		response_bytes = (u8 *)&response_info;
2063		mps_response_code(sc,response_bytes[0]);
2064	}
2065}
2066
2067static void
2068mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
2069{
2070	MPI2_SCSI_IO_REPLY *rep;
2071	union ccb *ccb;
2072	struct ccb_scsiio *csio;
2073	struct mpssas_softc *sassc;
2074	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2075	u8 *TLR_bits, TLR_on;
2076	int dir = 0, i;
2077	u16 alloc_len;
2078	struct mpssas_target *target;
2079	target_id_t target_id;
2080
2081	MPS_FUNCTRACE(sc);
2082	mps_dprint(sc, MPS_TRACE,
2083	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2084	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2085	    cm->cm_targ->outstanding);
2086
2087	callout_stop(&cm->cm_callout);
2088	mtx_assert(&sc->mps_mtx, MA_OWNED);
2089
2090	sassc = sc->sassc;
2091	ccb = cm->cm_complete_data;
2092	csio = &ccb->csio;
2093	target_id = csio->ccb_h.target_id;
2094	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2095	/*
2096	 * XXX KDM if the chain allocation fails, does it matter if we do
2097	 * the sync and unload here?  It is simpler to do it in every case,
2098	 * assuming it doesn't cause problems.
2099	 */
2100	if (cm->cm_data != NULL) {
2101		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
2102			dir = BUS_DMASYNC_POSTREAD;
2103		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
2104			dir = BUS_DMASYNC_POSTWRITE;
2105		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2106		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2107	}
2108
2109	cm->cm_targ->completed++;
2110	cm->cm_targ->outstanding--;
2111	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2112	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2113
2114	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
2115		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2116		if (cm->cm_reply != NULL)
2117			mpssas_log_command(cm, MPS_RECOVERY,
2118			    "completed timedout cm %p ccb %p during recovery "
2119			    "ioc %x scsi %x state %x xfer %u\n",
2120			    cm, cm->cm_ccb,
2121			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2122			    le32toh(rep->TransferCount));
2123		else
2124			mpssas_log_command(cm, MPS_RECOVERY,
2125			    "completed timedout cm %p ccb %p during recovery\n",
2126			    cm, cm->cm_ccb);
2127	} else if (cm->cm_targ->tm != NULL) {
2128		if (cm->cm_reply != NULL)
2129			mpssas_log_command(cm, MPS_RECOVERY,
2130			    "completed cm %p ccb %p during recovery "
2131			    "ioc %x scsi %x state %x xfer %u\n",
2132			    cm, cm->cm_ccb,
2133			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2134			    le32toh(rep->TransferCount));
2135		else
2136			mpssas_log_command(cm, MPS_RECOVERY,
2137			    "completed cm %p ccb %p during recovery\n",
2138			    cm, cm->cm_ccb);
2139	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
2140		mpssas_log_command(cm, MPS_RECOVERY,
2141		    "reset completed cm %p ccb %p\n",
2142		    cm, cm->cm_ccb);
2143	}
2144
2145	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2146		/*
2147		 * We ran into an error after we tried to map the command,
2148		 * so we're getting a callback without queueing the command
2149		 * to the hardware.  So we set the status here, and it will
2150		 * be retained below.  We'll go through the "fast path",
2151		 * because there can be no reply when we haven't actually
2152		 * gone out to the hardware.
2153		 */
2154		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2155
2156		/*
2157		 * Currently the only error included in the mask is
2158		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
2159		 * chain frames.  We need to freeze the queue until we get
2160		 * a command that completed without this error, which will
2161		 * hopefully have some chain frames attached that we can
2162		 * use.  If we wanted to get smarter about it, we would
2163		 * only unfreeze the queue in this condition when we're
2164		 * sure that we're getting some chain frames back.  That's
2165		 * probably unnecessary.
2166		 */
2167		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
2168			xpt_freeze_simq(sassc->sim, 1);
2169			sassc->flags |= MPSSAS_QUEUE_FROZEN;
2170			mps_dprint(sc, MPS_XINFO, "Error sending command, "
2171				   "freezing SIM queue\n");
2172		}
2173	}
2174
2175	/*
2176	 * If this is a Start Stop Unit command and it was issued by the driver
2177	 * during shutdown, decrement the refcount to account for all of the
2178	 * commands that were sent.  All SSU commands should be completed before
2179	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2180	 * is TRUE.
2181	 */
2182	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2183		mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
2184		sc->SSU_refcount--;
2185	}
2186
2187	/* Take the fast path to completion */
2188	if (cm->cm_reply == NULL) {
2189		if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2190			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
2191				mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2192			else {
2193				mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2194				ccb->csio.scsi_status = SCSI_STATUS_OK;
2195			}
2196			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2197				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2198				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2199				mps_dprint(sc, MPS_XINFO,
2200				    "Unfreezing SIM queue\n");
2201			}
2202		}
2203
2204		/*
2205		 * There are two scenarios where the status won't be
2206		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
2207		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
2208		 */
2209		if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2210			/*
2211			 * Freeze the dev queue so that commands are
2212			 * executed in the correct order after error
2213			 * recovery.
2214			 */
2215			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2216			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2217		}
2218		mps_free_command(sc, cm);
2219		xpt_done(ccb);
2220		return;
2221	}
2222
2223	mpssas_log_command(cm, MPS_XINFO,
2224	    "ioc %x scsi %x state %x xfer %u\n",
2225	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2226	    le32toh(rep->TransferCount));
2227
2228	/*
2229	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
2230	 * Volume if an error occurred (normal I/O retry).  Use the original
2231	 * CCB, but set a flag that this will be a retry so that it's sent to
2232	 * the original volume.  Free the command but reuse the CCB.
2233	 */
2234	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
2235		mps_free_command(sc, cm);
2236		ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
2237		mpssas_action_scsiio(sassc, ccb);
2238		return;
2239	} else
2240		ccb->ccb_h.sim_priv.entries[0].field = 0;
2241
2242	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2243	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2244		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2245		/* FALLTHROUGH */
2246	case MPI2_IOCSTATUS_SUCCESS:
2247	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2248
2249		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2250		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2251			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");
2252
2253		/* Completion failed at the transport level. */
2254		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2255		    MPI2_SCSI_STATE_TERMINATED)) {
2256			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2257			break;
2258		}
2259
2260		/* In a modern packetized environment, an autosense failure
2261		 * implies that there's not much else that can be done to
2262		 * recover the command.
2263		 */
2264		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2265			mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2266			break;
2267		}
2268
2269		/*
2270		 * CAM doesn't care about SAS Response Info data, but if this is
2271		 * the state check if TLR should be done.  If not, clear the
2272		 * TLR_bits for the target.
2273		 */
2274		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2275		    ((le32toh(rep->ResponseInfo) &
2276		    MPI2_SCSI_RI_MASK_REASONCODE) ==
2277		    MPS_SCSI_RI_INVALID_FRAME)) {
2278			sc->mapping_table[target_id].TLR_bits =
2279			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2280		}
2281
2282		/*
2283		 * Intentionally override the normal SCSI status reporting
2284		 * for these two cases.  These are likely to happen in a
2285		 * multi-initiator environment, and we want to make sure that
2286		 * CAM retries these commands rather than fail them.
2287		 */
2288		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2289		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2290			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2291			break;
2292		}
2293
2294		/* Handle normal status and sense */
2295		csio->scsi_status = rep->SCSIStatus;
2296		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2297			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2298		else
2299			mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2300
2301		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2302			int sense_len, returned_sense_len;
2303
2304			returned_sense_len = min(le32toh(rep->SenseCount),
2305			    sizeof(struct scsi_sense_data));
2306			if (returned_sense_len < ccb->csio.sense_len)
2307				ccb->csio.sense_resid = ccb->csio.sense_len -
2308					returned_sense_len;
2309			else
2310				ccb->csio.sense_resid = 0;
2311
2312			sense_len = min(returned_sense_len,
2313			    ccb->csio.sense_len - ccb->csio.sense_resid);
2314			bzero(&ccb->csio.sense_data,
2315			      sizeof(ccb->csio.sense_data));
2316			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2317			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2318		}
2319
2320		/*
2321		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2322		 * and it's page code 0 (Supported Page List), and there is
2323		 * inquiry data, and this is for a sequential access device, and
2324		 * the device is an SSP target, and TLR is supported by the
2325		 * controller, turn the TLR_bits value ON if page 0x90 is
2326		 * supported.
2327		 */
2328		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2329		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2330		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2331		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2332		    (csio->data_ptr != NULL) &&
2333		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2334		    (sc->control_TLR) &&
2335		    (sc->mapping_table[target_id].device_info &
2336		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2337			vpd_list = (struct scsi_vpd_supported_page_list *)
2338			    csio->data_ptr;
2339			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2340			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2341			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2342			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2343			    csio->cdb_io.cdb_bytes[4];
2344			alloc_len -= csio->resid;
2345			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2346				if (vpd_list->list[i] == 0x90) {
2347					*TLR_bits = TLR_on;
2348					break;
2349				}
2350			}
2351		}
2352
2353		/*
2354		 * If this is a SATA direct-access end device, mark it so that
2355		 * a SCSI StartStopUnit command will be sent to it when the
2356		 * driver is being shutdown.
2357		 */
2358		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2359		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2360		    (sc->mapping_table[target_id].device_info &
2361		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2362		    ((sc->mapping_table[target_id].device_info &
2363		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2364		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2365			target = &sassc->targets[target_id];
2366			target->supports_SSU = TRUE;
2367			mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
2368			    target_id);
2369		}
2370		break;
2371	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2372	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2373		/*
2374		 * If devinfo is 0 this will be a volume.  In that case don't
2375		 * tell CAM that the volume is not there.  We want volumes to
2376		 * be enumerated until they are deleted/removed, not just
2377		 * failed.
2378		 */
2379		if (cm->cm_targ->devinfo == 0)
2380			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2381		else
2382			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2383		break;
2384	case MPI2_IOCSTATUS_INVALID_SGL:
2385		mps_print_scsiio_cmd(sc, cm);
2386		mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2387		break;
2388	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2389		/*
2390		 * This is one of the responses that comes back when an I/O
2391		 * has been aborted.  If it is because of a timeout that we
2392		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2393		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2394		 * command is the same (it gets retried, subject to the
2395		 * retry counter), the only difference is what gets printed
2396		 * on the console.
2397		 */
2398		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
2399			mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2400		else
2401			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2402		break;
2403	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2404		/* resid is ignored for this condition */
2405		csio->resid = 0;
2406		mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2407		break;
2408	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2409	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2410		/*
2411		 * Since these are generally external (i.e. hopefully
2412		 * transient transport-related) errors, retry these without
2413		 * decrementing the retry count.
2414		 */
2415		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2416		mpssas_log_command(cm, MPS_INFO,
2417		    "terminated ioc %x scsi %x state %x xfer %u\n",
2418		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2419		    le32toh(rep->TransferCount));
2420		break;
2421	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2422	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2423	case MPI2_IOCSTATUS_INVALID_VPID:
2424	case MPI2_IOCSTATUS_INVALID_FIELD:
2425	case MPI2_IOCSTATUS_INVALID_STATE:
2426	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2427	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2428	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2429	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2430	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2431	default:
2432		mpssas_log_command(cm, MPS_XINFO,
2433		    "completed ioc %x scsi %x state %x xfer %u\n",
2434		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2435		    le32toh(rep->TransferCount));
2436		csio->resid = cm->cm_length;
2437		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2438		break;
2439	}
2440
2441	mps_sc_failed_io_info(sc,csio,rep);
2442
2443	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2444		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2445		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2446		mps_dprint(sc, MPS_XINFO, "Command completed, "
2447		    "unfreezing SIM queue\n");
2448	}
2449
2450	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2451		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2452		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2453	}
2454
2455	mps_free_command(sc, cm);
2456	xpt_done(ccb);
2457}
2458
2459/* All Request reached here are Endian safe */
2460static void
2461mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2462    union ccb *ccb) {
2463	pMpi2SCSIIORequest_t	pIO_req;
2464	struct mps_softc	*sc = sassc->sc;
2465	uint64_t		virtLBA;
2466	uint32_t		physLBA, stripe_offset, stripe_unit;
2467	uint32_t		io_size, column;
2468	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2469
2470	/*
2471	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2472	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2473	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2474	 * bit different than the 10/16 CDBs, handle them separately.
2475	 */
2476	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2477	CDB = pIO_req->CDB.CDB32;
2478
2479	/*
2480	 * Handle 6 byte CDBs.
2481	 */
2482	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2483	    (CDB[0] == WRITE_6))) {
2484		/*
2485		 * Get the transfer size in blocks.
2486		 */
2487		io_size = (cm->cm_length >> sc->DD_block_exponent);
2488
2489		/*
2490		 * Get virtual LBA given in the CDB.
2491		 */
2492		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2493		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2494
2495		/*
2496		 * Check that LBA range for I/O does not exceed volume's
2497		 * MaxLBA.
2498		 */
2499		if ((virtLBA + (uint64_t)io_size - 1) <=
2500		    sc->DD_max_lba) {
2501			/*
2502			 * Check if the I/O crosses a stripe boundary.  If not,
2503			 * translate the virtual LBA to a physical LBA and set
2504			 * the DevHandle for the PhysDisk to be used.  If it
2505			 * does cross a boundry, do normal I/O.  To get the
2506			 * right DevHandle to use, get the map number for the
2507			 * column, then use that map number to look up the
2508			 * DevHandle of the PhysDisk.
2509			 */
2510			stripe_offset = (uint32_t)virtLBA &
2511			    (sc->DD_stripe_size - 1);
2512			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2513				physLBA = (uint32_t)virtLBA >>
2514				    sc->DD_stripe_exponent;
2515				stripe_unit = physLBA / sc->DD_num_phys_disks;
2516				column = physLBA % sc->DD_num_phys_disks;
2517				pIO_req->DevHandle =
2518				    htole16(sc->DD_column_map[column].dev_handle);
2519				/* ???? Is this endian safe*/
2520				cm->cm_desc.SCSIIO.DevHandle =
2521				    pIO_req->DevHandle;
2522
2523				physLBA = (stripe_unit <<
2524				    sc->DD_stripe_exponent) + stripe_offset;
2525				ptrLBA = &pIO_req->CDB.CDB32[1];
2526				physLBA_byte = (uint8_t)(physLBA >> 16);
2527				*ptrLBA = physLBA_byte;
2528				ptrLBA = &pIO_req->CDB.CDB32[2];
2529				physLBA_byte = (uint8_t)(physLBA >> 8);
2530				*ptrLBA = physLBA_byte;
2531				ptrLBA = &pIO_req->CDB.CDB32[3];
2532				physLBA_byte = (uint8_t)physLBA;
2533				*ptrLBA = physLBA_byte;
2534
2535				/*
2536				 * Set flag that Direct Drive I/O is
2537				 * being done.
2538				 */
2539				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2540			}
2541		}
2542		return;
2543	}
2544
2545	/*
2546	 * Handle 10, 12 or 16 byte CDBs.
2547	 */
2548	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2549	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2550	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2551	    (CDB[0] == WRITE_12))) {
2552		/*
2553		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2554		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2555		 * the else section.  10-byte and 12-byte CDB's are OK.
2556		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2557		 * ready to accept 12byte CDB for Direct IOs.
2558		 */
2559		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2560		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2561		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2562			/*
2563			 * Get the transfer size in blocks.
2564			 */
2565			io_size = (cm->cm_length >> sc->DD_block_exponent);
2566
2567			/*
2568			 * Get virtual LBA.  Point to correct lower 4 bytes of
2569			 * LBA in the CDB depending on command.
2570			 */
2571			lba_idx = ((CDB[0] == READ_12) ||
2572				(CDB[0] == WRITE_12) ||
2573				(CDB[0] == READ_10) ||
2574				(CDB[0] == WRITE_10))? 2 : 6;
2575			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2576			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2577			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2578			    (uint64_t)CDB[lba_idx + 3];
2579
2580			/*
2581			 * Check that LBA range for I/O does not exceed volume's
2582			 * MaxLBA.
2583			 */
2584			if ((virtLBA + (uint64_t)io_size - 1) <=
2585			    sc->DD_max_lba) {
2586				/*
2587				 * Check if the I/O crosses a stripe boundary.
2588				 * If not, translate the virtual LBA to a
2589				 * physical LBA and set the DevHandle for the
2590				 * PhysDisk to be used.  If it does cross a
2591				 * boundry, do normal I/O.  To get the right
2592				 * DevHandle to use, get the map number for the
2593				 * column, then use that map number to look up
2594				 * the DevHandle of the PhysDisk.
2595				 */
2596				stripe_offset = (uint32_t)virtLBA &
2597				    (sc->DD_stripe_size - 1);
2598				if ((stripe_offset + io_size) <=
2599				    sc->DD_stripe_size) {
2600					physLBA = (uint32_t)virtLBA >>
2601					    sc->DD_stripe_exponent;
2602					stripe_unit = physLBA /
2603					    sc->DD_num_phys_disks;
2604					column = physLBA %
2605					    sc->DD_num_phys_disks;
2606					pIO_req->DevHandle =
2607					    htole16(sc->DD_column_map[column].
2608					    dev_handle);
2609					cm->cm_desc.SCSIIO.DevHandle =
2610					    pIO_req->DevHandle;
2611
2612					physLBA = (stripe_unit <<
2613					    sc->DD_stripe_exponent) +
2614					    stripe_offset;
2615					ptrLBA =
2616					    &pIO_req->CDB.CDB32[lba_idx];
2617					physLBA_byte = (uint8_t)(physLBA >> 24);
2618					*ptrLBA = physLBA_byte;
2619					ptrLBA =
2620					    &pIO_req->CDB.CDB32[lba_idx + 1];
2621					physLBA_byte = (uint8_t)(physLBA >> 16);
2622					*ptrLBA = physLBA_byte;
2623					ptrLBA =
2624					    &pIO_req->CDB.CDB32[lba_idx + 2];
2625					physLBA_byte = (uint8_t)(physLBA >> 8);
2626					*ptrLBA = physLBA_byte;
2627					ptrLBA =
2628					    &pIO_req->CDB.CDB32[lba_idx + 3];
2629					physLBA_byte = (uint8_t)physLBA;
2630					*ptrLBA = physLBA_byte;
2631
2632					/*
2633					 * Set flag that Direct Drive I/O is
2634					 * being done.
2635					 */
2636					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2637				}
2638			}
2639		} else {
2640			/*
2641			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2642			 * 0.  Get the transfer size in blocks.
2643			 */
2644			io_size = (cm->cm_length >> sc->DD_block_exponent);
2645
2646			/*
2647			 * Get virtual LBA.
2648			 */
2649			virtLBA = ((uint64_t)CDB[2] << 54) |
2650			    ((uint64_t)CDB[3] << 48) |
2651			    ((uint64_t)CDB[4] << 40) |
2652			    ((uint64_t)CDB[5] << 32) |
2653			    ((uint64_t)CDB[6] << 24) |
2654			    ((uint64_t)CDB[7] << 16) |
2655			    ((uint64_t)CDB[8] << 8) |
2656			    (uint64_t)CDB[9];
2657
2658			/*
2659			 * Check that LBA range for I/O does not exceed volume's
2660			 * MaxLBA.
2661			 */
2662			if ((virtLBA + (uint64_t)io_size - 1) <=
2663			    sc->DD_max_lba) {
2664				/*
2665				 * Check if the I/O crosses a stripe boundary.
2666				 * If not, translate the virtual LBA to a
2667				 * physical LBA and set the DevHandle for the
2668				 * PhysDisk to be used.  If it does cross a
2669				 * boundry, do normal I/O.  To get the right
2670				 * DevHandle to use, get the map number for the
2671				 * column, then use that map number to look up
2672				 * the DevHandle of the PhysDisk.
2673				 */
2674				stripe_offset = (uint32_t)virtLBA &
2675				    (sc->DD_stripe_size - 1);
2676				if ((stripe_offset + io_size) <=
2677				    sc->DD_stripe_size) {
2678					physLBA = (uint32_t)(virtLBA >>
2679					    sc->DD_stripe_exponent);
2680					stripe_unit = physLBA /
2681					    sc->DD_num_phys_disks;
2682					column = physLBA %
2683					    sc->DD_num_phys_disks;
2684					pIO_req->DevHandle =
2685					    htole16(sc->DD_column_map[column].
2686					    dev_handle);
2687					cm->cm_desc.SCSIIO.DevHandle =
2688					    pIO_req->DevHandle;
2689
2690					physLBA = (stripe_unit <<
2691					    sc->DD_stripe_exponent) +
2692					    stripe_offset;
2693
2694					/*
2695					 * Set upper 4 bytes of LBA to 0.  We
2696					 * assume that the phys disks are less
2697					 * than 2 TB's in size.  Then, set the
2698					 * lower 4 bytes.
2699					 */
2700					pIO_req->CDB.CDB32[2] = 0;
2701					pIO_req->CDB.CDB32[3] = 0;
2702					pIO_req->CDB.CDB32[4] = 0;
2703					pIO_req->CDB.CDB32[5] = 0;
2704					ptrLBA = &pIO_req->CDB.CDB32[6];
2705					physLBA_byte = (uint8_t)(physLBA >> 24);
2706					*ptrLBA = physLBA_byte;
2707					ptrLBA = &pIO_req->CDB.CDB32[7];
2708					physLBA_byte = (uint8_t)(physLBA >> 16);
2709					*ptrLBA = physLBA_byte;
2710					ptrLBA = &pIO_req->CDB.CDB32[8];
2711					physLBA_byte = (uint8_t)(physLBA >> 8);
2712					*ptrLBA = physLBA_byte;
2713					ptrLBA = &pIO_req->CDB.CDB32[9];
2714					physLBA_byte = (uint8_t)physLBA;
2715					*ptrLBA = physLBA_byte;
2716
2717					/*
2718					 * Set flag that Direct Drive I/O is
2719					 * being done.
2720					 */
2721					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2722				}
2723			}
2724		}
2725	}
2726}
2727
2728#if __FreeBSD_version >= 900026
2729static void
2730mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2731{
2732	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2733	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2734	uint64_t sasaddr;
2735	union ccb *ccb;
2736
2737	ccb = cm->cm_complete_data;
2738
2739	/*
2740	 * Currently there should be no way we can hit this case.  It only
2741	 * happens when we have a failure to allocate chain frames, and SMP
2742	 * commands require two S/G elements only.  That should be handled
2743	 * in the standard request size.
2744	 */
2745	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2746		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2747			   __func__, cm->cm_flags);
2748		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2749		goto bailout;
2750        }
2751
2752	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2753	if (rpl == NULL) {
2754		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2755		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2756		goto bailout;
2757	}
2758
2759	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2760	sasaddr = le32toh(req->SASAddress.Low);
2761	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2762
2763	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2764	    MPI2_IOCSTATUS_SUCCESS ||
2765	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2766		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2767		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2768		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2769		goto bailout;
2770	}
2771
2772	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2773		   "%#jx completed successfully\n", __func__,
2774		   (uintmax_t)sasaddr);
2775
2776	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2777		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2778	else
2779		mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2780
2781bailout:
2782	/*
2783	 * We sync in both directions because we had DMAs in the S/G list
2784	 * in both directions.
2785	 */
2786	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2787			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2788	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2789	mps_free_command(sc, cm);
2790	xpt_done(ccb);
2791}
2792
2793static void
2794mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2795{
2796	struct mps_command *cm;
2797	uint8_t *request, *response;
2798	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2799	struct mps_softc *sc;
2800	struct sglist *sg;
2801	int error;
2802
2803	sc = sassc->sc;
2804	sg = NULL;
2805	error = 0;
2806
2807	/*
2808	 * XXX We don't yet support physical addresses here.
2809	 */
2810	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
2811	case CAM_DATA_PADDR:
2812	case CAM_DATA_SG_PADDR:
2813		mps_dprint(sc, MPS_ERROR,
2814			   "%s: physical addresses not supported\n", __func__);
2815		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2816		xpt_done(ccb);
2817		return;
2818	case CAM_DATA_SG:
2819		/*
2820		 * The chip does not support more than one buffer for the
2821		 * request or response.
2822		 */
2823	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
2824		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2825			mps_dprint(sc, MPS_ERROR,
2826				   "%s: multiple request or response "
2827				   "buffer segments not supported for SMP\n",
2828				   __func__);
2829			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2830			xpt_done(ccb);
2831			return;
2832		}
2833
2834		/*
2835		 * The CAM_SCATTER_VALID flag was originally implemented
2836		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2837		 * We have two.  So, just take that flag to mean that we
2838		 * might have S/G lists, and look at the S/G segment count
2839		 * to figure out whether that is the case for each individual
2840		 * buffer.
2841		 */
2842		if (ccb->smpio.smp_request_sglist_cnt != 0) {
2843			bus_dma_segment_t *req_sg;
2844
2845			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2846			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2847		} else
2848			request = ccb->smpio.smp_request;
2849
2850		if (ccb->smpio.smp_response_sglist_cnt != 0) {
2851			bus_dma_segment_t *rsp_sg;
2852
2853			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2854			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2855		} else
2856			response = ccb->smpio.smp_response;
2857		break;
2858	case CAM_DATA_VADDR:
2859		request = ccb->smpio.smp_request;
2860		response = ccb->smpio.smp_response;
2861		break;
2862	default:
2863		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2864		xpt_done(ccb);
2865		return;
2866	}
2867
2868	cm = mps_alloc_command(sc);
2869	if (cm == NULL) {
2870		mps_dprint(sc, MPS_ERROR,
2871		    "%s: cannot allocate command\n", __func__);
2872		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2873		xpt_done(ccb);
2874		return;
2875	}
2876
2877	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2878	bzero(req, sizeof(*req));
2879	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2880
2881	/* Allow the chip to use any route to this SAS address. */
2882	req->PhysicalPort = 0xff;
2883
2884	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2885	req->SGLFlags =
2886	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2887
2888	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
2889	    "address %#jx\n", __func__, (uintmax_t)sasaddr);
2890
2891	mpi_init_sge(cm, req, &req->SGL);
2892
2893	/*
2894	 * Set up a uio to pass into mps_map_command().  This allows us to
2895	 * do one map command, and one busdma call in there.
2896	 */
2897	cm->cm_uio.uio_iov = cm->cm_iovec;
2898	cm->cm_uio.uio_iovcnt = 2;
2899	cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2900
2901	/*
2902	 * The read/write flag isn't used by busdma, but set it just in
2903	 * case.  This isn't exactly accurate, either, since we're going in
2904	 * both directions.
2905	 */
2906	cm->cm_uio.uio_rw = UIO_WRITE;
2907
2908	cm->cm_iovec[0].iov_base = request;
2909	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2910	cm->cm_iovec[1].iov_base = response;
2911	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2912
2913	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2914			       cm->cm_iovec[1].iov_len;
2915
2916	/*
2917	 * Trigger a warning message in mps_data_cb() for the user if we
2918	 * wind up exceeding two S/G segments.  The chip expects one
2919	 * segment for the request and another for the response.
2920	 */
2921	cm->cm_max_segs = 2;
2922
2923	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2924	cm->cm_complete = mpssas_smpio_complete;
2925	cm->cm_complete_data = ccb;
2926
2927	/*
2928	 * Tell the mapping code that we're using a uio, and that this is
2929	 * an SMP passthrough request.  There is a little special-case
2930	 * logic there (in mps_data_cb()) to handle the bidirectional
2931	 * transfer.
2932	 */
2933	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2934			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2935
2936	/* The chip data format is little endian. */
2937	req->SASAddress.High = htole32(sasaddr >> 32);
2938	req->SASAddress.Low = htole32(sasaddr);
2939
2940	/*
2941	 * XXX Note that we don't have a timeout/abort mechanism here.
2942	 * From the manual, it looks like task management requests only
2943	 * work for SCSI IO and SATA passthrough requests.  We may need to
2944	 * have a mechanism to retry requests in the event of a chip reset
2945	 * at least.  Hopefully the chip will insure that any errors short
2946	 * of that are relayed back to the driver.
2947	 */
2948	error = mps_map_command(sc, cm);
2949	if ((error != 0) && (error != EINPROGRESS)) {
2950		mps_dprint(sc, MPS_ERROR,
2951			   "%s: error %d returned from mps_map_command()\n",
2952			   __func__, error);
2953		goto bailout_error;
2954	}
2955
2956	return;
2957
2958bailout_error:
2959	mps_free_command(sc, cm);
2960	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2961	xpt_done(ccb);
2962	return;
2963
2964}
2965
/*
 * Handle an XPT_SMP_IO CCB: resolve the SAS address of the SMP target
 * (either the device itself, if it contains an embedded SMP target, or
 * its parent expander) and hand the request off to mpssas_send_smpcmd().
 * Completes the CCB with an error status if no usable SAS address can be
 * found.
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.  Otherwise we need to figure out what the address of
	 * the parent expander is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		/* The parent must itself be an SMP target (an expander). */
		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/* The parent must itself be an SMP target (an expander). */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3101#endif //__FreeBSD_version >= 900026
3102
/*
 * Handle an XPT_RESET_DEV CCB by issuing a SCSI task management Target
 * Reset request for the device's handle.  The reply is processed by
 * mpssas_resetdev_complete(), which completes the CCB.
 */
static void
mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *tm;
	struct mpssas_target *targ;

	MPS_FUNCTRACE(sassc->sc);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_RESET_DEV\n",
	     ccb->ccb_h.target_id));
	sc = sassc->sc;
	tm = mps_alloc_command(sc);
	if (tm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "command alloc failure in mpssas_action_resetdev\n");
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	/* The chip data format is little endian. */
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	/* Task management requests carry no data and are high priority. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete = mpssas_resetdev_complete;
	tm->cm_complete_data = ccb;
	tm->cm_targ = targ;
	/* Hold off new I/O to the target until the reset has completed. */
	targ->flags |= MPSSAS_TARGET_INRESET;

	mps_map_command(sc, tm);
}
3145
/*
 * Completion handler for the Target Reset issued by
 * mpssas_action_resetdev().  Translates the task management reply into a
 * CAM status on the original XPT_RESET_DEV CCB, announces the reset to
 * CAM consumers on success, and completes the CCB.
 */
static void
mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
	union ccb *ccb;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	ccb = tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		MPI2_SCSI_TASK_MANAGE_REQUEST *req;

		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;

		mps_dprint(sc, MPS_ERROR,
			   "%s: cm_flags = %#x for reset of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   req->DevHandle);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));

	/*
	 * NOTE(review): ResponseCode is byte-swapped with le32toh() here
	 * while IOCStatus uses le16toh(); confirm the ResponseCode field
	 * width in MPI2_SCSI_TASK_MANAGE_REPLY matches this conversion.
	 */
	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		/* Notify CAM consumers that a bus device reset was sent. */
		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);
	}
	else
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);

bailout:

	mpssas_free_tm(sc, tm);
	xpt_done(ccb);
}
3193
3194static void
3195mpssas_poll(struct cam_sim *sim)
3196{
3197	struct mpssas_softc *sassc;
3198
3199	sassc = cam_sim_softc(sim);
3200
3201	if (sassc->sc->mps_debug & MPS_TRACE) {
3202		/* frequent debug messages during a panic just slow
3203		 * everything down too much.
3204		 */
3205		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3206		sassc->sc->mps_debug &= ~MPS_TRACE;
3207	}
3208
3209	mps_intr_locked(sassc->sc);
3210}
3211
/*
 * CAM asynchronous event callback.  On newer CAM stacks, watch for
 * AC_ADVINFO_CHANGED (long read-capacity data changes) and refresh the
 * per-LUN EEDP (protection information) state; on older stacks, watch
 * for AC_FOUND_DEVICE and probe EEDP capability directly via
 * mpssas_check_eedp().
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Look for an existing LUN record for this path's LUN. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* Not seen before: create a record to hold the EEDP state. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read-capacity data via an advinfo
		 * CCB; its protection bit tells us whether the LUN is
		 * formatted with EEDP.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#else
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		cgd = arg;
		mpssas_check_eedp(sc, path, cgd);
		break;
	}
#endif
	default:
		break;
	}
}
3316
3317#if (__FreeBSD_version < 901503) || \
3318    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3319static void
3320mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3321		  struct ccb_getdev *cgd)
3322{
3323	struct mpssas_softc *sassc = sc->sassc;
3324	struct ccb_scsiio *csio;
3325	struct scsi_read_capacity_16 *scsi_cmd;
3326	struct scsi_read_capacity_eedp *rcap_buf;
3327	path_id_t pathid;
3328	target_id_t targetid;
3329	lun_id_t lunid;
3330	union ccb *ccb;
3331	struct cam_path *local_path;
3332	struct mpssas_target *target;
3333	struct mpssas_lun *lun;
3334	uint8_t	found_lun;
3335	char path_str[64];
3336
3337	sassc = sc->sassc;
3338	pathid = cam_sim_path(sassc->sim);
3339	targetid = xpt_path_target_id(path);
3340	lunid = xpt_path_lun_id(path);
3341
3342	KASSERT(targetid < sassc->maxtargets,
3343	    ("Target %d out of bounds in mpssas_check_eedp\n",
3344	     targetid));
3345	target = &sassc->targets[targetid];
3346	if (target->handle == 0x0)
3347		return;
3348
3349	/*
3350	 * Determine if the device is EEDP capable.
3351	 *
3352	 * If this flag is set in the inquiry data,
3353	 * the device supports protection information,
3354	 * and must support the 16 byte read
3355	 * capacity command, otherwise continue without
3356	 * sending read cap 16
3357	 */
3358	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3359		return;
3360
3361	/*
3362	 * Issue a READ CAPACITY 16 command.  This info
3363	 * is used to determine if the LUN is formatted
3364	 * for EEDP support.
3365	 */
3366	ccb = xpt_alloc_ccb_nowait();
3367	if (ccb == NULL) {
3368		mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3369		    "for EEDP support.\n");
3370		return;
3371	}
3372
3373	if (xpt_create_path(&local_path, xpt_periph,
3374	    pathid, targetid, lunid) != CAM_REQ_CMP) {
3375		mps_dprint(sc, MPS_ERROR, "Unable to create "
3376		    "path for EEDP support\n");
3377		xpt_free_ccb(ccb);
3378		return;
3379	}
3380
3381	/*
3382	 * If LUN is already in list, don't create a new
3383	 * one.
3384	 */
3385	found_lun = FALSE;
3386	SLIST_FOREACH(lun, &target->luns, lun_link) {
3387		if (lun->lun_id == lunid) {
3388			found_lun = TRUE;
3389			break;
3390		}
3391	}
3392	if (!found_lun) {
3393		lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3394		    M_NOWAIT | M_ZERO);
3395		if (lun == NULL) {
3396			mps_dprint(sc, MPS_ERROR,
3397			    "Unable to alloc LUN for EEDP support.\n");
3398			xpt_free_path(local_path);
3399			xpt_free_ccb(ccb);
3400			return;
3401		}
3402		lun->lun_id = lunid;
3403		SLIST_INSERT_HEAD(&target->luns, lun,
3404		    lun_link);
3405	}
3406
3407	xpt_path_string(local_path, path_str, sizeof(path_str));
3408
3409	mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3410	    path_str, target->handle);
3411
3412	/*
3413	 * Issue a READ CAPACITY 16 command for the LUN.
3414	 * The mpssas_read_cap_done function will load
3415	 * the read cap info into the LUN struct.
3416	 */
3417	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3418	    M_MPT2, M_NOWAIT | M_ZERO);
3419	if (rcap_buf == NULL) {
3420		mps_dprint(sc, MPS_FAULT,
3421		    "Unable to alloc read capacity buffer for EEDP support.\n");
3422		xpt_free_path(ccb->ccb_h.path);
3423		xpt_free_ccb(ccb);
3424		return;
3425	}
3426	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3427	csio = &ccb->csio;
3428	csio->ccb_h.func_code = XPT_SCSI_IO;
3429	csio->ccb_h.flags = CAM_DIR_IN;
3430	csio->ccb_h.retry_count = 4;
3431	csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3432	csio->ccb_h.timeout = 60000;
3433	csio->data_ptr = (uint8_t *)rcap_buf;
3434	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3435	csio->sense_len = MPS_SENSE_LEN;
3436	csio->cdb_len = sizeof(*scsi_cmd);
3437	csio->tag_action = MSG_SIMPLE_Q_TAG;
3438
3439	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3440	bzero(scsi_cmd, sizeof(*scsi_cmd));
3441	scsi_cmd->opcode = 0x9E;
3442	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3443	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3444
3445	ccb->ccb_h.ppriv_ptr1 = sassc;
3446	xpt_action(ccb);
3447}
3448
/*
 * Completion handler for the internally-generated READ CAPACITY 16
 * command issued by mpssas_check_eedp().  Records whether the LUN is
 * formatted for EEDP (and its block size) in the target's LUN list, then
 * frees the CCB, its path, and the read-capacity buffer.
 */
static void
mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver must release the devq itself because this SCSI
	 * command was generated internally by the driver.  This is
	 * currently the only place the driver issues a SCSI command
	 * internally; if more are added in the future they will also need
	 * to release the devq themselves, since such commands never go
	 * back through cam_periph.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in mpssas_read_cap_done\n",
	     done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in
		 * with EEDP info.  If the READ CAP 16 command had some
		 * SCSI error (common if command is not supported), mark
		 * the lun as not supporting EEDP and set the block size
		 * to 0.
		 */
		if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP)
		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* Protection-enable bit: the LUN is formatted with EEDP. */
		if (rcap_buf->protect & 0x01) {
			mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
 			    "target ID %d is formatted for EEDP "
 			    "support.\n", done_ccb->ccb_h.target_lun,
 			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	// Finished with this CCB and path.
	free(rcap_buf, M_MPT2);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3519#endif /* (__FreeBSD_version < 901503) || \
3520          ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3521
3522void
3523mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
3524    struct mpssas_target *target, lun_id_t lun_id)
3525{
3526	union ccb *ccb;
3527	path_id_t path_id;
3528
3529	/*
3530	 * Set the INRESET flag for this target so that no I/O will be sent to
3531	 * the target until the reset has completed.  If an I/O request does
3532	 * happen, the devq will be frozen.  The CCB holds the path which is
3533	 * used to release the devq.  The devq is released and the CCB is freed
3534	 * when the TM completes.
3535	 */
3536	ccb = xpt_alloc_ccb_nowait();
3537	if (ccb) {
3538		path_id = cam_sim_path(sc->sassc->sim);
3539		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3540		    target->tid, lun_id) != CAM_REQ_CMP) {
3541			xpt_free_ccb(ccb);
3542		} else {
3543			tm->cm_ccb = ccb;
3544			tm->cm_targ = target;
3545			target->flags |= MPSSAS_TARGET_INRESET;
3546		}
3547	}
3548}
3549
/*
 * Kick off device discovery by sending a port enable request to the
 * controller.  Always returns 0.
 */
int
mpssas_startup(struct mps_softc *sc)
{

	/*
	 * Send the port enable message and set the wait_for_port_enable flag.
	 * This flag helps to keep the simq frozen until all discovery events
	 * are processed.
	 */
	sc->wait_for_port_enable = 1;
	mpssas_send_portenable(sc);
	return (0);
}
3563
3564static int
3565mpssas_send_portenable(struct mps_softc *sc)
3566{
3567	MPI2_PORT_ENABLE_REQUEST *request;
3568	struct mps_command *cm;
3569
3570	MPS_FUNCTRACE(sc);
3571
3572	if ((cm = mps_alloc_command(sc)) == NULL)
3573		return (EBUSY);
3574	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3575	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3576	request->MsgFlags = 0;
3577	request->VP_ID = 0;
3578	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3579	cm->cm_complete = mpssas_portenable_complete;
3580	cm->cm_data = NULL;
3581	cm->cm_sge = NULL;
3582
3583	mps_map_command(sc, cm);
3584	mps_dprint(sc, MPS_XINFO,
3585	    "mps_send_portenable finished cm %p req %p complete %p\n",
3586	    cm, cm->cm_req, cm->cm_complete);
3587	return (0);
3588}
3589
3590static void
3591mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3592{
3593	MPI2_PORT_ENABLE_REPLY *reply;
3594	struct mpssas_softc *sassc;
3595
3596	MPS_FUNCTRACE(sc);
3597	sassc = sc->sassc;
3598
3599	/*
3600	 * Currently there should be no way we can hit this case.  It only
3601	 * happens when we have a failure to allocate chain frames, and
3602	 * port enable commands don't have S/G lists.
3603	 */
3604	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3605		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3606			   "This should not happen!\n", __func__, cm->cm_flags);
3607	}
3608
3609	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3610	if (reply == NULL)
3611		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3612	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3613	    MPI2_IOCSTATUS_SUCCESS)
3614		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3615
3616	mps_free_command(sc, cm);
3617	if (sc->mps_ich.ich_arg != NULL) {
3618		mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n");
3619		config_intrhook_disestablish(&sc->mps_ich);
3620		sc->mps_ich.ich_arg = NULL;
3621	}
3622
3623	/*
3624	 * Get WarpDrive info after discovery is complete but before the scan
3625	 * starts.  At this point, all devices are ready to be exposed to the
3626	 * OS.  If devices should be hidden instead, take them out of the
3627	 * 'targets' array before the scan.  The devinfo for a disk will have
3628	 * some info and a volume's will be 0.  Use that to remove disks.
3629	 */
3630	mps_wd_config_pages(sc);
3631
3632	/*
3633	 * Done waiting for port enable to complete.  Decrement the refcount.
3634	 * If refcount is 0, discovery is complete and a rescan of the bus can
3635	 * take place.  Since the simq was explicitly frozen before port
3636	 * enable, it must be explicitly released here to keep the
3637	 * freeze/release count in sync.
3638	 */
3639	sc->wait_for_port_enable = 0;
3640	sc->port_enable_complete = 1;
3641	wakeup(&sc->port_enable_complete);
3642	mpssas_startup_decrement(sassc);
3643}
3644
3645int
3646mpssas_check_id(struct mpssas_softc *sassc, int id)
3647{
3648	struct mps_softc *sc = sassc->sc;
3649	char *ids;
3650	char *name;
3651
3652	ids = &sc->exclude_ids[0];
3653	while((name = strsep(&ids, ",")) != NULL) {
3654		if (name[0] == '\0')
3655			continue;
3656		if (strtol(name, NULL, 0) == (long)id)
3657			return (1);
3658	}
3659
3660	return (0);
3661}
3662
3663void
3664mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3665{
3666	struct mpssas_softc *sassc;
3667	struct mpssas_lun *lun, *lun_tmp;
3668	struct mpssas_target *targ;
3669	int i;
3670
3671	sassc = sc->sassc;
3672	/*
3673	 * The number of targets is based on IOC Facts, so free all of
3674	 * the allocated LUNs for each target and then the target buffer
3675	 * itself.
3676	 */
3677	for (i=0; i< maxtargets; i++) {
3678		targ = &sassc->targets[i];
3679		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3680			free(lun, M_MPT2);
3681		}
3682	}
3683	free(sassc->targets, M_MPT2);
3684
3685	sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3686	    M_MPT2, M_WAITOK|M_ZERO);
3687	if (!sassc->targets) {
3688		panic("%s failed to alloc targets with error %d\n",
3689		    __func__, ENOMEM);
3690	}
3691}
3692