1/*-
2 * FreeBSD/CAM specific routines for LSI '909 FC  adapters.
3 * FreeBSD Version.
4 *
5 * Copyright (c)  2000, 2001 by Greg Ansley
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice immediately at the beginning of the file, without modification,
12 *    this list of conditions, and the following disclaimer.
13 * 2. The name of the author may not be used to endorse or promote products
14 *    derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28/*-
29 * Copyright (c) 2002, 2006 by Matthew Jacob
30 * All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions are
34 * met:
35 * 1. Redistributions of source code must retain the above copyright
36 *    notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
38 *    substantially similar to the "NO WARRANTY" disclaimer below
39 *    ("Disclaimer") and any redistribution must be conditioned upon including
40 *    a substantially similar Disclaimer requirement for further binary
41 *    redistribution.
42 * 3. Neither the names of the above listed copyright holders nor the names
43 *    of any contributors may be used to endorse or promote products derived
44 *    from this software without specific prior written permission.
45 *
46 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
50 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
56 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57 *
58 * Support from Chris Ellsworth in order to make SAS adapters work
59 * is gratefully acknowledged.
60 *
61 * Support from LSI-Logic has also gone a great deal toward making this a
62 * workable subsystem and is gratefully acknowledged.
63 */
64/*-
65 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
66 * Copyright (c) 2005, WHEEL Sp. z o.o.
67 * Copyright (c) 2004, 2005 Justin T. Gibbs
68 * All rights reserved.
69 *
70 * Redistribution and use in source and binary forms, with or without
71 * modification, are permitted provided that the following conditions are
72 * met:
73 * 1. Redistributions of source code must retain the above copyright
74 *    notice, this list of conditions and the following disclaimer.
75 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
76 *    substantially similar to the "NO WARRANTY" disclaimer below
77 *    ("Disclaimer") and any redistribution must be conditioned upon including
78 *    a substantially similar Disclaimer requirement for further binary
79 *    redistribution.
80 * 3. Neither the names of the above listed copyright holders nor the names
81 *    of any contributors may be used to endorse or promote products derived
82 *    from this software without specific prior written permission.
83 *
84 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
85 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
86 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
87 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
88 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
89 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
90 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
91 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
92 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
93 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
94 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
95 */
96#include <sys/cdefs.h>
97__FBSDID("$FreeBSD: stable/11/sys/dev/mpt/mpt_cam.c 315827 2017-03-23 06:53:31Z mav $");
98
99#include <dev/mpt/mpt.h>
100#include <dev/mpt/mpt_cam.h>
101#include <dev/mpt/mpt_raid.h>
102
103#include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
104#include "dev/mpt/mpilib/mpi_init.h"
105#include "dev/mpt/mpilib/mpi_targ.h"
106#include "dev/mpt/mpilib/mpi_fc.h"
107#include "dev/mpt/mpilib/mpi_sas.h"
108
109#include <sys/callout.h>
110#include <sys/kthread.h>
111#include <sys/sysctl.h>
112
113static void mpt_poll(struct cam_sim *);
114static timeout_t mpt_timeout;
115static void mpt_action(struct cam_sim *, union ccb *);
116static int
117mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
118static void mpt_setwidth(struct mpt_softc *, int, int);
119static void mpt_setsync(struct mpt_softc *, int, int, int);
120static int mpt_update_spi_config(struct mpt_softc *, int);
121
122static mpt_reply_handler_t mpt_scsi_reply_handler;
123static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
124static mpt_reply_handler_t mpt_fc_els_reply_handler;
125static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
126					MSG_DEFAULT_REPLY *);
127static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
128static int mpt_fc_reset_link(struct mpt_softc *, int);
129
130static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
131static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
132static void mpt_recovery_thread(void *arg);
133static void mpt_recover_commands(struct mpt_softc *mpt);
134
135static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
136    target_id_t, lun_id_t, u_int, int);
137
138static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
139static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
140static int mpt_add_els_buffers(struct mpt_softc *mpt);
141static int mpt_add_target_commands(struct mpt_softc *mpt);
142static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
143static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
144static void mpt_target_start_io(struct mpt_softc *, union ccb *);
145static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
146static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
147static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
148    uint8_t, uint8_t const *, u_int);
149static void
150mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
151    tgt_resource_t *, int);
152static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
153static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
154static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
155static mpt_reply_handler_t mpt_sata_pass_reply_handler;
156
157static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
158static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
159static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
160static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;
161
162static mpt_probe_handler_t	mpt_cam_probe;
163static mpt_attach_handler_t	mpt_cam_attach;
164static mpt_enable_handler_t	mpt_cam_enable;
165static mpt_ready_handler_t	mpt_cam_ready;
166static mpt_event_handler_t	mpt_cam_event;
167static mpt_reset_handler_t	mpt_cam_ioc_reset;
168static mpt_detach_handler_t	mpt_cam_detach;
169
170static struct mpt_personality mpt_cam_personality =
171{
172	.name		= "mpt_cam",
173	.probe		= mpt_cam_probe,
174	.attach		= mpt_cam_attach,
175	.enable		= mpt_cam_enable,
176	.ready		= mpt_cam_ready,
177	.event		= mpt_cam_event,
178	.reset		= mpt_cam_ioc_reset,
179	.detach		= mpt_cam_detach,
180};
181
182DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
183MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);
184
185int mpt_enable_sata_wc = -1;
186TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);
187
188static int
189mpt_cam_probe(struct mpt_softc *mpt)
190{
191	int role;
192
193	/*
194	 * Only attach to nodes that support the initiator or target role
195	 * (or want to) or have RAID physical devices that need CAM pass-thru
196	 * support.
197	 */
198	if (mpt->do_cfg_role) {
199		role = mpt->cfg_role;
200	} else {
201		role = mpt->role;
202	}
203	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
204	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
205		return (0);
206	}
207	return (ENODEV);
208}
209
210static int
211mpt_cam_attach(struct mpt_softc *mpt)
212{
213	struct cam_devq *devq;
214	mpt_handler_t	 handler;
215	int		 maxq;
216	int		 error;
217
218	MPT_LOCK(mpt);
219	TAILQ_INIT(&mpt->request_timeout_list);
220	maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
221	    mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);
222
223	handler.reply_handler = mpt_scsi_reply_handler;
224	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
225				     &scsi_io_handler_id);
226	if (error != 0) {
227		MPT_UNLOCK(mpt);
228		goto cleanup;
229	}
230
231	handler.reply_handler = mpt_scsi_tmf_reply_handler;
232	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
233				     &scsi_tmf_handler_id);
234	if (error != 0) {
235		MPT_UNLOCK(mpt);
236		goto cleanup;
237	}
238
239	/*
240	 * If we're fibre channel and could support target mode, we register
241	 * an ELS reply handler and give it resources.
242	 */
243	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
244		handler.reply_handler = mpt_fc_els_reply_handler;
245		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
246		    &fc_els_handler_id);
247		if (error != 0) {
248			MPT_UNLOCK(mpt);
249			goto cleanup;
250		}
251		if (mpt_add_els_buffers(mpt) == FALSE) {
252			error = ENOMEM;
253			MPT_UNLOCK(mpt);
254			goto cleanup;
255		}
256		maxq -= mpt->els_cmds_allocated;
257	}
258
259	/*
260	 * If we support target mode, we register a reply handler for it,
261	 * but don't add command resources until we actually enable target
262	 * mode.
263	 */
264	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
265		handler.reply_handler = mpt_scsi_tgt_reply_handler;
266		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
267		    &mpt->scsi_tgt_handler_id);
268		if (error != 0) {
269			MPT_UNLOCK(mpt);
270			goto cleanup;
271		}
272	}
273
274	if (mpt->is_sas) {
275		handler.reply_handler = mpt_sata_pass_reply_handler;
276		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
277		    &sata_pass_handler_id);
278		if (error != 0) {
279			MPT_UNLOCK(mpt);
280			goto cleanup;
281		}
282	}
283
284	/*
285	 * We keep one request reserved for timeout TMF requests.
286	 */
287	mpt->tmf_req = mpt_get_request(mpt, FALSE);
288	if (mpt->tmf_req == NULL) {
289		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
290		error = ENOMEM;
291		MPT_UNLOCK(mpt);
292		goto cleanup;
293	}
294
295	/*
296	 * Mark the request as free even though not on the free list.
297	 * There is only one TMF request allowed to be outstanding at
298	 * a time and the TMF routines perform their own allocation
299	 * tracking using the standard state flags.
300	 */
301	mpt->tmf_req->state = REQ_STATE_FREE;
302	maxq--;
303
304	/*
305	 * The rest of this is CAM foo, for which we need to drop our lock
306	 */
307	MPT_UNLOCK(mpt);
308
309	if (mpt_spawn_recovery_thread(mpt) != 0) {
310		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
311		error = ENOMEM;
312		goto cleanup;
313	}
314
315	/*
316	 * Create the device queue for our SIM(s).
317	 */
318	devq = cam_simq_alloc(maxq);
319	if (devq == NULL) {
320		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
321		error = ENOMEM;
322		goto cleanup;
323	}
324
325	/*
326	 * Construct our SIM entry.
327	 */
328	mpt->sim =
329	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
330	if (mpt->sim == NULL) {
331		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
332		cam_simq_free(devq);
333		error = ENOMEM;
334		goto cleanup;
335	}
336
337	/*
338	 * Register exactly this bus.
339	 */
340	MPT_LOCK(mpt);
341	if (xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
342		mpt_prt(mpt, "Bus registration Failed!\n");
343		error = ENOMEM;
344		MPT_UNLOCK(mpt);
345		goto cleanup;
346	}
347
348	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
349	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
350		mpt_prt(mpt, "Unable to allocate Path!\n");
351		error = ENOMEM;
352		MPT_UNLOCK(mpt);
353		goto cleanup;
354	}
355	MPT_UNLOCK(mpt);
356
357	/*
358	 * Only register a second bus for RAID physical
359	 * devices if the controller supports RAID.
360	 */
361	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
362		return (0);
363	}
364
365	/*
366	 * Create a "bus" to export all hidden disks to CAM.
367	 */
368	mpt->phydisk_sim =
369	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
370	if (mpt->phydisk_sim == NULL) {
371		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
372		error = ENOMEM;
373		goto cleanup;
374	}
375
376	/*
377	 * Register this bus.
378	 */
379	MPT_LOCK(mpt);
380	if (xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) !=
381	    CAM_SUCCESS) {
382		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
383		error = ENOMEM;
384		MPT_UNLOCK(mpt);
385		goto cleanup;
386	}
387
388	if (xpt_create_path(&mpt->phydisk_path, NULL,
389	    cam_sim_path(mpt->phydisk_sim),
390	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
391		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
392		error = ENOMEM;
393		MPT_UNLOCK(mpt);
394		goto cleanup;
395	}
396	MPT_UNLOCK(mpt);
397	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
398	return (0);
399
400cleanup:
401	mpt_cam_detach(mpt);
402	return (error);
403}
404
405/*
406 * Read FC configuration information
407 */
408static int
409mpt_read_config_info_fc(struct mpt_softc *mpt)
410{
411	struct sysctl_ctx_list *ctx;
412	struct sysctl_oid *tree;
413	char *topology = NULL;
414	int rv;
415
416	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
417	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
418	if (rv) {
419		return (-1);
420	}
421	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
422		 mpt->mpt_fcport_page0.Header.PageVersion,
423		 mpt->mpt_fcport_page0.Header.PageLength,
424		 mpt->mpt_fcport_page0.Header.PageNumber,
425		 mpt->mpt_fcport_page0.Header.PageType);
426
427
428	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
429	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
430	if (rv) {
431		mpt_prt(mpt, "failed to read FC Port Page 0\n");
432		return (-1);
433	}
434	mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);
435
436	switch (mpt->mpt_fcport_page0.CurrentSpeed) {
437	case MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT:
438		mpt->mpt_fcport_speed = 1;
439		break;
440	case MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT:
441		mpt->mpt_fcport_speed = 2;
442		break;
443	case MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT:
444		mpt->mpt_fcport_speed = 10;
445		break;
446	case MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT:
447		mpt->mpt_fcport_speed = 4;
448		break;
449	default:
450		mpt->mpt_fcport_speed = 0;
451		break;
452	}
453
454	switch (mpt->mpt_fcport_page0.Flags &
455	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
456	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
457		mpt->mpt_fcport_speed = 0;
458		topology = "<NO LOOP>";
459		break;
460	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
461		topology = "N-Port";
462		break;
463	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
464		topology = "NL-Port";
465		break;
466	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
467		topology = "F-Port";
468		break;
469	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
470		topology = "FL-Port";
471		break;
472	default:
473		mpt->mpt_fcport_speed = 0;
474		topology = "?";
475		break;
476	}
477
478	mpt->scinfo.fc.wwnn = ((uint64_t)mpt->mpt_fcport_page0.WWNN.High << 32)
479	    | mpt->mpt_fcport_page0.WWNN.Low;
480	mpt->scinfo.fc.wwpn = ((uint64_t)mpt->mpt_fcport_page0.WWPN.High << 32)
481	    | mpt->mpt_fcport_page0.WWPN.Low;
482	mpt->scinfo.fc.portid = mpt->mpt_fcport_page0.PortIdentifier;
483
484	mpt_lprt(mpt, MPT_PRT_INFO,
485	    "FC Port Page 0: Topology <%s> WWNN 0x%16jx WWPN 0x%16jx "
486	    "Speed %u-Gbit\n", topology,
487	    (uintmax_t)mpt->scinfo.fc.wwnn, (uintmax_t)mpt->scinfo.fc.wwpn,
488	    mpt->mpt_fcport_speed);
489	MPT_UNLOCK(mpt);
490	ctx = device_get_sysctl_ctx(mpt->dev);
491	tree = device_get_sysctl_tree(mpt->dev);
492
493	SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
494	    "wwnn", CTLFLAG_RD, &mpt->scinfo.fc.wwnn,
495	    "World Wide Node Name");
496
497	SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
498	     "wwpn", CTLFLAG_RD, &mpt->scinfo.fc.wwpn,
499	     "World Wide Port Name");
500
501	MPT_LOCK(mpt);
502	return (0);
503}
504
505/*
506 * Set FC configuration information.
507 */
508static int
509mpt_set_initial_config_fc(struct mpt_softc *mpt)
510{
511	CONFIG_PAGE_FC_PORT_1 fc;
512	U32 fl;
513	int r, doit = 0;
514	int role;
515
516	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
517	    &fc.Header, FALSE, 5000);
518	if (r) {
519		mpt_prt(mpt, "failed to read FC page 1 header\n");
520		return (mpt_fc_reset_link(mpt, 1));
521	}
522
523	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
524	    &fc.Header, sizeof (fc), FALSE, 5000);
525	if (r) {
526		mpt_prt(mpt, "failed to read FC page 1\n");
527		return (mpt_fc_reset_link(mpt, 1));
528	}
529	mpt2host_config_page_fc_port_1(&fc);
530
531	/*
532	 * Check our flags to make sure we support the role we want.
533	 */
534	doit = 0;
535	role = 0;
536	fl = fc.Flags;
537
538	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
539		role |= MPT_ROLE_INITIATOR;
540	}
541	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
542		role |= MPT_ROLE_TARGET;
543	}
544
545	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;
546
547	if (mpt->do_cfg_role == 0) {
548		role = mpt->cfg_role;
549	} else {
550		mpt->do_cfg_role = 0;
551	}
552
553	if (role != mpt->cfg_role) {
554		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
555			if ((role & MPT_ROLE_INITIATOR) == 0) {
556				mpt_prt(mpt, "adding initiator role\n");
557				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
558				doit++;
559			} else {
560				mpt_prt(mpt, "keeping initiator role\n");
561			}
562		} else if (role & MPT_ROLE_INITIATOR) {
563			mpt_prt(mpt, "removing initiator role\n");
564			doit++;
565		}
566		if (mpt->cfg_role & MPT_ROLE_TARGET) {
567			if ((role & MPT_ROLE_TARGET) == 0) {
568				mpt_prt(mpt, "adding target role\n");
569				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
570				doit++;
571			} else {
572				mpt_prt(mpt, "keeping target role\n");
573			}
574		} else if (role & MPT_ROLE_TARGET) {
575			mpt_prt(mpt, "removing target role\n");
576			doit++;
577		}
578		mpt->role = mpt->cfg_role;
579	}
580
581	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
582		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
583			mpt_prt(mpt, "adding OXID option\n");
584			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
585			doit++;
586		}
587	}
588
589	if (doit) {
590		fc.Flags = fl;
591		host2mpt_config_page_fc_port_1(&fc);
592		r = mpt_write_cfg_page(mpt,
593		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
594		    sizeof(fc), FALSE, 5000);
595		if (r != 0) {
596			mpt_prt(mpt, "failed to update NVRAM with changes\n");
597			return (0);
598		}
599		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
600		    "effect until next reboot or IOC reset\n");
601	}
602	return (0);
603}
604
605static int
606mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
607{
608	ConfigExtendedPageHeader_t hdr;
609	struct mptsas_phyinfo *phyinfo;
610	SasIOUnitPage0_t *buffer;
611	int error, len, i;
612
613	error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
614				       0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
615				       &hdr, 0, 10000);
616	if (error)
617		goto out;
618	if (hdr.ExtPageLength == 0) {
619		error = ENXIO;
620		goto out;
621	}
622
623	len = hdr.ExtPageLength * 4;
624	buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
625	if (buffer == NULL) {
626		error = ENOMEM;
627		goto out;
628	}
629
630	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
631				     0, &hdr, buffer, len, 0, 10000);
632	if (error) {
633		free(buffer, M_DEVBUF);
634		goto out;
635	}
636
637	portinfo->num_phys = buffer->NumPhys;
638	portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) *
639	    portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
640	if (portinfo->phy_info == NULL) {
641		free(buffer, M_DEVBUF);
642		error = ENOMEM;
643		goto out;
644	}
645
646	for (i = 0; i < portinfo->num_phys; i++) {
647		phyinfo = &portinfo->phy_info[i];
648		phyinfo->phy_num = i;
649		phyinfo->port_id = buffer->PhyData[i].Port;
650		phyinfo->negotiated_link_rate =
651		    buffer->PhyData[i].NegotiatedLinkRate;
652		phyinfo->handle =
653		    le16toh(buffer->PhyData[i].ControllerDevHandle);
654	}
655
656	free(buffer, M_DEVBUF);
657out:
658	return (error);
659}
660
661static int
662mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
663	uint32_t form, uint32_t form_specific)
664{
665	ConfigExtendedPageHeader_t hdr;
666	SasPhyPage0_t *buffer;
667	int error;
668
669	error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
670				       MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
671				       0, 10000);
672	if (error)
673		goto out;
674	if (hdr.ExtPageLength == 0) {
675		error = ENXIO;
676		goto out;
677	}
678
679	buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
680	if (buffer == NULL) {
681		error = ENOMEM;
682		goto out;
683	}
684
685	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
686				     form + form_specific, &hdr, buffer,
687				     sizeof(SasPhyPage0_t), 0, 10000);
688	if (error) {
689		free(buffer, M_DEVBUF);
690		goto out;
691	}
692
693	phy_info->hw_link_rate = buffer->HwLinkRate;
694	phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
695	phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
696	phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);
697
698	free(buffer, M_DEVBUF);
699out:
700	return (error);
701}
702
703static int
704mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
705	uint32_t form, uint32_t form_specific)
706{
707	ConfigExtendedPageHeader_t hdr;
708	SasDevicePage0_t *buffer;
709	uint64_t sas_address;
710	int error = 0;
711
712	bzero(device_info, sizeof(*device_info));
713	error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
714				       MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
715				       &hdr, 0, 10000);
716	if (error)
717		goto out;
718	if (hdr.ExtPageLength == 0) {
719		error = ENXIO;
720		goto out;
721	}
722
723	buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
724	if (buffer == NULL) {
725		error = ENOMEM;
726		goto out;
727	}
728
729	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
730				     form + form_specific, &hdr, buffer,
731				     sizeof(SasDevicePage0_t), 0, 10000);
732	if (error) {
733		free(buffer, M_DEVBUF);
734		goto out;
735	}
736
737	device_info->dev_handle = le16toh(buffer->DevHandle);
738	device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
739	device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
740	device_info->slot = le16toh(buffer->Slot);
741	device_info->phy_num = buffer->PhyNum;
742	device_info->physical_port = buffer->PhysicalPort;
743	device_info->target_id = buffer->TargetID;
744	device_info->bus = buffer->Bus;
745	bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
746	device_info->sas_address = le64toh(sas_address);
747	device_info->device_info = le32toh(buffer->DeviceInfo);
748
749	free(buffer, M_DEVBUF);
750out:
751	return (error);
752}
753
754/*
755 * Read SAS configuration information. Nothing to do yet.
756 */
757static int
758mpt_read_config_info_sas(struct mpt_softc *mpt)
759{
760	struct mptsas_portinfo *portinfo;
761	struct mptsas_phyinfo *phyinfo;
762	int error, i;
763
764	portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
765	if (portinfo == NULL)
766		return (ENOMEM);
767
768	error = mptsas_sas_io_unit_pg0(mpt, portinfo);
769	if (error) {
770		free(portinfo, M_DEVBUF);
771		return (0);
772	}
773
774	for (i = 0; i < portinfo->num_phys; i++) {
775		phyinfo = &portinfo->phy_info[i];
776		error = mptsas_sas_phy_pg0(mpt, phyinfo,
777		    (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
778		    MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
779		if (error)
780			break;
781		error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
782		    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
783		    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
784		    phyinfo->handle);
785		if (error)
786			break;
787		phyinfo->identify.phy_num = phyinfo->phy_num = i;
788		if (phyinfo->attached.dev_handle)
789			error = mptsas_sas_device_pg0(mpt,
790			    &phyinfo->attached,
791			    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
792			    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
793			    phyinfo->attached.dev_handle);
794		if (error)
795			break;
796	}
797	mpt->sas_portinfo = portinfo;
798	return (0);
799}
800
801static void
802mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
803	int enabled)
804{
805	SataPassthroughRequest_t	*pass;
806	request_t *req;
807	int error, status;
808
809	req = mpt_get_request(mpt, 0);
810	if (req == NULL)
811		return;
812
813	pass = req->req_vbuf;
814	bzero(pass, sizeof(SataPassthroughRequest_t));
815	pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
816	pass->TargetID = devinfo->target_id;
817	pass->Bus = devinfo->bus;
818	pass->PassthroughFlags = 0;
819	pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
820	pass->DataLength = 0;
821	pass->MsgContext = htole32(req->index | sata_pass_handler_id);
822	pass->CommandFIS[0] = 0x27;
823	pass->CommandFIS[1] = 0x80;
824	pass->CommandFIS[2] = 0xef;
825	pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
826	pass->CommandFIS[7] = 0x40;
827	pass->CommandFIS[15] = 0x08;
828
829	mpt_check_doorbell(mpt);
830	mpt_send_cmd(mpt, req);
831	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
832			     10 * 1000);
833	if (error) {
834		mpt_free_request(mpt, req);
835		printf("error %d sending passthrough\n", error);
836		return;
837	}
838
839	status = le16toh(req->IOCStatus);
840	if (status != MPI_IOCSTATUS_SUCCESS) {
841		mpt_free_request(mpt, req);
842		printf("IOCSTATUS %d\n", status);
843		return;
844	}
845
846	mpt_free_request(mpt, req);
847}
848
849/*
850 * Set SAS configuration information. Nothing to do yet.
851 */
852static int
853mpt_set_initial_config_sas(struct mpt_softc *mpt)
854{
855	struct mptsas_phyinfo *phyinfo;
856	int i;
857
858	if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
859		for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
860			phyinfo = &mpt->sas_portinfo->phy_info[i];
861			if (phyinfo->attached.dev_handle == 0)
862				continue;
863			if ((phyinfo->attached.device_info &
864			    MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
865				continue;
866			if (bootverbose)
867				device_printf(mpt->dev,
868				    "%sabling SATA WC on phy %d\n",
869				    (mpt_enable_sata_wc) ? "En" : "Dis", i);
870			mptsas_set_sata_wc(mpt, &phyinfo->attached,
871					   mpt_enable_sata_wc);
872		}
873	}
874
875	return (0);
876}
877
878static int
879mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
880 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
881{
882
883	if (req != NULL) {
884		if (reply_frame != NULL) {
885			req->IOCStatus = le16toh(reply_frame->IOCStatus);
886		}
887		req->state &= ~REQ_STATE_QUEUED;
888		req->state |= REQ_STATE_DONE;
889		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
890		if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
891			wakeup(req);
892		} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
893			/*
894			 * Whew- we can free this request (late completion)
895			 */
896			mpt_free_request(mpt, req);
897		}
898	}
899
900	return (TRUE);
901}
902
903/*
904 * Read SCSI configuration information
905 */
906static int
907mpt_read_config_info_spi(struct mpt_softc *mpt)
908{
909	int rv, i;
910
911	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
912	    &mpt->mpt_port_page0.Header, FALSE, 5000);
913	if (rv) {
914		return (-1);
915	}
916	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
917	    mpt->mpt_port_page0.Header.PageVersion,
918	    mpt->mpt_port_page0.Header.PageLength,
919	    mpt->mpt_port_page0.Header.PageNumber,
920	    mpt->mpt_port_page0.Header.PageType);
921
922	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
923	    &mpt->mpt_port_page1.Header, FALSE, 5000);
924	if (rv) {
925		return (-1);
926	}
927	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
928	    mpt->mpt_port_page1.Header.PageVersion,
929	    mpt->mpt_port_page1.Header.PageLength,
930	    mpt->mpt_port_page1.Header.PageNumber,
931	    mpt->mpt_port_page1.Header.PageType);
932
933	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
934	    &mpt->mpt_port_page2.Header, FALSE, 5000);
935	if (rv) {
936		return (-1);
937	}
938	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
939	    mpt->mpt_port_page2.Header.PageVersion,
940	    mpt->mpt_port_page2.Header.PageLength,
941	    mpt->mpt_port_page2.Header.PageNumber,
942	    mpt->mpt_port_page2.Header.PageType);
943
944	for (i = 0; i < 16; i++) {
945		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
946		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
947		if (rv) {
948			return (-1);
949		}
950		mpt_lprt(mpt, MPT_PRT_DEBUG,
951		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
952		    mpt->mpt_dev_page0[i].Header.PageVersion,
953		    mpt->mpt_dev_page0[i].Header.PageLength,
954		    mpt->mpt_dev_page0[i].Header.PageNumber,
955		    mpt->mpt_dev_page0[i].Header.PageType);
956
957		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
958		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
959		if (rv) {
960			return (-1);
961		}
962		mpt_lprt(mpt, MPT_PRT_DEBUG,
963		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
964		    mpt->mpt_dev_page1[i].Header.PageVersion,
965		    mpt->mpt_dev_page1[i].Header.PageLength,
966		    mpt->mpt_dev_page1[i].Header.PageNumber,
967		    mpt->mpt_dev_page1[i].Header.PageType);
968	}
969
970	/*
971	 * At this point, we don't *have* to fail. As long as we have
972	 * valid config header information, we can (barely) lurch
973	 * along.
974	 */
975
976	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
977	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
978	if (rv) {
979		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
980	} else {
981		mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
982		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
983		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
984		    mpt->mpt_port_page0.Capabilities,
985		    mpt->mpt_port_page0.PhysicalInterface);
986	}
987
988	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
989	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
990	if (rv) {
991		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
992	} else {
993		mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
994		mpt_lprt(mpt, MPT_PRT_DEBUG,
995		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
996		    mpt->mpt_port_page1.Configuration,
997		    mpt->mpt_port_page1.OnBusTimerValue);
998	}
999
1000	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
1001	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
1002	if (rv) {
1003		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
1004	} else {
1005		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1006		    "Port Page 2: Flags %x Settings %x\n",
1007		    mpt->mpt_port_page2.PortFlags,
1008		    mpt->mpt_port_page2.PortSettings);
1009		mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
1010		for (i = 0; i < 16; i++) {
1011			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1012		  	    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
1013			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
1014			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
1015			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
1016		}
1017	}
1018
1019	for (i = 0; i < 16; i++) {
1020		rv = mpt_read_cur_cfg_page(mpt, i,
1021		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
1022		    FALSE, 5000);
1023		if (rv) {
1024			mpt_prt(mpt,
1025			    "cannot read SPI Target %d Device Page 0\n", i);
1026			continue;
1027		}
1028		mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
1029		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1030		    "target %d page 0: Negotiated Params %x Information %x\n",
1031		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
1032		    mpt->mpt_dev_page0[i].Information);
1033
1034		rv = mpt_read_cur_cfg_page(mpt, i,
1035		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
1036		    FALSE, 5000);
1037		if (rv) {
1038			mpt_prt(mpt,
1039			    "cannot read SPI Target %d Device Page 1\n", i);
1040			continue;
1041		}
1042		mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
1043		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1044		    "target %d page 1: Requested Params %x Configuration %x\n",
1045		    i, mpt->mpt_dev_page1[i].RequestedParameters,
1046		    mpt->mpt_dev_page1[i].Configuration);
1047	}
1048	return (0);
1049}
1050
1051/*
1052 * Validate SPI configuration information.
1053 *
1054 * In particular, validate SPI Port Page 1.
1055 */
1056static int
1057mpt_set_initial_config_spi(struct mpt_softc *mpt)
1058{
1059	int error, i, pp1val;
1060
1061	mpt->mpt_disc_enable = 0xff;
1062	mpt->mpt_tag_enable = 0;
1063
1064	pp1val = ((1 << mpt->mpt_ini_id) <<
1065	    MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id;
1066	if (mpt->mpt_port_page1.Configuration != pp1val) {
1067		CONFIG_PAGE_SCSI_PORT_1 tmp;
1068
1069		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
1070		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
1071		tmp = mpt->mpt_port_page1;
1072		tmp.Configuration = pp1val;
1073		host2mpt_config_page_scsi_port_1(&tmp);
1074		error = mpt_write_cur_cfg_page(mpt, 0,
1075		    &tmp.Header, sizeof(tmp), FALSE, 5000);
1076		if (error) {
1077			return (-1);
1078		}
1079		error = mpt_read_cur_cfg_page(mpt, 0,
1080		    &tmp.Header, sizeof(tmp), FALSE, 5000);
1081		if (error) {
1082			return (-1);
1083		}
1084		mpt2host_config_page_scsi_port_1(&tmp);
1085		if (tmp.Configuration != pp1val) {
1086			mpt_prt(mpt,
1087			    "failed to reset SPI Port Page 1 Config value\n");
1088			return (-1);
1089		}
1090		mpt->mpt_port_page1 = tmp;
1091	}
1092
1093	/*
1094	 * The purpose of this exercise is to get
1095	 * all targets back to async/narrow.
1096	 *
1097	 * We skip this step if the BIOS has already negotiated
1098	 * speeds with the targets.
1099	 */
1100	i = mpt->mpt_port_page2.PortSettings &
1101	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
1102	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
1103		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1104		    "honoring BIOS transfer negotiations\n");
1105	} else {
1106		for (i = 0; i < 16; i++) {
1107			mpt->mpt_dev_page1[i].RequestedParameters = 0;
1108			mpt->mpt_dev_page1[i].Configuration = 0;
1109			(void) mpt_update_spi_config(mpt, i);
1110		}
1111	}
1112	return (0);
1113}
1114
1115static int
1116mpt_cam_enable(struct mpt_softc *mpt)
1117{
1118	int error;
1119
1120	MPT_LOCK(mpt);
1121
1122	error = EIO;
1123	if (mpt->is_fc) {
1124		if (mpt_read_config_info_fc(mpt)) {
1125			goto out;
1126		}
1127		if (mpt_set_initial_config_fc(mpt)) {
1128			goto out;
1129		}
1130	} else if (mpt->is_sas) {
1131		if (mpt_read_config_info_sas(mpt)) {
1132			goto out;
1133		}
1134		if (mpt_set_initial_config_sas(mpt)) {
1135			goto out;
1136		}
1137	} else if (mpt->is_spi) {
1138		if (mpt_read_config_info_spi(mpt)) {
1139			goto out;
1140		}
1141		if (mpt_set_initial_config_spi(mpt)) {
1142			goto out;
1143		}
1144	}
1145	error = 0;
1146
1147out:
1148	MPT_UNLOCK(mpt);
1149	return (error);
1150}
1151
1152static void
1153mpt_cam_ready(struct mpt_softc *mpt)
1154{
1155
1156	/*
1157	 * If we're in target mode, hang out resources now
1158	 * so we don't cause the world to hang talking to us.
1159	 */
1160	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
1161		/*
1162		 * Try to add some target command resources
1163		 */
1164		MPT_LOCK(mpt);
1165		if (mpt_add_target_commands(mpt) == FALSE) {
1166			mpt_prt(mpt, "failed to add target commands\n");
1167		}
1168		MPT_UNLOCK(mpt);
1169	}
1170	mpt->ready = 1;
1171}
1172
1173static void
1174mpt_cam_detach(struct mpt_softc *mpt)
1175{
1176	mpt_handler_t handler;
1177
1178	MPT_LOCK(mpt);
1179	mpt->ready = 0;
1180	mpt_terminate_recovery_thread(mpt);
1181
1182	handler.reply_handler = mpt_scsi_reply_handler;
1183	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1184			       scsi_io_handler_id);
1185	handler.reply_handler = mpt_scsi_tmf_reply_handler;
1186	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1187			       scsi_tmf_handler_id);
1188	handler.reply_handler = mpt_fc_els_reply_handler;
1189	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1190			       fc_els_handler_id);
1191	handler.reply_handler = mpt_scsi_tgt_reply_handler;
1192	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1193			       mpt->scsi_tgt_handler_id);
1194	handler.reply_handler = mpt_sata_pass_reply_handler;
1195	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1196			       sata_pass_handler_id);
1197
1198	if (mpt->tmf_req != NULL) {
1199		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
1200		mpt_free_request(mpt, mpt->tmf_req);
1201		mpt->tmf_req = NULL;
1202	}
1203	if (mpt->sas_portinfo != NULL) {
1204		free(mpt->sas_portinfo, M_DEVBUF);
1205		mpt->sas_portinfo = NULL;
1206	}
1207
1208	if (mpt->sim != NULL) {
1209		xpt_free_path(mpt->path);
1210		xpt_bus_deregister(cam_sim_path(mpt->sim));
1211		cam_sim_free(mpt->sim, TRUE);
1212		mpt->sim = NULL;
1213	}
1214
1215	if (mpt->phydisk_sim != NULL) {
1216		xpt_free_path(mpt->phydisk_path);
1217		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
1218		cam_sim_free(mpt->phydisk_sim, TRUE);
1219		mpt->phydisk_sim = NULL;
1220	}
1221	MPT_UNLOCK(mpt);
1222}
1223
1224/* This routine is used after a system crash to dump core onto the swap device.
1225 */
1226static void
1227mpt_poll(struct cam_sim *sim)
1228{
1229	struct mpt_softc *mpt;
1230
1231	mpt = (struct mpt_softc *)cam_sim_softc(sim);
1232	mpt_intr(mpt);
1233}
1234
1235/*
1236 * Watchdog timeout routine for SCSI requests.
1237 */
1238static void
1239mpt_timeout(void *arg)
1240{
1241	union ccb	 *ccb;
1242	struct mpt_softc *mpt;
1243	request_t	 *req;
1244
1245	ccb = (union ccb *)arg;
1246	mpt = ccb->ccb_h.ccb_mpt_ptr;
1247
1248	MPT_LOCK_ASSERT(mpt);
1249	req = ccb->ccb_h.ccb_req_ptr;
1250	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
1251	    req->serno, ccb, req->ccb);
1252/* XXX: WHAT ARE WE TRYING TO DO HERE? */
1253	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
1254		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
1255		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
1256		req->state |= REQ_STATE_TIMEDOUT;
1257		mpt_wakeup_recovery_thread(mpt);
1258	}
1259}
1260
1261/*
1262 * Callback routine from bus_dmamap_load_ccb(9) or, in simple cases, called
1263 * directly.
1264 *
1265 * Takes a list of physical segments and builds the SGL for SCSI IO command
1266 * and forwards the commard to the IOC after one last check that CAM has not
1267 * aborted the transaction.
1268 */
1269static void
1270mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1271{
1272	request_t *req, *trq;
1273	char *mpt_off;
1274	union ccb *ccb;
1275	struct mpt_softc *mpt;
1276	bus_addr_t chain_list_addr;
1277	int first_lim, seg, this_seg_lim;
1278	uint32_t addr, cur_off, flags, nxt_off, tf;
1279	void *sglp = NULL;
1280	MSG_REQUEST_HEADER *hdrp;
1281	SGE_SIMPLE64 *se;
1282	SGE_CHAIN64 *ce;
1283	int istgt = 0;
1284
1285	req = (request_t *)arg;
1286	ccb = req->ccb;
1287
1288	mpt = ccb->ccb_h.ccb_mpt_ptr;
1289	req = ccb->ccb_h.ccb_req_ptr;
1290
1291	hdrp = req->req_vbuf;
1292	mpt_off = req->req_vbuf;
1293
1294	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1295		error = EFBIG;
1296	}
1297
1298	if (error == 0) {
1299		switch (hdrp->Function) {
1300		case MPI_FUNCTION_SCSI_IO_REQUEST:
1301		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1302			istgt = 0;
1303			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1304			break;
1305		case MPI_FUNCTION_TARGET_ASSIST:
1306			istgt = 1;
1307			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1308			break;
1309		default:
1310			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
1311			    hdrp->Function);
1312			error = EINVAL;
1313			break;
1314		}
1315	}
1316
1317	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1318		error = EFBIG;
1319		mpt_prt(mpt, "segment count %d too large (max %u)\n",
1320		    nseg, mpt->max_seg_cnt);
1321	}
1322
1323bad:
1324	if (error != 0) {
1325		if (error != EFBIG && error != ENOMEM) {
1326			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
1327		}
1328		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1329			cam_status status;
1330			mpt_freeze_ccb(ccb);
1331			if (error == EFBIG) {
1332				status = CAM_REQ_TOO_BIG;
1333			} else if (error == ENOMEM) {
1334				if (mpt->outofbeer == 0) {
1335					mpt->outofbeer = 1;
1336					xpt_freeze_simq(mpt->sim, 1);
1337					mpt_lprt(mpt, MPT_PRT_DEBUG,
1338					    "FREEZEQ\n");
1339				}
1340				status = CAM_REQUEUE_REQ;
1341			} else {
1342				status = CAM_REQ_CMP_ERR;
1343			}
1344			mpt_set_ccb_status(ccb, status);
1345		}
1346		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1347			request_t *cmd_req =
1348				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1349			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1350			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1351			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1352		}
1353		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1354		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1355		xpt_done(ccb);
1356		mpt_free_request(mpt, req);
1357		return;
1358	}
1359
1360	/*
1361	 * No data to transfer?
1362	 * Just make a single simple SGL with zero length.
1363	 */
1364
1365	if (mpt->verbose >= MPT_PRT_DEBUG) {
1366		int tidx = ((char *)sglp) - mpt_off;
1367		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1368	}
1369
1370	if (nseg == 0) {
1371		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1372		MPI_pSGE_SET_FLAGS(se1,
1373		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1374		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1375		se1->FlagsLength = htole32(se1->FlagsLength);
1376		goto out;
1377	}
1378
1379
1380	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1381	if (istgt == 0) {
1382		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1383			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1384		}
1385	} else {
1386		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1387			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1388		}
1389	}
1390
1391	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1392		bus_dmasync_op_t op;
1393		if (istgt == 0) {
1394			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1395				op = BUS_DMASYNC_PREREAD;
1396			} else {
1397				op = BUS_DMASYNC_PREWRITE;
1398			}
1399		} else {
1400			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1401				op = BUS_DMASYNC_PREWRITE;
1402			} else {
1403				op = BUS_DMASYNC_PREREAD;
1404			}
1405		}
1406		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1407	}
1408
1409	/*
1410	 * Okay, fill in what we can at the end of the command frame.
1411	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1412	 * the command frame.
1413	 *
1414	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1415	 * SIMPLE64 pointers and start doing CHAIN64 entries after
1416	 * that.
1417	 */
1418
1419	if (nseg < MPT_NSGL_FIRST(mpt)) {
1420		first_lim = nseg;
1421	} else {
1422		/*
1423		 * Leave room for CHAIN element
1424		 */
1425		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1426	}
1427
1428	se = (SGE_SIMPLE64 *) sglp;
1429	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1430		tf = flags;
1431		memset(se, 0, sizeof (*se));
1432		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1433		se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
1434		if (sizeof(bus_addr_t) > 4) {
1435			addr = ((uint64_t)dm_segs->ds_addr) >> 32;
1436			/* SAS1078 36GB limitation WAR */
1437			if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr +
1438			    MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) {
1439				addr |= (1U << 31);
1440				tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
1441			}
1442			se->Address.High = htole32(addr);
1443		}
1444		if (seg == first_lim - 1) {
1445			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1446		}
1447		if (seg == nseg - 1) {
1448			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1449				MPI_SGE_FLAGS_END_OF_BUFFER;
1450		}
1451		MPI_pSGE_SET_FLAGS(se, tf);
1452		se->FlagsLength = htole32(se->FlagsLength);
1453	}
1454
1455	if (seg == nseg) {
1456		goto out;
1457	}
1458
1459	/*
1460	 * Tell the IOC where to find the first chain element.
1461	 */
1462	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1463	nxt_off = MPT_RQSL(mpt);
1464	trq = req;
1465
1466	/*
1467	 * Make up the rest of the data segments out of a chain element
1468	 * (contained in the current request frame) which points to
1469	 * SIMPLE64 elements in the next request frame, possibly ending
1470	 * with *another* chain element (if there's more).
1471	 */
1472	while (seg < nseg) {
1473		/*
1474		 * Point to the chain descriptor. Note that the chain
1475		 * descriptor is at the end of the *previous* list (whether
1476		 * chain or simple).
1477		 */
1478		ce = (SGE_CHAIN64 *) se;
1479
1480		/*
1481		 * Before we change our current pointer, make  sure we won't
1482		 * overflow the request area with this frame. Note that we
1483		 * test against 'greater than' here as it's okay in this case
1484		 * to have next offset be just outside the request area.
1485		 */
1486		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1487			nxt_off = MPT_REQUEST_AREA;
1488			goto next_chain;
1489		}
1490
1491		/*
1492		 * Set our SGE element pointer to the beginning of the chain
1493		 * list and update our next chain list offset.
1494		 */
1495		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
1496		cur_off = nxt_off;
1497		nxt_off += MPT_RQSL(mpt);
1498
1499		/*
1500		 * Now initialize the chain descriptor.
1501		 */
1502		memset(ce, 0, sizeof (*ce));
1503
1504		/*
1505		 * Get the physical address of the chain list.
1506		 */
1507		chain_list_addr = trq->req_pbuf;
1508		chain_list_addr += cur_off;
1509		if (sizeof (bus_addr_t) > 4) {
1510			ce->Address.High =
1511			    htole32(((uint64_t)chain_list_addr) >> 32);
1512		}
1513		ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
1514		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
1515			    MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1516
1517		/*
1518		 * If we have more than a frame's worth of segments left,
1519		 * set up the chain list to have the last element be another
1520		 * chain descriptor.
1521		 */
1522		if ((nseg - seg) > MPT_NSGL(mpt)) {
1523			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1524			/*
1525			 * The length of the chain is the length in bytes of the
1526			 * number of segments plus the next chain element.
1527			 *
1528			 * The next chain descriptor offset is the length,
1529			 * in words, of the number of segments.
1530			 */
1531			ce->Length = (this_seg_lim - seg) *
1532			    sizeof (SGE_SIMPLE64);
1533			ce->NextChainOffset = ce->Length >> 2;
1534			ce->Length += sizeof (SGE_CHAIN64);
1535		} else {
1536			this_seg_lim = nseg;
1537			ce->Length = (this_seg_lim - seg) *
1538			    sizeof (SGE_SIMPLE64);
1539		}
1540		ce->Length = htole16(ce->Length);
1541
1542		/*
1543		 * Fill in the chain list SGE elements with our segment data.
1544		 *
1545		 * If we're the last element in this chain list, set the last
1546		 * element flag. If we're the completely last element period,
1547		 * set the end of list and end of buffer flags.
1548		 */
1549		while (seg < this_seg_lim) {
1550			tf = flags;
1551			memset(se, 0, sizeof (*se));
1552			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1553			se->Address.Low = htole32(dm_segs->ds_addr &
1554			    0xffffffff);
1555			if (sizeof (bus_addr_t) > 4) {
1556				addr = ((uint64_t)dm_segs->ds_addr) >> 32;
1557				/* SAS1078 36GB limitation WAR */
1558				if (mpt->is_1078 &&
1559				    (((uint64_t)dm_segs->ds_addr +
1560				    MPI_SGE_LENGTH(se->FlagsLength)) >>
1561				    32) == 9) {
1562					addr |= (1U << 31);
1563					tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
1564				}
1565				se->Address.High = htole32(addr);
1566			}
1567			if (seg == this_seg_lim - 1) {
1568				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1569			}
1570			if (seg == nseg - 1) {
1571				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1572					MPI_SGE_FLAGS_END_OF_BUFFER;
1573			}
1574			MPI_pSGE_SET_FLAGS(se, tf);
1575			se->FlagsLength = htole32(se->FlagsLength);
1576			se++;
1577			seg++;
1578			dm_segs++;
1579		}
1580
1581    next_chain:
1582		/*
1583		 * If we have more segments to do and we've used up all of
1584		 * the space in a request area, go allocate another one
1585		 * and chain to that.
1586		 */
1587		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1588			request_t *nrq;
1589
1590			nrq = mpt_get_request(mpt, FALSE);
1591
1592			if (nrq == NULL) {
1593				error = ENOMEM;
1594				goto bad;
1595			}
1596
1597			/*
1598			 * Append the new request area on the tail of our list.
1599			 */
1600			if ((trq = req->chain) == NULL) {
1601				req->chain = nrq;
1602			} else {
1603				while (trq->chain != NULL) {
1604					trq = trq->chain;
1605				}
1606				trq->chain = nrq;
1607			}
1608			trq = nrq;
1609			mpt_off = trq->req_vbuf;
1610			if (mpt->verbose >= MPT_PRT_DEBUG) {
1611				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1612			}
1613			nxt_off = 0;
1614		}
1615	}
1616out:
1617
1618	/*
1619	 * Last time we need to check if this CCB needs to be aborted.
1620	 */
1621	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1622		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1623			request_t *cmd_req =
1624				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1625			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1626			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1627			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1628		}
1629		mpt_prt(mpt,
1630		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
1631		    ccb->ccb_h.status & CAM_STATUS_MASK);
1632		if (nseg) {
1633			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1634		}
1635		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1636		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1637		xpt_done(ccb);
1638		mpt_free_request(mpt, req);
1639		return;
1640	}
1641
1642	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1643	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1644		mpt_req_timeout(req, SBT_1MS * ccb->ccb_h.timeout,
1645		    mpt_timeout, ccb);
1646	}
1647	if (mpt->verbose > MPT_PRT_DEBUG) {
1648		int nc = 0;
1649		mpt_print_request(req->req_vbuf);
1650		for (trq = req->chain; trq; trq = trq->chain) {
1651			printf("  Additional Chain Area %d\n", nc++);
1652			mpt_dump_sgl(trq->req_vbuf, 0);
1653		}
1654	}
1655
1656	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1657		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1658		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1659#ifdef	WE_TRUST_AUTO_GOOD_STATUS
1660		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1661		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1662			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1663		} else {
1664			tgt->state = TGT_STATE_MOVING_DATA;
1665		}
1666#else
1667		tgt->state = TGT_STATE_MOVING_DATA;
1668#endif
1669	}
1670	mpt_send_cmd(mpt, req);
1671}
1672
1673static void
1674mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1675{
1676	request_t *req, *trq;
1677	char *mpt_off;
1678	union ccb *ccb;
1679	struct mpt_softc *mpt;
1680	int seg, first_lim;
1681	uint32_t flags, nxt_off;
1682	void *sglp = NULL;
1683	MSG_REQUEST_HEADER *hdrp;
1684	SGE_SIMPLE32 *se;
1685	SGE_CHAIN32 *ce;
1686	int istgt = 0;
1687
1688	req = (request_t *)arg;
1689	ccb = req->ccb;
1690
1691	mpt = ccb->ccb_h.ccb_mpt_ptr;
1692	req = ccb->ccb_h.ccb_req_ptr;
1693
1694	hdrp = req->req_vbuf;
1695	mpt_off = req->req_vbuf;
1696
1697	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1698		error = EFBIG;
1699	}
1700
1701	if (error == 0) {
1702		switch (hdrp->Function) {
1703		case MPI_FUNCTION_SCSI_IO_REQUEST:
1704		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1705			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1706			break;
1707		case MPI_FUNCTION_TARGET_ASSIST:
1708			istgt = 1;
1709			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1710			break;
1711		default:
1712			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
1713			    hdrp->Function);
1714			error = EINVAL;
1715			break;
1716		}
1717	}
1718
1719	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1720		error = EFBIG;
1721		mpt_prt(mpt, "segment count %d too large (max %u)\n",
1722		    nseg, mpt->max_seg_cnt);
1723	}
1724
1725bad:
1726	if (error != 0) {
1727		if (error != EFBIG && error != ENOMEM) {
1728			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
1729		}
1730		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1731			cam_status status;
1732			mpt_freeze_ccb(ccb);
1733			if (error == EFBIG) {
1734				status = CAM_REQ_TOO_BIG;
1735			} else if (error == ENOMEM) {
1736				if (mpt->outofbeer == 0) {
1737					mpt->outofbeer = 1;
1738					xpt_freeze_simq(mpt->sim, 1);
1739					mpt_lprt(mpt, MPT_PRT_DEBUG,
1740					    "FREEZEQ\n");
1741				}
1742				status = CAM_REQUEUE_REQ;
1743			} else {
1744				status = CAM_REQ_CMP_ERR;
1745			}
1746			mpt_set_ccb_status(ccb, status);
1747		}
1748		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1749			request_t *cmd_req =
1750				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1751			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1752			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1753			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1754		}
1755		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1756		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1757		xpt_done(ccb);
1758		mpt_free_request(mpt, req);
1759		return;
1760	}
1761
1762	/*
1763	 * No data to transfer?
1764	 * Just make a single simple SGL with zero length.
1765	 */
1766
1767	if (mpt->verbose >= MPT_PRT_DEBUG) {
1768		int tidx = ((char *)sglp) - mpt_off;
1769		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1770	}
1771
1772	if (nseg == 0) {
1773		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1774		MPI_pSGE_SET_FLAGS(se1,
1775		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1776		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1777		se1->FlagsLength = htole32(se1->FlagsLength);
1778		goto out;
1779	}
1780
1781
1782	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1783	if (istgt == 0) {
1784		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1785			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1786		}
1787	} else {
1788		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1789			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1790		}
1791	}
1792
1793	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1794		bus_dmasync_op_t op;
1795		if (istgt) {
1796			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1797				op = BUS_DMASYNC_PREREAD;
1798			} else {
1799				op = BUS_DMASYNC_PREWRITE;
1800			}
1801		} else {
1802			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1803				op = BUS_DMASYNC_PREWRITE;
1804			} else {
1805				op = BUS_DMASYNC_PREREAD;
1806			}
1807		}
1808		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1809	}
1810
1811	/*
1812	 * Okay, fill in what we can at the end of the command frame.
1813	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1814	 * the command frame.
1815	 *
1816	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1817	 * SIMPLE32 pointers and start doing CHAIN32 entries after
1818	 * that.
1819	 */
1820
1821	if (nseg < MPT_NSGL_FIRST(mpt)) {
1822		first_lim = nseg;
1823	} else {
1824		/*
1825		 * Leave room for CHAIN element
1826		 */
1827		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1828	}
1829
1830	se = (SGE_SIMPLE32 *) sglp;
1831	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1832		uint32_t tf;
1833
1834		memset(se, 0,sizeof (*se));
1835		se->Address = htole32(dm_segs->ds_addr);
1836
1837		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1838		tf = flags;
1839		if (seg == first_lim - 1) {
1840			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1841		}
1842		if (seg == nseg - 1) {
1843			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1844				MPI_SGE_FLAGS_END_OF_BUFFER;
1845		}
1846		MPI_pSGE_SET_FLAGS(se, tf);
1847		se->FlagsLength = htole32(se->FlagsLength);
1848	}
1849
1850	if (seg == nseg) {
1851		goto out;
1852	}
1853
1854	/*
1855	 * Tell the IOC where to find the first chain element.
1856	 */
1857	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1858	nxt_off = MPT_RQSL(mpt);
1859	trq = req;
1860
1861	/*
1862	 * Make up the rest of the data segments out of a chain element
1863	 * (contained in the current request frame) which points to
1864	 * SIMPLE32 elements in the next request frame, possibly ending
1865	 * with *another* chain element (if there's more).
1866	 */
1867	while (seg < nseg) {
1868		int this_seg_lim;
1869		uint32_t tf, cur_off;
1870		bus_addr_t chain_list_addr;
1871
1872		/*
1873		 * Point to the chain descriptor. Note that the chain
1874		 * descriptor is at the end of the *previous* list (whether
1875		 * chain or simple).
1876		 */
1877		ce = (SGE_CHAIN32 *) se;
1878
1879		/*
1880		 * Before we change our current pointer, make  sure we won't
1881		 * overflow the request area with this frame. Note that we
1882		 * test against 'greater than' here as it's okay in this case
1883		 * to have next offset be just outside the request area.
1884		 */
1885		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1886			nxt_off = MPT_REQUEST_AREA;
1887			goto next_chain;
1888		}
1889
1890		/*
1891		 * Set our SGE element pointer to the beginning of the chain
1892		 * list and update our next chain list offset.
1893		 */
1894		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1895		cur_off = nxt_off;
1896		nxt_off += MPT_RQSL(mpt);
1897
1898		/*
1899		 * Now initialize the chain descriptor.
1900		 */
1901		memset(ce, 0, sizeof (*ce));
1902
1903		/*
1904		 * Get the physical address of the chain list.
1905		 */
1906		chain_list_addr = trq->req_pbuf;
1907		chain_list_addr += cur_off;
1908
1909
1910
1911		ce->Address = htole32(chain_list_addr);
1912		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1913
1914
1915		/*
1916		 * If we have more than a frame's worth of segments left,
1917		 * set up the chain list to have the last element be another
1918		 * chain descriptor.
1919		 */
1920		if ((nseg - seg) > MPT_NSGL(mpt)) {
1921			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1922			/*
1923			 * The length of the chain is the length in bytes of the
1924			 * number of segments plus the next chain element.
1925			 *
1926			 * The next chain descriptor offset is the length,
1927			 * in words, of the number of segments.
1928			 */
1929			ce->Length = (this_seg_lim - seg) *
1930			    sizeof (SGE_SIMPLE32);
1931			ce->NextChainOffset = ce->Length >> 2;
1932			ce->Length += sizeof (SGE_CHAIN32);
1933		} else {
1934			this_seg_lim = nseg;
1935			ce->Length = (this_seg_lim - seg) *
1936			    sizeof (SGE_SIMPLE32);
1937		}
1938		ce->Length = htole16(ce->Length);
1939
1940		/*
1941		 * Fill in the chain list SGE elements with our segment data.
1942		 *
1943		 * If we're the last element in this chain list, set the last
1944		 * element flag. If we're the completely last element period,
1945		 * set the end of list and end of buffer flags.
1946		 */
1947		while (seg < this_seg_lim) {
1948			memset(se, 0, sizeof (*se));
1949			se->Address = htole32(dm_segs->ds_addr);
1950
1951			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1952			tf = flags;
1953			if (seg == this_seg_lim - 1) {
1954				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1955			}
1956			if (seg == nseg - 1) {
1957				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1958					MPI_SGE_FLAGS_END_OF_BUFFER;
1959			}
1960			MPI_pSGE_SET_FLAGS(se, tf);
1961			se->FlagsLength = htole32(se->FlagsLength);
1962			se++;
1963			seg++;
1964			dm_segs++;
1965		}
1966
1967    next_chain:
1968		/*
1969		 * If we have more segments to do and we've used up all of
1970		 * the space in a request area, go allocate another one
1971		 * and chain to that.
1972		 */
1973		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1974			request_t *nrq;
1975
1976			nrq = mpt_get_request(mpt, FALSE);
1977
1978			if (nrq == NULL) {
1979				error = ENOMEM;
1980				goto bad;
1981			}
1982
1983			/*
1984			 * Append the new request area on the tail of our list.
1985			 */
1986			if ((trq = req->chain) == NULL) {
1987				req->chain = nrq;
1988			} else {
1989				while (trq->chain != NULL) {
1990					trq = trq->chain;
1991				}
1992				trq->chain = nrq;
1993			}
1994			trq = nrq;
1995			mpt_off = trq->req_vbuf;
1996			if (mpt->verbose >= MPT_PRT_DEBUG) {
1997				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1998			}
1999			nxt_off = 0;
2000		}
2001	}
2002out:
2003
2004	/*
2005	 * Last time we need to check if this CCB needs to be aborted.
2006	 */
2007	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2008		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2009			request_t *cmd_req =
2010				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2011			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
2012			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
2013			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
2014		}
2015		mpt_prt(mpt,
2016		    "mpt_execute_req: I/O cancelled (status 0x%x)\n",
2017		    ccb->ccb_h.status & CAM_STATUS_MASK);
2018		if (nseg) {
2019			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2020		}
2021		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2022		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
2023		xpt_done(ccb);
2024		mpt_free_request(mpt, req);
2025		return;
2026	}
2027
2028	ccb->ccb_h.status |= CAM_SIM_QUEUED;
2029	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2030		mpt_req_timeout(req, SBT_1MS * ccb->ccb_h.timeout,
2031		    mpt_timeout, ccb);
2032	}
2033	if (mpt->verbose > MPT_PRT_DEBUG) {
2034		int nc = 0;
2035		mpt_print_request(req->req_vbuf);
2036		for (trq = req->chain; trq; trq = trq->chain) {
2037			printf("  Additional Chain Area %d\n", nc++);
2038			mpt_dump_sgl(trq->req_vbuf, 0);
2039		}
2040	}
2041
2042	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2043		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2044		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
2045#ifdef	WE_TRUST_AUTO_GOOD_STATUS
2046		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
2047		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
2048			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
2049		} else {
2050			tgt->state = TGT_STATE_MOVING_DATA;
2051		}
2052#else
2053		tgt->state = TGT_STATE_MOVING_DATA;
2054#endif
2055	}
2056	mpt_send_cmd(mpt, req);
2057}
2058
2059static void
2060mpt_start(struct cam_sim *sim, union ccb *ccb)
2061{
2062	request_t *req;
2063	struct mpt_softc *mpt;
2064	MSG_SCSI_IO_REQUEST *mpt_req;
2065	struct ccb_scsiio *csio = &ccb->csio;
2066	struct ccb_hdr *ccbh = &ccb->ccb_h;
2067	bus_dmamap_callback_t *cb;
2068	target_id_t tgt;
2069	int raid_passthru;
2070	int error;
2071
2072	/* Get the pointer for the physical addapter */
2073	mpt = ccb->ccb_h.ccb_mpt_ptr;
2074	raid_passthru = (sim == mpt->phydisk_sim);
2075
2076	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
2077		if (mpt->outofbeer == 0) {
2078			mpt->outofbeer = 1;
2079			xpt_freeze_simq(mpt->sim, 1);
2080			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
2081		}
2082		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2083		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
2084		xpt_done(ccb);
2085		return;
2086	}
2087#ifdef	INVARIANTS
2088	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
2089#endif
2090
2091	if (sizeof (bus_addr_t) > 4) {
2092		cb = mpt_execute_req_a64;
2093	} else {
2094		cb = mpt_execute_req;
2095	}
2096
2097	/*
2098	 * Link the ccb and the request structure so we can find
2099	 * the other knowing either the request or the ccb
2100	 */
2101	req->ccb = ccb;
2102	ccb->ccb_h.ccb_req_ptr = req;
2103
2104	/* Now we build the command for the IOC */
2105	mpt_req = req->req_vbuf;
2106	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
2107
2108	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
2109	if (raid_passthru) {
2110		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
2111		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2112			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2113			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2114			xpt_done(ccb);
2115			return;
2116		}
2117		mpt_req->Bus = 0;	/* we never set bus here */
2118	} else {
2119		tgt = ccb->ccb_h.target_id;
2120		mpt_req->Bus = 0;	/* XXX */
2121
2122	}
2123	mpt_req->SenseBufferLength =
2124		(csio->sense_len < MPT_SENSE_SIZE) ?
2125		 csio->sense_len : MPT_SENSE_SIZE;
2126
2127	/*
2128	 * We use the message context to find the request structure when we
2129	 * Get the command completion interrupt from the IOC.
2130	 */
2131	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
2132
2133	/* Which physical device to do the I/O on */
2134	mpt_req->TargetID = tgt;
2135
2136	be64enc(mpt_req->LUN, CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun));
2137
2138	/* Set the direction of the transfer */
2139	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2140		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
2141	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2142		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
2143	} else {
2144		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
2145	}
2146
2147	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
2148		switch(ccb->csio.tag_action) {
2149		case MSG_HEAD_OF_Q_TAG:
2150			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
2151			break;
2152		case MSG_ACA_TASK:
2153			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
2154			break;
2155		case MSG_ORDERED_Q_TAG:
2156			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
2157			break;
2158		case MSG_SIMPLE_Q_TAG:
2159		default:
2160			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2161			break;
2162		}
2163	} else {
2164		if (mpt->is_fc || mpt->is_sas) {
2165			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2166		} else {
2167			/* XXX No such thing for a target doing packetized. */
2168			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
2169		}
2170	}
2171
2172	if (mpt->is_spi) {
2173		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
2174			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
2175		}
2176	}
2177	mpt_req->Control = htole32(mpt_req->Control);
2178
2179	/* Copy the scsi command block into place */
2180	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2181		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
2182	} else {
2183		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
2184	}
2185
2186	mpt_req->CDBLength = csio->cdb_len;
2187	mpt_req->DataLength = htole32(csio->dxfer_len);
2188	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
2189
2190	/*
2191	 * Do a *short* print here if we're set to MPT_PRT_DEBUG
2192	 */
2193	if (mpt->verbose == MPT_PRT_DEBUG) {
2194		U32 df;
2195		mpt_prt(mpt, "mpt_start: %s op 0x%x ",
2196		    (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
2197		    "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
2198		df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
2199		if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
2200			mpt_prtc(mpt, "(%s %u byte%s ",
2201			    (df == MPI_SCSIIO_CONTROL_READ)?
2202			    "read" : "write",  csio->dxfer_len,
2203			    (csio->dxfer_len == 1)? ")" : "s)");
2204		}
2205		mpt_prtc(mpt, "tgt %u lun %jx req %p:%u\n", tgt,
2206		    (uintmax_t)ccb->ccb_h.target_lun, req, req->serno);
2207	}
2208
2209	error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb, cb,
2210	    req, 0);
2211	if (error == EINPROGRESS) {
2212		/*
2213		 * So as to maintain ordering, freeze the controller queue
2214		 * until our mapping is returned.
2215		 */
2216		xpt_freeze_simq(mpt->sim, 1);
2217		ccbh->status |= CAM_RELEASE_SIMQ;
2218	}
2219}
2220
2221static int
2222mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
2223    int sleep_ok)
2224{
2225	int   error;
2226	uint16_t status;
2227	uint8_t response;
2228
2229	error = mpt_scsi_send_tmf(mpt,
2230	    (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
2231	    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
2232	    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
2233	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
2234	    0,	/* XXX How do I get the channel ID? */
2235	    tgt != CAM_TARGET_WILDCARD ? tgt : 0,
2236	    lun != CAM_LUN_WILDCARD ? lun : 0,
2237	    0, sleep_ok);
2238
2239	if (error != 0) {
2240		/*
2241		 * mpt_scsi_send_tmf hard resets on failure, so no
2242		 * need to do so here.
2243		 */
2244		mpt_prt(mpt,
2245		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
2246		return (EIO);
2247	}
2248
2249	/* Wait for bus reset to be processed by the IOC. */
2250	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
2251	    REQ_STATE_DONE, sleep_ok, 5000);
2252
2253	status = le16toh(mpt->tmf_req->IOCStatus);
2254	response = mpt->tmf_req->ResponseCode;
2255	mpt->tmf_req->state = REQ_STATE_FREE;
2256
2257	if (error) {
2258		mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
2259		    "Resetting controller.\n");
2260		mpt_reset(mpt, TRUE);
2261		return (ETIMEDOUT);
2262	}
2263
2264	if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
2265		mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
2266		    "Resetting controller.\n", status);
2267		mpt_reset(mpt, TRUE);
2268		return (EIO);
2269	}
2270
2271	if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
2272	    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
2273		mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
2274		    "Resetting controller.\n", response);
2275		mpt_reset(mpt, TRUE);
2276		return (EIO);
2277	}
2278	return (0);
2279}
2280
2281static int
2282mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
2283{
2284	int r = 0;
2285	request_t *req;
2286	PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
2287
2288 	req = mpt_get_request(mpt, FALSE);
2289	if (req == NULL) {
2290		return (ENOMEM);
2291	}
2292	fc = req->req_vbuf;
2293	memset(fc, 0, sizeof(*fc));
2294	fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
2295	fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
2296	fc->MsgContext = htole32(req->index | fc_els_handler_id);
2297	mpt_send_cmd(mpt, req);
2298	if (dowait) {
2299		r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
2300		    REQ_STATE_DONE, FALSE, 60 * 1000);
2301		if (r == 0) {
2302			mpt_free_request(mpt, req);
2303		}
2304	}
2305	return (r);
2306}
2307
2308static int
2309mpt_cam_event(struct mpt_softc *mpt, request_t *req,
2310	      MSG_EVENT_NOTIFY_REPLY *msg)
2311{
2312	uint32_t data0, data1;
2313
2314	data0 = le32toh(msg->Data[0]);
2315	data1 = le32toh(msg->Data[1]);
2316	switch(msg->Event & 0xFF) {
2317	case MPI_EVENT_UNIT_ATTENTION:
2318		mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
2319		    (data0 >> 8) & 0xff, data0 & 0xff);
2320		break;
2321
2322	case MPI_EVENT_IOC_BUS_RESET:
2323		/* We generated a bus reset */
2324		mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
2325		    (data0 >> 8) & 0xff);
2326		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2327		break;
2328
2329	case MPI_EVENT_EXT_BUS_RESET:
2330		/* Someone else generated a bus reset */
2331		mpt_prt(mpt, "External Bus Reset Detected\n");
2332		/*
2333		 * These replies don't return EventData like the MPI
2334		 * spec says they do
2335		 */
2336		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2337		break;
2338
2339	case MPI_EVENT_RESCAN:
2340	{
2341		union ccb *ccb;
2342		uint32_t pathid;
2343		/*
2344		 * In general this means a device has been added to the loop.
2345		 */
2346		mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2347		if (mpt->ready == 0) {
2348			break;
2349		}
2350		if (mpt->phydisk_sim) {
2351			pathid = cam_sim_path(mpt->phydisk_sim);
2352		} else {
2353			pathid = cam_sim_path(mpt->sim);
2354		}
2355		/*
2356		 * Allocate a CCB, create a wildcard path for this bus,
2357		 * and schedule a rescan.
2358		 */
2359		ccb = xpt_alloc_ccb_nowait();
2360		if (ccb == NULL) {
2361			mpt_prt(mpt, "unable to alloc CCB for rescan\n");
2362			break;
2363		}
2364
2365		if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
2366		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2367			mpt_prt(mpt, "unable to create path for rescan\n");
2368			xpt_free_ccb(ccb);
2369			break;
2370		}
2371		xpt_rescan(ccb);
2372		break;
2373	}
2374
2375	case MPI_EVENT_LINK_STATUS_CHANGE:
2376		mpt_prt(mpt, "Port %d: LinkState: %s\n",
2377		    (data1 >> 8) & 0xff,
2378		    ((data0 & 0xff) == 0)?  "Failed" : "Active");
2379		break;
2380
2381	case MPI_EVENT_LOOP_STATE_CHANGE:
2382		switch ((data0 >> 16) & 0xff) {
2383		case 0x01:
2384			mpt_prt(mpt,
2385			    "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2386			    "(Loop Initialization)\n",
2387			    (data1 >> 8) & 0xff,
2388			    (data0 >> 8) & 0xff,
2389			    (data0     ) & 0xff);
2390			switch ((data0 >> 8) & 0xff) {
2391			case 0xF7:
2392				if ((data0 & 0xff) == 0xF7) {
2393					mpt_prt(mpt, "Device needs AL_PA\n");
2394				} else {
2395					mpt_prt(mpt, "Device %02x doesn't like "
2396					    "FC performance\n",
2397					    data0 & 0xFF);
2398				}
2399				break;
2400			case 0xF8:
2401				if ((data0 & 0xff) == 0xF7) {
2402					mpt_prt(mpt, "Device had loop failure "
2403					    "at its receiver prior to acquiring"
2404					    " AL_PA\n");
2405				} else {
2406					mpt_prt(mpt, "Device %02x detected loop"
2407					    " failure at its receiver\n",
2408					    data0 & 0xFF);
2409				}
2410				break;
2411			default:
2412				mpt_prt(mpt, "Device %02x requests that device "
2413				    "%02x reset itself\n",
2414				    data0 & 0xFF,
2415				    (data0 >> 8) & 0xFF);
2416				break;
2417			}
2418			break;
2419		case 0x02:
2420			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2421			    "LPE(%02x,%02x) (Loop Port Enable)\n",
2422			    (data1 >> 8) & 0xff, /* Port */
2423			    (data0 >>  8) & 0xff, /* Character 3 */
2424			    (data0      ) & 0xff  /* Character 4 */);
2425			break;
2426		case 0x03:
2427			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2428			    "LPB(%02x,%02x) (Loop Port Bypass)\n",
2429			    (data1 >> 8) & 0xff, /* Port */
2430			    (data0 >> 8) & 0xff, /* Character 3 */
2431			    (data0     ) & 0xff  /* Character 4 */);
2432			break;
2433		default:
2434			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2435			    "FC event (%02x %02x %02x)\n",
2436			    (data1 >> 8) & 0xff, /* Port */
2437			    (data0 >> 16) & 0xff, /* Event */
2438			    (data0 >>  8) & 0xff, /* Character 3 */
2439			    (data0      ) & 0xff  /* Character 4 */);
2440		}
2441		break;
2442
2443	case MPI_EVENT_LOGOUT:
2444		mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2445		    (data1 >> 8) & 0xff, data0);
2446		break;
2447	case MPI_EVENT_QUEUE_FULL:
2448	{
2449		struct cam_sim *sim;
2450		struct cam_path *tmppath;
2451		struct ccb_relsim crs;
2452		PTR_EVENT_DATA_QUEUE_FULL pqf;
2453		lun_id_t lun_id;
2454
2455		pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data;
2456		pqf->CurrentDepth = le16toh(pqf->CurrentDepth);
2457		if (bootverbose) {
2458		    mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x "
2459			"Depth %d\n",
2460			pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
2461		}
2462		if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2463		    pqf->TargetID) != 0) {
2464			sim = mpt->phydisk_sim;
2465		} else {
2466			sim = mpt->sim;
2467		}
2468		for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
2469			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2470			    pqf->TargetID, lun_id) != CAM_REQ_CMP) {
2471				mpt_prt(mpt, "unable to create a path to send "
2472				    "XPT_REL_SIMQ");
2473				break;
2474			}
2475			xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
2476			crs.ccb_h.func_code = XPT_REL_SIMQ;
2477			crs.ccb_h.flags = CAM_DEV_QFREEZE;
2478			crs.release_flags = RELSIM_ADJUST_OPENINGS;
2479			crs.openings = pqf->CurrentDepth - 1;
2480			xpt_action((union ccb *)&crs);
2481			if (crs.ccb_h.status != CAM_REQ_CMP) {
2482				mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
2483			}
2484			xpt_free_path(tmppath);
2485		}
2486		break;
2487	}
2488	case MPI_EVENT_IR_RESYNC_UPDATE:
2489		mpt_prt(mpt, "IR resync update %d completed\n",
2490		    (data0 >> 16) & 0xff);
2491		break;
2492	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2493	{
2494		union ccb *ccb;
2495		struct cam_sim *sim;
2496		struct cam_path *tmppath;
2497		PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc;
2498
2499		psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data;
2500		if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2501		    psdsc->TargetID) != 0)
2502			sim = mpt->phydisk_sim;
2503		else
2504			sim = mpt->sim;
2505		switch(psdsc->ReasonCode) {
2506		case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
2507			ccb = xpt_alloc_ccb_nowait();
2508			if (ccb == NULL) {
2509				mpt_prt(mpt,
2510				    "unable to alloc CCB for rescan\n");
2511				break;
2512			}
2513			if (xpt_create_path(&ccb->ccb_h.path, NULL,
2514			    cam_sim_path(sim), psdsc->TargetID,
2515			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2516				mpt_prt(mpt,
2517				    "unable to create path for rescan\n");
2518				xpt_free_ccb(ccb);
2519				break;
2520			}
2521			xpt_rescan(ccb);
2522			break;
2523		case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
2524			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2525			    psdsc->TargetID, CAM_LUN_WILDCARD) !=
2526			    CAM_REQ_CMP) {
2527				mpt_prt(mpt,
2528				    "unable to create path for async event");
2529				break;
2530			}
2531			xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2532			xpt_free_path(tmppath);
2533			break;
2534		case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET:
2535		case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL:
2536		case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
2537			break;
2538		default:
2539			mpt_lprt(mpt, MPT_PRT_WARN,
2540			    "SAS device status change: Bus: 0x%02x TargetID: "
2541			    "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus,
2542			    psdsc->TargetID, psdsc->ReasonCode);
2543			break;
2544		}
2545		break;
2546	}
2547	case MPI_EVENT_SAS_DISCOVERY_ERROR:
2548	{
2549		PTR_EVENT_DATA_DISCOVERY_ERROR pde;
2550
2551		pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data;
2552		pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus);
2553		mpt_lprt(mpt, MPT_PRT_WARN,
2554		    "SAS discovery error: Port: 0x%02x Status: 0x%08x\n",
2555		    pde->Port, pde->DiscoveryStatus);
2556		break;
2557	}
2558	case MPI_EVENT_EVENT_CHANGE:
2559	case MPI_EVENT_INTEGRATED_RAID:
2560	case MPI_EVENT_IR2:
2561	case MPI_EVENT_LOG_ENTRY_ADDED:
2562	case MPI_EVENT_SAS_DISCOVERY:
2563	case MPI_EVENT_SAS_PHY_LINK_STATUS:
2564	case MPI_EVENT_SAS_SES:
2565		break;
2566	default:
2567		mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
2568		    msg->Event & 0xFF);
2569		return (0);
2570	}
2571	return (1);
2572}
2573
2574/*
2575 * Reply path for all SCSI I/O requests, called from our
2576 * interrupt handler by extracting our handler index from
2577 * the MsgContext field of the reply from the IOC.
2578 *
2579 * This routine is optimized for the common case of a
2580 * completion without error.  All exception handling is
2581 * offloaded to non-inlined helper routines to minimize
2582 * cache footprint.
2583 */
2584static int
2585mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2586    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2587{
2588	MSG_SCSI_IO_REQUEST *scsi_req;
2589	union ccb *ccb;
2590
2591	if (req->state == REQ_STATE_FREE) {
2592		mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2593		return (TRUE);
2594	}
2595
2596	scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2597	ccb = req->ccb;
2598	if (ccb == NULL) {
2599		mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2600		    req, req->serno);
2601		return (TRUE);
2602	}
2603
2604	mpt_req_untimeout(req, mpt_timeout, ccb);
2605	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2606
2607	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2608		bus_dmasync_op_t op;
2609
2610		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2611			op = BUS_DMASYNC_POSTREAD;
2612		else
2613			op = BUS_DMASYNC_POSTWRITE;
2614		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2615		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2616	}
2617
2618	if (reply_frame == NULL) {
2619		/*
2620		 * Context only reply, completion without error status.
2621		 */
2622		ccb->csio.resid = 0;
2623		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2624		ccb->csio.scsi_status = SCSI_STATUS_OK;
2625	} else {
2626		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2627	}
2628
2629	if (mpt->outofbeer) {
2630		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2631		mpt->outofbeer = 0;
2632		mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2633	}
2634	if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2635		struct scsi_inquiry_data *iq =
2636		    (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2637		if (scsi_req->Function ==
2638		    MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2639			/*
2640			 * Fake out the device type so that only the
2641			 * pass-thru device will attach.
2642			 */
2643			iq->device &= ~0x1F;
2644			iq->device |= T_NODEVICE;
2645		}
2646	}
2647	if (mpt->verbose == MPT_PRT_DEBUG) {
2648		mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2649		    req, req->serno);
2650	}
2651	KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
2652	xpt_done(ccb);
2653	if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2654		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2655	} else {
2656		mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2657		    req, req->serno);
2658		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2659	}
2660	KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2661	    ("CCB req needed wakeup"));
2662#ifdef	INVARIANTS
2663	mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2664#endif
2665	mpt_free_request(mpt, req);
2666	return (TRUE);
2667}
2668
2669static int
2670mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2671    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2672{
2673	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2674
2675	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2676#ifdef	INVARIANTS
2677	mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2678#endif
2679	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2680	/* Record IOC Status and Response Code of TMF for any waiters. */
2681	req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2682	req->ResponseCode = tmf_reply->ResponseCode;
2683
2684	mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
2685	    req, req->serno, le16toh(tmf_reply->IOCStatus));
2686	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2687	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2688		req->state |= REQ_STATE_DONE;
2689		wakeup(req);
2690	} else {
2691		mpt->tmf_req->state = REQ_STATE_FREE;
2692	}
2693	return (TRUE);
2694}
2695
2696/*
2697 * XXX: Move to definitions file
2698 */
2699#define	ELS	0x22
2700#define	FC4LS	0x32
2701#define	ABTS	0x81
2702#define	BA_ACC	0x84
2703
2704#define	LS_RJT	0x01
2705#define	LS_ACC	0x02
2706#define	PLOGI	0x03
2707#define	LOGO	0x05
2708#define SRR	0x14
2709#define PRLI	0x20
2710#define PRLO	0x21
2711#define ADISC	0x52
2712#define RSCN	0x61
2713
2714static void
2715mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2716    PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2717{
2718	uint32_t fl;
2719	MSG_LINK_SERVICE_RSP_REQUEST tmp;
2720	PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2721
2722	/*
2723	 * We are going to reuse the ELS request to send this response back.
2724	 */
2725	rsp = &tmp;
2726	memset(rsp, 0, sizeof(*rsp));
2727
2728#ifdef	USE_IMMEDIATE_LINK_DATA
2729	/*
2730	 * Apparently the IMMEDIATE stuff doesn't seem to work.
2731	 */
2732	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2733#endif
2734	rsp->RspLength = length;
2735	rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2736	rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2737
2738	/*
2739	 * Copy over information from the original reply frame to
2740	 * it's correct place in the response.
2741	 */
2742	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2743
2744	/*
2745	 * And now copy back the temporary area to the original frame.
2746	 */
2747	memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2748	rsp = req->req_vbuf;
2749
2750#ifdef	USE_IMMEDIATE_LINK_DATA
2751	memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2752#else
2753{
2754	PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2755	bus_addr_t paddr = req->req_pbuf;
2756	paddr += MPT_RQSL(mpt);
2757
2758	fl =
2759		MPI_SGE_FLAGS_HOST_TO_IOC	|
2760		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
2761		MPI_SGE_FLAGS_LAST_ELEMENT	|
2762		MPI_SGE_FLAGS_END_OF_LIST	|
2763		MPI_SGE_FLAGS_END_OF_BUFFER;
2764	fl <<= MPI_SGE_FLAGS_SHIFT;
2765	fl |= (length);
2766	se->FlagsLength = htole32(fl);
2767	se->Address = htole32((uint32_t) paddr);
2768}
2769#endif
2770
2771	/*
2772	 * Send it on...
2773	 */
2774	mpt_send_cmd(mpt, req);
2775}
2776
2777static int
2778mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2779    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2780{
2781	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2782	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2783	U8 rctl;
2784	U8 type;
2785	U8 cmd;
2786	U16 status = le16toh(reply_frame->IOCStatus);
2787	U32 *elsbuf;
2788	int ioindex;
2789	int do_refresh = TRUE;
2790
2791#ifdef	INVARIANTS
2792	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2793	    ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2794	    req, req->serno, rp->Function));
2795	if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2796		mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2797	} else {
2798		mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2799	}
2800#endif
2801	mpt_lprt(mpt, MPT_PRT_DEBUG,
2802	    "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2803	    req, req->serno, reply_frame, reply_frame->Function);
2804
2805	if  (status != MPI_IOCSTATUS_SUCCESS) {
2806		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2807		    status, reply_frame->Function);
2808		if (status == MPI_IOCSTATUS_INVALID_STATE) {
2809			/*
2810			 * XXX: to get around shutdown issue
2811			 */
2812			mpt->disabled = 1;
2813			return (TRUE);
2814		}
2815		return (TRUE);
2816	}
2817
2818	/*
2819	 * If the function of a link service response, we recycle the
2820	 * response to be a refresh for a new link service request.
2821	 *
2822	 * The request pointer is bogus in this case and we have to fetch
2823	 * it based upon the TransactionContext.
2824	 */
2825	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2826		/* Freddie Uncle Charlie Katie */
2827		/* We don't get the IOINDEX as part of the Link Svc Rsp */
2828		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2829			if (mpt->els_cmd_ptrs[ioindex] == req) {
2830				break;
2831			}
2832
2833		KASSERT(ioindex < mpt->els_cmds_allocated,
2834		    ("can't find my mommie!"));
2835
2836		/* remove from active list as we're going to re-post it */
2837		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2838		req->state &= ~REQ_STATE_QUEUED;
2839		req->state |= REQ_STATE_DONE;
2840		mpt_fc_post_els(mpt, req, ioindex);
2841		return (TRUE);
2842	}
2843
2844	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2845		/* remove from active list as we're done */
2846		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2847		req->state &= ~REQ_STATE_QUEUED;
2848		req->state |= REQ_STATE_DONE;
2849		if (req->state & REQ_STATE_TIMEDOUT) {
2850			mpt_lprt(mpt, MPT_PRT_DEBUG,
2851			    "Sync Primitive Send Completed After Timeout\n");
2852			mpt_free_request(mpt, req);
2853		} else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2854			mpt_lprt(mpt, MPT_PRT_DEBUG,
2855			    "Async Primitive Send Complete\n");
2856			mpt_free_request(mpt, req);
2857		} else {
2858			mpt_lprt(mpt, MPT_PRT_DEBUG,
2859			    "Sync Primitive Send Complete- Waking Waiter\n");
2860			wakeup(req);
2861		}
2862		return (TRUE);
2863	}
2864
2865	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2866		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2867		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2868		    rp->MsgLength, rp->MsgFlags);
2869		return (TRUE);
2870	}
2871
2872	if (rp->MsgLength <= 5) {
2873		/*
2874		 * This is just a ack of an original ELS buffer post
2875		 */
2876		mpt_lprt(mpt, MPT_PRT_DEBUG,
2877		    "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2878		return (TRUE);
2879	}
2880
2881
2882	rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2883	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2884
2885	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2886	cmd = be32toh(elsbuf[0]) >> 24;
2887
2888	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2889		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2890		return (TRUE);
2891	}
2892
2893	ioindex = le32toh(rp->TransactionContext);
2894	req = mpt->els_cmd_ptrs[ioindex];
2895
2896	if (rctl == ELS && type == 1) {
2897		switch (cmd) {
2898		case PRLI:
2899			/*
2900			 * Send back a PRLI ACC
2901			 */
2902			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2903			    le32toh(rp->Wwn.PortNameHigh),
2904			    le32toh(rp->Wwn.PortNameLow));
2905			elsbuf[0] = htobe32(0x02100014);
2906			elsbuf[1] |= htobe32(0x00000100);
2907			elsbuf[4] = htobe32(0x00000002);
2908			if (mpt->role & MPT_ROLE_TARGET)
2909				elsbuf[4] |= htobe32(0x00000010);
2910			if (mpt->role & MPT_ROLE_INITIATOR)
2911				elsbuf[4] |= htobe32(0x00000020);
2912			/* remove from active list as we're done */
2913			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2914			req->state &= ~REQ_STATE_QUEUED;
2915			req->state |= REQ_STATE_DONE;
2916			mpt_fc_els_send_response(mpt, req, rp, 20);
2917			do_refresh = FALSE;
2918			break;
2919		case PRLO:
2920			memset(elsbuf, 0, 5 * (sizeof (U32)));
2921			elsbuf[0] = htobe32(0x02100014);
2922			elsbuf[1] = htobe32(0x08000100);
2923			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2924			    le32toh(rp->Wwn.PortNameHigh),
2925			    le32toh(rp->Wwn.PortNameLow));
2926			/* remove from active list as we're done */
2927			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2928			req->state &= ~REQ_STATE_QUEUED;
2929			req->state |= REQ_STATE_DONE;
2930			mpt_fc_els_send_response(mpt, req, rp, 20);
2931			do_refresh = FALSE;
2932			break;
2933		default:
2934			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2935			break;
2936		}
2937	} else if (rctl == ABTS && type == 0) {
2938		uint16_t rx_id = le16toh(rp->Rxid);
2939		uint16_t ox_id = le16toh(rp->Oxid);
2940		mpt_tgt_state_t *tgt;
2941		request_t *tgt_req = NULL;
2942		union ccb *ccb;
2943		uint32_t ct_id;
2944
2945		mpt_prt(mpt,
2946		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2947		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2948		    le32toh(rp->Wwn.PortNameLow));
2949		if (rx_id >= mpt->mpt_max_tgtcmds) {
2950			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
2951		} else if (mpt->tgt_cmd_ptrs == NULL) {
2952			mpt_prt(mpt, "No TGT CMD PTRS\n");
2953		} else {
2954			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
2955		}
2956		if (tgt_req == NULL) {
2957			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
2958			goto skip;
2959		}
2960		tgt = MPT_TGT_STATE(mpt, tgt_req);
2961
2962		/* Check to make sure we have the correct command. */
2963		ct_id = GET_IO_INDEX(tgt->reply_desc);
2964		if (ct_id != rx_id) {
2965			mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2966			    "RX_ID received=0x%x, in cmd=0x%x\n", rx_id, ct_id);
2967			goto skip;
2968		}
2969		if (tgt->itag != ox_id) {
2970			mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2971			    "OX_ID received=0x%x, in cmd=0x%x\n", ox_id, tgt->itag);
2972			goto skip;
2973		}
2974
2975		if ((ccb = tgt->ccb) != NULL) {
2976			mpt_prt(mpt, "CCB (%p): lun %jx flags %x status %x\n",
2977			    ccb, (uintmax_t)ccb->ccb_h.target_lun,
2978			    ccb->ccb_h.flags, ccb->ccb_h.status);
2979		}
2980		mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
2981		    "%x nxfers %x\n", tgt->state, tgt->resid,
2982		    tgt->bytes_xfered, tgt->reply_desc, tgt->nxfers);
2983		if (mpt_abort_target_cmd(mpt, tgt_req))
2984			mpt_prt(mpt, "unable to start TargetAbort\n");
2985
2986skip:
2987		memset(elsbuf, 0, 5 * (sizeof (U32)));
2988		elsbuf[0] = htobe32(0);
2989		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
2990		elsbuf[2] = htobe32(0x000ffff);
2991		/*
2992		 * Dork with the reply frame so that the response to it
2993		 * will be correct.
2994		 */
2995		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
2996		/* remove from active list as we're done */
2997		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2998		req->state &= ~REQ_STATE_QUEUED;
2999		req->state |= REQ_STATE_DONE;
3000		mpt_fc_els_send_response(mpt, req, rp, 12);
3001		do_refresh = FALSE;
3002	} else {
3003		mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
3004	}
3005	if (do_refresh == TRUE) {
3006		/* remove from active list as we're done */
3007		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3008		req->state &= ~REQ_STATE_QUEUED;
3009		req->state |= REQ_STATE_DONE;
3010		mpt_fc_post_els(mpt, req, ioindex);
3011	}
3012	return (TRUE);
3013}
3014
3015/*
3016 * Clean up all SCSI Initiator personality state in response
3017 * to a controller reset.
3018 */
3019static void
3020mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
3021{
3022
3023	/*
3024	 * The pending list is already run down by
3025	 * the generic handler.  Perform the same
3026	 * operation on the timed out request list.
3027	 */
3028	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
3029				   MPI_IOCSTATUS_INVALID_STATE);
3030
3031	/*
3032	 * XXX: We need to repost ELS and Target Command Buffers?
3033	 */
3034
3035	/*
3036	 * Inform the XPT that a bus reset has occurred.
3037	 */
3038	xpt_async(AC_BUS_RESET, mpt->path, NULL);
3039}
3040
3041/*
3042 * Parse additional completion information in the reply
3043 * frame for SCSI I/O requests.
3044 */
3045static int
3046mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
3047			     MSG_DEFAULT_REPLY *reply_frame)
3048{
3049	union ccb *ccb;
3050	MSG_SCSI_IO_REPLY *scsi_io_reply;
3051	u_int ioc_status;
3052	u_int sstate;
3053
3054	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
3055	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
3056	     || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
3057		("MPT SCSI I/O Handler called with incorrect reply type"));
3058	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
3059		("MPT SCSI I/O Handler called with continuation reply"));
3060
3061	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
3062	ioc_status = le16toh(scsi_io_reply->IOCStatus);
3063	ioc_status &= MPI_IOCSTATUS_MASK;
3064	sstate = scsi_io_reply->SCSIState;
3065
3066	ccb = req->ccb;
3067	ccb->csio.resid =
3068	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
3069
3070	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
3071	 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
3072		uint32_t sense_returned;
3073
3074		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
3075
3076		sense_returned = le32toh(scsi_io_reply->SenseCount);
3077		if (sense_returned < ccb->csio.sense_len)
3078			ccb->csio.sense_resid = ccb->csio.sense_len -
3079						sense_returned;
3080		else
3081			ccb->csio.sense_resid = 0;
3082
3083		bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data));
3084		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
3085		    min(ccb->csio.sense_len, sense_returned));
3086	}
3087
3088	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
3089		/*
3090		 * Tag messages rejected, but non-tagged retry
3091		 * was successful.
3092XXXX
3093		mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
3094		 */
3095	}
3096
3097	switch(ioc_status) {
3098	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
3099		/*
3100		 * XXX
3101		 * Linux driver indicates that a zero
3102		 * transfer length with this error code
3103		 * indicates a CRC error.
3104		 *
3105		 * No need to swap the bytes for checking
3106		 * against zero.
3107		 */
3108		if (scsi_io_reply->TransferCount == 0) {
3109			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3110			break;
3111		}
3112		/* FALLTHROUGH */
3113	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
3114	case MPI_IOCSTATUS_SUCCESS:
3115	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
3116		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
3117			/*
3118			 * Status was never returned for this transaction.
3119			 */
3120			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
3121		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
3122			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
3123			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
3124			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
3125				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
3126		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
3127
3128			/* XXX Handle SPI-Packet and FCP-2 response info. */
3129			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3130		} else
3131			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3132		break;
3133	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
3134		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
3135		break;
3136	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
3137		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3138		break;
3139	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3140		/*
3141		 * Since selection timeouts and "device really not
3142		 * there" are grouped into this error code, report
3143		 * selection timeout.  Selection timeouts are
3144		 * typically retried before giving up on the device
3145		 * whereas "device not there" errors are considered
3146		 * unretryable.
3147		 */
3148		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3149		break;
3150	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
3151		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
3152		break;
3153	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
3154		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
3155		break;
3156	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
3157		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
3158		break;
3159	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
3160		ccb->ccb_h.status = CAM_UA_TERMIO;
3161		break;
3162	case MPI_IOCSTATUS_INVALID_STATE:
3163		/*
3164		 * The IOC has been reset.  Emulate a bus reset.
3165		 */
3166		/* FALLTHROUGH */
3167	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
3168		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3169		break;
3170	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
3171	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
3172		/*
3173		 * Don't clobber any timeout status that has
3174		 * already been set for this transaction.  We
3175		 * want the SCSI layer to be able to differentiate
3176		 * between the command we aborted due to timeout
3177		 * and any innocent bystanders.
3178		 */
3179		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
3180			break;
3181		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
3182		break;
3183
3184	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
3185		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
3186		break;
3187	case MPI_IOCSTATUS_BUSY:
3188		mpt_set_ccb_status(ccb, CAM_BUSY);
3189		break;
3190	case MPI_IOCSTATUS_INVALID_FUNCTION:
3191	case MPI_IOCSTATUS_INVALID_SGL:
3192	case MPI_IOCSTATUS_INTERNAL_ERROR:
3193	case MPI_IOCSTATUS_INVALID_FIELD:
3194	default:
3195		/* XXX
3196		 * Some of the above may need to kick
3197		 * of a recovery action!!!!
3198		 */
3199		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
3200		break;
3201	}
3202
3203	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
3204		mpt_freeze_ccb(ccb);
3205	}
3206
3207	return (TRUE);
3208}
3209
3210static void
3211mpt_action(struct cam_sim *sim, union ccb *ccb)
3212{
3213	struct mpt_softc *mpt;
3214	struct ccb_trans_settings *cts;
3215	target_id_t tgt;
3216	lun_id_t lun;
3217	int raid_passthru;
3218
3219	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
3220
3221	mpt = (struct mpt_softc *)cam_sim_softc(sim);
3222	raid_passthru = (sim == mpt->phydisk_sim);
3223	MPT_LOCK_ASSERT(mpt);
3224
3225	tgt = ccb->ccb_h.target_id;
3226	lun = ccb->ccb_h.target_lun;
3227	if (raid_passthru &&
3228	    ccb->ccb_h.func_code != XPT_PATH_INQ &&
3229	    ccb->ccb_h.func_code != XPT_RESET_BUS &&
3230	    ccb->ccb_h.func_code != XPT_RESET_DEV) {
3231		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
3232			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3233			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
3234			xpt_done(ccb);
3235			return;
3236		}
3237	}
3238	ccb->ccb_h.ccb_mpt_ptr = mpt;
3239
3240	switch (ccb->ccb_h.func_code) {
3241	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
3242		/*
3243		 * Do a couple of preliminary checks...
3244		 */
3245		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
3246			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
3247				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3248				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3249				break;
3250			}
3251		}
3252		/* Max supported CDB length is 16 bytes */
3253		/* XXX Unless we implement the new 32byte message type */
3254		if (ccb->csio.cdb_len >
3255		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
3256			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3257			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3258			break;
3259		}
3260#ifdef	MPT_TEST_MULTIPATH
3261		if (mpt->failure_id == ccb->ccb_h.target_id) {
3262			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3263			mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3264			break;
3265		}
3266#endif
3267		ccb->csio.scsi_status = SCSI_STATUS_OK;
3268		mpt_start(sim, ccb);
3269		return;
3270
3271	case XPT_RESET_BUS:
3272		if (raid_passthru) {
3273			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3274			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3275			break;
3276		}
3277	case XPT_RESET_DEV:
3278		if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
3279			if (bootverbose) {
3280				xpt_print(ccb->ccb_h.path, "reset bus\n");
3281			}
3282		} else {
3283			xpt_print(ccb->ccb_h.path, "reset device\n");
3284		}
3285		(void) mpt_bus_reset(mpt, tgt, lun, FALSE);
3286
3287		/*
3288		 * mpt_bus_reset is always successful in that it
3289		 * will fall back to a hard reset should a bus
3290		 * reset attempt fail.
3291		 */
3292		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3293		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3294		break;
3295
3296	case XPT_ABORT:
3297	{
3298		union ccb *accb = ccb->cab.abort_ccb;
3299		switch (accb->ccb_h.func_code) {
3300		case XPT_ACCEPT_TARGET_IO:
3301		case XPT_IMMEDIATE_NOTIFY:
3302			ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
3303			break;
3304		case XPT_CONT_TARGET_IO:
3305			mpt_prt(mpt, "cannot abort active CTIOs yet\n");
3306			ccb->ccb_h.status = CAM_UA_ABORT;
3307			break;
3308		case XPT_SCSI_IO:
3309			ccb->ccb_h.status = CAM_UA_ABORT;
3310			break;
3311		default:
3312			ccb->ccb_h.status = CAM_REQ_INVALID;
3313			break;
3314		}
3315		break;
3316	}
3317
3318#define	IS_CURRENT_SETTINGS(c)	((c)->type == CTS_TYPE_CURRENT_SETTINGS)
3319
3320#define	DP_DISC_ENABLE	0x1
3321#define	DP_DISC_DISABL	0x2
3322#define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)
3323
3324#define	DP_TQING_ENABLE	0x4
3325#define	DP_TQING_DISABL	0x8
3326#define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)
3327
3328#define	DP_WIDE		0x10
3329#define	DP_NARROW	0x20
3330#define	DP_WIDTH	(DP_WIDE|DP_NARROW)
3331
3332#define	DP_SYNC		0x40
3333
3334	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
3335	{
3336		struct ccb_trans_settings_scsi *scsi;
3337		struct ccb_trans_settings_spi *spi;
3338		uint8_t dval;
3339		u_int period;
3340		u_int offset;
3341		int i, j;
3342
3343		cts = &ccb->cts;
3344
3345		if (mpt->is_fc || mpt->is_sas) {
3346			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3347			break;
3348		}
3349
3350		scsi = &cts->proto_specific.scsi;
3351		spi = &cts->xport_specific.spi;
3352
3353		/*
3354		 * We can be called just to valid transport and proto versions
3355		 */
3356		if (scsi->valid == 0 && spi->valid == 0) {
3357			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3358			break;
3359		}
3360
3361		/*
3362		 * Skip attempting settings on RAID volume disks.
3363		 * Other devices on the bus get the normal treatment.
3364		 */
3365		if (mpt->phydisk_sim && raid_passthru == 0 &&
3366		    mpt_is_raid_volume(mpt, tgt) != 0) {
3367			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3368			    "no transfer settings for RAID vols\n");
3369			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3370			break;
3371		}
3372
3373		i = mpt->mpt_port_page2.PortSettings &
3374		    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
3375		j = mpt->mpt_port_page2.PortFlags &
3376		    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
3377		if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
3378		    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
3379			mpt_lprt(mpt, MPT_PRT_ALWAYS,
3380			    "honoring BIOS transfer negotiations\n");
3381			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3382			break;
3383		}
3384
3385		dval = 0;
3386		period = 0;
3387		offset = 0;
3388
3389		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
3390			dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
3391			    DP_DISC_ENABLE : DP_DISC_DISABL;
3392		}
3393
3394		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
3395			dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
3396			    DP_TQING_ENABLE : DP_TQING_DISABL;
3397		}
3398
3399		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
3400			dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
3401			    DP_WIDE : DP_NARROW;
3402		}
3403
3404		if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
3405			dval |= DP_SYNC;
3406			offset = spi->sync_offset;
3407		} else {
3408			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3409			    &mpt->mpt_dev_page1[tgt];
3410			offset = ptr->RequestedParameters;
3411			offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3412	    		offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3413		}
3414		if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
3415			dval |= DP_SYNC;
3416			period = spi->sync_period;
3417		} else {
3418			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3419			    &mpt->mpt_dev_page1[tgt];
3420			period = ptr->RequestedParameters;
3421			period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3422	    		period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3423		}
3424
3425		if (dval & DP_DISC_ENABLE) {
3426			mpt->mpt_disc_enable |= (1 << tgt);
3427		} else if (dval & DP_DISC_DISABL) {
3428			mpt->mpt_disc_enable &= ~(1 << tgt);
3429		}
3430		if (dval & DP_TQING_ENABLE) {
3431			mpt->mpt_tag_enable |= (1 << tgt);
3432		} else if (dval & DP_TQING_DISABL) {
3433			mpt->mpt_tag_enable &= ~(1 << tgt);
3434		}
3435		if (dval & DP_WIDTH) {
3436			mpt_setwidth(mpt, tgt, 1);
3437		}
3438		if (dval & DP_SYNC) {
3439			mpt_setsync(mpt, tgt, period, offset);
3440		}
3441		if (dval == 0) {
3442			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3443			break;
3444		}
3445		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3446		    "set [%d]: 0x%x period 0x%x offset %d\n",
3447		    tgt, dval, period, offset);
3448		if (mpt_update_spi_config(mpt, tgt)) {
3449			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3450		} else {
3451			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3452		}
3453		break;
3454	}
3455	case XPT_GET_TRAN_SETTINGS:
3456	{
3457		struct ccb_trans_settings_scsi *scsi;
3458		cts = &ccb->cts;
3459		cts->protocol = PROTO_SCSI;
3460		if (mpt->is_fc) {
3461			struct ccb_trans_settings_fc *fc =
3462			    &cts->xport_specific.fc;
3463			cts->protocol_version = SCSI_REV_SPC;
3464			cts->transport = XPORT_FC;
3465			cts->transport_version = 0;
3466			if (mpt->mpt_fcport_speed != 0) {
3467				fc->valid = CTS_FC_VALID_SPEED;
3468				fc->bitrate = 100000 * mpt->mpt_fcport_speed;
3469			}
3470		} else if (mpt->is_sas) {
3471			struct ccb_trans_settings_sas *sas =
3472			    &cts->xport_specific.sas;
3473			cts->protocol_version = SCSI_REV_SPC2;
3474			cts->transport = XPORT_SAS;
3475			cts->transport_version = 0;
3476			sas->valid = CTS_SAS_VALID_SPEED;
3477			sas->bitrate = 300000;
3478		} else {
3479			cts->protocol_version = SCSI_REV_2;
3480			cts->transport = XPORT_SPI;
3481			cts->transport_version = 2;
3482			if (mpt_get_spi_settings(mpt, cts) != 0) {
3483				mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3484				break;
3485			}
3486		}
3487		scsi = &cts->proto_specific.scsi;
3488		scsi->valid = CTS_SCSI_VALID_TQ;
3489		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
3490		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3491		break;
3492	}
3493	case XPT_CALC_GEOMETRY:
3494	{
3495		struct ccb_calc_geometry *ccg;
3496
3497		ccg = &ccb->ccg;
3498		if (ccg->block_size == 0) {
3499			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3500			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3501			break;
3502		}
3503		cam_calc_geometry(ccg, /* extended */ 1);
3504		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
3505		break;
3506	}
3507	case XPT_GET_SIM_KNOB:
3508	{
3509		struct ccb_sim_knob *kp = &ccb->knob;
3510
3511		if (mpt->is_fc) {
3512			kp->xport_specific.fc.wwnn = mpt->scinfo.fc.wwnn;
3513			kp->xport_specific.fc.wwpn = mpt->scinfo.fc.wwpn;
3514			switch (mpt->role) {
3515			case MPT_ROLE_NONE:
3516				kp->xport_specific.fc.role = KNOB_ROLE_NONE;
3517				break;
3518			case MPT_ROLE_INITIATOR:
3519				kp->xport_specific.fc.role = KNOB_ROLE_INITIATOR;
3520				break;
3521			case MPT_ROLE_TARGET:
3522				kp->xport_specific.fc.role = KNOB_ROLE_TARGET;
3523				break;
3524			case MPT_ROLE_BOTH:
3525				kp->xport_specific.fc.role = KNOB_ROLE_BOTH;
3526				break;
3527			}
3528			kp->xport_specific.fc.valid =
3529			    KNOB_VALID_ADDRESS | KNOB_VALID_ROLE;
3530			ccb->ccb_h.status = CAM_REQ_CMP;
3531		} else {
3532			ccb->ccb_h.status = CAM_REQ_INVALID;
3533		}
3534		xpt_done(ccb);
3535		break;
3536	}
3537	case XPT_PATH_INQ:		/* Path routing inquiry */
3538	{
3539		struct ccb_pathinq *cpi = &ccb->cpi;
3540
3541		cpi->version_num = 1;
3542		cpi->target_sprt = 0;
3543		cpi->hba_eng_cnt = 0;
3544		cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
3545		cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE;
3546		/*
3547		 * FC cards report MAX_DEVICES of 512, but
3548		 * the MSG_SCSI_IO_REQUEST target id field
3549		 * is only 8 bits. Until we fix the driver
3550		 * to support 'channels' for bus overflow,
3551		 * just limit it.
3552		 */
3553		if (cpi->max_target > 255) {
3554			cpi->max_target = 255;
3555		}
3556
3557		/*
3558		 * VMware ESX reports > 16 devices and then dies when we probe.
3559		 */
3560		if (mpt->is_spi && cpi->max_target > 15) {
3561			cpi->max_target = 15;
3562		}
3563		if (mpt->is_spi)
3564			cpi->max_lun = 7;
3565		else
3566			cpi->max_lun = MPT_MAX_LUNS;
3567		cpi->initiator_id = mpt->mpt_ini_id;
3568		cpi->bus_id = cam_sim_bus(sim);
3569
3570		/*
3571		 * The base speed is the speed of the underlying connection.
3572		 */
3573		cpi->protocol = PROTO_SCSI;
3574		if (mpt->is_fc) {
3575			cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED |
3576			    PIM_EXTLUNS;
3577			cpi->base_transfer_speed = 100000;
3578			cpi->hba_inquiry = PI_TAG_ABLE;
3579			cpi->transport = XPORT_FC;
3580			cpi->transport_version = 0;
3581			cpi->protocol_version = SCSI_REV_SPC;
3582			cpi->xport_specific.fc.wwnn = mpt->scinfo.fc.wwnn;
3583			cpi->xport_specific.fc.wwpn = mpt->scinfo.fc.wwpn;
3584			cpi->xport_specific.fc.port = mpt->scinfo.fc.portid;
3585			cpi->xport_specific.fc.bitrate =
3586			    100000 * mpt->mpt_fcport_speed;
3587		} else if (mpt->is_sas) {
3588			cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED |
3589			    PIM_EXTLUNS;
3590			cpi->base_transfer_speed = 300000;
3591			cpi->hba_inquiry = PI_TAG_ABLE;
3592			cpi->transport = XPORT_SAS;
3593			cpi->transport_version = 0;
3594			cpi->protocol_version = SCSI_REV_SPC2;
3595		} else {
3596			cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED |
3597			    PIM_EXTLUNS;
3598			cpi->base_transfer_speed = 3300;
3599			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3600			cpi->transport = XPORT_SPI;
3601			cpi->transport_version = 2;
3602			cpi->protocol_version = SCSI_REV_2;
3603		}
3604
3605		/*
3606		 * We give our fake RAID passhtru bus a width that is MaxVolumes
3607		 * wide and restrict it to one lun.
3608		 */
3609		if (raid_passthru) {
3610			cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3611			cpi->initiator_id = cpi->max_target + 1;
3612			cpi->max_lun = 0;
3613		}
3614
3615		if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3616			cpi->hba_misc |= PIM_NOINITIATOR;
3617		}
3618		if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
3619			cpi->target_sprt =
3620			    PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3621		} else {
3622			cpi->target_sprt = 0;
3623		}
3624		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3625		strlcpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3626		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3627		cpi->unit_number = cam_sim_unit(sim);
3628		cpi->ccb_h.status = CAM_REQ_CMP;
3629		break;
3630	}
3631	case XPT_EN_LUN:		/* Enable LUN as a target */
3632	{
3633		int result;
3634
3635		if (ccb->cel.enable)
3636			result = mpt_enable_lun(mpt,
3637			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3638		else
3639			result = mpt_disable_lun(mpt,
3640			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3641		if (result == 0) {
3642			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3643		} else {
3644			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3645		}
3646		break;
3647	}
3648	case XPT_IMMEDIATE_NOTIFY:	/* Add Immediate Notify Resource */
3649	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
3650	{
3651		tgt_resource_t *trtp;
3652		lun_id_t lun = ccb->ccb_h.target_lun;
3653		ccb->ccb_h.sim_priv.entries[0].field = 0;
3654		ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3655
3656		if (lun == CAM_LUN_WILDCARD) {
3657			if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3658				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3659				break;
3660			}
3661			trtp = &mpt->trt_wildcard;
3662		} else if (lun >= MPT_MAX_LUNS) {
3663			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3664			break;
3665		} else {
3666			trtp = &mpt->trt[lun];
3667		}
3668		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3669			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3670			    "Put FREE ATIO %p lun %jx\n", ccb, (uintmax_t)lun);
3671			STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3672			    sim_links.stqe);
3673		} else {
3674			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3675			    "Put FREE INOT lun %jx\n", (uintmax_t)lun);
3676			STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3677			    sim_links.stqe);
3678		}
3679		mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3680		return;
3681	}
3682	case XPT_NOTIFY_ACKNOWLEDGE:	/* Task management request done. */
3683	{
3684		request_t *req = MPT_TAG_2_REQ(mpt, ccb->cna2.tag_id);
3685
3686		mpt_lprt(mpt, MPT_PRT_DEBUG, "Got Notify ACK\n");
3687		mpt_scsi_tgt_status(mpt, NULL, req, 0, NULL, 0);
3688		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3689		break;
3690	}
3691	case XPT_CONT_TARGET_IO:
3692		mpt_target_start_io(mpt, ccb);
3693		return;
3694
3695	default:
3696		ccb->ccb_h.status = CAM_REQ_INVALID;
3697		break;
3698	}
3699	xpt_done(ccb);
3700}
3701
3702static int
3703mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3704{
3705	struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3706	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3707	target_id_t tgt;
3708	uint32_t dval, pval, oval;
3709	int rv;
3710
3711	if (IS_CURRENT_SETTINGS(cts) == 0) {
3712		tgt = cts->ccb_h.target_id;
3713	} else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3714		if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3715			return (-1);
3716		}
3717	} else {
3718		tgt = cts->ccb_h.target_id;
3719	}
3720
3721	/*
3722	 * We aren't looking at Port Page 2 BIOS settings here-
3723	 * sometimes these have been known to be bogus XXX.
3724	 *
3725	 * For user settings, we pick the max from port page 0
3726	 *
3727	 * For current settings we read the current settings out from
3728	 * device page 0 for that target.
3729	 */
3730	if (IS_CURRENT_SETTINGS(cts)) {
3731		CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3732		dval = 0;
3733
3734		tmp = mpt->mpt_dev_page0[tgt];
3735		rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3736		    sizeof(tmp), FALSE, 5000);
3737		if (rv) {
3738			mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3739			return (rv);
3740		}
3741		mpt2host_config_page_scsi_device_0(&tmp);
3742
3743		mpt_lprt(mpt, MPT_PRT_DEBUG,
3744		    "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
3745		    tmp.NegotiatedParameters, tmp.Information);
3746		dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3747		    DP_WIDE : DP_NARROW;
3748		dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3749		    DP_DISC_ENABLE : DP_DISC_DISABL;
3750		dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3751		    DP_TQING_ENABLE : DP_TQING_DISABL;
3752		oval = tmp.NegotiatedParameters;
3753		oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
3754		oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
3755		pval = tmp.NegotiatedParameters;
3756		pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
3757		pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
3758		mpt->mpt_dev_page0[tgt] = tmp;
3759	} else {
3760		dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
3761		oval = mpt->mpt_port_page0.Capabilities;
3762		oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
3763		pval = mpt->mpt_port_page0.Capabilities;
3764		pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
3765	}
3766
3767	spi->valid = 0;
3768	scsi->valid = 0;
3769	spi->flags = 0;
3770	scsi->flags = 0;
3771	spi->sync_offset = oval;
3772	spi->sync_period = pval;
3773	spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3774	spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3775	spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3776	if (dval & DP_WIDE) {
3777		spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3778	} else {
3779		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3780	}
3781	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3782		scsi->valid = CTS_SCSI_VALID_TQ;
3783		if (dval & DP_TQING_ENABLE) {
3784			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3785		}
3786		spi->valid |= CTS_SPI_VALID_DISC;
3787		if (dval & DP_DISC_ENABLE) {
3788			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3789		}
3790	}
3791
3792	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3793	    "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
3794	    IS_CURRENT_SETTINGS(cts) ? "ACTIVE" : "NVRAM ", dval, pval, oval);
3795	return (0);
3796}
3797
3798static void
3799mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3800{
3801	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3802
3803	ptr = &mpt->mpt_dev_page1[tgt];
3804	if (onoff) {
3805		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3806	} else {
3807		ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3808	}
3809}
3810
3811static void
3812mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3813{
3814	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3815
3816	ptr = &mpt->mpt_dev_page1[tgt];
3817	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3818	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3819	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3820	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3821	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3822	if (period == 0) {
3823		return;
3824	}
3825	ptr->RequestedParameters |=
3826	    period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3827	ptr->RequestedParameters |=
3828	    offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3829	if (period < 0xa) {
3830		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3831	}
3832	if (period < 0x9) {
3833		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3834		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
3835	}
3836}
3837
3838static int
3839mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3840{
3841	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3842	int rv;
3843
3844	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3845	    "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
3846	    tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
3847	tmp = mpt->mpt_dev_page1[tgt];
3848	host2mpt_config_page_scsi_device_1(&tmp);
3849	rv = mpt_write_cur_cfg_page(mpt, tgt,
3850	    &tmp.Header, sizeof(tmp), FALSE, 5000);
3851	if (rv) {
3852		mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3853		return (-1);
3854	}
3855	return (0);
3856}
3857
3858/****************************** Timeout Recovery ******************************/
3859static int
3860mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3861{
3862	int error;
3863
3864	error = kproc_create(mpt_recovery_thread, mpt,
3865	    &mpt->recovery_thread, /*flags*/0,
3866	    /*altstack*/0, "mpt_recovery%d", mpt->unit);
3867	return (error);
3868}
3869
3870static void
3871mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3872{
3873
3874	if (mpt->recovery_thread == NULL) {
3875		return;
3876	}
3877	mpt->shutdwn_recovery = 1;
3878	wakeup(mpt);
3879	/*
3880	 * Sleep on a slightly different location
3881	 * for this interlock just for added safety.
3882	 */
3883	mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
3884}
3885
3886static void
3887mpt_recovery_thread(void *arg)
3888{
3889	struct mpt_softc *mpt;
3890
3891	mpt = (struct mpt_softc *)arg;
3892	MPT_LOCK(mpt);
3893	for (;;) {
3894		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3895			if (mpt->shutdwn_recovery == 0) {
3896				mpt_sleep(mpt, mpt, PUSER, "idle", 0);
3897			}
3898		}
3899		if (mpt->shutdwn_recovery != 0) {
3900			break;
3901		}
3902		mpt_recover_commands(mpt);
3903	}
3904	mpt->recovery_thread = NULL;
3905	wakeup(&mpt->recovery_thread);
3906	MPT_UNLOCK(mpt);
3907	kproc_exit(0);
3908}
3909
3910static int
3911mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
3912    u_int channel, target_id_t target, lun_id_t lun, u_int abort_ctx,
3913    int sleep_ok)
3914{
3915	MSG_SCSI_TASK_MGMT *tmf_req;
3916	int		    error;
3917
3918	/*
3919	 * Wait for any current TMF request to complete.
3920	 * We're only allowed to issue one TMF at a time.
3921	 */
3922	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
3923	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
3924	if (error != 0) {
3925		mpt_reset(mpt, TRUE);
3926		return (ETIMEDOUT);
3927	}
3928
3929	mpt_assign_serno(mpt, mpt->tmf_req);
3930	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
3931
3932	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
3933	memset(tmf_req, 0, sizeof(*tmf_req));
3934	tmf_req->TargetID = target;
3935	tmf_req->Bus = channel;
3936	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
3937	tmf_req->TaskType = type;
3938	tmf_req->MsgFlags = flags;
3939	tmf_req->MsgContext =
3940	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
3941	be64enc(tmf_req->LUN, CAM_EXTLUN_BYTE_SWIZZLE(lun));
3942	tmf_req->TaskMsgContext = abort_ctx;
3943
3944	mpt_lprt(mpt, MPT_PRT_DEBUG,
3945	    "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
3946	    mpt->tmf_req->serno, tmf_req->MsgContext);
3947	if (mpt->verbose > MPT_PRT_DEBUG) {
3948		mpt_print_request(tmf_req);
3949	}
3950
3951	KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
3952	    ("mpt_scsi_send_tmf: tmf_req already on pending list"));
3953	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
3954	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
3955	if (error != MPT_OK) {
3956		TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
3957		mpt->tmf_req->state = REQ_STATE_FREE;
3958		mpt_reset(mpt, TRUE);
3959	}
3960	return (error);
3961}
3962
3963/*
3964 * When a command times out, it is placed on the requeust_timeout_list
3965 * and we wake our recovery thread.  The MPT-Fusion architecture supports
3966 * only a single TMF operation at a time, so we serially abort/bdr, etc,
3967 * the timedout transactions.  The next TMF is issued either by the
3968 * completion handler of the current TMF waking our recovery thread,
3969 * or the TMF timeout handler causing a hard reset sequence.
3970 */
3971static void
3972mpt_recover_commands(struct mpt_softc *mpt)
3973{
3974	request_t	   *req;
3975	union ccb	   *ccb;
3976	int		    error;
3977
3978	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3979		/*
3980		 * No work to do- leave.
3981		 */
3982		mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
3983		return;
3984	}
3985
3986	/*
3987	 * Flush any commands whose completion coincides with their timeout.
3988	 */
3989	mpt_intr(mpt);
3990
3991	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3992		/*
3993		 * The timedout commands have already
3994		 * completed.  This typically means
3995		 * that either the timeout value was on
3996		 * the hairy edge of what the device
3997		 * requires or - more likely - interrupts
3998		 * are not happening.
3999		 */
4000		mpt_prt(mpt, "Timedout requests already complete. "
4001		    "Interrupts may not be functioning.\n");
4002		mpt_enable_ints(mpt);
4003		return;
4004	}
4005
4006	/*
4007	 * We have no visibility into the current state of the
4008	 * controller, so attempt to abort the commands in the
4009	 * order they timed-out. For initiator commands, we
4010	 * depend on the reply handler pulling requests off
4011	 * the timeout list.
4012	 */
4013	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
4014		uint16_t status;
4015		uint8_t response;
4016		MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
4017
4018		mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
4019		    req, req->serno, hdrp->Function);
4020		ccb = req->ccb;
4021		if (ccb == NULL) {
4022			mpt_prt(mpt, "null ccb in timed out request. "
4023			    "Resetting Controller.\n");
4024			mpt_reset(mpt, TRUE);
4025			continue;
4026		}
4027		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
4028
4029		/*
4030		 * Check to see if this is not an initiator command and
4031		 * deal with it differently if it is.
4032		 */
4033		switch (hdrp->Function) {
4034		case MPI_FUNCTION_SCSI_IO_REQUEST:
4035		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
4036			break;
4037		default:
4038			/*
4039			 * XXX: FIX ME: need to abort target assists...
4040			 */
4041			mpt_prt(mpt, "just putting it back on the pend q\n");
4042			TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
4043			TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
4044			    links);
4045			continue;
4046		}
4047
4048		error = mpt_scsi_send_tmf(mpt,
4049		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4050		    0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
4051		    htole32(req->index | scsi_io_handler_id), TRUE);
4052
4053		if (error != 0) {
4054			/*
4055			 * mpt_scsi_send_tmf hard resets on failure, so no
4056			 * need to do so here.  Our queue should be emptied
4057			 * by the hard reset.
4058			 */
4059			continue;
4060		}
4061
4062		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
4063		    REQ_STATE_DONE, TRUE, 500);
4064
4065		status = le16toh(mpt->tmf_req->IOCStatus);
4066		response = mpt->tmf_req->ResponseCode;
4067		mpt->tmf_req->state = REQ_STATE_FREE;
4068
4069		if (error != 0) {
4070			/*
4071			 * If we've errored out,, reset the controller.
4072			 */
4073			mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
4074			    "Resetting controller\n");
4075			mpt_reset(mpt, TRUE);
4076			continue;
4077		}
4078
4079		if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
4080			mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
4081			    "Resetting controller.\n", status);
4082			mpt_reset(mpt, TRUE);
4083			continue;
4084		}
4085
4086		if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
4087		    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
4088			mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
4089			    "Resetting controller.\n", response);
4090			mpt_reset(mpt, TRUE);
4091			continue;
4092		}
4093		mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
4094	}
4095}
4096
4097/************************ Target Mode Support ****************************/
4098static void
4099mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
4100{
4101	MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
4102	PTR_SGE_TRANSACTION32 tep;
4103	PTR_SGE_SIMPLE32 se;
4104	bus_addr_t paddr;
4105	uint32_t fl;
4106
4107	paddr = req->req_pbuf;
4108	paddr += MPT_RQSL(mpt);
4109
4110	fc = req->req_vbuf;
4111	memset(fc, 0, MPT_REQUEST_AREA);
4112	fc->BufferCount = 1;
4113	fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
4114	fc->MsgContext = htole32(req->index | fc_els_handler_id);
4115
4116	/*
4117	 * Okay, set up ELS buffer pointers. ELS buffer pointers
4118	 * consist of a TE SGL element (with details length of zero)
4119	 * followed by a SIMPLE SGL element which holds the address
4120	 * of the buffer.
4121	 */
4122
4123	tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
4124
4125	tep->ContextSize = 4;
4126	tep->Flags = 0;
4127	tep->TransactionContext[0] = htole32(ioindex);
4128
4129	se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
4130	fl =
4131		MPI_SGE_FLAGS_HOST_TO_IOC	|
4132		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
4133		MPI_SGE_FLAGS_LAST_ELEMENT	|
4134		MPI_SGE_FLAGS_END_OF_LIST	|
4135		MPI_SGE_FLAGS_END_OF_BUFFER;
4136	fl <<= MPI_SGE_FLAGS_SHIFT;
4137	fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
4138	se->FlagsLength = htole32(fl);
4139	se->Address = htole32((uint32_t) paddr);
4140	mpt_lprt(mpt, MPT_PRT_DEBUG,
4141	    "add ELS index %d ioindex %d for %p:%u\n",
4142	    req->index, ioindex, req, req->serno);
4143	KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
4144	    ("mpt_fc_post_els: request not locked"));
4145	mpt_send_cmd(mpt, req);
4146}
4147
4148static void
4149mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
4150{
4151	PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
4152	PTR_CMD_BUFFER_DESCRIPTOR cb;
4153	bus_addr_t paddr;
4154
4155	paddr = req->req_pbuf;
4156	paddr += MPT_RQSL(mpt);
4157	memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
4158	MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
4159
4160	fc = req->req_vbuf;
4161	fc->BufferCount = 1;
4162	fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
4163	fc->BufferLength = MIN(MPT_REQUEST_AREA - MPT_RQSL(mpt), UINT8_MAX);
4164	fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4165
4166	cb = &fc->Buffer[0];
4167	cb->IoIndex = htole16(ioindex);
4168	cb->u.PhysicalAddress32 = htole32((U32) paddr);
4169
4170	mpt_check_doorbell(mpt);
4171	mpt_send_cmd(mpt, req);
4172}
4173
4174static int
4175mpt_add_els_buffers(struct mpt_softc *mpt)
4176{
4177	int i;
4178
4179	if (mpt->is_fc == 0) {
4180		return (TRUE);
4181	}
4182
4183	if (mpt->els_cmds_allocated) {
4184		return (TRUE);
4185	}
4186
4187	mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *),
4188	    M_DEVBUF, M_NOWAIT | M_ZERO);
4189
4190	if (mpt->els_cmd_ptrs == NULL) {
4191		return (FALSE);
4192	}
4193
4194	/*
4195	 * Feed the chip some ELS buffer resources
4196	 */
4197	for (i = 0; i < MPT_MAX_ELS; i++) {
4198		request_t *req = mpt_get_request(mpt, FALSE);
4199		if (req == NULL) {
4200			break;
4201		}
4202		req->state |= REQ_STATE_LOCKED;
4203		mpt->els_cmd_ptrs[i] = req;
4204		mpt_fc_post_els(mpt, req, i);
4205	}
4206
4207	if (i == 0) {
4208		mpt_prt(mpt, "unable to add ELS buffer resources\n");
4209		free(mpt->els_cmd_ptrs, M_DEVBUF);
4210		mpt->els_cmd_ptrs = NULL;
4211		return (FALSE);
4212	}
4213	if (i != MPT_MAX_ELS) {
4214		mpt_lprt(mpt, MPT_PRT_INFO,
4215		    "only added %d of %d  ELS buffers\n", i, MPT_MAX_ELS);
4216	}
4217	mpt->els_cmds_allocated = i;
4218	return(TRUE);
4219}
4220
4221static int
4222mpt_add_target_commands(struct mpt_softc *mpt)
4223{
4224	int i, max;
4225
4226	if (mpt->tgt_cmd_ptrs) {
4227		return (TRUE);
4228	}
4229
4230	max = MPT_MAX_REQUESTS(mpt) >> 1;
4231	if (max > mpt->mpt_max_tgtcmds) {
4232		max = mpt->mpt_max_tgtcmds;
4233	}
4234	mpt->tgt_cmd_ptrs =
4235	    malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
4236	if (mpt->tgt_cmd_ptrs == NULL) {
4237		mpt_prt(mpt,
4238		    "mpt_add_target_commands: could not allocate cmd ptrs\n");
4239		return (FALSE);
4240	}
4241
4242	for (i = 0; i < max; i++) {
4243		request_t *req;
4244
4245		req = mpt_get_request(mpt, FALSE);
4246		if (req == NULL) {
4247			break;
4248		}
4249		req->state |= REQ_STATE_LOCKED;
4250		mpt->tgt_cmd_ptrs[i] = req;
4251		mpt_post_target_command(mpt, req, i);
4252	}
4253
4254
4255	if (i == 0) {
4256		mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
4257		free(mpt->tgt_cmd_ptrs, M_DEVBUF);
4258		mpt->tgt_cmd_ptrs = NULL;
4259		return (FALSE);
4260	}
4261
4262	mpt->tgt_cmds_allocated = i;
4263
4264	if (i < max) {
4265		mpt_lprt(mpt, MPT_PRT_INFO,
4266		    "added %d of %d target bufs\n", i, max);
4267	}
4268	return (i);
4269}
4270
4271static int
4272mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4273{
4274
4275	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4276		mpt->twildcard = 1;
4277	} else if (lun >= MPT_MAX_LUNS) {
4278		return (EINVAL);
4279	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4280		return (EINVAL);
4281	}
4282	if (mpt->tenabled == 0) {
4283		if (mpt->is_fc) {
4284			(void) mpt_fc_reset_link(mpt, 0);
4285		}
4286		mpt->tenabled = 1;
4287	}
4288	if (lun == CAM_LUN_WILDCARD) {
4289		mpt->trt_wildcard.enabled = 1;
4290	} else {
4291		mpt->trt[lun].enabled = 1;
4292	}
4293	return (0);
4294}
4295
4296static int
4297mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4298{
4299	int i;
4300
4301	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4302		mpt->twildcard = 0;
4303	} else if (lun >= MPT_MAX_LUNS) {
4304		return (EINVAL);
4305	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4306		return (EINVAL);
4307	}
4308	if (lun == CAM_LUN_WILDCARD) {
4309		mpt->trt_wildcard.enabled = 0;
4310	} else {
4311		mpt->trt[lun].enabled = 0;
4312	}
4313	for (i = 0; i < MPT_MAX_LUNS; i++) {
4314		if (mpt->trt[i].enabled) {
4315			break;
4316		}
4317	}
4318	if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
4319		if (mpt->is_fc) {
4320			(void) mpt_fc_reset_link(mpt, 0);
4321		}
4322		mpt->tenabled = 0;
4323	}
4324	return (0);
4325}
4326
4327/*
4328 * Called with MPT lock held
4329 */
4330static void
4331mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
4332{
4333	struct ccb_scsiio *csio = &ccb->csio;
4334	request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
4335	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
4336
4337	switch (tgt->state) {
4338	case TGT_STATE_IN_CAM:
4339		break;
4340	case TGT_STATE_MOVING_DATA:
4341		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4342		xpt_freeze_simq(mpt->sim, 1);
4343		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4344		tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4345		xpt_done(ccb);
4346		return;
4347	default:
4348		mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
4349		    "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
4350		mpt_tgt_dump_req_state(mpt, cmd_req);
4351		mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
4352		xpt_done(ccb);
4353		return;
4354	}
4355
4356	if (csio->dxfer_len) {
4357		bus_dmamap_callback_t *cb;
4358		PTR_MSG_TARGET_ASSIST_REQUEST ta;
4359		request_t *req;
4360		int error;
4361
4362		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
4363		    ("dxfer_len %u but direction is NONE", csio->dxfer_len));
4364
4365		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4366			if (mpt->outofbeer == 0) {
4367				mpt->outofbeer = 1;
4368				xpt_freeze_simq(mpt->sim, 1);
4369				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4370			}
4371			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4372			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4373			xpt_done(ccb);
4374			return;
4375		}
4376		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4377		if (sizeof (bus_addr_t) > 4) {
4378			cb = mpt_execute_req_a64;
4379		} else {
4380			cb = mpt_execute_req;
4381		}
4382
4383		req->ccb = ccb;
4384		ccb->ccb_h.ccb_req_ptr = req;
4385
4386		/*
4387		 * Record the currently active ccb and the
4388		 * request for it in our target state area.
4389		 */
4390		tgt->ccb = ccb;
4391		tgt->req = req;
4392
4393		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4394		ta = req->req_vbuf;
4395
4396		if (mpt->is_sas) {
4397			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4398			     cmd_req->req_vbuf;
4399			ta->QueueTag = ssp->InitiatorTag;
4400		} else if (mpt->is_spi) {
4401			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4402			     cmd_req->req_vbuf;
4403			ta->QueueTag = sp->Tag;
4404		}
4405		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4406		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4407		ta->ReplyWord = htole32(tgt->reply_desc);
4408		be64enc(ta->LUN, CAM_EXTLUN_BYTE_SWIZZLE(csio->ccb_h.target_lun));
4409
4410		ta->RelativeOffset = tgt->bytes_xfered;
4411		ta->DataLength = ccb->csio.dxfer_len;
4412		if (ta->DataLength > tgt->resid) {
4413			ta->DataLength = tgt->resid;
4414		}
4415
4416		/*
4417		 * XXX Should be done after data transfer completes?
4418		 */
4419		csio->resid = csio->dxfer_len - ta->DataLength;
4420		tgt->resid -= csio->dxfer_len;
4421		tgt->bytes_xfered += csio->dxfer_len;
4422
4423		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
4424			ta->TargetAssistFlags |=
4425			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4426		}
4427
4428#ifdef	WE_TRUST_AUTO_GOOD_STATUS
4429		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
4430		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
4431			ta->TargetAssistFlags |=
4432			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
4433		}
4434#endif
4435		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
4436
4437		mpt_lprt(mpt, MPT_PRT_DEBUG,
4438		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
4439		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
4440		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
4441
4442		error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb,
4443		    cb, req, 0);
4444		if (error == EINPROGRESS) {
4445			xpt_freeze_simq(mpt->sim, 1);
4446			ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4447		}
4448	} else {
4449		/*
4450		 * XXX: I don't know why this seems to happen, but
4451		 * XXX: completing the CCB seems to make things happy.
4452		 * XXX: This seems to happen if the initiator requests
4453		 * XXX: enough data that we have to do multiple CTIOs.
4454		 */
4455		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4456			mpt_lprt(mpt, MPT_PRT_DEBUG,
4457			    "Meaningless STATUS CCB (%p): flags %x status %x "
4458			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
4459			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
4460			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4461			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4462			xpt_done(ccb);
4463			return;
4464		}
4465		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status,
4466		    (void *)&csio->sense_data,
4467		    (ccb->ccb_h.flags & CAM_SEND_SENSE) ?
4468		     csio->sense_len : 0);
4469	}
4470}
4471
4472static void
4473mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
4474    lun_id_t lun, int send, uint8_t *data, size_t length)
4475{
4476	mpt_tgt_state_t *tgt;
4477	PTR_MSG_TARGET_ASSIST_REQUEST ta;
4478	SGE_SIMPLE32 *se;
4479	uint32_t flags;
4480	uint8_t *dptr;
4481	bus_addr_t pptr;
4482	request_t *req;
4483
4484	/*
4485	 * We enter with resid set to the data load for the command.
4486	 */
4487	tgt = MPT_TGT_STATE(mpt, cmd_req);
4488	if (length == 0 || tgt->resid == 0) {
4489		tgt->resid = 0;
4490		mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL, 0);
4491		return;
4492	}
4493
4494	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4495		mpt_prt(mpt, "out of resources- dropping local response\n");
4496		return;
4497	}
4498	tgt->is_local = 1;
4499
4500
4501	memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4502	ta = req->req_vbuf;
4503
4504	if (mpt->is_sas) {
4505		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
4506		ta->QueueTag = ssp->InitiatorTag;
4507	} else if (mpt->is_spi) {
4508		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
4509		ta->QueueTag = sp->Tag;
4510	}
4511	ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4512	ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4513	ta->ReplyWord = htole32(tgt->reply_desc);
4514	be64enc(ta->LUN, CAM_EXTLUN_BYTE_SWIZZLE(lun));
4515	ta->RelativeOffset = 0;
4516	ta->DataLength = length;
4517
4518	dptr = req->req_vbuf;
4519	dptr += MPT_RQSL(mpt);
4520	pptr = req->req_pbuf;
4521	pptr += MPT_RQSL(mpt);
4522	memcpy(dptr, data, min(length, MPT_RQSL(mpt)));
4523
4524	se = (SGE_SIMPLE32 *) &ta->SGL[0];
4525	memset(se, 0,sizeof (*se));
4526
4527	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
4528	if (send) {
4529		ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4530		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
4531	}
4532	se->Address = pptr;
4533	MPI_pSGE_SET_LENGTH(se, length);
4534	flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
4535	flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
4536	MPI_pSGE_SET_FLAGS(se, flags);
4537
4538	tgt->ccb = NULL;
4539	tgt->req = req;
4540	tgt->resid -= length;
4541	tgt->bytes_xfered = length;
4542#ifdef	WE_TRUST_AUTO_GOOD_STATUS
4543	tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
4544#else
4545	tgt->state = TGT_STATE_MOVING_DATA;
4546#endif
4547	mpt_send_cmd(mpt, req);
4548}
4549
4550/*
4551 * Abort queued up CCBs
4552 */
4553static cam_status
4554mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
4555{
4556	struct mpt_hdr_stailq *lp;
4557	struct ccb_hdr *srch;
4558	union ccb *accb = ccb->cab.abort_ccb;
4559	tgt_resource_t *trtp;
4560	mpt_tgt_state_t *tgt;
4561	request_t *req;
4562	uint32_t tag;
4563
4564	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
4565	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD)
4566		trtp = &mpt->trt_wildcard;
4567	else
4568		trtp = &mpt->trt[ccb->ccb_h.target_lun];
4569	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
4570		lp = &trtp->atios;
4571		tag = accb->atio.tag_id;
4572	} else {
4573		lp = &trtp->inots;
4574		tag = accb->cin1.tag_id;
4575	}
4576
4577	/* Search the CCB among queued. */
4578	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
4579		if (srch != &accb->ccb_h)
4580			continue;
4581		STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
4582		accb->ccb_h.status = CAM_REQ_ABORTED;
4583		xpt_done(accb);
4584		return (CAM_REQ_CMP);
4585	}
4586
4587	/* Search the CCB among running. */
4588	req = MPT_TAG_2_REQ(mpt, tag);
4589	tgt = MPT_TGT_STATE(mpt, req);
4590	if (tgt->tag_id == tag) {
4591		mpt_abort_target_cmd(mpt, req);
4592		return (CAM_REQ_CMP);
4593	}
4594
4595	return (CAM_UA_ABORT);
4596}
4597
4598/*
4599 * Ask the MPT to abort the current target command
4600 */
4601static int
4602mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
4603{
4604	int error;
4605	request_t *req;
4606	PTR_MSG_TARGET_MODE_ABORT abtp;
4607
4608	req = mpt_get_request(mpt, FALSE);
4609	if (req == NULL) {
4610		return (-1);
4611	}
4612	abtp = req->req_vbuf;
4613	memset(abtp, 0, sizeof (*abtp));
4614
4615	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4616	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
4617	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
4618	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
4619	error = 0;
4620	if (mpt->is_fc || mpt->is_sas) {
4621		mpt_send_cmd(mpt, req);
4622	} else {
4623		error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
4624	}
4625	return (error);
4626}
4627
4628/*
4629 * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
4630 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
4631 * FC929 to set bogus FC_RSP fields (nonzero residuals
4632 * but w/o RESID fields set). This causes QLogic initiators
4633 * to think maybe that a frame was lost.
4634 *
4635 * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
4636 * we use allocated requests to do TARGET_ASSIST and we
4637 * need to know when to release them.
4638 */
4639
4640static void
4641mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
4642    uint8_t status, uint8_t const *sense_data, u_int sense_len)
4643{
4644	uint8_t *cmd_vbuf;
4645	mpt_tgt_state_t *tgt;
4646	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
4647	request_t *req;
4648	bus_addr_t paddr;
4649	int resplen = 0;
4650	uint32_t fl;
4651
4652	cmd_vbuf = cmd_req->req_vbuf;
4653	cmd_vbuf += MPT_RQSL(mpt);
4654	tgt = MPT_TGT_STATE(mpt, cmd_req);
4655
4656	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4657		if (mpt->outofbeer == 0) {
4658			mpt->outofbeer = 1;
4659			xpt_freeze_simq(mpt->sim, 1);
4660			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4661		}
4662		if (ccb) {
4663			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4664			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4665			xpt_done(ccb);
4666		} else {
4667			mpt_prt(mpt,
4668			    "could not allocate status request- dropping\n");
4669		}
4670		return;
4671	}
4672	req->ccb = ccb;
4673	if (ccb) {
4674		ccb->ccb_h.ccb_mpt_ptr = mpt;
4675		ccb->ccb_h.ccb_req_ptr = req;
4676	}
4677
4678	/*
4679	 * Record the currently active ccb, if any, and the
4680	 * request for it in our target state area.
4681	 */
4682	tgt->ccb = ccb;
4683	tgt->req = req;
4684	tgt->state = TGT_STATE_SENDING_STATUS;
4685
4686	tp = req->req_vbuf;
4687	paddr = req->req_pbuf;
4688	paddr += MPT_RQSL(mpt);
4689
4690	memset(tp, 0, sizeof (*tp));
4691	tp->StatusCode = status;
4692	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
4693	if (mpt->is_fc) {
4694		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
4695		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
4696		uint8_t *sts_vbuf;
4697		uint32_t *rsp;
4698
4699		sts_vbuf = req->req_vbuf;
4700		sts_vbuf += MPT_RQSL(mpt);
4701		rsp = (uint32_t *) sts_vbuf;
4702		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
4703
4704		/*
4705		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
4706		 * It has to be big-endian in memory and is organized
4707		 * in 32 bit words, which are much easier to deal with
4708		 * as words which are swizzled as needed.
4709		 *
4710		 * All we're filling here is the FC_RSP payload.
4711		 * We may just have the chip synthesize it if
4712		 * we have no residual and an OK status.
4713		 *
4714		 */
4715		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
4716
4717		rsp[2] = htobe32(status);
4718#define	MIN_FCP_RESPONSE_SIZE	24
4719#ifndef	WE_TRUST_AUTO_GOOD_STATUS
4720		resplen = MIN_FCP_RESPONSE_SIZE;
4721#endif
4722		if (tgt->resid < 0) {
4723			rsp[2] |= htobe32(0x400); /* XXXX NEED MNEMONIC!!!! */
4724			rsp[3] = htobe32(-tgt->resid);
4725			resplen = MIN_FCP_RESPONSE_SIZE;
4726		} else if (tgt->resid > 0) {
4727			rsp[2] |= htobe32(0x800); /* XXXX NEED MNEMONIC!!!! */
4728			rsp[3] = htobe32(tgt->resid);
4729			resplen = MIN_FCP_RESPONSE_SIZE;
4730		}
4731		if (sense_len > 0) {
4732			rsp[2] |= htobe32(0x200); /* XXXX NEED MNEMONIC!!!! */
4733			rsp[4] = htobe32(sense_len);
4734			memcpy(&rsp[6], sense_data, sense_len);
4735			resplen = MIN_FCP_RESPONSE_SIZE + sense_len;
4736		}
4737	} else if (mpt->is_sas) {
4738		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4739		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
4740		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
4741	} else {
4742		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4743		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
4744		tp->QueueTag = htole16(sp->Tag);
4745		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
4746	}
4747
4748	tp->ReplyWord = htole32(tgt->reply_desc);
4749	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4750
4751#ifdef	WE_CAN_USE_AUTO_REPOST
4752	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
4753#endif
4754	if (status == SCSI_STATUS_OK && resplen == 0) {
4755		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
4756	} else {
4757		tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
4758		fl = MPI_SGE_FLAGS_HOST_TO_IOC |
4759		     MPI_SGE_FLAGS_SIMPLE_ELEMENT |
4760		     MPI_SGE_FLAGS_LAST_ELEMENT |
4761		     MPI_SGE_FLAGS_END_OF_LIST |
4762		     MPI_SGE_FLAGS_END_OF_BUFFER;
4763		fl <<= MPI_SGE_FLAGS_SHIFT;
4764		fl |= resplen;
4765		tp->StatusDataSGE.FlagsLength = htole32(fl);
4766	}
4767
4768	mpt_lprt(mpt, MPT_PRT_DEBUG,
4769	    "STATUS_CCB %p (with%s sense) tag %x req %p:%u resid %u\n",
4770	    ccb, sense_len > 0 ? "" : "out", tgt->tag_id,
4771	    req, req->serno, tgt->resid);
4772	if (mpt->verbose > MPT_PRT_DEBUG)
4773		mpt_print_request(req->req_vbuf);
4774	if (ccb) {
4775		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4776		mpt_req_timeout(req, SBT_1S * 60, mpt_timeout, ccb);
4777	}
4778	mpt_send_cmd(mpt, req);
4779}
4780
4781static void
4782mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
4783    tgt_resource_t *trtp, int init_id)
4784{
4785	struct ccb_immediate_notify *inot;
4786	mpt_tgt_state_t *tgt;
4787
4788	tgt = MPT_TGT_STATE(mpt, req);
4789	inot = (struct ccb_immediate_notify *) STAILQ_FIRST(&trtp->inots);
4790	if (inot == NULL) {
4791		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n");
4792		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL, 0);
4793		return;
4794	}
4795	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
4796	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4797	    "Get FREE INOT %p lun %jx\n", inot,
4798	    (uintmax_t)inot->ccb_h.target_lun);
4799
4800	inot->initiator_id = init_id;	/* XXX */
4801	inot->tag_id = tgt->tag_id;
4802	inot->seq_id = 0;
4803	/*
4804	 * This is a somewhat grotesque attempt to map from task management
4805	 * to old style SCSI messages. God help us all.
4806	 */
4807	switch (fc) {
4808	case MPT_QUERY_TASK_SET:
4809		inot->arg = MSG_QUERY_TASK_SET;
4810		break;
4811	case MPT_ABORT_TASK_SET:
4812		inot->arg = MSG_ABORT_TASK_SET;
4813		break;
4814	case MPT_CLEAR_TASK_SET:
4815		inot->arg = MSG_CLEAR_TASK_SET;
4816		break;
4817	case MPT_QUERY_ASYNC_EVENT:
4818		inot->arg = MSG_QUERY_ASYNC_EVENT;
4819		break;
4820	case MPT_LOGICAL_UNIT_RESET:
4821		inot->arg = MSG_LOGICAL_UNIT_RESET;
4822		break;
4823	case MPT_TARGET_RESET:
4824		inot->arg = MSG_TARGET_RESET;
4825		break;
4826	case MPT_CLEAR_ACA:
4827		inot->arg = MSG_CLEAR_ACA;
4828		break;
4829	default:
4830		inot->arg = MSG_NOOP;
4831		break;
4832	}
4833	tgt->ccb = (union ccb *) inot;
4834	inot->ccb_h.status = CAM_MESSAGE_RECV;
4835	xpt_done((union ccb *)inot);
4836}
4837
4838static void
4839mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
4840{
4841	static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
4842	    0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
4843	     'F',  'R',  'E',  'E',  'B',  'S',  'D',  ' ',
4844	     'L',  'S',  'I',  '-',  'L',  'O',  'G',  'I',
4845	     'C',  ' ',  'N',  'U',  'L',  'D',  'E',  'V',
4846	     '0',  '0',  '0',  '1'
4847	};
4848	struct ccb_accept_tio *atiop;
4849	lun_id_t lun;
4850	int tag_action = 0;
4851	mpt_tgt_state_t *tgt;
4852	tgt_resource_t *trtp = NULL;
4853	U8 *lunptr;
4854	U8 *vbuf;
4855	U16 ioindex;
4856	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
4857	uint8_t *cdbp;
4858
4859	/*
4860	 * Stash info for the current command where we can get at it later.
4861	 */
4862	vbuf = req->req_vbuf;
4863	vbuf += MPT_RQSL(mpt);
4864	if (mpt->verbose >= MPT_PRT_DEBUG) {
4865		mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
4866		    max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
4867		    max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
4868		    sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
4869	}
4870
4871	/*
4872	 * Get our state pointer set up.
4873	 */
4874	tgt = MPT_TGT_STATE(mpt, req);
4875	if (tgt->state != TGT_STATE_LOADED) {
4876		mpt_tgt_dump_req_state(mpt, req);
4877		panic("bad target state in mpt_scsi_tgt_atio");
4878	}
4879	memset(tgt, 0, sizeof (mpt_tgt_state_t));
4880	tgt->state = TGT_STATE_IN_CAM;
4881	tgt->reply_desc = reply_desc;
4882	ioindex = GET_IO_INDEX(reply_desc);
4883
4884	/*
4885	 * The tag we construct here allows us to find the
4886	 * original request that the command came in with.
4887	 *
4888	 * This way we don't have to depend on anything but the
4889	 * tag to find things when CCBs show back up from CAM.
4890	 */
4891	tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
4892
4893	if (mpt->is_fc) {
4894		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
4895		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
4896		if (fc->FcpCntl[2]) {
4897			/*
4898			 * Task Management Request
4899			 */
4900			switch (fc->FcpCntl[2]) {
4901			case 0x1:
4902				fct = MPT_QUERY_TASK_SET;
4903				break;
4904			case 0x2:
4905				fct = MPT_ABORT_TASK_SET;
4906				break;
4907			case 0x4:
4908				fct = MPT_CLEAR_TASK_SET;
4909				break;
4910			case 0x8:
4911				fct = MPT_QUERY_ASYNC_EVENT;
4912				break;
4913			case 0x10:
4914				fct = MPT_LOGICAL_UNIT_RESET;
4915				break;
4916			case 0x20:
4917				fct = MPT_TARGET_RESET;
4918				break;
4919			case 0x40:
4920				fct = MPT_CLEAR_ACA;
4921				break;
4922			default:
4923				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
4924				    fc->FcpCntl[2]);
4925				mpt_scsi_tgt_status(mpt, NULL, req,
4926				    SCSI_STATUS_OK, NULL, 0);
4927				return;
4928			}
4929		} else {
4930			switch (fc->FcpCntl[1]) {
4931			case 0:
4932				tag_action = MSG_SIMPLE_Q_TAG;
4933				break;
4934			case 1:
4935				tag_action = MSG_HEAD_OF_Q_TAG;
4936				break;
4937			case 2:
4938				tag_action = MSG_ORDERED_Q_TAG;
4939				break;
4940			default:
4941				/*
4942				 * Bah. Ignore Untagged Queing and ACA
4943				 */
4944				tag_action = MSG_SIMPLE_Q_TAG;
4945				break;
4946			}
4947		}
4948		tgt->resid = be32toh(fc->FcpDl);
4949		cdbp = fc->FcpCdb;
4950		lunptr = fc->FcpLun;
4951		tgt->itag = fc->OptionalOxid;
4952	} else if (mpt->is_sas) {
4953		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
4954		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
4955		cdbp = ssp->CDB;
4956		lunptr = ssp->LogicalUnitNumber;
4957		tgt->itag = ssp->InitiatorTag;
4958	} else {
4959		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
4960		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
4961		cdbp = sp->CDB;
4962		lunptr = sp->LogicalUnitNumber;
4963		tgt->itag = sp->Tag;
4964	}
4965
4966	lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(lunptr));
4967
4968	/*
4969	 * Deal with non-enabled or bad luns here.
4970	 */
4971	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
4972	    mpt->trt[lun].enabled == 0) {
4973		if (mpt->twildcard) {
4974			trtp = &mpt->trt_wildcard;
4975		} else if (fct == MPT_NIL_TMT_VALUE) {
4976			/*
4977			 * In this case, we haven't got an upstream listener
4978			 * for either a specific lun or wildcard luns. We
4979			 * have to make some sensible response. For regular
4980			 * inquiry, just return some NOT HERE inquiry data.
4981			 * For VPD inquiry, report illegal field in cdb.
4982			 * For REQUEST SENSE, just return NO SENSE data.
4983			 * REPORT LUNS gets illegal command.
4984			 * All other commands get 'no such device'.
4985			 */
4986			uint8_t sense[MPT_SENSE_SIZE];
4987			size_t len;
4988
4989			memset(sense, 0, sizeof(sense));
4990			sense[0] = 0xf0;
4991			sense[2] = 0x5;
4992			sense[7] = 0x8;
4993
4994			switch (cdbp[0]) {
4995			case INQUIRY:
4996			{
4997				if (cdbp[1] != 0) {
4998					sense[12] = 0x26;
4999					sense[13] = 0x01;
5000					break;
5001				}
5002				len = min(tgt->resid, cdbp[4]);
5003				len = min(len, sizeof (null_iqd));
5004				mpt_lprt(mpt, MPT_PRT_DEBUG,
5005				    "local inquiry %ld bytes\n", (long) len);
5006				mpt_scsi_tgt_local(mpt, req, lun, 1,
5007				    null_iqd, len);
5008				return;
5009			}
5010			case REQUEST_SENSE:
5011			{
5012				sense[2] = 0x0;
5013				len = min(tgt->resid, cdbp[4]);
5014				len = min(len, sizeof (sense));
5015				mpt_lprt(mpt, MPT_PRT_DEBUG,
5016				    "local reqsense %ld bytes\n", (long) len);
5017				mpt_scsi_tgt_local(mpt, req, lun, 1,
5018				    sense, len);
5019				return;
5020			}
5021			case REPORT_LUNS:
5022				mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
5023				sense[12] = 0x26;
5024				return;
5025			default:
5026				mpt_lprt(mpt, MPT_PRT_DEBUG,
5027				    "CMD 0x%x to unmanaged lun %jx\n",
5028				    cdbp[0], (uintmax_t)lun);
5029				sense[12] = 0x25;
5030				break;
5031			}
5032			mpt_scsi_tgt_status(mpt, NULL, req,
5033			    SCSI_STATUS_CHECK_COND, sense, sizeof(sense));
5034			return;
5035		}
5036		/* otherwise, leave trtp NULL */
5037	} else {
5038		trtp = &mpt->trt[lun];
5039	}
5040
5041	/*
5042	 * Deal with any task management
5043	 */
5044	if (fct != MPT_NIL_TMT_VALUE) {
5045		if (trtp == NULL) {
5046			mpt_prt(mpt, "task mgmt function %x but no listener\n",
5047			    fct);
5048			mpt_scsi_tgt_status(mpt, NULL, req,
5049			    SCSI_STATUS_OK, NULL, 0);
5050		} else {
5051			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
5052			    GET_INITIATOR_INDEX(reply_desc));
5053		}
5054		return;
5055	}
5056
5057
5058	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
5059	if (atiop == NULL) {
5060		mpt_lprt(mpt, MPT_PRT_WARN,
5061		    "no ATIOs for lun %jx- sending back %s\n", (uintmax_t)lun,
5062		    mpt->tenabled? "QUEUE FULL" : "BUSY");
5063		mpt_scsi_tgt_status(mpt, NULL, req,
5064		    mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
5065		    NULL, 0);
5066		return;
5067	}
5068	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
5069	mpt_lprt(mpt, MPT_PRT_DEBUG1,
5070	    "Get FREE ATIO %p lun %jx\n", atiop,
5071	    (uintmax_t)atiop->ccb_h.target_lun);
5072	atiop->ccb_h.ccb_mpt_ptr = mpt;
5073	atiop->ccb_h.status = CAM_CDB_RECVD;
5074	atiop->ccb_h.target_lun = lun;
5075	atiop->sense_len = 0;
5076	atiop->tag_id = tgt->tag_id;
5077	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
5078	atiop->cdb_len = 16;
5079	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
5080	if (tag_action) {
5081		atiop->tag_action = tag_action;
5082		atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
5083	}
5084	if (mpt->verbose >= MPT_PRT_DEBUG) {
5085		int i;
5086		mpt_prt(mpt, "START_CCB %p for lun %jx CDB=<", atiop,
5087		    (uintmax_t)atiop->ccb_h.target_lun);
5088		for (i = 0; i < atiop->cdb_len; i++) {
5089			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
5090			    (i == (atiop->cdb_len - 1))? '>' : ' ');
5091		}
5092		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
5093		    tgt->itag, tgt->tag_id, tgt->reply_desc, tgt->resid);
5094	}
5095
5096	xpt_done((union ccb *)atiop);
5097}
5098
5099static void
5100mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
5101{
5102	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
5103
5104	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
5105	    "nx %d tag 0x%08x itag 0x%04x state=%d\n", req, req->serno,
5106	    tgt->reply_desc, tgt->resid, tgt->bytes_xfered, tgt->ccb,
5107	    tgt->req, tgt->nxfers, tgt->tag_id, tgt->itag, tgt->state);
5108}
5109
5110static void
5111mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
5112{
5113
5114	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
5115	    req->index, req->index, req->state);
5116	mpt_tgt_dump_tgt_state(mpt, req);
5117}
5118
5119static int
5120mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
5121    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
5122{
5123	int dbg;
5124	union ccb *ccb;
5125	U16 status;
5126
5127	if (reply_frame == NULL) {
5128		/*
5129		 * Figure out what the state of the command is.
5130		 */
5131		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
5132
5133#ifdef	INVARIANTS
5134		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
5135		if (tgt->req) {
5136			mpt_req_not_spcl(mpt, tgt->req,
5137			    "turbo scsi_tgt_reply associated req", __LINE__);
5138		}
5139#endif
5140		switch(tgt->state) {
5141		case TGT_STATE_LOADED:
5142			/*
5143			 * This is a new command starting.
5144			 */
5145			mpt_scsi_tgt_atio(mpt, req, reply_desc);
5146			break;
5147		case TGT_STATE_MOVING_DATA:
5148		{
5149			ccb = tgt->ccb;
5150			if (tgt->req == NULL) {
5151				panic("mpt: turbo target reply with null "
5152				    "associated request moving data");
5153				/* NOTREACHED */
5154			}
5155			if (ccb == NULL) {
5156				if (tgt->is_local == 0) {
5157					panic("mpt: turbo target reply with "
5158					    "null associated ccb moving data");
5159					/* NOTREACHED */
5160				}
5161				mpt_lprt(mpt, MPT_PRT_DEBUG,
5162				    "TARGET_ASSIST local done\n");
5163				TAILQ_REMOVE(&mpt->request_pending_list,
5164				    tgt->req, links);
5165				mpt_free_request(mpt, tgt->req);
5166				tgt->req = NULL;
5167				mpt_scsi_tgt_status(mpt, NULL, req,
5168				    0, NULL, 0);
5169				return (TRUE);
5170			}
5171			tgt->ccb = NULL;
5172			tgt->nxfers++;
5173			mpt_req_untimeout(tgt->req, mpt_timeout, ccb);
5174			mpt_lprt(mpt, MPT_PRT_DEBUG,
5175			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
5176			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
5177			/*
5178			 * Free the Target Assist Request
5179			 */
5180			KASSERT(tgt->req->ccb == ccb,
5181			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
5182			    tgt->req->serno, tgt->req->ccb));
5183			TAILQ_REMOVE(&mpt->request_pending_list,
5184			    tgt->req, links);
5185			mpt_free_request(mpt, tgt->req);
5186			tgt->req = NULL;
5187
5188			/*
5189			 * Do we need to send status now? That is, are
5190			 * we done with all our data transfers?
5191			 */
5192			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
5193				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
5194				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
5195				KASSERT(ccb->ccb_h.status,
5196				    ("zero ccb sts at %d", __LINE__));
5197				tgt->state = TGT_STATE_IN_CAM;
5198				if (mpt->outofbeer) {
5199					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
5200					mpt->outofbeer = 0;
5201					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
5202				}
5203				xpt_done(ccb);
5204				break;
5205			}
5206			/*
5207			 * Otherwise, send status (and sense)
5208			 */
5209			mpt_scsi_tgt_status(mpt, ccb, req,
5210			    ccb->csio.scsi_status,
5211			    (void *)&ccb->csio.sense_data,
5212			    (ccb->ccb_h.flags & CAM_SEND_SENSE) ?
5213			     ccb->csio.sense_len : 0);
5214			break;
5215		}
5216		case TGT_STATE_SENDING_STATUS:
5217		case TGT_STATE_MOVING_DATA_AND_STATUS:
5218		{
5219			int ioindex;
5220			ccb = tgt->ccb;
5221
5222			if (tgt->req == NULL) {
5223				panic("mpt: turbo target reply with null "
5224				    "associated request sending status");
5225				/* NOTREACHED */
5226			}
5227
5228			if (ccb) {
5229				tgt->ccb = NULL;
5230				if (tgt->state ==
5231				    TGT_STATE_MOVING_DATA_AND_STATUS) {
5232					tgt->nxfers++;
5233				}
5234				mpt_req_untimeout(tgt->req, mpt_timeout, ccb);
5235				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
5236					ccb->ccb_h.status |= CAM_SENT_SENSE;
5237				}
5238				mpt_lprt(mpt, MPT_PRT_DEBUG,
5239				    "TARGET_STATUS tag %x sts %x flgs %x req "
5240				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
5241				    ccb->ccb_h.flags, tgt->req);
5242				/*
5243				 * Free the Target Send Status Request
5244				 */
5245				KASSERT(tgt->req->ccb == ccb,
5246				    ("tgt->req %p:%u tgt->req->ccb %p",
5247				    tgt->req, tgt->req->serno, tgt->req->ccb));
5248				/*
5249				 * Notify CAM that we're done
5250				 */
5251				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
5252				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
5253				KASSERT(ccb->ccb_h.status,
5254				    ("ZERO ccb sts at %d", __LINE__));
5255				tgt->ccb = NULL;
5256			} else {
5257				mpt_lprt(mpt, MPT_PRT_DEBUG,
5258				    "TARGET_STATUS non-CAM for req %p:%u\n",
5259				    tgt->req, tgt->req->serno);
5260			}
5261			TAILQ_REMOVE(&mpt->request_pending_list,
5262			    tgt->req, links);
5263			mpt_free_request(mpt, tgt->req);
5264			tgt->req = NULL;
5265
5266			/*
5267			 * And re-post the Command Buffer.
5268			 * This will reset the state.
5269			 */
5270			ioindex = GET_IO_INDEX(reply_desc);
5271			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5272			tgt->is_local = 0;
5273			mpt_post_target_command(mpt, req, ioindex);
5274
5275			/*
5276			 * And post a done for anyone who cares
5277			 */
5278			if (ccb) {
5279				if (mpt->outofbeer) {
5280					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
5281					mpt->outofbeer = 0;
5282					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
5283				}
5284				xpt_done(ccb);
5285			}
5286			break;
5287		}
5288		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
5289			tgt->state = TGT_STATE_LOADED;
5290			break;
5291		default:
5292			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
5293			    "Reply Function\n", tgt->state);
5294		}
5295		return (TRUE);
5296	}
5297
5298	status = le16toh(reply_frame->IOCStatus);
5299	if (status != MPI_IOCSTATUS_SUCCESS) {
5300		dbg = MPT_PRT_ERROR;
5301	} else {
5302		dbg = MPT_PRT_DEBUG1;
5303	}
5304
5305	mpt_lprt(mpt, dbg,
5306	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
5307	     req, req->serno, reply_frame, reply_frame->Function, status);
5308
5309	switch (reply_frame->Function) {
5310	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
5311	{
5312		mpt_tgt_state_t *tgt;
5313#ifdef	INVARIANTS
5314		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
5315#endif
5316		if (status != MPI_IOCSTATUS_SUCCESS) {
5317			/*
5318			 * XXX What to do?
5319			 */
5320			break;
5321		}
5322		tgt = MPT_TGT_STATE(mpt, req);
5323		KASSERT(tgt->state == TGT_STATE_LOADING,
5324		    ("bad state 0x%x on reply to buffer post", tgt->state));
5325		mpt_assign_serno(mpt, req);
5326		tgt->state = TGT_STATE_LOADED;
5327		break;
5328	}
5329	case MPI_FUNCTION_TARGET_ASSIST:
5330#ifdef	INVARIANTS
5331		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
5332#endif
5333		mpt_prt(mpt, "target assist completion\n");
5334		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5335		mpt_free_request(mpt, req);
5336		break;
5337	case MPI_FUNCTION_TARGET_STATUS_SEND:
5338#ifdef	INVARIANTS
5339		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
5340#endif
5341		mpt_prt(mpt, "status send completion\n");
5342		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5343		mpt_free_request(mpt, req);
5344		break;
5345	case MPI_FUNCTION_TARGET_MODE_ABORT:
5346	{
5347		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
5348		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
5349		PTR_MSG_TARGET_MODE_ABORT abtp =
5350		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
5351		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
5352#ifdef	INVARIANTS
5353		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
5354#endif
5355		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
5356		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
5357		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5358		mpt_free_request(mpt, req);
5359		break;
5360	}
5361	default:
5362		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
5363		    "0x%x\n", reply_frame->Function);
5364		break;
5365	}
5366	return (TRUE);
5367}
5368