/*-
 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/mpt/mpt_cam.c 261455 2014-02-04 03:36:42Z eadler $");

#include <dev/mpt/mpt.h>
#include <dev/mpt/mpt_cam.h>
#include <dev/mpt/mpt_raid.h>

#include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
#include "dev/mpt/mpilib/mpi_init.h"
#include "dev/mpt/mpilib/mpi_targ.h"
#include "dev/mpt/mpilib/mpi_fc.h"
#include "dev/mpt/mpilib/mpi_sas.h"

#include <sys/callout.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#if __FreeBSD_version >= 700025
#ifndef	CAM_NEW_TRAN_CODE
#define	CAM_NEW_TRAN_CODE	1
#endif
#endif

static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
static int
mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
static void mpt_setwidth(struct mpt_softc *, int, int);
static void mpt_setsync(struct mpt_softc *, int, int, int);
static int mpt_update_spi_config(struct mpt_softc *, int);

static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static mpt_reply_handler_t mpt_fc_els_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
					MSG_DEFAULT_REPLY *);
static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
static int mpt_fc_reset_link(struct mpt_softc *, int);

static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static void mpt_recover_commands(struct mpt_softc *mpt);

static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
    u_int, u_int, u_int, int);

static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
static int mpt_add_els_buffers(struct mpt_softc *mpt);
static int mpt_add_target_commands(struct mpt_softc *mpt);
static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static void mpt_target_start_io(struct mpt_softc *, union ccb *);
static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
    uint8_t, uint8_t const *);
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
    tgt_resource_t *, int);
static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
static mpt_reply_handler_t mpt_sata_pass_reply_handler;

static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;

static mpt_probe_handler_t	mpt_cam_probe;
static mpt_attach_handler_t	mpt_cam_attach;
static mpt_enable_handler_t	mpt_cam_enable;
static mpt_ready_handler_t	mpt_cam_ready;
static mpt_event_handler_t	mpt_cam_event;
static mpt_reset_handler_t	mpt_cam_ioc_reset;
static mpt_detach_handler_t	mpt_cam_detach;

static struct mpt_personality mpt_cam_personality =
{
	.name		= "mpt_cam",
	.probe		= mpt_cam_probe,
	.attach		= mpt_cam_attach,
	.enable		= mpt_cam_enable,
	.ready		= mpt_cam_ready,
	.event		= mpt_cam_event,
	.reset		= mpt_cam_ioc_reset,
	.detach		= mpt_cam_detach,
};

DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);

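/*
 * The hw.mpt.enable_sata_wc tunable controls write caching on SATA disks
 * attached to SAS controllers (see mptsas_set_sata_wc below): the default
 * of -1 leaves the drive setting alone, 0 disables the write cache, and
 * any other value enables it.
 */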
int mpt_enable_sata_wc = -1;
TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);

static int
mpt_cam_probe(struct mpt_softc *mpt)
{
	int role;

	/*
	 * Only attach to nodes that support the initiator or target role
	 * (or want to) or have RAID physical devices that need CAM pass-thru
	 * support.
	 */
	if (mpt->do_cfg_role) {
		role = mpt->cfg_role;
	} else {
		role = mpt->role;
	}
	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
		return (0);
	}
	return (ENODEV);
}

static int
mpt_cam_attach(struct mpt_softc *mpt)
{
	struct cam_devq *devq;
	mpt_handler_t	 handler;
	int		 maxq;
	int		 error;

	MPT_LOCK(mpt);
	TAILQ_INIT(&mpt->request_timeout_list);
	maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
	    mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
				     &scsi_io_handler_id);
	if (error != 0) {
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
				     &scsi_tmf_handler_id);
	if (error != 0) {
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	/*
	 * If we're fibre channel and could support target mode, we register
	 * an ELS reply handler and give it resources.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_fc_els_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &fc_els_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
		if (mpt_add_els_buffers(mpt) == FALSE) {
			error = ENOMEM;
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
		maxq -= mpt->els_cmds_allocated;
	}

	/*
	 * If we support target mode, we register a reply handler for it,
	 * but don't add command resources until we actually enable target
	 * mode.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_scsi_tgt_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &mpt->scsi_tgt_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
	}

	if (mpt->is_sas) {
		handler.reply_handler = mpt_sata_pass_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &sata_pass_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
	}

	/*
	 * We keep one request reserved for timeout TMF requests.
	 */
	mpt->tmf_req = mpt_get_request(mpt, FALSE);
	if (mpt->tmf_req == NULL) {
		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	/*
	 * Mark the request as free even though not on the free list.
	 * There is only one TMF request allowed to be outstanding at
	 * a time and the TMF routines perform their own allocation
	 * tracking using the standard state flags.
	 */
	mpt->tmf_req->state = REQ_STATE_FREE;
	maxq--;

	/*
	 * The rest of this is CAM foo, for which we need to drop our lock
	 */
	MPT_UNLOCK(mpt);

	if (mpt_spawn_recovery_thread(mpt) != 0) {
		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(maxq);
	if (devq == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Construct our SIM entry.
	 */
	mpt->sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->sim == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
		cam_simq_free(devq);
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register exactly this bus.
	 */
	MPT_LOCK(mpt);
	if (mpt_xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
		mpt_prt(mpt, "Bus registration Failed!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Path!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}
	MPT_UNLOCK(mpt);

	/*
	 * Only register a second bus for RAID physical
	 * devices if the controller supports RAID.
	 */
	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
		return (0);
	}

	/*
	 * Create a "bus" to export all hidden disks to CAM.
	 */
	mpt->phydisk_sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->phydisk_sim == NULL) {
		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register this bus.
	 */
	MPT_LOCK(mpt);
	if (mpt_xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) !=
	    CAM_SUCCESS) {
		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	if (xpt_create_path(&mpt->phydisk_path, NULL,
	    cam_sim_path(mpt->phydisk_sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}
	MPT_UNLOCK(mpt);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
	return (0);

cleanup:
	mpt_cam_detach(mpt);
	return (error);
}

/*
 * Read FC configuration information
 */
static int
mpt_read_config_info_fc(struct mpt_softc *mpt)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	char *topology = NULL;
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
		 mpt->mpt_fcport_page0.Header.PageVersion,
		 mpt->mpt_fcport_page0.Header.PageLength,
		 mpt->mpt_fcport_page0.Header.PageNumber,
		 mpt->mpt_fcport_page0.Header.PageType);

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read FC Port Page 0\n");
		return (-1);
	}
	mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);

	mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;

	switch (mpt->mpt_fcport_page0.Flags &
	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
		mpt->mpt_fcport_speed = 0;
		topology = "<NO LOOP>";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
		topology = "N-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
		topology = "NL-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
		topology = "F-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
		topology = "FL-Port";
		break;
	default:
		mpt->mpt_fcport_speed = 0;
		topology = "?";
		break;
	}

	mpt_lprt(mpt, MPT_PRT_INFO,
	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
	    "Speed %u-Gbit\n", topology,
	    mpt->mpt_fcport_page0.WWNN.High,
	    mpt->mpt_fcport_page0.WWNN.Low,
	    mpt->mpt_fcport_page0.WWPN.High,
	    mpt->mpt_fcport_page0.WWPN.Low,
	    mpt->mpt_fcport_speed);
	MPT_UNLOCK(mpt);
	ctx = device_get_sysctl_ctx(mpt->dev);
	tree = device_get_sysctl_tree(mpt->dev);

	snprintf(mpt->scinfo.fc.wwnn, sizeof (mpt->scinfo.fc.wwnn),
	    "0x%08x%08x", mpt->mpt_fcport_page0.WWNN.High,
	    mpt->mpt_fcport_page0.WWNN.Low);

	snprintf(mpt->scinfo.fc.wwpn, sizeof (mpt->scinfo.fc.wwpn),
	    "0x%08x%08x", mpt->mpt_fcport_page0.WWPN.High,
	    mpt->mpt_fcport_page0.WWPN.Low);

	SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
	    "World Wide Node Name");

	SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
	    "World Wide Port Name");

	MPT_LOCK(mpt);
	return (0);
}

/*
 * Set FC configuration information.
 */
static int
mpt_set_initial_config_fc(struct mpt_softc *mpt)
{
	CONFIG_PAGE_FC_PORT_1 fc;
	U32 fl;
	int r, doit = 0;
	int role;

	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
	    &fc.Header, FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1 header\n");
		return (mpt_fc_reset_link(mpt, 1));
	}

	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
	    &fc.Header, sizeof (fc), FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1\n");
		return (mpt_fc_reset_link(mpt, 1));
	}
	mpt2host_config_page_fc_port_1(&fc);

	/*
	 * Check our flags to make sure we support the role we want.
	 */
	doit = 0;
	role = 0;
	fl = fc.Flags;

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
		role |= MPT_ROLE_INITIATOR;
	}
	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		role |= MPT_ROLE_TARGET;
	}

	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;

	if (mpt->do_cfg_role == 0) {
		role = mpt->cfg_role;
	} else {
		mpt->do_cfg_role = 0;
	}

	if (role != mpt->cfg_role) {
		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
			if ((role & MPT_ROLE_INITIATOR) == 0) {
				mpt_prt(mpt, "adding initiator role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
				doit++;
			} else {
				mpt_prt(mpt, "keeping initiator role\n");
			}
		} else if (role & MPT_ROLE_INITIATOR) {
			mpt_prt(mpt, "removing initiator role\n");
			doit++;
		}
		if (mpt->cfg_role & MPT_ROLE_TARGET) {
			if ((role & MPT_ROLE_TARGET) == 0) {
				mpt_prt(mpt, "adding target role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
				doit++;
			} else {
				mpt_prt(mpt, "keeping target role\n");
			}
		} else if (role & MPT_ROLE_TARGET) {
			mpt_prt(mpt, "removing target role\n");
			doit++;
		}
		mpt->role = mpt->cfg_role;
	}

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
			mpt_prt(mpt, "adding OXID option\n");
			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
			doit++;
		}
	}

	if (doit) {
		fc.Flags = fl;
		host2mpt_config_page_fc_port_1(&fc);
		r = mpt_write_cfg_page(mpt,
		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
		    sizeof(fc), FALSE, 5000);
		if (r != 0) {
			mpt_prt(mpt, "failed to update NVRAM with changes\n");
			return (0);
		}
		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
		    "effect until next reboot or IOC reset\n");
	}
	return (0);
}

static int
mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
{
	ConfigExtendedPageHeader_t hdr;
	struct mptsas_phyinfo *phyinfo;
	SasIOUnitPage0_t *buffer;
	int error, len, i;

	error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
				       0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
				       &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	len = hdr.ExtPageLength * 4;
	buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
				     0, &hdr, buffer, len, 0, 10000);
	if (error) {
		free(buffer, M_DEVBUF);
		goto out;
	}

	portinfo->num_phys = buffer->NumPhys;
	portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) *
	    portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo->phy_info == NULL) {
		free(buffer, M_DEVBUF);
		error = ENOMEM;
		goto out;
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		phyinfo->phy_num = i;
		phyinfo->port_id = buffer->PhyData[i].Port;
		phyinfo->negotiated_link_rate =
		    buffer->PhyData[i].NegotiatedLinkRate;
		phyinfo->handle =
		    le16toh(buffer->PhyData[i].ControllerDevHandle);
	}

	free(buffer, M_DEVBUF);
out:
	return (error);
}

static int
mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
	uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasPhyPage0_t *buffer;
	int error;

	error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
				       MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
				       0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
				     form + form_specific, &hdr, buffer,
				     sizeof(SasPhyPage0_t), 0, 10000);
	if (error) {
		free(buffer, M_DEVBUF);
		goto out;
	}

	phy_info->hw_link_rate = buffer->HwLinkRate;
	phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
	phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
	phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);

	free(buffer, M_DEVBUF);
out:
	return (error);
}

static int
mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
	uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasDevicePage0_t *buffer;
	uint64_t sas_address;
	int error = 0;

	bzero(device_info, sizeof(*device_info));
	error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
				       MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
				       &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
				     form + form_specific, &hdr, buffer,
				     sizeof(SasDevicePage0_t), 0, 10000);
	if (error) {
		free(buffer, M_DEVBUF);
		goto out;
	}

	device_info->dev_handle = le16toh(buffer->DevHandle);
	device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
	device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
	device_info->slot = le16toh(buffer->Slot);
	device_info->phy_num = buffer->PhyNum;
	device_info->physical_port = buffer->PhysicalPort;
	device_info->target_id = buffer->TargetID;
	device_info->bus = buffer->Bus;
	bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
	device_info->sas_address = le64toh(sas_address);
	device_info->device_info = le32toh(buffer->DeviceInfo);

	free(buffer, M_DEVBUF);
out:
	return (error);
}

/*
 * Read SAS configuration information: the IO unit page plus, per phy,
 * the phy page and the device pages for the phy and anything attached.
 */
static int
mpt_read_config_info_sas(struct mpt_softc *mpt)
{
	struct mptsas_portinfo *portinfo;
	struct mptsas_phyinfo *phyinfo;
	int error, i;

	portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo == NULL)
		return (ENOMEM);

	error = mptsas_sas_io_unit_pg0(mpt, portinfo);
	if (error) {
		free(portinfo, M_DEVBUF);
		return (0);
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		error = mptsas_sas_phy_pg0(mpt, phyinfo,
		    (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
		    MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
		if (error)
			break;
		error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
		    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
		    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
		    phyinfo->handle);
		if (error)
			break;
		phyinfo->identify.phy_num = phyinfo->phy_num = i;
		if (phyinfo->attached.dev_handle)
			error = mptsas_sas_device_pg0(mpt,
			    &phyinfo->attached,
			    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
			    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
			    phyinfo->attached.dev_handle);
		if (error)
			break;
	}
	mpt->sas_portinfo = portinfo;
	return (0);
}

static void
mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
	int enabled)
{
	SataPassthroughRequest_t	*pass;
	request_t *req;
	int error, status;

	req = mpt_get_request(mpt, 0);
	if (req == NULL)
		return;

	pass = req->req_vbuf;
	bzero(pass, sizeof(SataPassthroughRequest_t));
	pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
	pass->TargetID = devinfo->target_id;
	pass->Bus = devinfo->bus;
	pass->PassthroughFlags = 0;
	pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
	pass->DataLength = 0;
	pass->MsgContext = htole32(req->index | sata_pass_handler_id);
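	/*
	 * Build a Host-to-Device register FIS for SET FEATURES: 0x27 is
	 * the H2D FIS type, 0x80 sets its command-update bit, and 0xef
	 * is the SET FEATURES command.  Subcommand 0x02 enables the
	 * drive's write cache and 0x82 disables it; the remaining bytes
	 * look like the conventional device (0x40) and control (0x08)
	 * register values.
	 */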
	pass->CommandFIS[0] = 0x27;
	pass->CommandFIS[1] = 0x80;
	pass->CommandFIS[2] = 0xef;
	pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
	pass->CommandFIS[7] = 0x40;
	pass->CommandFIS[15] = 0x08;

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);
	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
			     10 * 1000);
	if (error) {
		mpt_free_request(mpt, req);
		printf("error %d sending passthrough\n", error);
		return;
	}

	status = le16toh(req->IOCStatus);
	if (status != MPI_IOCSTATUS_SUCCESS) {
		mpt_free_request(mpt, req);
		printf("IOCSTATUS %d\n", status);
		return;
	}

	mpt_free_request(mpt, req);
}

/*
 * Set initial SAS configuration: optionally toggle write caching on any
 * attached SATA devices, per the hw.mpt.enable_sata_wc tunable.
 */
static int
mpt_set_initial_config_sas(struct mpt_softc *mpt)
{
	struct mptsas_phyinfo *phyinfo;
	int i;

	if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
		for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
			phyinfo = &mpt->sas_portinfo->phy_info[i];
			if (phyinfo->attached.dev_handle == 0)
				continue;
			if ((phyinfo->attached.device_info &
			    MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
				continue;
			if (bootverbose)
				device_printf(mpt->dev,
				    "%sabling SATA WC on phy %d\n",
				    (mpt_enable_sata_wc) ? "En" : "Dis", i);
			mptsas_set_sata_wc(mpt, &phyinfo->attached,
					   mpt_enable_sata_wc);
		}
	}

	return (0);
}

static int
mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{

	if (req != NULL) {
		if (reply_frame != NULL) {
			req->IOCStatus = le16toh(reply_frame->IOCStatus);
		}
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
			wakeup(req);
		} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
			/*
			 * Whew- we can free this request (late completion)
			 */
			mpt_free_request(mpt, req);
		}
	}

	return (TRUE);
}

/*
 * Read SCSI configuration information
 */
static int
mpt_read_config_info_spi(struct mpt_softc *mpt)
{
	int rv, i;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
	    &mpt->mpt_port_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
	    mpt->mpt_port_page0.Header.PageVersion,
	    mpt->mpt_port_page0.Header.PageLength,
	    mpt->mpt_port_page0.Header.PageNumber,
	    mpt->mpt_port_page0.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
	    &mpt->mpt_port_page1.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
	    mpt->mpt_port_page1.Header.PageVersion,
	    mpt->mpt_port_page1.Header.PageLength,
	    mpt->mpt_port_page1.Header.PageNumber,
	    mpt->mpt_port_page1.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
	    &mpt->mpt_port_page2.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
	    mpt->mpt_port_page2.Header.PageVersion,
	    mpt->mpt_port_page2.Header.PageLength,
	    mpt->mpt_port_page2.Header.PageNumber,
	    mpt->mpt_port_page2.Header.PageType);

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page0[i].Header.PageVersion,
		    mpt->mpt_dev_page0[i].Header.PageLength,
		    mpt->mpt_dev_page0[i].Header.PageNumber,
		    mpt->mpt_dev_page0[i].Header.PageType);

		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page1[i].Header.PageVersion,
		    mpt->mpt_dev_page1[i].Header.PageLength,
		    mpt->mpt_dev_page1[i].Header.PageNumber,
		    mpt->mpt_dev_page1[i].Header.PageType);
	}

	/*
	 * At this point, we don't *have* to fail. As long as we have
	 * valid config header information, we can (barely) lurch
	 * along.
	 */

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
	} else {
		mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
		    mpt->mpt_port_page0.Capabilities,
		    mpt->mpt_port_page0.PhysicalInterface);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
	} else {
		mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
		    mpt->mpt_port_page1.Configuration,
		    mpt->mpt_port_page1.OnBusTimerValue);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
	} else {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "Port Page 2: Flags %x Settings %x\n",
		    mpt->mpt_port_page2.PortFlags,
		    mpt->mpt_port_page2.PortSettings);
		mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
		for (i = 0; i < 16; i++) {
			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
			    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
		}
	}

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 0\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 0: Negotiated Params %x Information %x\n",
		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
		    mpt->mpt_dev_page0[i].Information);

		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 1\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 1: Requested Params %x Configuration %x\n",
		    i, mpt->mpt_dev_page1[i].RequestedParameters,
		    mpt->mpt_dev_page1[i].Configuration);
	}
	return (0);
}

/*
 * Validate SPI configuration information.
 *
 * In particular, validate SPI Port Page 1.
 */
static int
mpt_set_initial_config_spi(struct mpt_softc *mpt)
{
	int error, i, pp1val;

	mpt->mpt_disc_enable = 0xff;
	mpt->mpt_tag_enable = 0;

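	/*
	 * SPI Port Page 1 Configuration should hold our initiator ID in
	 * its low bits and, shifted up by the port-response-ID field, a
	 * one-hot bitmap of the IDs we answer to; both name mpt_ini_id.
	 */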
	pp1val = ((1 << mpt->mpt_ini_id) <<
	    MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id;
	if (mpt->mpt_port_page1.Configuration != pp1val) {
		CONFIG_PAGE_SCSI_PORT_1 tmp;

		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
		tmp = mpt->mpt_port_page1;
		tmp.Configuration = pp1val;
		host2mpt_config_page_scsi_port_1(&tmp);
		error = mpt_write_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		error = mpt_read_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		mpt2host_config_page_scsi_port_1(&tmp);
		if (tmp.Configuration != pp1val) {
			mpt_prt(mpt,
			    "failed to reset SPI Port Page 1 Config value\n");
			return (-1);
		}
		mpt->mpt_port_page1 = tmp;
	}

	/*
	 * The purpose of this exercise is to get
	 * all targets back to async/narrow.
	 *
	 * We skip this step if the BIOS has already negotiated
	 * speeds with the targets.
	 */
	i = mpt->mpt_port_page2.PortSettings &
	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "honoring BIOS transfer negotiations\n");
	} else {
		for (i = 0; i < 16; i++) {
			mpt->mpt_dev_page1[i].RequestedParameters = 0;
			mpt->mpt_dev_page1[i].Configuration = 0;
			(void) mpt_update_spi_config(mpt, i);
		}
	}
	return (0);
}

static int
mpt_cam_enable(struct mpt_softc *mpt)
{
	int error;

	MPT_LOCK(mpt);

	error = EIO;
	if (mpt->is_fc) {
		if (mpt_read_config_info_fc(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_fc(mpt)) {
			goto out;
		}
	} else if (mpt->is_sas) {
		if (mpt_read_config_info_sas(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_sas(mpt)) {
			goto out;
		}
	} else if (mpt->is_spi) {
		if (mpt_read_config_info_spi(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_spi(mpt)) {
			goto out;
		}
	}
	error = 0;

out:
	MPT_UNLOCK(mpt);
	return (error);
}

static void
mpt_cam_ready(struct mpt_softc *mpt)
{

	/*
	 * If we're in target mode, hang out resources now
	 * so we don't cause the world to hang talking to us.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
		/*
		 * Try to add some target command resources
		 */
		MPT_LOCK(mpt);
		if (mpt_add_target_commands(mpt) == FALSE) {
			mpt_prt(mpt, "failed to add target commands\n");
		}
		MPT_UNLOCK(mpt);
	}
	mpt->ready = 1;
}

static void
mpt_cam_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	MPT_LOCK(mpt);
	mpt->ready = 0;
	mpt_terminate_recovery_thread(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       scsi_io_handler_id);
	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       scsi_tmf_handler_id);
	handler.reply_handler = mpt_fc_els_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       fc_els_handler_id);
	handler.reply_handler = mpt_scsi_tgt_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       mpt->scsi_tgt_handler_id);
	handler.reply_handler = mpt_sata_pass_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       sata_pass_handler_id);

	if (mpt->tmf_req != NULL) {
		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
		mpt_free_request(mpt, mpt->tmf_req);
		mpt->tmf_req = NULL;
	}
	if (mpt->sas_portinfo != NULL) {
		free(mpt->sas_portinfo, M_DEVBUF);
		mpt->sas_portinfo = NULL;
	}

	if (mpt->sim != NULL) {
		xpt_free_path(mpt->path);
		xpt_bus_deregister(cam_sim_path(mpt->sim));
		cam_sim_free(mpt->sim, TRUE);
		mpt->sim = NULL;
	}

	if (mpt->phydisk_sim != NULL) {
		xpt_free_path(mpt->phydisk_path);
		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
		cam_sim_free(mpt->phydisk_sim, TRUE);
		mpt->phydisk_sim = NULL;
	}
	MPT_UNLOCK(mpt);
}

/*
 * This routine is used after a system crash to dump core onto the swap
 * device.
 */
static void
mpt_poll(struct cam_sim *sim)
{
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc *)cam_sim_softc(sim);
	mpt_intr(mpt);
}

/*
 * Watchdog timeout routine for SCSI requests.
 */
static void
mpt_timeout(void *arg)
{
	union ccb	 *ccb;
	struct mpt_softc *mpt;
	request_t	 *req;

	ccb = (union ccb *)arg;
	mpt = ccb->ccb_h.ccb_mpt_ptr;

	MPT_LOCK_ASSERT(mpt);
	req = ccb->ccb_h.ccb_req_ptr;
	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
	    req->serno, ccb, req->ccb);
/* XXX: WHAT ARE WE TRYING TO DO HERE? */
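	/*
	 * Hand the timed-out request to the recovery thread: move it from
	 * the pending list to the timeout list, mark it, and wake the
	 * thread (see mpt_recover_commands), which will try to abort it.
	 */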
	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
		req->state |= REQ_STATE_TIMEDOUT;
		mpt_wakeup_recovery_thread(mpt);
	}
}

/*
 * Callback routine from bus_dmamap_load_ccb(9) or, in simple cases, called
 * directly.
 *
 * Takes a list of physical segments and builds the SGL for a SCSI IO command
 * and forwards the command to the IOC after one last check that CAM has not
 * aborted the transaction.
 */
static void
mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req, *trq;
	char *mpt_off;
	union ccb *ccb;
	struct mpt_softc *mpt;
	bus_addr_t chain_list_addr;
	int first_lim, seg, this_seg_lim;
	uint32_t addr, cur_off, flags, nxt_off, tf;
	void *sglp = NULL;
	MSG_REQUEST_HEADER *hdrp;
	SGE_SIMPLE64 *se;
	SGE_CHAIN64 *ce;
	int istgt = 0;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;

	hdrp = req->req_vbuf;
	mpt_off = req->req_vbuf;

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
	}

	if (error == 0) {
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			istgt = 0;
			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
			break;
		case MPI_FUNCTION_TARGET_ASSIST:
			istgt = 1;
			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
			break;
		default:
			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
			    hdrp->Function);
			error = EINVAL;
			break;
		}
	}

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
		mpt_prt(mpt, "segment count %d too large (max %u)\n",
		    nseg, mpt->max_seg_cnt);
	}

bad:
	if (error != 0) {
		if (error != EFBIG && error != ENOMEM) {
			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			cam_status status;
			mpt_freeze_ccb(ccb);
			if (error == EFBIG) {
				status = CAM_REQ_TOO_BIG;
			} else if (error == ENOMEM) {
				if (mpt->outofbeer == 0) {
					mpt->outofbeer = 1;
					xpt_freeze_simq(mpt->sim, 1);
					mpt_lprt(mpt, MPT_PRT_DEBUG,
					    "FREEZEQ\n");
				}
				status = CAM_REQUEUE_REQ;
			} else {
				status = CAM_REQ_CMP_ERR;
			}
			mpt_set_ccb_status(ccb, status);
		}
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		xpt_done(ccb);
		mpt_free_request(mpt, req);
		return;
	}

	/*
	 * No data to transfer?
	 * Just make a single simple SGL with zero length.
	 */

	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int tidx = ((char *)sglp) - mpt_off;
		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
	}

	if (nseg == 0) {
		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
		MPI_pSGE_SET_FLAGS(se1,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se1->FlagsLength = htole32(se1->FlagsLength);
		goto out;
	}

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
	if (istgt == 0) {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	} else {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	}

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;
		if (istgt == 0) {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREREAD;
			} else {
				op = BUS_DMASYNC_PREWRITE;
			}
		} else {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREWRITE;
			} else {
				op = BUS_DMASYNC_PREREAD;
			}
		}
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
	}

	/*
	 * Okay, fill in what we can at the end of the command frame.
	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
	 * the command frame.
	 *
	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
	 * SIMPLE64 pointers and start doing CHAIN64 entries after
	 * that.
	 */

	if (nseg < MPT_NSGL_FIRST(mpt)) {
		first_lim = nseg;
	} else {
		/*
		 * Leave room for CHAIN element
		 */
		first_lim = MPT_NSGL_FIRST(mpt) - 1;
	}

	se = (SGE_SIMPLE64 *) sglp;
	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
		tf = flags;
		memset(se, 0, sizeof (*se));
		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
		se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
		if (sizeof(bus_addr_t) > 4) {
			addr = ((uint64_t)dm_segs->ds_addr) >> 32;
			/* SAS1078 36GB limitation WAR */
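			/*
			 * The 1078 reportedly cannot DMA to or from
			 * segments whose 64-bit address has a high dword
			 * of 9 (the 36-40GB window); such segments are
			 * routed through the IOC's local address space by
			 * setting bit 31 and the LOCAL_ADDRESS SGE flag.
			 */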
			if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr +
			    MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) {
				addr |= (1U << 31);
				tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
			}
			se->Address.High = htole32(addr);
		}
		if (seg == first_lim - 1) {
			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
		}
		if (seg == nseg - 1) {
			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
				MPI_SGE_FLAGS_END_OF_BUFFER;
		}
		MPI_pSGE_SET_FLAGS(se, tf);
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (seg == nseg) {
		goto out;
	}

	/*
	 * Tell the IOC where to find the first chain element.
	 */
	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
	nxt_off = MPT_RQSL(mpt);
	trq = req;

	/*
	 * Make up the rest of the data segments out of a chain element
	 * (contained in the current request frame) which points to
	 * SIMPLE64 elements in the next request frame, possibly ending
	 * with *another* chain element (if there's more).
	 */
	while (seg < nseg) {
		/*
		 * Point to the chain descriptor. Note that the chain
		 * descriptor is at the end of the *previous* list (whether
		 * chain or simple).
		 */
		ce = (SGE_CHAIN64 *) se;

		/*
		 * Before we change our current pointer, make sure we won't
		 * overflow the request area with this frame. Note that we
		 * test against 'greater than' here as it's okay in this case
		 * to have next offset be just outside the request area.
		 */
		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
			nxt_off = MPT_REQUEST_AREA;
			goto next_chain;
		}

		/*
		 * Set our SGE element pointer to the beginning of the chain
		 * list and update our next chain list offset.
		 */
		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
		cur_off = nxt_off;
		nxt_off += MPT_RQSL(mpt);

		/*
		 * Now initialize the chain descriptor.
		 */
		memset(ce, 0, sizeof (*ce));

		/*
		 * Get the physical address of the chain list.
		 */
		chain_list_addr = trq->req_pbuf;
		chain_list_addr += cur_off;
		if (sizeof (bus_addr_t) > 4) {
			ce->Address.High =
			    htole32(((uint64_t)chain_list_addr) >> 32);
		}
		ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
			    MPI_SGE_FLAGS_64_BIT_ADDRESSING;

		/*
		 * If we have more than a frame's worth of segments left,
		 * set up the chain list to have the last element be another
		 * chain descriptor.
		 */
		if ((nseg - seg) > MPT_NSGL(mpt)) {
			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
			/*
			 * The length of the chain is the length in bytes of
			 * the number of segments plus the next chain element.
			 *
			 * The next chain descriptor offset is the length,
			 * in words, of the number of segments.
			 */
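			/*
			 * Illustration: if a chain held 31 SGE_SIMPLE64
			 * entries (12 bytes each), Length would be 372
			 * bytes and NextChainOffset 372 / 4 = 93 words,
			 * after which Length grows by the 12-byte
			 * SGE_CHAIN64 itself.
			 */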
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
			ce->NextChainOffset = ce->Length >> 2;
			ce->Length += sizeof (SGE_CHAIN64);
		} else {
			this_seg_lim = nseg;
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
		}
		ce->Length = htole16(ce->Length);

		/*
		 * Fill in the chain list SGE elements with our segment data.
		 *
		 * If we're the last element in this chain list, set the last
		 * element flag. If we're the completely last element period,
		 * set the end of list and end of buffer flags.
		 */
		while (seg < this_seg_lim) {
			tf = flags;
			memset(se, 0, sizeof (*se));
			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			se->Address.Low = htole32(dm_segs->ds_addr &
			    0xffffffff);
			if (sizeof (bus_addr_t) > 4) {
				addr = ((uint64_t)dm_segs->ds_addr) >> 32;
				/* SAS1078 36GB limitation WAR */
				if (mpt->is_1078 &&
				    (((uint64_t)dm_segs->ds_addr +
				    MPI_SGE_LENGTH(se->FlagsLength)) >>
				    32) == 9) {
					addr |= (1U << 31);
					tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
				}
				se->Address.High = htole32(addr);
			}
			if (seg == this_seg_lim - 1) {
				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
			}
			if (seg == nseg - 1) {
				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
					MPI_SGE_FLAGS_END_OF_BUFFER;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
			se->FlagsLength = htole32(se->FlagsLength);
			se++;
			seg++;
			dm_segs++;
		}

    next_chain:
		/*
		 * If we have more segments to do and we've used up all of
		 * the space in a request area, go allocate another one
		 * and chain to that.
		 */
		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
			request_t *nrq;

			nrq = mpt_get_request(mpt, FALSE);

			if (nrq == NULL) {
				error = ENOMEM;
				goto bad;
			}

			/*
			 * Append the new request area on the tail of our list.
			 */
			if ((trq = req->chain) == NULL) {
				req->chain = nrq;
			} else {
				while (trq->chain != NULL) {
					trq = trq->chain;
				}
				trq->chain = nrq;
			}
			trq = nrq;
			mpt_off = trq->req_vbuf;
			if (mpt->verbose >= MPT_PRT_DEBUG) {
				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
			}
			nxt_off = 0;
		}
	}
out:

	/*
	 * Last time we need to check if this CCB needs to be aborted.
	 */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		mpt_prt(mpt,
		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
		    ccb->ccb_h.status & CAM_STATUS_MASK);
		if (nseg) {
			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		xpt_done(ccb);
		mpt_free_request(mpt, req);
		return;
	}

	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
		    mpt_timeout, ccb);
	}
	if (mpt->verbose > MPT_PRT_DEBUG) {
		int nc = 0;
		mpt_print_request(req->req_vbuf);
		for (trq = req->chain; trq; trq = trq->chain) {
			printf("  Additional Chain Area %d\n", nc++);
			mpt_dump_sgl(trq->req_vbuf, 0);
		}
	}

	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    ccb->csio.scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
		} else {
			tgt->state = TGT_STATE_MOVING_DATA;
		}
#else
		tgt->state = TGT_STATE_MOVING_DATA;
#endif
	}
	mpt_send_cmd(mpt, req);
}

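/*
 * 32-bit SGE variant of mpt_execute_req_a64 above: same structure, but
 * it builds SGE_SIMPLE32/SGE_CHAIN32 lists.
 */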
1668static void
1669mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1670{
1671	request_t *req, *trq;
1672	char *mpt_off;
1673	union ccb *ccb;
1674	struct mpt_softc *mpt;
1675	int seg, first_lim;
1676	uint32_t flags, nxt_off;
1677	void *sglp = NULL;
1678	MSG_REQUEST_HEADER *hdrp;
1679	SGE_SIMPLE32 *se;
1680	SGE_CHAIN32 *ce;
1681	int istgt = 0;
1682
1683	req = (request_t *)arg;
1684	ccb = req->ccb;
1685
1686	mpt = ccb->ccb_h.ccb_mpt_ptr;
1687	req = ccb->ccb_h.ccb_req_ptr;
1688
1689	hdrp = req->req_vbuf;
1690	mpt_off = req->req_vbuf;
1691
1692	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1693		error = EFBIG;
1694	}
1695
1696	if (error == 0) {
1697		switch (hdrp->Function) {
1698		case MPI_FUNCTION_SCSI_IO_REQUEST:
1699		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1700			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1701			break;
1702		case MPI_FUNCTION_TARGET_ASSIST:
1703			istgt = 1;
1704			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1705			break;
1706		default:
1707			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
1708			    hdrp->Function);
1709			error = EINVAL;
1710			break;
1711		}
1712	}
1713
1714	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1715		error = EFBIG;
1716		mpt_prt(mpt, "segment count %d too large (max %u)\n",
1717		    nseg, mpt->max_seg_cnt);
1718	}
1719
1720bad:
1721	if (error != 0) {
1722		if (error != EFBIG && error != ENOMEM) {
1723			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
1724		}
1725		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1726			cam_status status;
1727			mpt_freeze_ccb(ccb);
1728			if (error == EFBIG) {
1729				status = CAM_REQ_TOO_BIG;
1730			} else if (error == ENOMEM) {
1731				if (mpt->outofbeer == 0) {
1732					mpt->outofbeer = 1;
1733					xpt_freeze_simq(mpt->sim, 1);
1734					mpt_lprt(mpt, MPT_PRT_DEBUG,
1735					    "FREEZEQ\n");
1736				}
1737				status = CAM_REQUEUE_REQ;
1738			} else {
1739				status = CAM_REQ_CMP_ERR;
1740			}
1741			mpt_set_ccb_status(ccb, status);
1742		}
1743		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1744			request_t *cmd_req =
1745				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1746			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1747			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1748			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1749		}
1750		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1751		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1752		xpt_done(ccb);
1753		mpt_free_request(mpt, req);
1754		return;
1755	}
1756
1757	/*
1758	 * No data to transfer?
1759	 * Just make a single simple SGL with zero length.
1760	 */
1761
1762	if (mpt->verbose >= MPT_PRT_DEBUG) {
1763		int tidx = ((char *)sglp) - mpt_off;
1764		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1765	}
1766
1767	if (nseg == 0) {
1768		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1769		MPI_pSGE_SET_FLAGS(se1,
1770		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1771		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1772		se1->FlagsLength = htole32(se1->FlagsLength);
1773		goto out;
1774	}
1775
1776
1777	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1778	if (istgt == 0) {
1779		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1780			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1781		}
1782	} else {
1783		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1784			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1785		}
1786	}
1787
1788	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1789		bus_dmasync_op_t op;
		if (istgt) {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREWRITE;
			} else {
				op = BUS_DMASYNC_PREREAD;
			}
		} else {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREREAD;
			} else {
				op = BUS_DMASYNC_PREWRITE;
			}
		}
1803		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1804	}
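	/*
	 * Note on the sync ops above: the CAM_DIR_* flags are expressed
	 * from the initiator's point of view.  In the initiator role,
	 * CAM_DIR_IN is a device-to-memory transfer and needs
	 * BUS_DMASYNC_PREREAD; in target mode, CAM_DIR_IN means we are
	 * sourcing the data from our own memory, hence
	 * BUS_DMASYNC_PREWRITE.
	 */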
1805
1806	/*
1807	 * Okay, fill in what we can at the end of the command frame.
1808	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1809	 * the command frame.
1810	 *
	 * Otherwise, we fill in MPT_NSGL_FIRST - 1 SIMPLE32 elements
	 * and leave the final slot for a CHAIN32 entry.
1814	 */
1815
1816	if (nseg < MPT_NSGL_FIRST(mpt)) {
1817		first_lim = nseg;
1818	} else {
1819		/*
1820		 * Leave room for CHAIN element
1821		 */
1822		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1823	}
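	/*
	 * Worked example (illustrative numbers only): if MPT_NSGL_FIRST(mpt)
	 * evaluated to 10 and nseg were 25, first_lim would be 9; the
	 * command frame would then hold 9 SIMPLE32 elements, with the
	 * final slot reserved for the CHAIN32 element that links to the
	 * remaining 16 segments.
	 */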
1824
1825	se = (SGE_SIMPLE32 *) sglp;
1826	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1827		uint32_t tf;
1828
		memset(se, 0, sizeof (*se));
1830		se->Address = htole32(dm_segs->ds_addr);
1831
1832		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1833		tf = flags;
1834		if (seg == first_lim - 1) {
1835			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1836		}
1837		if (seg == nseg - 1) {
1838			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1839				MPI_SGE_FLAGS_END_OF_BUFFER;
1840		}
1841		MPI_pSGE_SET_FLAGS(se, tf);
1842		se->FlagsLength = htole32(se->FlagsLength);
1843	}
1844
1845	if (seg == nseg) {
1846		goto out;
1847	}
1848
1849	/*
1850	 * Tell the IOC where to find the first chain element.
1851	 */
1852	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
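	/*
	 * ChainOffset is expressed in 32-bit words from the start of the
	 * request header; e.g. a chain element beginning 64 bytes into
	 * the frame yields a ChainOffset of 16.
	 */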
1853	nxt_off = MPT_RQSL(mpt);
1854	trq = req;
1855
1856	/*
1857	 * Make up the rest of the data segments out of a chain element
1858	 * (contained in the current request frame) which points to
1859	 * SIMPLE32 elements in the next request frame, possibly ending
1860	 * with *another* chain element (if there's more).
1861	 */
1862	while (seg < nseg) {
1863		int this_seg_lim;
1864		uint32_t tf, cur_off;
1865		bus_addr_t chain_list_addr;
1866
1867		/*
1868		 * Point to the chain descriptor. Note that the chain
1869		 * descriptor is at the end of the *previous* list (whether
1870		 * chain or simple).
1871		 */
1872		ce = (SGE_CHAIN32 *) se;
1873
1874		/*
		 * Before we change our current pointer, make sure we won't
1876		 * overflow the request area with this frame. Note that we
1877		 * test against 'greater than' here as it's okay in this case
1878		 * to have next offset be just outside the request area.
1879		 */
1880		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1881			nxt_off = MPT_REQUEST_AREA;
1882			goto next_chain;
1883		}
1884
1885		/*
1886		 * Set our SGE element pointer to the beginning of the chain
1887		 * list and update our next chain list offset.
1888		 */
1889		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1890		cur_off = nxt_off;
1891		nxt_off += MPT_RQSL(mpt);
1892
1893		/*
1894		 * Now initialize the chain descriptor.
1895		 */
1896		memset(ce, 0, sizeof (*ce));
1897
1898		/*
1899		 * Get the physical address of the chain list.
1900		 */
1901		chain_list_addr = trq->req_pbuf;
1902		chain_list_addr += cur_off;
1903
1904
1905
1906		ce->Address = htole32(chain_list_addr);
1907		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1908
1909
1910		/*
1911		 * If we have more than a frame's worth of segments left,
1912		 * set up the chain list to have the last element be another
1913		 * chain descriptor.
1914		 */
1915		if ((nseg - seg) > MPT_NSGL(mpt)) {
1916			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1917			/*
			 * The chain length is the size, in bytes, of the
			 * simple elements that fit in this frame plus the
			 * trailing chain element.
			 *
			 * The next chain descriptor offset is the size,
			 * in 32-bit words, of just the simple elements.
1923			 */
1924			ce->Length = (this_seg_lim - seg) *
1925			    sizeof (SGE_SIMPLE32);
1926			ce->NextChainOffset = ce->Length >> 2;
1927			ce->Length += sizeof (SGE_CHAIN32);
1928		} else {
1929			this_seg_lim = nseg;
1930			ce->Length = (this_seg_lim - seg) *
1931			    sizeof (SGE_SIMPLE32);
1932		}
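		/*
		 * Worked example: with the 32-bit MPI SGE definitions,
		 * where SGE_SIMPLE32 and SGE_CHAIN32 are both 8 bytes,
		 * a chain frame holding 16 simple elements plus a
		 * trailing chain element gets Length = 16 * 8 + 8 = 136
		 * and NextChainOffset = (16 * 8) >> 2 = 32 words, i.e.
		 * the next chain element starts right after the 16
		 * simple elements.
		 */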
1933		ce->Length = htole16(ce->Length);
1934
1935		/*
1936		 * Fill in the chain list SGE elements with our segment data.
1937		 *
1938		 * If we're the last element in this chain list, set the last
1939		 * element flag. If we're the completely last element period,
1940		 * set the end of list and end of buffer flags.
1941		 */
1942		while (seg < this_seg_lim) {
1943			memset(se, 0, sizeof (*se));
1944			se->Address = htole32(dm_segs->ds_addr);
1945
1946			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1947			tf = flags;
1948			if (seg == this_seg_lim - 1) {
1949				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1950			}
1951			if (seg == nseg - 1) {
1952				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1953					MPI_SGE_FLAGS_END_OF_BUFFER;
1954			}
1955			MPI_pSGE_SET_FLAGS(se, tf);
1956			se->FlagsLength = htole32(se->FlagsLength);
1957			se++;
1958			seg++;
1959			dm_segs++;
1960		}
1961
1962    next_chain:
1963		/*
1964		 * If we have more segments to do and we've used up all of
1965		 * the space in a request area, go allocate another one
1966		 * and chain to that.
1967		 */
1968		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1969			request_t *nrq;
1970
1971			nrq = mpt_get_request(mpt, FALSE);
1972
1973			if (nrq == NULL) {
1974				error = ENOMEM;
1975				goto bad;
1976			}
1977
1978			/*
1979			 * Append the new request area on the tail of our list.
1980			 */
1981			if ((trq = req->chain) == NULL) {
1982				req->chain = nrq;
1983			} else {
1984				while (trq->chain != NULL) {
1985					trq = trq->chain;
1986				}
1987				trq->chain = nrq;
1988			}
1989			trq = nrq;
1990			mpt_off = trq->req_vbuf;
1991			if (mpt->verbose >= MPT_PRT_DEBUG) {
1992				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1993			}
1994			nxt_off = 0;
1995		}
1996	}
1997out:
1998
1999	/*
2000	 * Last time we need to check if this CCB needs to be aborted.
2001	 */
2002	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2003		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2004			request_t *cmd_req =
2005				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2006			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
2007			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
2008			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
2009		}
2010		mpt_prt(mpt,
2011		    "mpt_execute_req: I/O cancelled (status 0x%x)\n",
2012		    ccb->ccb_h.status & CAM_STATUS_MASK);
2013		if (nseg) {
2014			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2015		}
2016		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2017		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
2018		xpt_done(ccb);
2019		mpt_free_request(mpt, req);
2020		return;
2021	}
2022
2023	ccb->ccb_h.status |= CAM_SIM_QUEUED;
2024	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2025		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
2026		    mpt_timeout, ccb);
2027	}
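	/*
	 * CAM CCB timeouts are expressed in milliseconds, so the
	 * conversion above is ms * hz / 1000 ticks; e.g. a 30000 ms
	 * timeout with hz at 1000 arms a 30000-tick (30 second) timer.
	 */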
2028	if (mpt->verbose > MPT_PRT_DEBUG) {
2029		int nc = 0;
2030		mpt_print_request(req->req_vbuf);
2031		for (trq = req->chain; trq; trq = trq->chain) {
2032			printf("  Additional Chain Area %d\n", nc++);
2033			mpt_dump_sgl(trq->req_vbuf, 0);
2034		}
2035	}
2036
2037	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2038		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2039		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
2040#ifdef	WE_TRUST_AUTO_GOOD_STATUS
2041		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    ccb->csio.scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
2043			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
2044		} else {
2045			tgt->state = TGT_STATE_MOVING_DATA;
2046		}
2047#else
2048		tgt->state = TGT_STATE_MOVING_DATA;
2049#endif
2050	}
2051	mpt_send_cmd(mpt, req);
2052}
2053
2054static void
2055mpt_start(struct cam_sim *sim, union ccb *ccb)
2056{
2057	request_t *req;
2058	struct mpt_softc *mpt;
2059	MSG_SCSI_IO_REQUEST *mpt_req;
2060	struct ccb_scsiio *csio = &ccb->csio;
2061	struct ccb_hdr *ccbh = &ccb->ccb_h;
2062	bus_dmamap_callback_t *cb;
2063	target_id_t tgt;
2064	int raid_passthru;
2065	int error;
2066
	/* Get the pointer for the physical adapter */
2068	mpt = ccb->ccb_h.ccb_mpt_ptr;
2069	raid_passthru = (sim == mpt->phydisk_sim);
2070
2071	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
2072		if (mpt->outofbeer == 0) {
2073			mpt->outofbeer = 1;
2074			xpt_freeze_simq(mpt->sim, 1);
2075			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
2076		}
2077		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2078		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
2079		xpt_done(ccb);
2080		return;
2081	}
2082#ifdef	INVARIANTS
2083	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
2084#endif
2085
2086	if (sizeof (bus_addr_t) > 4) {
2087		cb = mpt_execute_req_a64;
2088	} else {
2089		cb = mpt_execute_req;
2090	}
2091
2092	/*
2093	 * Link the ccb and the request structure so we can find
2094	 * the other knowing either the request or the ccb
2095	 */
2096	req->ccb = ccb;
2097	ccb->ccb_h.ccb_req_ptr = req;
2098
2099	/* Now we build the command for the IOC */
2100	mpt_req = req->req_vbuf;
2101	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
2102
2103	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
2104	if (raid_passthru) {
2105		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
2106		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2107			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2108			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2109			xpt_done(ccb);
2110			return;
2111		}
2112		mpt_req->Bus = 0;	/* we never set bus here */
2113	} else {
2114		tgt = ccb->ccb_h.target_id;
		mpt_req->Bus = 0;	/* XXX */
	}
2118	mpt_req->SenseBufferLength =
2119		(csio->sense_len < MPT_SENSE_SIZE) ?
2120		 csio->sense_len : MPT_SENSE_SIZE;
2121
2122	/*
2123	 * We use the message context to find the request structure when we
	 * get the command completion interrupt from the IOC.
2125	 */
2126	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
2127
2128	/* Which physical device to do the I/O on */
2129	mpt_req->TargetID = tgt;
2130
2131	/* We assume a single level LUN type */
2132	if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) {
2133		mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
2134		mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
2135	} else {
2136		mpt_req->LUN[1] = ccb->ccb_h.target_lun;
2137	}
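	/*
	 * The >= MPT_MAX_LUNS case uses the SAM flat-space addressing
	 * format: e.g. lun 0x123 is encoded as LUN[0] = 0x41 and
	 * LUN[1] = 0x23.
	 */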
2138
2139	/* Set the direction of the transfer */
2140	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2141		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
2142	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2143		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
2144	} else {
2145		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
2146	}
2147
2148	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
2149		switch(ccb->csio.tag_action) {
2150		case MSG_HEAD_OF_Q_TAG:
2151			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
2152			break;
2153		case MSG_ACA_TASK:
2154			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
2155			break;
2156		case MSG_ORDERED_Q_TAG:
2157			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
2158			break;
2159		case MSG_SIMPLE_Q_TAG:
2160		default:
2161			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2162			break;
2163		}
2164	} else {
2165		if (mpt->is_fc || mpt->is_sas) {
2166			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2167		} else {
2168			/* XXX No such thing for a target doing packetized. */
2169			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
2170		}
2171	}
2172
2173	if (mpt->is_spi) {
2174		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
2175			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
2176		}
2177	}
2178	mpt_req->Control = htole32(mpt_req->Control);
2179
2180	/* Copy the scsi command block into place */
2181	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2182		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
2183	} else {
2184		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
2185	}
2186
2187	mpt_req->CDBLength = csio->cdb_len;
2188	mpt_req->DataLength = htole32(csio->dxfer_len);
2189	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
2190
2191	/*
2192	 * Do a *short* print here if we're set to MPT_PRT_DEBUG
2193	 */
2194	if (mpt->verbose == MPT_PRT_DEBUG) {
2195		U32 df;
2196		mpt_prt(mpt, "mpt_start: %s op 0x%x ",
2197		    (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
2198		    "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
2199		df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
2200		if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
2201			mpt_prtc(mpt, "(%s %u byte%s ",
2202			    (df == MPI_SCSIIO_CONTROL_READ)?
2203			    "read" : "write",  csio->dxfer_len,
2204			    (csio->dxfer_len == 1)? ")" : "s)");
2205		}
2206		mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
2207		    ccb->ccb_h.target_lun, req, req->serno);
2208	}
2209
2210	error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb, cb,
2211	    req, 0);
2212	if (error == EINPROGRESS) {
2213		/*
2214		 * So as to maintain ordering, freeze the controller queue
2215		 * until our mapping is returned.
2216		 */
2217		xpt_freeze_simq(mpt->sim, 1);
2218		ccbh->status |= CAM_RELEASE_SIMQ;
2219	}
2220}
2221
2222static int
2223mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
2224    int sleep_ok)
2225{
2226	int   error;
2227	uint16_t status;
2228	uint8_t response;
2229
2230	error = mpt_scsi_send_tmf(mpt,
2231	    (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
2232	    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
2233	    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
2234	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
2235	    0,	/* XXX How do I get the channel ID? */
2236	    tgt != CAM_TARGET_WILDCARD ? tgt : 0,
2237	    lun != CAM_LUN_WILDCARD ? lun : 0,
2238	    0, sleep_ok);
2239
2240	if (error != 0) {
2241		/*
2242		 * mpt_scsi_send_tmf hard resets on failure, so no
2243		 * need to do so here.
2244		 */
2245		mpt_prt(mpt,
2246		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
2247		return (EIO);
2248	}
2249
2250	/* Wait for bus reset to be processed by the IOC. */
2251	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
2252	    REQ_STATE_DONE, sleep_ok, 5000);
2253
2254	status = le16toh(mpt->tmf_req->IOCStatus);
2255	response = mpt->tmf_req->ResponseCode;
2256	mpt->tmf_req->state = REQ_STATE_FREE;
2257
2258	if (error) {
2259		mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
2260		    "Resetting controller.\n");
2261		mpt_reset(mpt, TRUE);
2262		return (ETIMEDOUT);
2263	}
2264
2265	if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
2266		mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
2267		    "Resetting controller.\n", status);
2268		mpt_reset(mpt, TRUE);
2269		return (EIO);
2270	}
2271
2272	if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
2273	    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
2274		mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
2275		    "Resetting controller.\n", response);
2276		mpt_reset(mpt, TRUE);
2277		return (EIO);
2278	}
2279	return (0);
2280}
2281
2282static int
2283mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
2284{
2285	int r = 0;
2286	request_t *req;
2287	PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
2288
	req = mpt_get_request(mpt, FALSE);
2290	if (req == NULL) {
2291		return (ENOMEM);
2292	}
2293	fc = req->req_vbuf;
2294	memset(fc, 0, sizeof(*fc));
2295	fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
2296	fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
2297	fc->MsgContext = htole32(req->index | fc_els_handler_id);
2298	mpt_send_cmd(mpt, req);
2299	if (dowait) {
2300		r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
2301		    REQ_STATE_DONE, FALSE, 60 * 1000);
2302		if (r == 0) {
2303			mpt_free_request(mpt, req);
2304		}
2305	}
2306	return (r);
2307}
2308
2309static int
2310mpt_cam_event(struct mpt_softc *mpt, request_t *req,
2311	      MSG_EVENT_NOTIFY_REPLY *msg)
2312{
2313	uint32_t data0, data1;
2314
2315	data0 = le32toh(msg->Data[0]);
2316	data1 = le32toh(msg->Data[1]);
2317	switch(msg->Event & 0xFF) {
2318	case MPI_EVENT_UNIT_ATTENTION:
2319		mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
2320		    (data0 >> 8) & 0xff, data0 & 0xff);
2321		break;
2322
2323	case MPI_EVENT_IOC_BUS_RESET:
2324		/* We generated a bus reset */
2325		mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
2326		    (data0 >> 8) & 0xff);
2327		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2328		break;
2329
2330	case MPI_EVENT_EXT_BUS_RESET:
2331		/* Someone else generated a bus reset */
2332		mpt_prt(mpt, "External Bus Reset Detected\n");
2333		/*
2334		 * These replies don't return EventData like the MPI
2335		 * spec says they do
2336		 */
2337		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2338		break;
2339
2340	case MPI_EVENT_RESCAN:
2341#if __FreeBSD_version >= 600000
2342	{
2343		union ccb *ccb;
2344		uint32_t pathid;
2345		/*
2346		 * In general this means a device has been added to the loop.
2347		 */
2348		mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2349		if (mpt->ready == 0) {
2350			break;
2351		}
2352		if (mpt->phydisk_sim) {
2353			pathid = cam_sim_path(mpt->phydisk_sim);
2354		} else {
2355			pathid = cam_sim_path(mpt->sim);
2356		}
2357		/*
2358		 * Allocate a CCB, create a wildcard path for this bus,
2359		 * and schedule a rescan.
2360		 */
2361		ccb = xpt_alloc_ccb_nowait();
2362		if (ccb == NULL) {
2363			mpt_prt(mpt, "unable to alloc CCB for rescan\n");
2364			break;
2365		}
2366
2367		if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
2368		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2369			mpt_prt(mpt, "unable to create path for rescan\n");
2370			xpt_free_ccb(ccb);
2371			break;
2372		}
2373		xpt_rescan(ccb);
2374		break;
2375	}
2376#else
2377		mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2378		break;
2379#endif
2380	case MPI_EVENT_LINK_STATUS_CHANGE:
2381		mpt_prt(mpt, "Port %d: LinkState: %s\n",
2382		    (data1 >> 8) & 0xff,
2383		    ((data0 & 0xff) == 0)?  "Failed" : "Active");
2384		break;
2385
2386	case MPI_EVENT_LOOP_STATE_CHANGE:
2387		switch ((data0 >> 16) & 0xff) {
2388		case 0x01:
2389			mpt_prt(mpt,
2390			    "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2391			    "(Loop Initialization)\n",
2392			    (data1 >> 8) & 0xff,
2393			    (data0 >> 8) & 0xff,
2394			    (data0     ) & 0xff);
2395			switch ((data0 >> 8) & 0xff) {
2396			case 0xF7:
2397				if ((data0 & 0xff) == 0xF7) {
2398					mpt_prt(mpt, "Device needs AL_PA\n");
2399				} else {
2400					mpt_prt(mpt, "Device %02x doesn't like "
2401					    "FC performance\n",
2402					    data0 & 0xFF);
2403				}
2404				break;
2405			case 0xF8:
2406				if ((data0 & 0xff) == 0xF7) {
2407					mpt_prt(mpt, "Device had loop failure "
2408					    "at its receiver prior to acquiring"
2409					    " AL_PA\n");
2410				} else {
2411					mpt_prt(mpt, "Device %02x detected loop"
2412					    " failure at its receiver\n",
2413					    data0 & 0xFF);
2414				}
2415				break;
2416			default:
2417				mpt_prt(mpt, "Device %02x requests that device "
2418				    "%02x reset itself\n",
2419				    data0 & 0xFF,
2420				    (data0 >> 8) & 0xFF);
2421				break;
2422			}
2423			break;
2424		case 0x02:
2425			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2426			    "LPE(%02x,%02x) (Loop Port Enable)\n",
2427			    (data1 >> 8) & 0xff, /* Port */
2428			    (data0 >>  8) & 0xff, /* Character 3 */
2429			    (data0      ) & 0xff  /* Character 4 */);
2430			break;
2431		case 0x03:
2432			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2433			    "LPB(%02x,%02x) (Loop Port Bypass)\n",
2434			    (data1 >> 8) & 0xff, /* Port */
2435			    (data0 >> 8) & 0xff, /* Character 3 */
2436			    (data0     ) & 0xff  /* Character 4 */);
2437			break;
2438		default:
2439			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2440			    "FC event (%02x %02x %02x)\n",
2441			    (data1 >> 8) & 0xff, /* Port */
2442			    (data0 >> 16) & 0xff, /* Event */
2443			    (data0 >>  8) & 0xff, /* Character 3 */
2444			    (data0      ) & 0xff  /* Character 4 */);
2445		}
2446		break;
2447
2448	case MPI_EVENT_LOGOUT:
2449		mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2450		    (data1 >> 8) & 0xff, data0);
2451		break;
2452	case MPI_EVENT_QUEUE_FULL:
2453	{
2454		struct cam_sim *sim;
2455		struct cam_path *tmppath;
2456		struct ccb_relsim crs;
2457		PTR_EVENT_DATA_QUEUE_FULL pqf;
2458		lun_id_t lun_id;
2459
2460		pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data;
2461		pqf->CurrentDepth = le16toh(pqf->CurrentDepth);
2462		mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth "
2463		    "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
2464		if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2465		    pqf->TargetID) != 0) {
2466			sim = mpt->phydisk_sim;
2467		} else {
2468			sim = mpt->sim;
2469		}
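		/*
		 * For each LUN on the target that reported queue full,
		 * freeze the device queue and ask CAM to reduce the
		 * number of openings to one below the depth at which
		 * the queue full condition occurred.
		 */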
2470		for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
2471			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2472			    pqf->TargetID, lun_id) != CAM_REQ_CMP) {
2473				mpt_prt(mpt, "unable to create a path to send "
				    "XPT_REL_SIMQ\n");
2475				break;
2476			}
2477			xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
2478			crs.ccb_h.func_code = XPT_REL_SIMQ;
2479			crs.ccb_h.flags = CAM_DEV_QFREEZE;
2480			crs.release_flags = RELSIM_ADJUST_OPENINGS;
2481			crs.openings = pqf->CurrentDepth - 1;
2482			xpt_action((union ccb *)&crs);
2483			if (crs.ccb_h.status != CAM_REQ_CMP) {
2484				mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
2485			}
2486			xpt_free_path(tmppath);
2487		}
2488		break;
2489	}
2490	case MPI_EVENT_IR_RESYNC_UPDATE:
2491		mpt_prt(mpt, "IR resync update %d completed\n",
2492		    (data0 >> 16) & 0xff);
2493		break;
2494	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2495	{
2496		union ccb *ccb;
2497		struct cam_sim *sim;
2498		struct cam_path *tmppath;
2499		PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc;
2500
2501		psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data;
2502		if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2503		    psdsc->TargetID) != 0)
2504			sim = mpt->phydisk_sim;
2505		else
2506			sim = mpt->sim;
2507		switch(psdsc->ReasonCode) {
2508		case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
2509			ccb = xpt_alloc_ccb_nowait();
2510			if (ccb == NULL) {
2511				mpt_prt(mpt,
2512				    "unable to alloc CCB for rescan\n");
2513				break;
2514			}
2515			if (xpt_create_path(&ccb->ccb_h.path, NULL,
2516			    cam_sim_path(sim), psdsc->TargetID,
2517			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2518				mpt_prt(mpt,
2519				    "unable to create path for rescan\n");
2520				xpt_free_ccb(ccb);
2521				break;
2522			}
2523			xpt_rescan(ccb);
2524			break;
2525		case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
2526			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2527			    psdsc->TargetID, CAM_LUN_WILDCARD) !=
2528			    CAM_REQ_CMP) {
2529				mpt_prt(mpt,
				    "unable to create path for async event\n");
2531				break;
2532			}
2533			xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2534			xpt_free_path(tmppath);
2535			break;
2536		case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET:
2537		case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL:
2538		case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
2539			break;
2540		default:
2541			mpt_lprt(mpt, MPT_PRT_WARN,
2542			    "SAS device status change: Bus: 0x%02x TargetID: "
2543			    "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus,
2544			    psdsc->TargetID, psdsc->ReasonCode);
2545			break;
2546		}
2547		break;
2548	}
2549	case MPI_EVENT_SAS_DISCOVERY_ERROR:
2550	{
2551		PTR_EVENT_DATA_DISCOVERY_ERROR pde;
2552
2553		pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data;
2554		pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus);
2555		mpt_lprt(mpt, MPT_PRT_WARN,
2556		    "SAS discovery error: Port: 0x%02x Status: 0x%08x\n",
2557		    pde->Port, pde->DiscoveryStatus);
2558		break;
2559	}
2560	case MPI_EVENT_EVENT_CHANGE:
2561	case MPI_EVENT_INTEGRATED_RAID:
2562	case MPI_EVENT_IR2:
2563	case MPI_EVENT_LOG_ENTRY_ADDED:
2564	case MPI_EVENT_SAS_DISCOVERY:
2565	case MPI_EVENT_SAS_PHY_LINK_STATUS:
2566	case MPI_EVENT_SAS_SES:
2567		break;
2568	default:
2569		mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
2570		    msg->Event & 0xFF);
2571		return (0);
2572	}
2573	return (1);
2574}
2575
2576/*
2577 * Reply path for all SCSI I/O requests, called from our
2578 * interrupt handler by extracting our handler index from
2579 * the MsgContext field of the reply from the IOC.
2580 *
2581 * This routine is optimized for the common case of a
2582 * completion without error.  All exception handling is
2583 * offloaded to non-inlined helper routines to minimize
2584 * cache footprint.
2585 */
2586static int
2587mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2588    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2589{
2590	MSG_SCSI_IO_REQUEST *scsi_req;
2591	union ccb *ccb;
2592
2593	if (req->state == REQ_STATE_FREE) {
2594		mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2595		return (TRUE);
2596	}
2597
2598	scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2599	ccb = req->ccb;
2600	if (ccb == NULL) {
2601		mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2602		    req, req->serno);
2603		return (TRUE);
2604	}
2605
2606	mpt_req_untimeout(req, mpt_timeout, ccb);
2607	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2608
2609	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2610		bus_dmasync_op_t op;
2611
2612		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2613			op = BUS_DMASYNC_POSTREAD;
2614		else
2615			op = BUS_DMASYNC_POSTWRITE;
2616		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2617		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2618	}
2619
2620	if (reply_frame == NULL) {
2621		/*
2622		 * Context only reply, completion without error status.
2623		 */
2624		ccb->csio.resid = 0;
2625		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2626		ccb->csio.scsi_status = SCSI_STATUS_OK;
2627	} else {
2628		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2629	}
2630
2631	if (mpt->outofbeer) {
2632		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2633		mpt->outofbeer = 0;
2634		mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2635	}
2636	if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2637		struct scsi_inquiry_data *iq =
2638		    (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2639		if (scsi_req->Function ==
2640		    MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2641			/*
2642			 * Fake out the device type so that only the
2643			 * pass-thru device will attach.
2644			 */
2645			iq->device &= ~0x1F;
2646			iq->device |= T_NODEVICE;
2647		}
2648	}
2649	if (mpt->verbose == MPT_PRT_DEBUG) {
2650		mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2651		    req, req->serno);
2652	}
2653	KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
2654	xpt_done(ccb);
2655	if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2656		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2657	} else {
2658		mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2659		    req, req->serno);
2660		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2661	}
2662	KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2663	    ("CCB req needed wakeup"));
2664#ifdef	INVARIANTS
2665	mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2666#endif
2667	mpt_free_request(mpt, req);
2668	return (TRUE);
2669}
2670
2671static int
2672mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2673    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2674{
2675	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2676
2677	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2678#ifdef	INVARIANTS
2679	mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2680#endif
2681	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2682	/* Record IOC Status and Response Code of TMF for any waiters. */
2683	req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2684	req->ResponseCode = tmf_reply->ResponseCode;
2685
2686	mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
2687	    req, req->serno, le16toh(tmf_reply->IOCStatus));
2688	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2689	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2690		req->state |= REQ_STATE_DONE;
2691		wakeup(req);
2692	} else {
2693		mpt->tmf_req->state = REQ_STATE_FREE;
2694	}
2695	return (TRUE);
2696}
2697
2698/*
2699 * XXX: Move to definitions file
2700 */
2701#define	ELS	0x22
2702#define	FC4LS	0x32
2703#define	ABTS	0x81
2704#define	BA_ACC	0x84
2705
2706#define	LS_RJT	0x01
2707#define	LS_ACC	0x02
2708#define	PLOGI	0x03
2709#define	LOGO	0x05
2710#define SRR	0x14
2711#define PRLI	0x20
2712#define PRLO	0x21
2713#define ADISC	0x52
2714#define RSCN	0x61
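/*
 * The values above are standard Fibre Channel R_CTL codes (ELS, ABTS,
 * BA_ACC) and ELS/FC-4 link service command codes (PLOGI, PRLI, RSCN,
 * etc.) from the FC specifications.
 */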
2715
2716static void
2717mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2718    PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2719{
2720	uint32_t fl;
2721	MSG_LINK_SERVICE_RSP_REQUEST tmp;
2722	PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2723
2724	/*
2725	 * We are going to reuse the ELS request to send this response back.
2726	 */
2727	rsp = &tmp;
2728	memset(rsp, 0, sizeof(*rsp));
2729
2730#ifdef	USE_IMMEDIATE_LINK_DATA
2731	/*
2732	 * Apparently the IMMEDIATE stuff doesn't seem to work.
2733	 */
2734	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2735#endif
2736	rsp->RspLength = length;
2737	rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2738	rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2739
2740	/*
2741	 * Copy over information from the original reply frame to
2742	 * it's correct place in the response.
2743	 */
2744	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2745
2746	/*
2747	 * And now copy back the temporary area to the original frame.
2748	 */
2749	memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2750	rsp = req->req_vbuf;
2751
2752#ifdef	USE_IMMEDIATE_LINK_DATA
2753	memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2754#else
2755{
2756	PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2757	bus_addr_t paddr = req->req_pbuf;
2758	paddr += MPT_RQSL(mpt);
2759
2760	fl =
2761		MPI_SGE_FLAGS_HOST_TO_IOC	|
2762		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
2763		MPI_SGE_FLAGS_LAST_ELEMENT	|
2764		MPI_SGE_FLAGS_END_OF_LIST	|
2765		MPI_SGE_FLAGS_END_OF_BUFFER;
2766	fl <<= MPI_SGE_FLAGS_SHIFT;
2767	fl |= (length);
2768	se->FlagsLength = htole32(fl);
2769	se->Address = htole32((uint32_t) paddr);
2770}
2771#endif
2772
2773	/*
2774	 * Send it on...
2775	 */
2776	mpt_send_cmd(mpt, req);
2777}
2778
2779static int
2780mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2781    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2782{
2783	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2784	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2785	U8 rctl;
2786	U8 type;
2787	U8 cmd;
2788	U16 status = le16toh(reply_frame->IOCStatus);
2789	U32 *elsbuf;
2790	int ioindex;
2791	int do_refresh = TRUE;
2792
2793#ifdef	INVARIANTS
2794	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2795	    ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2796	    req, req->serno, rp->Function));
2797	if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2798		mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2799	} else {
2800		mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2801	}
2802#endif
2803	mpt_lprt(mpt, MPT_PRT_DEBUG,
2804	    "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2805	    req, req->serno, reply_frame, reply_frame->Function);
2806
2807	if  (status != MPI_IOCSTATUS_SUCCESS) {
2808		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2809		    status, reply_frame->Function);
2810		if (status == MPI_IOCSTATUS_INVALID_STATE) {
2811			/*
2812			 * XXX: to get around shutdown issue
2813			 */
2814			mpt->disabled = 1;
2815			return (TRUE);
2816		}
2817		return (TRUE);
2818	}
2819
2820	/*
	 * If this is the completion of a link service response we sent,
	 * we recycle the request to post a fresh link service buffer.
2823	 *
2824	 * The request pointer is bogus in this case and we have to fetch
2825	 * it based upon the TransactionContext.
2826	 */
2827	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2828		/* Freddie Uncle Charlie Katie */
2829		/* We don't get the IOINDEX as part of the Link Svc Rsp */
2830		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2831			if (mpt->els_cmd_ptrs[ioindex] == req) {
2832				break;
2833			}
2834
2835		KASSERT(ioindex < mpt->els_cmds_allocated,
2836		    ("can't find my mommie!"));
2837
2838		/* remove from active list as we're going to re-post it */
2839		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2840		req->state &= ~REQ_STATE_QUEUED;
2841		req->state |= REQ_STATE_DONE;
2842		mpt_fc_post_els(mpt, req, ioindex);
2843		return (TRUE);
2844	}
2845
2846	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2847		/* remove from active list as we're done */
2848		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2849		req->state &= ~REQ_STATE_QUEUED;
2850		req->state |= REQ_STATE_DONE;
2851		if (req->state & REQ_STATE_TIMEDOUT) {
2852			mpt_lprt(mpt, MPT_PRT_DEBUG,
2853			    "Sync Primitive Send Completed After Timeout\n");
2854			mpt_free_request(mpt, req);
2855		} else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2856			mpt_lprt(mpt, MPT_PRT_DEBUG,
2857			    "Async Primitive Send Complete\n");
2858			mpt_free_request(mpt, req);
2859		} else {
2860			mpt_lprt(mpt, MPT_PRT_DEBUG,
2861			    "Sync Primitive Send Complete- Waking Waiter\n");
2862			wakeup(req);
2863		}
2864		return (TRUE);
2865	}
2866
2867	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2868		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2869		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2870		    rp->MsgLength, rp->MsgFlags);
2871		return (TRUE);
2872	}
2873
2874	if (rp->MsgLength <= 5) {
2875		/*
		 * This is just an ack of an original ELS buffer post.
2877		 */
2878		mpt_lprt(mpt, MPT_PRT_DEBUG,
2879		    "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2880		return (TRUE);
2881	}
2882
2883
2884	rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2885	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2886
2887	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2888	cmd = be32toh(elsbuf[0]) >> 24;
2889
2890	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2891		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2892		return (TRUE);
2893	}
2894
2895	ioindex = le32toh(rp->TransactionContext);
2896	req = mpt->els_cmd_ptrs[ioindex];
2897
2898	if (rctl == ELS && type == 1) {
2899		switch (cmd) {
2900		case PRLI:
2901			/*
2902			 * Send back a PRLI ACC
2903			 */
2904			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2905			    le32toh(rp->Wwn.PortNameHigh),
2906			    le32toh(rp->Wwn.PortNameLow));
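			/*
			 * Build the LS_ACC payload by hand: 0x02100014 is
			 * the LS_ACC command code (0x02) with a 16-byte
			 * page length and a 20-byte payload length, and
			 * the bits OR'd into word 4 advertise FCP target
			 * (0x10) and initiator (0x20) function support in
			 * the PRLI service parameter page.
			 */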
2907			elsbuf[0] = htobe32(0x02100014);
2908			elsbuf[1] |= htobe32(0x00000100);
2909			elsbuf[4] = htobe32(0x00000002);
2910			if (mpt->role & MPT_ROLE_TARGET)
2911				elsbuf[4] |= htobe32(0x00000010);
2912			if (mpt->role & MPT_ROLE_INITIATOR)
2913				elsbuf[4] |= htobe32(0x00000020);
2914			/* remove from active list as we're done */
2915			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2916			req->state &= ~REQ_STATE_QUEUED;
2917			req->state |= REQ_STATE_DONE;
2918			mpt_fc_els_send_response(mpt, req, rp, 20);
2919			do_refresh = FALSE;
2920			break;
2921		case PRLO:
2922			memset(elsbuf, 0, 5 * (sizeof (U32)));
2923			elsbuf[0] = htobe32(0x02100014);
2924			elsbuf[1] = htobe32(0x08000100);
2925			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2926			    le32toh(rp->Wwn.PortNameHigh),
2927			    le32toh(rp->Wwn.PortNameLow));
2928			/* remove from active list as we're done */
2929			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2930			req->state &= ~REQ_STATE_QUEUED;
2931			req->state |= REQ_STATE_DONE;
2932			mpt_fc_els_send_response(mpt, req, rp, 20);
2933			do_refresh = FALSE;
2934			break;
2935		default:
2936			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2937			break;
2938		}
2939	} else if (rctl == ABTS && type == 0) {
2940		uint16_t rx_id = le16toh(rp->Rxid);
2941		uint16_t ox_id = le16toh(rp->Oxid);
2942		request_t *tgt_req = NULL;
2943
2944		mpt_prt(mpt,
2945		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2946		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2947		    le32toh(rp->Wwn.PortNameLow));
2948		if (rx_id >= mpt->mpt_max_tgtcmds) {
2949			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
2950		} else if (mpt->tgt_cmd_ptrs == NULL) {
2951			mpt_prt(mpt, "No TGT CMD PTRS\n");
2952		} else {
2953			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
2954		}
2955		if (tgt_req) {
2956			mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
2957			union ccb *ccb;
2958			uint32_t ct_id;
2959
2960			/*
			 * Check to make sure we have the correct command.
			 * The reply descriptor in the target state should
			 * contain an IoIndex that matches the RX_ID.
2965			 *
2966			 * It'd be nice to have OX_ID to crosscheck with
2967			 * as well.
2968			 */
2969			ct_id = GET_IO_INDEX(tgt->reply_desc);
2970
2971			if (ct_id != rx_id) {
2972				mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2973				    "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
2974				    rx_id, ct_id);
2975				goto skip;
2976			}
2977
2978			ccb = tgt->ccb;
2979			if (ccb) {
2980				mpt_prt(mpt,
2981				    "CCB (%p): lun %u flags %x status %x\n",
2982				    ccb, ccb->ccb_h.target_lun,
2983				    ccb->ccb_h.flags, ccb->ccb_h.status);
2984			}
2985			mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
2986			    "%x nxfers %x\n", tgt->state,
2987			    tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
2988			    tgt->nxfers);
2989  skip:
2990			if (mpt_abort_target_cmd(mpt, tgt_req)) {
2991				mpt_prt(mpt, "unable to start TargetAbort\n");
2992			}
2993		} else {
2994			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
2995		}
2996		memset(elsbuf, 0, 5 * (sizeof (U32)));
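		/*
		 * Build a BA_ACC payload: word 0 is left zero (SEQ_ID not
		 * valid), word 1 carries the OX_ID and RX_ID, and word 2
		 * holds the low (0x0000) and high (0xffff) SEQ_CNT range.
		 */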
2997		elsbuf[0] = htobe32(0);
2998		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
		elsbuf[2] = htobe32(0x0000ffff);
3000		/*
		 * Patch the R_CTL of the reply frame from ABTS to BA_ACC
		 * so that the response built from it will be correct.
3003		 */
3004		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
3005		/* remove from active list as we're done */
3006		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3007		req->state &= ~REQ_STATE_QUEUED;
3008		req->state |= REQ_STATE_DONE;
3009		mpt_fc_els_send_response(mpt, req, rp, 12);
3010		do_refresh = FALSE;
3011	} else {
3012		mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
3013	}
3014	if (do_refresh == TRUE) {
3015		/* remove from active list as we're done */
3016		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3017		req->state &= ~REQ_STATE_QUEUED;
3018		req->state |= REQ_STATE_DONE;
3019		mpt_fc_post_els(mpt, req, ioindex);
3020	}
3021	return (TRUE);
3022}
3023
3024/*
3025 * Clean up all SCSI Initiator personality state in response
3026 * to a controller reset.
3027 */
3028static void
3029mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
3030{
3031
3032	/*
3033	 * The pending list is already run down by
3034	 * the generic handler.  Perform the same
3035	 * operation on the timed out request list.
3036	 */
3037	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
3038				   MPI_IOCSTATUS_INVALID_STATE);
3039
3040	/*
3041	 * XXX: We need to repost ELS and Target Command Buffers?
3042	 */
3043
3044	/*
3045	 * Inform the XPT that a bus reset has occurred.
3046	 */
3047	xpt_async(AC_BUS_RESET, mpt->path, NULL);
3048}
3049
3050/*
3051 * Parse additional completion information in the reply
3052 * frame for SCSI I/O requests.
3053 */
3054static int
3055mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
3056			     MSG_DEFAULT_REPLY *reply_frame)
3057{
3058	union ccb *ccb;
3059	MSG_SCSI_IO_REPLY *scsi_io_reply;
3060	u_int ioc_status;
3061	u_int sstate;
3062
3063	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
3064	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
3065	     || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
3066		("MPT SCSI I/O Handler called with incorrect reply type"));
3067	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
3068		("MPT SCSI I/O Handler called with continuation reply"));
3069
3070	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
3071	ioc_status = le16toh(scsi_io_reply->IOCStatus);
3072	ioc_status &= MPI_IOCSTATUS_MASK;
3073	sstate = scsi_io_reply->SCSIState;
3074
3075	ccb = req->ccb;
3076	ccb->csio.resid =
3077	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
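	/*
	 * E.g. a 65536-byte request that moved only 61440 bytes leaves
	 * csio.resid = 4096.
	 */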
3078
3079	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
3080	 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
3081		uint32_t sense_returned;
3082
3083		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
3084
3085		sense_returned = le32toh(scsi_io_reply->SenseCount);
3086		if (sense_returned < ccb->csio.sense_len)
3087			ccb->csio.sense_resid = ccb->csio.sense_len -
3088						sense_returned;
3089		else
3090			ccb->csio.sense_resid = 0;
3091
3092		bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data));
3093		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
3094		    min(ccb->csio.sense_len, sense_returned));
3095	}
3096
3097	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
		/*
		 * Tag messages rejected, but non-tagged retry
		 * was successful.
		 *
		 * XXX: mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
		 */
3104	}
3105
3106	switch(ioc_status) {
3107	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
3108		/*
3109		 * XXX
3110		 * Linux driver indicates that a zero
3111		 * transfer length with this error code
3112		 * indicates a CRC error.
3113		 *
3114		 * No need to swap the bytes for checking
3115		 * against zero.
3116		 */
3117		if (scsi_io_reply->TransferCount == 0) {
3118			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3119			break;
3120		}
3121		/* FALLTHROUGH */
3122	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
3123	case MPI_IOCSTATUS_SUCCESS:
3124	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
3125		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
3126			/*
3127			 * Status was never returned for this transaction.
3128			 */
3129			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
3130		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
3131			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
3132			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
3133			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
3134				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
3135		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
3136
3137			/* XXX Handle SPI-Packet and FCP-2 response info. */
3138			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3139		} else
3140			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3141		break;
3142	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
3143		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
3144		break;
3145	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
3146		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3147		break;
3148	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3149		/*
3150		 * Since selection timeouts and "device really not
3151		 * there" are grouped into this error code, report
3152		 * selection timeout.  Selection timeouts are
3153		 * typically retried before giving up on the device
3154		 * whereas "device not there" errors are considered
3155		 * unretryable.
3156		 */
3157		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3158		break;
3159	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
3160		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
3161		break;
3162	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
3163		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
3164		break;
3165	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
3166		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
3167		break;
3168	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
3169		ccb->ccb_h.status = CAM_UA_TERMIO;
3170		break;
3171	case MPI_IOCSTATUS_INVALID_STATE:
3172		/*
3173		 * The IOC has been reset.  Emulate a bus reset.
3174		 */
3175		/* FALLTHROUGH */
3176	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
3177		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3178		break;
3179	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
3180	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
3181		/*
3182		 * Don't clobber any timeout status that has
3183		 * already been set for this transaction.  We
3184		 * want the SCSI layer to be able to differentiate
3185		 * between the command we aborted due to timeout
3186		 * and any innocent bystanders.
3187		 */
3188		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
3189			break;
3190		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
3191		break;
3192
3193	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
3194		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
3195		break;
3196	case MPI_IOCSTATUS_BUSY:
3197		mpt_set_ccb_status(ccb, CAM_BUSY);
3198		break;
3199	case MPI_IOCSTATUS_INVALID_FUNCTION:
3200	case MPI_IOCSTATUS_INVALID_SGL:
3201	case MPI_IOCSTATUS_INTERNAL_ERROR:
3202	case MPI_IOCSTATUS_INVALID_FIELD:
3203	default:
3204		/* XXX
		 * Some of the above may need to kick
		 * off a recovery action!
3207		 */
3208		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
3209		break;
3210	}
3211
3212	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
3213		mpt_freeze_ccb(ccb);
3214	}
3215
3216	return (TRUE);
3217}
3218
3219static void
3220mpt_action(struct cam_sim *sim, union ccb *ccb)
3221{
3222	struct mpt_softc *mpt;
3223	struct ccb_trans_settings *cts;
3224	target_id_t tgt;
3225	lun_id_t lun;
3226	int raid_passthru;
3227
3228	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
3229
3230	mpt = (struct mpt_softc *)cam_sim_softc(sim);
3231	raid_passthru = (sim == mpt->phydisk_sim);
3232	MPT_LOCK_ASSERT(mpt);
3233
3234	tgt = ccb->ccb_h.target_id;
3235	lun = ccb->ccb_h.target_lun;
3236	if (raid_passthru &&
3237	    ccb->ccb_h.func_code != XPT_PATH_INQ &&
3238	    ccb->ccb_h.func_code != XPT_RESET_BUS &&
3239	    ccb->ccb_h.func_code != XPT_RESET_DEV) {
3240		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
3241			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3242			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
3243			xpt_done(ccb);
3244			return;
3245		}
3246	}
3247	ccb->ccb_h.ccb_mpt_ptr = mpt;
3248
3249	switch (ccb->ccb_h.func_code) {
3250	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
3251		/*
3252		 * Do a couple of preliminary checks...
3253		 */
3254		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
3255			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
3256				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3257				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3258				break;
3259			}
3260		}
3261		/* Max supported CDB length is 16 bytes */
		/* XXX Unless we implement the new 32-byte message type */
3263		if (ccb->csio.cdb_len >
3264		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
3265			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3266			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3267			break;
3268		}
3269#ifdef	MPT_TEST_MULTIPATH
3270		if (mpt->failure_id == ccb->ccb_h.target_id) {
3271			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3272			mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3273			break;
3274		}
3275#endif
3276		ccb->csio.scsi_status = SCSI_STATUS_OK;
3277		mpt_start(sim, ccb);
3278		return;
3279
3280	case XPT_RESET_BUS:
3281		if (raid_passthru) {
3282			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3283			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3284			break;
3285		}
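		/* FALLTHROUGH */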
3286	case XPT_RESET_DEV:
3287		if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
3288			if (bootverbose) {
3289				xpt_print(ccb->ccb_h.path, "reset bus\n");
3290			}
3291		} else {
3292			xpt_print(ccb->ccb_h.path, "reset device\n");
3293		}
3294		(void) mpt_bus_reset(mpt, tgt, lun, FALSE);
3295
3296		/*
3297		 * mpt_bus_reset is always successful in that it
3298		 * will fall back to a hard reset should a bus
3299		 * reset attempt fail.
3300		 */
3301		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3302		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3303		break;
3304
3305	case XPT_ABORT:
3306	{
3307		union ccb *accb = ccb->cab.abort_ccb;
3308		switch (accb->ccb_h.func_code) {
3309		case XPT_ACCEPT_TARGET_IO:
3310		case XPT_IMMEDIATE_NOTIFY:
3311			ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
3312			break;
3313		case XPT_CONT_TARGET_IO:
3314			mpt_prt(mpt, "cannot abort active CTIOs yet\n");
3315			ccb->ccb_h.status = CAM_UA_ABORT;
3316			break;
3317		case XPT_SCSI_IO:
3318			ccb->ccb_h.status = CAM_UA_ABORT;
3319			break;
3320		default:
3321			ccb->ccb_h.status = CAM_REQ_INVALID;
3322			break;
3323		}
3324		break;
3325	}
3326
3327#ifdef	CAM_NEW_TRAN_CODE
3328#define	IS_CURRENT_SETTINGS(c)	((c)->type == CTS_TYPE_CURRENT_SETTINGS)
3329#else
3330#define	IS_CURRENT_SETTINGS(c)	((c)->flags & CCB_TRANS_CURRENT_SETTINGS)
3331#endif
3332#define	DP_DISC_ENABLE	0x1
3333#define	DP_DISC_DISABL	0x2
3334#define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)
3335
3336#define	DP_TQING_ENABLE	0x4
3337#define	DP_TQING_DISABL	0x8
3338#define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)
3339
3340#define	DP_WIDE		0x10
3341#define	DP_NARROW	0x20
3342#define	DP_WIDTH	(DP_WIDE|DP_NARROW)
3343
3344#define	DP_SYNC		0x40
3345
3346	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
3347	{
3348#ifdef	CAM_NEW_TRAN_CODE
3349		struct ccb_trans_settings_scsi *scsi;
3350		struct ccb_trans_settings_spi *spi;
3351#endif
3352		uint8_t dval;
3353		u_int period;
3354		u_int offset;
3355		int i, j;
3356
3357		cts = &ccb->cts;
3358
3359		if (mpt->is_fc || mpt->is_sas) {
3360			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3361			break;
3362		}
3363
3364#ifdef	CAM_NEW_TRAN_CODE
3365		scsi = &cts->proto_specific.scsi;
3366		spi = &cts->xport_specific.spi;
3367
3368		/*
		 * We can be called just to validate transport and proto versions
3370		 */
3371		if (scsi->valid == 0 && spi->valid == 0) {
3372			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3373			break;
3374		}
3375#endif
3376
3377		/*
3378		 * Skip attempting settings on RAID volume disks.
3379		 * Other devices on the bus get the normal treatment.
3380		 */
3381		if (mpt->phydisk_sim && raid_passthru == 0 &&
3382		    mpt_is_raid_volume(mpt, tgt) != 0) {
3383			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3384			    "no transfer settings for RAID vols\n");
3385			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3386			break;
3387		}
3388
3389		i = mpt->mpt_port_page2.PortSettings &
3390		    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
3391		j = mpt->mpt_port_page2.PortFlags &
3392		    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
3393		if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
3394		    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
3395			mpt_lprt(mpt, MPT_PRT_ALWAYS,
3396			    "honoring BIOS transfer negotiations\n");
3397			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3398			break;
3399		}
3400
3401		dval = 0;
3402		period = 0;
3403		offset = 0;
3404
3405#ifndef	CAM_NEW_TRAN_CODE
3406		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
3407			dval |= (cts->flags & CCB_TRANS_DISC_ENB) ?
3408			    DP_DISC_ENABLE : DP_DISC_DISABL;
3409		}
3410
3411		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
3412			dval |= (cts->flags & CCB_TRANS_TAG_ENB) ?
3413			    DP_TQING_ENABLE : DP_TQING_DISABL;
3414		}
3415
3416		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
3417			dval |= cts->bus_width ? DP_WIDE : DP_NARROW;
3418		}
3419
3420		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
3421		    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
3422			dval |= DP_SYNC;
3423			period = cts->sync_period;
3424			offset = cts->sync_offset;
3425		}
3426#else
3427		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
3428			dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
3429			    DP_DISC_ENABLE : DP_DISC_DISABL;
3430		}
3431
3432		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
3433			dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
3434			    DP_TQING_ENABLE : DP_TQING_DISABL;
3435		}
3436
3437		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
3438			dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
3439			    DP_WIDE : DP_NARROW;
3440		}
3441
3442		if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
3443			dval |= DP_SYNC;
3444			offset = spi->sync_offset;
3445		} else {
3446			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3447			    &mpt->mpt_dev_page1[tgt];
3448			offset = ptr->RequestedParameters;
3449			offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
			offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3451		}
3452		if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
3453			dval |= DP_SYNC;
3454			period = spi->sync_period;
3455		} else {
3456			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3457			    &mpt->mpt_dev_page1[tgt];
3458			period = ptr->RequestedParameters;
3459			period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
			period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3461		}
3462#endif
3463		if (dval & DP_DISC_ENABLE) {
3464			mpt->mpt_disc_enable |= (1 << tgt);
3465		} else if (dval & DP_DISC_DISABL) {
3466			mpt->mpt_disc_enable &= ~(1 << tgt);
3467		}
3468		if (dval & DP_TQING_ENABLE) {
3469			mpt->mpt_tag_enable |= (1 << tgt);
3470		} else if (dval & DP_TQING_DISABL) {
3471			mpt->mpt_tag_enable &= ~(1 << tgt);
3472		}
3473		if (dval & DP_WIDTH) {
3474			mpt_setwidth(mpt, tgt, 1);
3475		}
3476		if (dval & DP_SYNC) {
3477			mpt_setsync(mpt, tgt, period, offset);
3478		}
3479		if (dval == 0) {
3480			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3481			break;
3482		}
3483		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3484		    "set [%d]: 0x%x period 0x%x offset %d\n",
3485		    tgt, dval, period, offset);
3486		if (mpt_update_spi_config(mpt, tgt)) {
3487			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3488		} else {
3489			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3490		}
3491		break;
3492	}
3493	case XPT_GET_TRAN_SETTINGS:
3494	{
3495#ifdef	CAM_NEW_TRAN_CODE
3496		struct ccb_trans_settings_scsi *scsi;
3497		cts = &ccb->cts;
3498		cts->protocol = PROTO_SCSI;
3499		if (mpt->is_fc) {
3500			struct ccb_trans_settings_fc *fc =
3501			    &cts->xport_specific.fc;
3502			cts->protocol_version = SCSI_REV_SPC;
3503			cts->transport = XPORT_FC;
3504			cts->transport_version = 0;
3505			fc->valid = CTS_FC_VALID_SPEED;
3506			fc->bitrate = 100000;
3507		} else if (mpt->is_sas) {
3508			struct ccb_trans_settings_sas *sas =
3509			    &cts->xport_specific.sas;
3510			cts->protocol_version = SCSI_REV_SPC2;
3511			cts->transport = XPORT_SAS;
3512			cts->transport_version = 0;
3513			sas->valid = CTS_SAS_VALID_SPEED;
3514			sas->bitrate = 300000;
3515		} else {
3516			cts->protocol_version = SCSI_REV_2;
3517			cts->transport = XPORT_SPI;
3518			cts->transport_version = 2;
3519			if (mpt_get_spi_settings(mpt, cts) != 0) {
3520				mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3521				break;
3522			}
3523		}
3524		scsi = &cts->proto_specific.scsi;
3525		scsi->valid = CTS_SCSI_VALID_TQ;
3526		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
3527#else
3528		cts = &ccb->cts;
3529		if (mpt->is_fc) {
3530			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3531			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3532			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3533		} else if (mpt->is_sas) {
3534			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3535			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3536			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3537		} else if (mpt_get_spi_settings(mpt, cts) != 0) {
3538			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3539			break;
3540		}
3541#endif
3542		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3543		break;
3544	}
3545	case XPT_CALC_GEOMETRY:
3546	{
3547		struct ccb_calc_geometry *ccg;
3548
3549		ccg = &ccb->ccg;
3550		if (ccg->block_size == 0) {
3551			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3552			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3553			break;
3554		}
3555		cam_calc_geometry(ccg, /* extended */ 1);
3556		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
3557		break;
3558	}
3559	case XPT_PATH_INQ:		/* Path routing inquiry */
3560	{
3561		struct ccb_pathinq *cpi = &ccb->cpi;
3562
3563		cpi->version_num = 1;
3564		cpi->target_sprt = 0;
3565		cpi->hba_eng_cnt = 0;
3566		cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
3567		cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE;
3568		/*
3569		 * FC cards report MAX_DEVICES of 512, but
3570		 * the MSG_SCSI_IO_REQUEST target id field
3571		 * is only 8 bits. Until we fix the driver
3572		 * to support 'channels' for bus overflow,
3573		 * just limit it.
3574		 */
3575		if (cpi->max_target > 255) {
3576			cpi->max_target = 255;
3577		}
3578
3579		/*
3580		 * VMware ESX reports > 16 devices and then dies when we probe.
3581		 */
3582		if (mpt->is_spi && cpi->max_target > 15) {
3583			cpi->max_target = 15;
3584		}
3585		if (mpt->is_spi)
3586			cpi->max_lun = 7;
3587		else
3588			cpi->max_lun = MPT_MAX_LUNS;
3589		cpi->initiator_id = mpt->mpt_ini_id;
3590		cpi->bus_id = cam_sim_bus(sim);
3591
3592		/*
3593		 * The base speed is the speed of the underlying connection.
3594		 */
3595#ifdef	CAM_NEW_TRAN_CODE
3596		cpi->protocol = PROTO_SCSI;
3597		if (mpt->is_fc) {
3598			cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
3599			cpi->base_transfer_speed = 100000;
3600			cpi->hba_inquiry = PI_TAG_ABLE;
3601			cpi->transport = XPORT_FC;
3602			cpi->transport_version = 0;
3603			cpi->protocol_version = SCSI_REV_SPC;
3604		} else if (mpt->is_sas) {
3605			cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
3606			cpi->base_transfer_speed = 300000;
3607			cpi->hba_inquiry = PI_TAG_ABLE;
3608			cpi->transport = XPORT_SAS;
3609			cpi->transport_version = 0;
3610			cpi->protocol_version = SCSI_REV_SPC2;
3611		} else {
3612			cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED;
3613			cpi->base_transfer_speed = 3300;
3614			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3615			cpi->transport = XPORT_SPI;
3616			cpi->transport_version = 2;
3617			cpi->protocol_version = SCSI_REV_2;
3618		}
3619#else
3620		if (mpt->is_fc) {
3621			cpi->hba_misc = PIM_NOBUSRESET;
3622			cpi->base_transfer_speed = 100000;
3623			cpi->hba_inquiry = PI_TAG_ABLE;
3624		} else if (mpt->is_sas) {
3625			cpi->hba_misc = PIM_NOBUSRESET;
3626			cpi->base_transfer_speed = 300000;
3627			cpi->hba_inquiry = PI_TAG_ABLE;
3628		} else {
3629			cpi->hba_misc = PIM_SEQSCAN;
3630			cpi->base_transfer_speed = 3300;
3631			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3632		}
3633#endif
3634
3635		/*
		 * We give our fake RAID passthru bus a width that is
		 * MaxPhysDisks wide and restrict it to one lun.
3638		 */
3639		if (raid_passthru) {
3640			cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3641			cpi->initiator_id = cpi->max_target + 1;
3642			cpi->max_lun = 0;
3643		}
3644
3645		if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3646			cpi->hba_misc |= PIM_NOINITIATOR;
3647		}
3648		if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
3649			cpi->target_sprt =
3650			    PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3651		} else {
3652			cpi->target_sprt = 0;
3653		}
	strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
	strlcpy(cpi->hba_vid, "LSI", HBA_IDLEN);
	strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3657		cpi->unit_number = cam_sim_unit(sim);
3658		cpi->ccb_h.status = CAM_REQ_CMP;
3659		break;
3660	}
3661	case XPT_EN_LUN:		/* Enable LUN as a target */
3662	{
3663		int result;
3664
3665		if (ccb->cel.enable)
3666			result = mpt_enable_lun(mpt,
3667			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3668		else
3669			result = mpt_disable_lun(mpt,
3670			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3671		if (result == 0) {
3672			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3673		} else {
3674			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3675		}
3676		break;
3677	}
3678	case XPT_NOTIFY_ACKNOWLEDGE:	/* recycle notify ack */
3679	case XPT_IMMEDIATE_NOTIFY:	/* Add Immediate Notify Resource */
3680	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
3681	{
3682		tgt_resource_t *trtp;
3683		lun_id_t lun = ccb->ccb_h.target_lun;
3684		ccb->ccb_h.sim_priv.entries[0].field = 0;
3685		ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3686
3687		if (lun == CAM_LUN_WILDCARD) {
3688			if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3689				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3690				break;
3691			}
3692			trtp = &mpt->trt_wildcard;
3693		} else if (lun >= MPT_MAX_LUNS) {
3694			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3695			break;
3696		} else {
3697			trtp = &mpt->trt[lun];
3698		}
3699		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3700			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3701			    "Put FREE ATIO %p lun %d\n", ccb, lun);
3702			STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3703			    sim_links.stqe);
3704		} else if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) {
3705			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3706			    "Put FREE INOT lun %d\n", lun);
3707			STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3708			    sim_links.stqe);
3709		} else {
3710			mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3711		}
3712		mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3713		return;
3714	}
3715	case XPT_CONT_TARGET_IO:
3716		mpt_target_start_io(mpt, ccb);
3717		return;
3718
3719	default:
3720		ccb->ccb_h.status = CAM_REQ_INVALID;
3721		break;
3722	}
3723	xpt_done(ccb);
3724}
3725
3726static int
3727mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3728{
3729#ifdef	CAM_NEW_TRAN_CODE
3730	struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3731	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3732#endif
3733	target_id_t tgt;
3734	uint32_t dval, pval, oval;
3735	int rv;
3736
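	/*
	 * For current settings on the RAID passthru SIM, map the CAM
	 * target id to the underlying physical disk; user (NVRAM)
	 * settings and plain SIM targets use the CAM target id as-is.
	 */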
3737	if (IS_CURRENT_SETTINGS(cts) == 0) {
3738		tgt = cts->ccb_h.target_id;
3739	} else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3740		if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3741			return (-1);
3742		}
3743	} else {
3744		tgt = cts->ccb_h.target_id;
3745	}
3746
3747	/*
	 * We aren't looking at Port Page 2 BIOS settings here-
	 * these have sometimes been known to be bogus (XXX).
	 *
	 * For user settings, we pick the max from Port Page 0.
	 *
	 * For current settings, we read the current settings out of
	 * Device Page 0 for that target.
3755	 */
3756	if (IS_CURRENT_SETTINGS(cts)) {
3757		CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3758		dval = 0;
3759
3760		tmp = mpt->mpt_dev_page0[tgt];
3761		rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3762		    sizeof(tmp), FALSE, 5000);
3763		if (rv) {
3764			mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3765			return (rv);
3766		}
3767		mpt2host_config_page_scsi_device_0(&tmp);
3768
3769		mpt_lprt(mpt, MPT_PRT_DEBUG,
3770		    "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
3771		    tmp.NegotiatedParameters, tmp.Information);
3772		dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3773		    DP_WIDE : DP_NARROW;
3774		dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3775		    DP_DISC_ENABLE : DP_DISC_DISABL;
3776		dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3777		    DP_TQING_ENABLE : DP_TQING_DISABL;
3778		oval = tmp.NegotiatedParameters;
3779		oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
3780		oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
3781		pval = tmp.NegotiatedParameters;
3782		pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
3783		pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
3784		mpt->mpt_dev_page0[tgt] = tmp;
3785	} else {
3786		dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
3787		oval = mpt->mpt_port_page0.Capabilities;
3788		oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
3789		pval = mpt->mpt_port_page0.Capabilities;
3790		pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
3791	}
3792
3793#ifndef	CAM_NEW_TRAN_CODE
3794	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
3795	cts->valid = 0;
3796	cts->sync_period = pval;
3797	cts->sync_offset = oval;
3798	cts->valid |= CCB_TRANS_SYNC_RATE_VALID;
3799	cts->valid |= CCB_TRANS_SYNC_OFFSET_VALID;
3800	cts->valid |= CCB_TRANS_BUS_WIDTH_VALID;
3801	if (dval & DP_WIDE) {
3802		cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3803	} else {
3804		cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3805	}
3806	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3807		cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3808		if (dval & DP_DISC_ENABLE) {
3809			cts->flags |= CCB_TRANS_DISC_ENB;
3810		}
3811		if (dval & DP_TQING_ENABLE) {
3812			cts->flags |= CCB_TRANS_TAG_ENB;
3813		}
3814	}
3815#else
3816	spi->valid = 0;
3817	scsi->valid = 0;
3818	spi->flags = 0;
3819	scsi->flags = 0;
3820	spi->sync_offset = oval;
3821	spi->sync_period = pval;
3822	spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3823	spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3824	spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3825	if (dval & DP_WIDE) {
3826		spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3827	} else {
3828		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3829	}
3830	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3831		scsi->valid = CTS_SCSI_VALID_TQ;
3832		if (dval & DP_TQING_ENABLE) {
3833			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3834		}
3835		spi->valid |= CTS_SPI_VALID_DISC;
3836		if (dval & DP_DISC_ENABLE) {
3837			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3838		}
3839	}
3840#endif
3841	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3842	    "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
3843	    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM ", dval, pval, oval);
3844	return (0);
3845}
3846
3847static void
3848mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3849{
3850	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3851
3852	ptr = &mpt->mpt_dev_page1[tgt];
3853	if (onoff) {
3854		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3855	} else {
3856		ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3857	}
3858}
3859
3860static void
3861mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3862{
3863	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3864
3865	ptr = &mpt->mpt_dev_page1[tgt];
3866	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3867	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3868	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3869	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3870	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3871	if (period == 0) {
3872		return;
3873	}
3874	ptr->RequestedParameters |=
3875	    period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3876	ptr->RequestedParameters |=
3877	    offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
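	/*
	 * Per the standard SPI transfer period factor encoding, 0xa
	 * is 25ns (Fast-40); factors of 0x9 and below (Ultra160 and
	 * faster) require DT, and 0x8 and below (Ultra320) also want
	 * QAS and IU (packetized) transfers enabled.
	 */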
3878	if (period < 0xa) {
3879		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3880	}
3881	if (period < 0x9) {
3882		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3883		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
3884	}
3885}
3886
3887static int
3888mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3889{
3890	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3891	int rv;
3892
3893	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3894	    "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
3895	    tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
3896	tmp = mpt->mpt_dev_page1[tgt];
3897	host2mpt_config_page_scsi_device_1(&tmp);
3898	rv = mpt_write_cur_cfg_page(mpt, tgt,
3899	    &tmp.Header, sizeof(tmp), FALSE, 5000);
3900	if (rv) {
3901		mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3902		return (-1);
3903	}
3904	return (0);
3905}
3906
3907/****************************** Timeout Recovery ******************************/
3908static int
3909mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3910{
3911	int error;
3912
3913	error = mpt_kthread_create(mpt_recovery_thread, mpt,
3914	    &mpt->recovery_thread, /*flags*/0,
3915	    /*altstack*/0, "mpt_recovery%d", mpt->unit);
3916	return (error);
3917}
3918
3919static void
3920mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3921{
3922
3923	if (mpt->recovery_thread == NULL) {
3924		return;
3925	}
3926	mpt->shutdwn_recovery = 1;
3927	wakeup(mpt);
3928	/*
3929	 * Sleep on a slightly different location
3930	 * for this interlock just for added safety.
3931	 */
3932	mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
3933}
3934
3935static void
3936mpt_recovery_thread(void *arg)
3937{
3938	struct mpt_softc *mpt;
3939
3940	mpt = (struct mpt_softc *)arg;
3941	MPT_LOCK(mpt);
3942	for (;;) {
3943		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3944			if (mpt->shutdwn_recovery == 0) {
3945				mpt_sleep(mpt, mpt, PUSER, "idle", 0);
3946			}
3947		}
3948		if (mpt->shutdwn_recovery != 0) {
3949			break;
3950		}
3951		mpt_recover_commands(mpt);
3952	}
3953	mpt->recovery_thread = NULL;
3954	wakeup(&mpt->recovery_thread);
3955	MPT_UNLOCK(mpt);
3956	mpt_kthread_exit(0);
3957}
3958
3959static int
3960mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
3961    u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
3962{
3963	MSG_SCSI_TASK_MGMT *tmf_req;
3964	int		    error;
3965
3966	/*
3967	 * Wait for any current TMF request to complete.
3968	 * We're only allowed to issue one TMF at a time.
3969	 */
3970	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
3971	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
3972	if (error != 0) {
3973		mpt_reset(mpt, TRUE);
3974		return (ETIMEDOUT);
3975	}
3976
3977	mpt_assign_serno(mpt, mpt->tmf_req);
3978	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
3979
3980	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
3981	memset(tmf_req, 0, sizeof(*tmf_req));
3982	tmf_req->TargetID = target;
3983	tmf_req->Bus = channel;
3984	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
3985	tmf_req->TaskType = type;
3986	tmf_req->MsgFlags = flags;
3987	tmf_req->MsgContext =
3988	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
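	/*
	 * Encode the LUN: luns beyond MPT_MAX_LUNS use SAM-2 flat
	 * space addressing (0x40 in the upper bits of byte 0);
	 * smaller luns use single byte peripheral device addressing.
	 */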
3989	if (lun > MPT_MAX_LUNS) {
3990		tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
3991		tmf_req->LUN[1] = lun & 0xff;
3992	} else {
3993		tmf_req->LUN[1] = lun;
3994	}
3995	tmf_req->TaskMsgContext = abort_ctx;
3996
3997	mpt_lprt(mpt, MPT_PRT_DEBUG,
3998	    "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
3999	    mpt->tmf_req->serno, tmf_req->MsgContext);
4000	if (mpt->verbose > MPT_PRT_DEBUG) {
4001		mpt_print_request(tmf_req);
4002	}
4003
4004	KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
4005	    ("mpt_scsi_send_tmf: tmf_req already on pending list"));
4006	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
4007	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
4008	if (error != MPT_OK) {
4009		TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
4010		mpt->tmf_req->state = REQ_STATE_FREE;
4011		mpt_reset(mpt, TRUE);
4012	}
4013	return (error);
4014}
4015
4016/*
 * When a command times out, it is placed on the request_timeout_list
 * and we wake our recovery thread.  The MPT-Fusion architecture supports
 * only a single TMF operation at a time, so we serially abort/bdr, etc,
 * the timed-out transactions.  The next TMF is issued either by the
4021 * completion handler of the current TMF waking our recovery thread,
4022 * or the TMF timeout handler causing a hard reset sequence.
4023 */
4024static void
4025mpt_recover_commands(struct mpt_softc *mpt)
4026{
4027	request_t	   *req;
4028	union ccb	   *ccb;
4029	int		    error;
4030
4031	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4032		/*
4033		 * No work to do- leave.
4034		 */
4035		mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
4036		return;
4037	}
4038
4039	/*
4040	 * Flush any commands whose completion coincides with their timeout.
4041	 */
4042	mpt_intr(mpt);
4043
4044	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4045		/*
		 * The timed-out commands have already
4047		 * completed.  This typically means
4048		 * that either the timeout value was on
4049		 * the hairy edge of what the device
4050		 * requires or - more likely - interrupts
4051		 * are not happening.
4052		 */
		mpt_prt(mpt, "Timed-out requests already complete. "
4054		    "Interrupts may not be functioning.\n");
4055		mpt_enable_ints(mpt);
4056		return;
4057	}
4058
4059	/*
4060	 * We have no visibility into the current state of the
4061	 * controller, so attempt to abort the commands in the
	 * order they timed out. For initiator commands, we
4063	 * depend on the reply handler pulling requests off
4064	 * the timeout list.
4065	 */
4066	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
4067		uint16_t status;
4068		uint8_t response;
4069		MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
4070
4071		mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
4072		    req, req->serno, hdrp->Function);
4073		ccb = req->ccb;
4074		if (ccb == NULL) {
4075			mpt_prt(mpt, "null ccb in timed out request. "
4076			    "Resetting Controller.\n");
4077			mpt_reset(mpt, TRUE);
4078			continue;
4079		}
4080		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
4081
4082		/*
		 * Check to see if this is not an initiator command and
		 * deal with it differently if so.
4085		 */
4086		switch (hdrp->Function) {
4087		case MPI_FUNCTION_SCSI_IO_REQUEST:
4088		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
4089			break;
4090		default:
4091			/*
4092			 * XXX: FIX ME: need to abort target assists...
4093			 */
4094			mpt_prt(mpt, "just putting it back on the pend q\n");
4095			TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
4096			TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
4097			    links);
4098			continue;
4099		}
4100
4101		error = mpt_scsi_send_tmf(mpt,
4102		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4103		    0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
4104		    htole32(req->index | scsi_io_handler_id), TRUE);
4105
4106		if (error != 0) {
4107			/*
4108			 * mpt_scsi_send_tmf hard resets on failure, so no
4109			 * need to do so here.  Our queue should be emptied
4110			 * by the hard reset.
4111			 */
4112			continue;
4113		}
4114
4115		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
4116		    REQ_STATE_DONE, TRUE, 500);
4117
4118		status = le16toh(mpt->tmf_req->IOCStatus);
4119		response = mpt->tmf_req->ResponseCode;
4120		mpt->tmf_req->state = REQ_STATE_FREE;
4121
4122		if (error != 0) {
4123			/*
			 * If we've errored out, reset the controller.
4125			 */
4126			mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
4127			    "Resetting controller\n");
4128			mpt_reset(mpt, TRUE);
4129			continue;
4130		}
4131
4132		if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
4133			mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
4134			    "Resetting controller.\n", status);
4135			mpt_reset(mpt, TRUE);
4136			continue;
4137		}
4138
4139		if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
4140		    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
4141			mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
4142			    "Resetting controller.\n", response);
4143			mpt_reset(mpt, TRUE);
4144			continue;
4145		}
4146		mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
4147	}
4148}
4149
4150/************************ Target Mode Support ****************************/
4151static void
4152mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
4153{
4154	MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
4155	PTR_SGE_TRANSACTION32 tep;
4156	PTR_SGE_SIMPLE32 se;
4157	bus_addr_t paddr;
4158	uint32_t fl;
4159
4160	paddr = req->req_pbuf;
4161	paddr += MPT_RQSL(mpt);
4162
4163	fc = req->req_vbuf;
4164	memset(fc, 0, MPT_REQUEST_AREA);
4165	fc->BufferCount = 1;
4166	fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
4167	fc->MsgContext = htole32(req->index | fc_els_handler_id);
4168
4169	/*
4170	 * Okay, set up ELS buffer pointers. ELS buffer pointers
4171	 * consist of a TE SGL element (with details length of zero)
4172	 * followed by a SIMPLE SGL element which holds the address
4173	 * of the buffer.
4174	 */
4175
4176	tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
4177
4178	tep->ContextSize = 4;
4179	tep->Flags = 0;
4180	tep->TransactionContext[0] = htole32(ioindex);
4181
4182	se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
4183	fl =
4184		MPI_SGE_FLAGS_HOST_TO_IOC	|
4185		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
4186		MPI_SGE_FLAGS_LAST_ELEMENT	|
4187		MPI_SGE_FLAGS_END_OF_LIST	|
4188		MPI_SGE_FLAGS_END_OF_BUFFER;
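	/*
	 * FlagsLength packs the SGE flags into the top byte and the
	 * ELS buffer length into the low 24 bits.
	 */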
4189	fl <<= MPI_SGE_FLAGS_SHIFT;
4190	fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
4191	se->FlagsLength = htole32(fl);
4192	se->Address = htole32((uint32_t) paddr);
4193	mpt_lprt(mpt, MPT_PRT_DEBUG,
4194	    "add ELS index %d ioindex %d for %p:%u\n",
4195	    req->index, ioindex, req, req->serno);
4196	KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
4197	    ("mpt_fc_post_els: request not locked"));
4198	mpt_send_cmd(mpt, req);
4199}
4200
4201static void
4202mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
4203{
4204	PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
4205	PTR_CMD_BUFFER_DESCRIPTOR cb;
4206	bus_addr_t paddr;
4207
4208	paddr = req->req_pbuf;
4209	paddr += MPT_RQSL(mpt);
4210	memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
4211	MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
4212
4213	fc = req->req_vbuf;
4214	fc->BufferCount = 1;
4215	fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
4216	fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4217
4218	cb = &fc->Buffer[0];
4219	cb->IoIndex = htole16(ioindex);
4220	cb->u.PhysicalAddress32 = htole32((U32) paddr);
4221
4222	mpt_check_doorbell(mpt);
4223	mpt_send_cmd(mpt, req);
4224}
4225
4226static int
4227mpt_add_els_buffers(struct mpt_softc *mpt)
4228{
4229	int i;
4230
4231	if (mpt->is_fc == 0) {
4232		return (TRUE);
4233	}
4234
4235	if (mpt->els_cmds_allocated) {
4236		return (TRUE);
4237	}
4238
4239	mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *),
4240	    M_DEVBUF, M_NOWAIT | M_ZERO);
4241
4242	if (mpt->els_cmd_ptrs == NULL) {
4243		return (FALSE);
4244	}
4245
4246	/*
4247	 * Feed the chip some ELS buffer resources
4248	 */
4249	for (i = 0; i < MPT_MAX_ELS; i++) {
4250		request_t *req = mpt_get_request(mpt, FALSE);
4251		if (req == NULL) {
4252			break;
4253		}
4254		req->state |= REQ_STATE_LOCKED;
4255		mpt->els_cmd_ptrs[i] = req;
4256		mpt_fc_post_els(mpt, req, i);
4257	}
4258
4259	if (i == 0) {
4260		mpt_prt(mpt, "unable to add ELS buffer resources\n");
4261		free(mpt->els_cmd_ptrs, M_DEVBUF);
4262		mpt->els_cmd_ptrs = NULL;
4263		return (FALSE);
4264	}
4265	if (i != MPT_MAX_ELS) {
		mpt_lprt(mpt, MPT_PRT_INFO,
		    "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS);
4268	}
4269	mpt->els_cmds_allocated = i;
4270	return(TRUE);
4271}
4272
4273static int
4274mpt_add_target_commands(struct mpt_softc *mpt)
4275{
4276	int i, max;
4277
4278	if (mpt->tgt_cmd_ptrs) {
4279		return (TRUE);
4280	}
4281
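	/*
	 * Use at most half of the request pool for target command
	 * buffers, further capped by the configured maximum.
	 */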
4282	max = MPT_MAX_REQUESTS(mpt) >> 1;
4283	if (max > mpt->mpt_max_tgtcmds) {
4284		max = mpt->mpt_max_tgtcmds;
4285	}
4286	mpt->tgt_cmd_ptrs =
4287	    malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
4288	if (mpt->tgt_cmd_ptrs == NULL) {
4289		mpt_prt(mpt,
4290		    "mpt_add_target_commands: could not allocate cmd ptrs\n");
4291		return (FALSE);
4292	}
4293
4294	for (i = 0; i < max; i++) {
4295		request_t *req;
4296
4297		req = mpt_get_request(mpt, FALSE);
4298		if (req == NULL) {
4299			break;
4300		}
4301		req->state |= REQ_STATE_LOCKED;
4302		mpt->tgt_cmd_ptrs[i] = req;
4303		mpt_post_target_command(mpt, req, i);
4304	}
4305
4307	if (i == 0) {
4308		mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
4309		free(mpt->tgt_cmd_ptrs, M_DEVBUF);
4310		mpt->tgt_cmd_ptrs = NULL;
4311		return (FALSE);
4312	}
4313
4314	mpt->tgt_cmds_allocated = i;
4315
4316	if (i < max) {
4317		mpt_lprt(mpt, MPT_PRT_INFO,
4318		    "added %d of %d target bufs\n", i, max);
4319	}
4320	return (i);
4321}
4322
4323static int
4324mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4325{
4326
4327	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4328		mpt->twildcard = 1;
4329	} else if (lun >= MPT_MAX_LUNS) {
4330		return (EINVAL);
4331	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4332		return (EINVAL);
4333	}
4334	if (mpt->tenabled == 0) {
4335		if (mpt->is_fc) {
4336			(void) mpt_fc_reset_link(mpt, 0);
4337		}
4338		mpt->tenabled = 1;
4339	}
4340	if (lun == CAM_LUN_WILDCARD) {
4341		mpt->trt_wildcard.enabled = 1;
4342	} else {
4343		mpt->trt[lun].enabled = 1;
4344	}
4345	return (0);
4346}
4347
4348static int
4349mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4350{
4351	int i;
4352
4353	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4354		mpt->twildcard = 0;
4355	} else if (lun >= MPT_MAX_LUNS) {
4356		return (EINVAL);
4357	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4358		return (EINVAL);
4359	}
4360	if (lun == CAM_LUN_WILDCARD) {
4361		mpt->trt_wildcard.enabled = 0;
4362	} else {
4363		mpt->trt[lun].enabled = 0;
4364	}
4365	for (i = 0; i < MPT_MAX_LUNS; i++) {
		if (mpt->trt[i].enabled) {
4367			break;
4368		}
4369	}
4370	if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
4371		if (mpt->is_fc) {
4372			(void) mpt_fc_reset_link(mpt, 0);
4373		}
4374		mpt->tenabled = 0;
4375	}
4376	return (0);
4377}
4378
4379/*
4380 * Called with MPT lock held
4381 */
4382static void
4383mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
4384{
4385	struct ccb_scsiio *csio = &ccb->csio;
4386	request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
4387	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
4388
4389	switch (tgt->state) {
4390	case TGT_STATE_IN_CAM:
4391		break;
4392	case TGT_STATE_MOVING_DATA:
4393		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4394		xpt_freeze_simq(mpt->sim, 1);
4395		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4396		tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4397		xpt_done(ccb);
4398		return;
4399	default:
4400		mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
4401		    "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
4402		mpt_tgt_dump_req_state(mpt, cmd_req);
4403		mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
4404		xpt_done(ccb);
4405		return;
4406	}
4407
4408	if (csio->dxfer_len) {
4409		bus_dmamap_callback_t *cb;
4410		PTR_MSG_TARGET_ASSIST_REQUEST ta;
4411		request_t *req;
4412		int error;
4413
4414		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
4415		    ("dxfer_len %u but direction is NONE", csio->dxfer_len));
4416
4417		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4418			if (mpt->outofbeer == 0) {
4419				mpt->outofbeer = 1;
4420				xpt_freeze_simq(mpt->sim, 1);
4421				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4422			}
4423			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4424			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4425			xpt_done(ccb);
4426			return;
4427		}
4428		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
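		/*
		 * Select the dmamap callback that builds 64-bit SGEs
		 * when bus addresses can be wider than 32 bits.
		 */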
4429		if (sizeof (bus_addr_t) > 4) {
4430			cb = mpt_execute_req_a64;
4431		} else {
4432			cb = mpt_execute_req;
4433		}
4434
4435		req->ccb = ccb;
4436		ccb->ccb_h.ccb_req_ptr = req;
4437
4438		/*
4439		 * Record the currently active ccb and the
4440		 * request for it in our target state area.
4441		 */
4442		tgt->ccb = ccb;
4443		tgt->req = req;
4444
4445		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4446		ta = req->req_vbuf;
4447
4448		if (mpt->is_sas) {
4449			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4450			     cmd_req->req_vbuf;
4451			ta->QueueTag = ssp->InitiatorTag;
4452		} else if (mpt->is_spi) {
4453			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4454			     cmd_req->req_vbuf;
4455			ta->QueueTag = sp->Tag;
4456		}
4457		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4458		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4459		ta->ReplyWord = htole32(tgt->reply_desc);
4460		if (csio->ccb_h.target_lun > MPT_MAX_LUNS) {
4461			ta->LUN[0] =
4462			    0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
4463			ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
4464		} else {
4465			ta->LUN[1] = csio->ccb_h.target_lun;
4466		}
4467
4468		ta->RelativeOffset = tgt->bytes_xfered;
4469		ta->DataLength = ccb->csio.dxfer_len;
4470		if (ta->DataLength > tgt->resid) {
4471			ta->DataLength = tgt->resid;
4472		}
4473
4474		/*
4475		 * XXX Should be done after data transfer completes?
4476		 */
4477		tgt->resid -= csio->dxfer_len;
4478		tgt->bytes_xfered += csio->dxfer_len;
4479
4480		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
4481			ta->TargetAssistFlags |=
4482			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4483		}
4484
4485#ifdef	WE_TRUST_AUTO_GOOD_STATUS
4486		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
4487		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
4488			ta->TargetAssistFlags |=
4489			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
4490		}
4491#endif
4492		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
4493
4494		mpt_lprt(mpt, MPT_PRT_DEBUG,
4495		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
4496		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
4497		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
4498
4499		error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb,
4500		    cb, req, 0);
4501		if (error == EINPROGRESS) {
4502			xpt_freeze_simq(mpt->sim, 1);
4503			ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4504		}
4505	} else {
4506		uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4507
4508		/*
4509		 * XXX: I don't know why this seems to happen, but
4510		 * XXX: completing the CCB seems to make things happy.
4511		 * XXX: This seems to happen if the initiator requests
4512		 * XXX: enough data that we have to do multiple CTIOs.
4513		 */
4514		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4515			mpt_lprt(mpt, MPT_PRT_DEBUG,
4516			    "Meaningless STATUS CCB (%p): flags %x status %x "
4517			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
4518			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
4519			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4520			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4521			xpt_done(ccb);
4522			return;
4523		}
4524		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4525			sp = sense;
4526			memcpy(sp, &csio->sense_data,
4527			   min(csio->sense_len, MPT_SENSE_SIZE));
4528		}
4529		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
4530	}
4531}
4532
4533static void
4534mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
4535    uint32_t lun, int send, uint8_t *data, size_t length)
4536{
4537	mpt_tgt_state_t *tgt;
4538	PTR_MSG_TARGET_ASSIST_REQUEST ta;
4539	SGE_SIMPLE32 *se;
4540	uint32_t flags;
4541	uint8_t *dptr;
4542	bus_addr_t pptr;
4543	request_t *req;
4544
4545	/*
4546	 * We enter with resid set to the data load for the command.
4547	 */
4548	tgt = MPT_TGT_STATE(mpt, cmd_req);
4549	if (length == 0 || tgt->resid == 0) {
4550		tgt->resid = 0;
4551		mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL);
4552		return;
4553	}
4554
4555	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4556		mpt_prt(mpt, "out of resources- dropping local response\n");
4557		return;
4558	}
4559	tgt->is_local = 1;
4560
4562	memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4563	ta = req->req_vbuf;
4564
4565	if (mpt->is_sas) {
4566		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
4567		ta->QueueTag = ssp->InitiatorTag;
4568	} else if (mpt->is_spi) {
4569		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
4570		ta->QueueTag = sp->Tag;
4571	}
4572	ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4573	ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4574	ta->ReplyWord = htole32(tgt->reply_desc);
4575	if (lun > MPT_MAX_LUNS) {
4576		ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
4577		ta->LUN[1] = lun & 0xff;
4578	} else {
4579		ta->LUN[1] = lun;
4580	}
4581	ta->RelativeOffset = 0;
4582	ta->DataLength = length;
4583
4584	dptr = req->req_vbuf;
4585	dptr += MPT_RQSL(mpt);
4586	pptr = req->req_pbuf;
4587	pptr += MPT_RQSL(mpt);
4588	memcpy(dptr, data, min(length, MPT_RQSL(mpt)));
4589
4590	se = (SGE_SIMPLE32 *) &ta->SGL[0];
	memset(se, 0, sizeof (*se));
4592
4593	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
4594	if (send) {
4595		ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4596		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
4597	}
4598	se->Address = pptr;
4599	MPI_pSGE_SET_LENGTH(se, length);
4600	flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
4601	flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
4602	MPI_pSGE_SET_FLAGS(se, flags);
4603
4604	tgt->ccb = NULL;
4605	tgt->req = req;
4606	tgt->resid -= length;
4607	tgt->bytes_xfered = length;
4608#ifdef	WE_TRUST_AUTO_GOOD_STATUS
4609	tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
4610#else
4611	tgt->state = TGT_STATE_MOVING_DATA;
4612#endif
4613	mpt_send_cmd(mpt, req);
4614}
4615
4616/*
4617 * Abort queued up CCBs
4618 */
4619static cam_status
4620mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
4621{
4622	struct mpt_hdr_stailq *lp;
4623	struct ccb_hdr *srch;
4624	int found = 0;
4625	union ccb *accb = ccb->cab.abort_ccb;
4626	tgt_resource_t *trtp;
4627
4628	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
4629
4630	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
4631		trtp = &mpt->trt_wildcard;
4632	} else {
4633		trtp = &mpt->trt[ccb->ccb_h.target_lun];
4634	}
4635
4636	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
4637		lp = &trtp->atios;
4638	} else if (accb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) {
4639		lp = &trtp->inots;
4640	} else {
4641		return (CAM_REQ_INVALID);
4642	}
4643
4644	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
4645		if (srch == &accb->ccb_h) {
4646			found = 1;
4647			STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
4648			break;
4649		}
4650	}
4651	if (found) {
4652		accb->ccb_h.status = CAM_REQ_ABORTED;
4653		xpt_done(accb);
4654		return (CAM_REQ_CMP);
4655	}
	mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", accb);
4657	return (CAM_PATH_INVALID);
4658}
4659
4660/*
4661 * Ask the MPT to abort the current target command
4662 */
4663static int
4664mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
4665{
4666	int error;
4667	request_t *req;
4668	PTR_MSG_TARGET_MODE_ABORT abtp;
4669
4670	req = mpt_get_request(mpt, FALSE);
4671	if (req == NULL) {
4672		return (-1);
4673	}
4674	abtp = req->req_vbuf;
4675	memset(abtp, 0, sizeof (*abtp));
4676
4677	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4678	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
4679	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
4680	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
4681	error = 0;
4682	if (mpt->is_fc || mpt->is_sas) {
4683		mpt_send_cmd(mpt, req);
4684	} else {
4685		error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
4686	}
4687	return (error);
4688}
4689
4690/*
4691 * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
4692 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
4693 * FC929 to set bogus FC_RSP fields (nonzero residuals
4694 * but w/o RESID fields set). This causes QLogic initiators
 * to think that maybe a frame was lost.
4696 *
4697 * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
4698 * we use allocated requests to do TARGET_ASSIST and we
4699 * need to know when to release them.
4700 */
4701
4702static void
4703mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
4704    uint8_t status, uint8_t const *sense_data)
4705{
4706	uint8_t *cmd_vbuf;
4707	mpt_tgt_state_t *tgt;
4708	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
4709	request_t *req;
4710	bus_addr_t paddr;
4711	int resplen = 0;
4712	uint32_t fl;
4713
4714	cmd_vbuf = cmd_req->req_vbuf;
4715	cmd_vbuf += MPT_RQSL(mpt);
4716	tgt = MPT_TGT_STATE(mpt, cmd_req);
4717
4718	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4719		if (mpt->outofbeer == 0) {
4720			mpt->outofbeer = 1;
4721			xpt_freeze_simq(mpt->sim, 1);
4722			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4723		}
4724		if (ccb) {
4725			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4726			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4727			xpt_done(ccb);
4728		} else {
4729			mpt_prt(mpt,
4730			    "could not allocate status request- dropping\n");
4731		}
4732		return;
4733	}
4734	req->ccb = ccb;
4735	if (ccb) {
4736		ccb->ccb_h.ccb_mpt_ptr = mpt;
4737		ccb->ccb_h.ccb_req_ptr = req;
4738	}
4739
4740	/*
4741	 * Record the currently active ccb, if any, and the
4742	 * request for it in our target state area.
4743	 */
4744	tgt->ccb = ccb;
4745	tgt->req = req;
4746	tgt->state = TGT_STATE_SENDING_STATUS;
4747
4748	tp = req->req_vbuf;
4749	paddr = req->req_pbuf;
4750	paddr += MPT_RQSL(mpt);
4751
4752	memset(tp, 0, sizeof (*tp));
4753	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
4754	if (mpt->is_fc) {
4755		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
4756		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
4757		uint8_t *sts_vbuf;
4758		uint32_t *rsp;
4759
4760		sts_vbuf = req->req_vbuf;
4761		sts_vbuf += MPT_RQSL(mpt);
4762		rsp = (uint32_t *) sts_vbuf;
4763		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
4764
4765		/*
4766		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
4767		 * It has to be big-endian in memory and is organized
		 * in 32 bit words, which are much easier to deal with
		 * and can be swizzled as needed.
4770		 *
4771		 * All we're filling here is the FC_RSP payload.
4772		 * We may just have the chip synthesize it if
4773		 * we have no residual and an OK status.
4774		 *
4775		 */
4776		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
4777
4778		rsp[2] = status;
4779		if (tgt->resid) {
			rsp[2] |= 0x800;	/* FCP_RESID_UNDER */
4781			rsp[3] = htobe32(tgt->resid);
4782#ifdef	WE_TRUST_AUTO_GOOD_STATUS
4783			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4784#endif
4785		}
4786		if (status == SCSI_STATUS_CHECK_COND) {
4787			int i;
4788
			rsp[2] |= 0x200;	/* FCP_SNS_LEN_VALID */
4790			rsp[4] = htobe32(MPT_SENSE_SIZE);
4791			if (sense_data) {
4792				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
4793			} else {
4794				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
4795				    "TION but no sense data?\n");
				memset(&rsp[8], 0, MPT_SENSE_SIZE);
4797			}
4798			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
4799				rsp[i] = htobe32(rsp[i]);
4800			}
4801#ifdef	WE_TRUST_AUTO_GOOD_STATUS
4802			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4803#endif
4804		}
4805#ifndef	WE_TRUST_AUTO_GOOD_STATUS
4806		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4807#endif
4808		rsp[2] = htobe32(rsp[2]);
4809	} else if (mpt->is_sas) {
4810		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4811		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
4812		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
4813	} else {
4814		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4815		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
4816		tp->StatusCode = status;
4817		tp->QueueTag = htole16(sp->Tag);
4818		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
4819	}
4820
4821	tp->ReplyWord = htole32(tgt->reply_desc);
4822	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4823
4824#ifdef	WE_CAN_USE_AUTO_REPOST
4825	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
4826#endif
4827	if (status == SCSI_STATUS_OK && resplen == 0) {
4828		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
4829	} else {
4830		tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
4831		fl =
4832			MPI_SGE_FLAGS_HOST_TO_IOC	|
4833			MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
4834			MPI_SGE_FLAGS_LAST_ELEMENT	|
4835			MPI_SGE_FLAGS_END_OF_LIST	|
4836			MPI_SGE_FLAGS_END_OF_BUFFER;
4837		fl <<= MPI_SGE_FLAGS_SHIFT;
4838		fl |= resplen;
4839		tp->StatusDataSGE.FlagsLength = htole32(fl);
4840	}
4841
4842	mpt_lprt(mpt, MPT_PRT_DEBUG,
4843	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
4844	    ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req,
4845	    req->serno, tgt->resid);
4846	if (ccb) {
4847		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4848		mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb);
4849	}
4850	mpt_send_cmd(mpt, req);
4851}
4852
4853static void
4854mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
4855    tgt_resource_t *trtp, int init_id)
4856{
4857	struct ccb_immediate_notify *inot;
4858	mpt_tgt_state_t *tgt;
4859
4860	tgt = MPT_TGT_STATE(mpt, req);
4861	inot = (struct ccb_immediate_notify *) STAILQ_FIRST(&trtp->inots);
4862	if (inot == NULL) {
		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTs- sending back BSY\n");
4864		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
4865		return;
4866	}
4867	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
4868	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4869	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);
4870
4871	inot->initiator_id = init_id;	/* XXX */
4872	/*
4873	 * This is a somewhat grotesque attempt to map from task management
4874	 * to old style SCSI messages. God help us all.
4875	 */
4876	switch (fc) {
4877	case MPT_ABORT_TASK_SET:
4878		inot->arg = MSG_ABORT_TAG;
4879		break;
4880	case MPT_CLEAR_TASK_SET:
4881		inot->arg = MSG_CLEAR_TASK_SET;
4882		break;
4883	case MPT_TARGET_RESET:
4884		inot->arg = MSG_TARGET_RESET;
4885		break;
4886	case MPT_CLEAR_ACA:
4887		inot->arg = MSG_CLEAR_ACA;
4888		break;
4889	case MPT_TERMINATE_TASK:
4890		inot->arg = MSG_ABORT_TAG;
4891		break;
4892	default:
4893		inot->arg = MSG_NOOP;
4894		break;
4895	}
4896	/*
4897	 * XXX KDM we need the sequence/tag number for the target of the
4898	 * task management operation, especially if it is an abort.
4899	 */
4900	tgt->ccb = (union ccb *) inot;
4901	inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
4902	xpt_done((union ccb *)inot);
4903}
4904
4905static void
4906mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
4907{
4908	static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
4909	    0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
4910	     'F',  'R',  'E',  'E',  'B',  'S',  'D',  ' ',
4911	     'L',  'S',  'I',  '-',  'L',  'O',  'G',  'I',
4912	     'C',  ' ',  'N',  'U',  'L',  'D',  'E',  'V',
4913	     '0',  '0',  '0',  '1'
4914	};
4915	struct ccb_accept_tio *atiop;
4916	lun_id_t lun;
4917	int tag_action = 0;
4918	mpt_tgt_state_t *tgt;
4919	tgt_resource_t *trtp = NULL;
4920	U8 *lunptr;
4921	U8 *vbuf;
4922	U16 itag;
4923	U16 ioindex;
4924	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
4925	uint8_t *cdbp;
4926
4927	/*
4928	 * Stash info for the current command where we can get at it later.
4929	 */
4930	vbuf = req->req_vbuf;
4931	vbuf += MPT_RQSL(mpt);
4932
4933	/*
4934	 * Get our state pointer set up.
4935	 */
4936	tgt = MPT_TGT_STATE(mpt, req);
4937	if (tgt->state != TGT_STATE_LOADED) {
4938		mpt_tgt_dump_req_state(mpt, req);
4939		panic("bad target state in mpt_scsi_tgt_atio");
4940	}
4941	memset(tgt, 0, sizeof (mpt_tgt_state_t));
4942	tgt->state = TGT_STATE_IN_CAM;
4943	tgt->reply_desc = reply_desc;
4944	ioindex = GET_IO_INDEX(reply_desc);
4945	if (mpt->verbose >= MPT_PRT_DEBUG) {
4946		mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
4947		    max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
4948		    max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
4949		    sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
4950	}
4951	if (mpt->is_fc) {
4952		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
4953		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
4954		if (fc->FcpCntl[2]) {
4955			/*
4956			 * Task Management Request
4957			 */
4958			switch (fc->FcpCntl[2]) {
4959			case 0x2:
4960				fct = MPT_ABORT_TASK_SET;
4961				break;
4962			case 0x4:
4963				fct = MPT_CLEAR_TASK_SET;
4964				break;
4965			case 0x20:
4966				fct = MPT_TARGET_RESET;
4967				break;
4968			case 0x40:
4969				fct = MPT_CLEAR_ACA;
4970				break;
4971			case 0x80:
4972				fct = MPT_TERMINATE_TASK;
4973				break;
4974			default:
4975				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
4976				    fc->FcpCntl[2]);
4977				mpt_scsi_tgt_status(mpt, 0, req,
4978				    SCSI_STATUS_OK, 0);
4979				return;
4980			}
4981		} else {
4982			switch (fc->FcpCntl[1]) {
4983			case 0:
4984				tag_action = MSG_SIMPLE_Q_TAG;
4985				break;
4986			case 1:
4987				tag_action = MSG_HEAD_OF_Q_TAG;
4988				break;
4989			case 2:
4990				tag_action = MSG_ORDERED_Q_TAG;
4991				break;
4992			default:
4993				/*
				 * Bah. Ignore Untagged Queueing and ACA
4995				 */
4996				tag_action = MSG_SIMPLE_Q_TAG;
4997				break;
4998			}
4999		}
5000		tgt->resid = be32toh(fc->FcpDl);
5001		cdbp = fc->FcpCdb;
5002		lunptr = fc->FcpLun;
5003		itag = be16toh(fc->OptionalOxid);
5004	} else if (mpt->is_sas) {
5005		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
5006		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
5007		cdbp = ssp->CDB;
5008		lunptr = ssp->LogicalUnitNumber;
5009		itag = ssp->InitiatorTag;
5010	} else {
5011		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
5012		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
5013		cdbp = sp->CDB;
5014		lunptr = sp->LogicalUnitNumber;
5015		itag = sp->Tag;
5016	}
5017
5018	/*
	 * Generate a simple lun from the first level of the LUN
	 * field: 0x00 selects peripheral device addressing, 0x40
	 * selects flat space addressing.
5020	 */
5021	switch (lunptr[0] & 0xc0) {
5022	case 0x40:
5023		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
5024		break;
5025	case 0:
5026		lun = lunptr[1];
5027		break;
5028	default:
5029		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n");
5030		lun = 0xffff;
5031		break;
5032	}
5033
5034	/*
5035	 * Deal with non-enabled or bad luns here.
5036	 */
5037	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
5038	    mpt->trt[lun].enabled == 0) {
5039		if (mpt->twildcard) {
5040			trtp = &mpt->trt_wildcard;
5041		} else if (fct == MPT_NIL_TMT_VALUE) {
5042			/*
5043			 * In this case, we haven't got an upstream listener
5044			 * for either a specific lun or wildcard luns. We
5045			 * have to make some sensible response. For regular
5046			 * inquiry, just return some NOT HERE inquiry data.
5047			 * For VPD inquiry, report illegal field in cdb.
5048			 * For REQUEST SENSE, just return NO SENSE data.
5049			 * REPORT LUNS gets illegal command.
5050			 * All other commands get 'no such device'.
5051			 */
5052			uint8_t *sp, cond, buf[MPT_SENSE_SIZE];
5053			size_t len;
5054
5055			memset(buf, 0, MPT_SENSE_SIZE);
5056			cond = SCSI_STATUS_CHECK_COND;
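			/*
			 * Build fixed format sense data: 0xf0 is a
			 * current error with the VALID bit set, sense
			 * key 0x5 (ILLEGAL REQUEST) and an additional
			 * sense length of 8; the ASC/ASCQ bytes are
			 * filled in per command below.
			 */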
5057			buf[0] = 0xf0;
5058			buf[2] = 0x5;
5059			buf[7] = 0x8;
5060			sp = buf;
5061			tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
5062
5063			switch (cdbp[0]) {
5064			case INQUIRY:
5065			{
5066				if (cdbp[1] != 0) {
5067					buf[12] = 0x26;
5068					buf[13] = 0x01;
5069					break;
5070				}
5071				len = min(tgt->resid, cdbp[4]);
5072				len = min(len, sizeof (null_iqd));
5073				mpt_lprt(mpt, MPT_PRT_DEBUG,
5074				    "local inquiry %ld bytes\n", (long) len);
5075				mpt_scsi_tgt_local(mpt, req, lun, 1,
5076				    null_iqd, len);
5077				return;
5078			}
5079			case REQUEST_SENSE:
5080			{
5081				buf[2] = 0x0;
5082				len = min(tgt->resid, cdbp[4]);
5083				len = min(len, sizeof (buf));
5084				mpt_lprt(mpt, MPT_PRT_DEBUG,
5085				    "local reqsense %ld bytes\n", (long) len);
5086				mpt_scsi_tgt_local(mpt, req, lun, 1,
5087				    buf, len);
5088				return;
5089			}
5090			case REPORT_LUNS:
5091				mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
5092				buf[12] = 0x26;
5093				return;
5094			default:
5095				mpt_lprt(mpt, MPT_PRT_DEBUG,
5096				    "CMD 0x%x to unmanaged lun %u\n",
5097				    cdbp[0], lun);
5098				buf[12] = 0x25;
5099				break;
5100			}
5101			mpt_scsi_tgt_status(mpt, NULL, req, cond, sp);
5102			return;
5103		}
5104		/* otherwise, leave trtp NULL */
5105	} else {
5106		trtp = &mpt->trt[lun];
5107	}
5108
5109	/*
5110	 * Deal with any task management
5111	 */
5112	if (fct != MPT_NIL_TMT_VALUE) {
5113		if (trtp == NULL) {
5114			mpt_prt(mpt, "task mgmt function %x but no listener\n",
5115			    fct);
5116			mpt_scsi_tgt_status(mpt, 0, req,
5117			    SCSI_STATUS_OK, 0);
5118		} else {
5119			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
5120			    GET_INITIATOR_INDEX(reply_desc));
5121		}
5122		return;
5123	}
5124
5126	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
5127	if (atiop == NULL) {
5128		mpt_lprt(mpt, MPT_PRT_WARN,
5129		    "no ATIOs for lun %u- sending back %s\n", lun,
5130		    mpt->tenabled? "QUEUE FULL" : "BUSY");
5131		mpt_scsi_tgt_status(mpt, NULL, req,
5132		    mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
5133		    NULL);
5134		return;
5135	}
5136	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
5137	mpt_lprt(mpt, MPT_PRT_DEBUG1,
5138	    "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
5139	atiop->ccb_h.ccb_mpt_ptr = mpt;
5140	atiop->ccb_h.status = CAM_CDB_RECVD;
5141	atiop->ccb_h.target_lun = lun;
5142	atiop->sense_len = 0;
5143	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
5144	atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
5145	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
5146
5147	/*
5148	 * The tag we construct here allows us to find the
5149	 * original request that the command came in with.
5150	 *
5151	 * This way we don't have to depend on anything but the
5152	 * tag to find things when CCBs show back up from CAM.
5153	 */
5154	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
5155	tgt->tag_id = atiop->tag_id;
5156	if (tag_action) {
5157		atiop->tag_action = tag_action;
5158		atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
5159	}
5160	if (mpt->verbose >= MPT_PRT_DEBUG) {
5161		int i;
5162		mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
5163		    atiop->ccb_h.target_lun);
5164		for (i = 0; i < atiop->cdb_len; i++) {
5165			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
5166			    (i == (atiop->cdb_len - 1))? '>' : ' ');
5167		}
5168		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
		    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
5170	}
5171
5172	xpt_done((union ccb *)atiop);
5173}
5174
5175static void
5176mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
5177{
5178	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
5179
5180	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
5181	    "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
5182	    tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
5183	    tgt->tag_id, tgt->state);
5184}
5185
5186static void
5187mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
5188{
5189
5190	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
5191	    req->index, req->index, req->state);
5192	mpt_tgt_dump_tgt_state(mpt, req);
5193}
5194
5195static int
5196mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
5197    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
5198{
5199	int dbg;
5200	union ccb *ccb;
5201	U16 status;
5202
5203	if (reply_frame == NULL) {
5204		/*
		 * A turbo (context-only) reply: there is no reply frame,
		 * so figure out what to do from the state of the command.
5206		 */
5207		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
5208
5209#ifdef	INVARIANTS
5210		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
5211		if (tgt->req) {
5212			mpt_req_not_spcl(mpt, tgt->req,
5213			    "turbo scsi_tgt_reply associated req", __LINE__);
5214		}
5215#endif
5216		switch(tgt->state) {
5217		case TGT_STATE_LOADED:
5218			/*
5219			 * This is a new command starting.
5220			 */
5221			mpt_scsi_tgt_atio(mpt, req, reply_desc);
5222			break;
5223		case TGT_STATE_MOVING_DATA:
5224		{
5225			uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
5226
5227			ccb = tgt->ccb;
5228			if (tgt->req == NULL) {
5229				panic("mpt: turbo target reply with null "
5230				    "associated request moving data");
5231				/* NOTREACHED */
5232			}
5233			if (ccb == NULL) {
5234				if (tgt->is_local == 0) {
5235					panic("mpt: turbo target reply with "
5236					    "null associated ccb moving data");
5237					/* NOTREACHED */
5238				}
5239				mpt_lprt(mpt, MPT_PRT_DEBUG,
5240				    "TARGET_ASSIST local done\n");
5241				TAILQ_REMOVE(&mpt->request_pending_list,
5242				    tgt->req, links);
5243				mpt_free_request(mpt, tgt->req);
5244				tgt->req = NULL;
5245				mpt_scsi_tgt_status(mpt, NULL, req,
5246				    0, NULL);
5247				return (TRUE);
5248			}
5249			tgt->ccb = NULL;
5250			tgt->nxfers++;
5251			mpt_req_untimeout(req, mpt_timeout, ccb);
5252			mpt_lprt(mpt, MPT_PRT_DEBUG,
5253			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
5254			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
5255			/*
5256			 * Free the Target Assist Request
5257			 */
5258			KASSERT(tgt->req->ccb == ccb,
5259			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
5260			    tgt->req->serno, tgt->req->ccb));
5261			TAILQ_REMOVE(&mpt->request_pending_list,
5262			    tgt->req, links);
5263			mpt_free_request(mpt, tgt->req);
5264			tgt->req = NULL;
5265
5266			/*
5267			 * Do we need to send status now? That is, are
5268			 * we done with all our data transfers?
5269			 */
5270			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
5271				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
5272				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
5273				KASSERT(ccb->ccb_h.status,
5274				    ("zero ccb sts at %d", __LINE__));
5275				tgt->state = TGT_STATE_IN_CAM;
5276				if (mpt->outofbeer) {
5277					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
5278					mpt->outofbeer = 0;
5279					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
5280				}
5281				xpt_done(ccb);
5282				break;
5283			}
5284			/*
5285			 * Otherwise, send status (and sense)
5286			 */
5287			if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
5288				sp = sense;
5289				memcpy(sp, &ccb->csio.sense_data,
5290				   min(ccb->csio.sense_len, MPT_SENSE_SIZE));
5291			}
5292			mpt_scsi_tgt_status(mpt, ccb, req,
5293			    ccb->csio.scsi_status, sp);
5294			break;
5295		}
5296		case TGT_STATE_SENDING_STATUS:
5297		case TGT_STATE_MOVING_DATA_AND_STATUS:
5298		{
5299			int ioindex;
5300			ccb = tgt->ccb;
5301
5302			if (tgt->req == NULL) {
5303				panic("mpt: turbo target reply with null "
5304				    "associated request sending status");
5305				/* NOTREACHED */
5306			}
5307
5308			if (ccb) {
5309				tgt->ccb = NULL;
5310				if (tgt->state ==
5311				    TGT_STATE_MOVING_DATA_AND_STATUS) {
5312					tgt->nxfers++;
5313				}
5314				mpt_req_untimeout(req, mpt_timeout, ccb);
5315				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
5316					ccb->ccb_h.status |= CAM_SENT_SENSE;
5317				}
5318				mpt_lprt(mpt, MPT_PRT_DEBUG,
5319				    "TARGET_STATUS tag %x sts %x flgs %x req "
5320				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
5321				    ccb->ccb_h.flags, tgt->req);
5322				/*
5323				 * Free the Target Send Status Request
5324				 */
5325				KASSERT(tgt->req->ccb == ccb,
5326				    ("tgt->req %p:%u tgt->req->ccb %p",
5327				    tgt->req, tgt->req->serno, tgt->req->ccb));
5328				/*
5329				 * Notify CAM that we're done
5330				 */
5331				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
5332				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
5333				KASSERT(ccb->ccb_h.status,
5334				    ("ZERO ccb sts at %d", __LINE__));
5335				tgt->ccb = NULL;
5336			} else {
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_STATUS non-CAM for req %p:%u\n",
5339				    tgt->req, tgt->req->serno);
5340			}
5341			TAILQ_REMOVE(&mpt->request_pending_list,
5342			    tgt->req, links);
5343			mpt_free_request(mpt, tgt->req);
5344			tgt->req = NULL;
5345
5346			/*
5347			 * And re-post the Command Buffer.
5348			 * This will reset the state.
5349			 */
5350			ioindex = GET_IO_INDEX(reply_desc);
5351			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5352			tgt->is_local = 0;
5353			mpt_post_target_command(mpt, req, ioindex);
5354
5355			/*
5356			 * And post a done for anyone who cares
5357			 */
5358			if (ccb) {
5359				if (mpt->outofbeer) {
5360					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
5361					mpt->outofbeer = 0;
5362					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
5363				}
5364				xpt_done(ccb);
5365			}
5366			break;
5367		}
5368		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
5369			tgt->state = TGT_STATE_LOADED;
5370			break;
5371		default:
5372			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
5373			    "Reply Function\n", tgt->state);
5374		}
5375		return (TRUE);
5376	}
5377
5378	status = le16toh(reply_frame->IOCStatus);
5379	if (status != MPI_IOCSTATUS_SUCCESS) {
5380		dbg = MPT_PRT_ERROR;
5381	} else {
5382		dbg = MPT_PRT_DEBUG1;
5383	}
5384
5385	mpt_lprt(mpt, dbg,
5386	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
5387	     req, req->serno, reply_frame, reply_frame->Function, status);
5388
5389	switch (reply_frame->Function) {
5390	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
5391	{
5392		mpt_tgt_state_t *tgt;
5393#ifdef	INVARIANTS
5394		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
5395#endif
5396		if (status != MPI_IOCSTATUS_SUCCESS) {
5397			/*
5398			 * XXX What to do?
5399			 */
5400			break;
5401		}
5402		tgt = MPT_TGT_STATE(mpt, req);
5403		KASSERT(tgt->state == TGT_STATE_LOADING,
5404		    ("bad state 0x%x on reply to buffer post", tgt->state));
5405		mpt_assign_serno(mpt, req);
5406		tgt->state = TGT_STATE_LOADED;
5407		break;
5408	}
5409	case MPI_FUNCTION_TARGET_ASSIST:
5410#ifdef	INVARIANTS
5411		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
5412#endif
5413		mpt_prt(mpt, "target assist completion\n");
5414		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5415		mpt_free_request(mpt, req);
5416		break;
5417	case MPI_FUNCTION_TARGET_STATUS_SEND:
5418#ifdef	INVARIANTS
5419		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
5420#endif
5421		mpt_prt(mpt, "status send completion\n");
5422		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5423		mpt_free_request(mpt, req);
5424		break;
5425	case MPI_FUNCTION_TARGET_MODE_ABORT:
5426	{
5427		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
5428		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
5429		PTR_MSG_TARGET_MODE_ABORT abtp =
5430		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
5431		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
5432#ifdef	INVARIANTS
5433		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
5434#endif
5435		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
5436		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
5437		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5438		mpt_free_request(mpt, req);
5439		break;
5440	}
5441	default:
5442		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
5443		    "0x%x\n", reply_frame->Function);
5444		break;
5445	}
5446	return (TRUE);
5447}
5448