Deleted Added
mpt_cam.c (156797) mpt_cam.c (157117)
1/*-
2 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
3 * FreeBSD Version.
4 *
5 * Copyright (c) 2000, 2001 by Greg Ansley
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions

--- 77 unchanged lines hidden ---

86 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
87 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
88 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
89 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
90 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
91 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
92 */
93#include <sys/cdefs.h>
1/*-
2 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
3 * FreeBSD Version.
4 *
5 * Copyright (c) 2000, 2001 by Greg Ansley
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions

--- 77 unchanged lines hidden ---

86 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
87 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
88 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
89 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
90 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
91 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
92 */
93#include <sys/cdefs.h>
94__FBSDID("$FreeBSD: head/sys/dev/mpt/mpt_cam.c 156797 2006-03-17 04:54:06Z mjacob $");
94__FBSDID("$FreeBSD: head/sys/dev/mpt/mpt_cam.c 157117 2006-03-25 07:08:27Z mjacob $");
95
96#include <dev/mpt/mpt.h>
97#include <dev/mpt/mpt_cam.h>
98#include <dev/mpt/mpt_raid.h>
99
100#include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
101#include "dev/mpt/mpilib/mpi_init.h"
102#include "dev/mpt/mpilib/mpi_targ.h"
95
96#include <dev/mpt/mpt.h>
97#include <dev/mpt/mpt_cam.h>
98#include <dev/mpt/mpt_raid.h>
99
100#include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
101#include "dev/mpt/mpilib/mpi_init.h"
102#include "dev/mpt/mpilib/mpi_targ.h"
103#include "dev/mpt/mpilib/mpi_fc.h"
103
104#include <sys/callout.h>
105#include <sys/kthread.h>
106
107static void mpt_poll(struct cam_sim *);
108static timeout_t mpt_timeout;
109static void mpt_action(struct cam_sim *, union ccb *);
110static int mpt_setwidth(struct mpt_softc *, int, int);
111static int mpt_setsync(struct mpt_softc *, int, int, int);
112static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);
113static mpt_reply_handler_t mpt_scsi_reply_handler;
114static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
104
105#include <sys/callout.h>
106#include <sys/kthread.h>
107
108static void mpt_poll(struct cam_sim *);
109static timeout_t mpt_timeout;
110static void mpt_action(struct cam_sim *, union ccb *);
111static int mpt_setwidth(struct mpt_softc *, int, int);
112static int mpt_setsync(struct mpt_softc *, int, int, int);
113static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);
114static mpt_reply_handler_t mpt_scsi_reply_handler;
115static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
116static mpt_reply_handler_t mpt_fc_els_reply_handler;
117static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
115static int mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
116 MSG_DEFAULT_REPLY *reply_frame);
117static int mpt_bus_reset(struct mpt_softc *, int /*sleep_ok*/);
118static int mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
119 MSG_DEFAULT_REPLY *reply_frame);
120static int mpt_bus_reset(struct mpt_softc *, int /*sleep_ok*/);
121static int mpt_fc_reset_link(struct mpt_softc *, int);
118
119static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
120static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
121static void mpt_recovery_thread(void *arg);
122static int mpt_scsi_send_tmf(struct mpt_softc *, u_int /*type*/,
123 u_int /*flags*/, u_int /*channel*/,
124 u_int /*target*/, u_int /*lun*/,
125 u_int /*abort_ctx*/, int /*sleep_ok*/);
122
123static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
124static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
125static void mpt_recovery_thread(void *arg);
126static int mpt_scsi_send_tmf(struct mpt_softc *, u_int /*type*/,
127 u_int /*flags*/, u_int /*channel*/,
128 u_int /*target*/, u_int /*lun*/,
129 u_int /*abort_ctx*/, int /*sleep_ok*/);
130
131static void mpt_fc_add_els(struct mpt_softc *mpt, request_t *);
132static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
133static void mpt_add_target_commands(struct mpt_softc *mpt);
134static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
135static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
136static void mpt_target_start_io(struct mpt_softc *, union ccb *);
137static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
138static cam_status mpt_abort_target_cmd(struct mpt_softc *, request_t *);
139
126static void mpt_recover_commands(struct mpt_softc *mpt);
127
128static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
129static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
140static void mpt_recover_commands(struct mpt_softc *mpt);
141
142static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
143static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
144static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
130
131static mpt_probe_handler_t mpt_cam_probe;
132static mpt_attach_handler_t mpt_cam_attach;
145
146static mpt_probe_handler_t mpt_cam_probe;
147static mpt_attach_handler_t mpt_cam_attach;
148static mpt_enable_handler_t mpt_cam_enable;
133static mpt_event_handler_t mpt_cam_event;
134static mpt_reset_handler_t mpt_cam_ioc_reset;
135static mpt_detach_handler_t mpt_cam_detach;
136
137static struct mpt_personality mpt_cam_personality =
138{
139 .name = "mpt_cam",
140 .probe = mpt_cam_probe,
141 .attach = mpt_cam_attach,
149static mpt_event_handler_t mpt_cam_event;
150static mpt_reset_handler_t mpt_cam_ioc_reset;
151static mpt_detach_handler_t mpt_cam_detach;
152
153static struct mpt_personality mpt_cam_personality =
154{
155 .name = "mpt_cam",
156 .probe = mpt_cam_probe,
157 .attach = mpt_cam_attach,
158 .enable = mpt_cam_enable,
142 .event = mpt_cam_event,
143 .reset = mpt_cam_ioc_reset,
144 .detach = mpt_cam_detach,
145};
146
147DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
148
149int
150mpt_cam_probe(struct mpt_softc *mpt)
151{
152 /*
159 .event = mpt_cam_event,
160 .reset = mpt_cam_ioc_reset,
161 .detach = mpt_cam_detach,
162};
163
164DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
165
166int
167mpt_cam_probe(struct mpt_softc *mpt)
168{
169 /*
153 * Only attach to nodes that support the initiator
154 * role or have RAID physical devices that need
155 * CAM pass-thru support.
170 * Only attach to nodes that support the initiator or target
171 * role or have RAID physical devices that need CAM pass-thru support.
156 */
157 if ((mpt->mpt_proto_flags & MPI_PORTFACTS_PROTOCOL_INITIATOR) != 0
172 */
173 if ((mpt->mpt_proto_flags & MPI_PORTFACTS_PROTOCOL_INITIATOR) != 0
158 || (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0))
174 || (mpt->mpt_proto_flags & MPI_PORTFACTS_PROTOCOL_TARGET) != 0
175 || (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
159 return (0);
176 return (0);
177 }
160 return (ENODEV);
161}
162
163int
164mpt_cam_attach(struct mpt_softc *mpt)
165{
166 struct cam_devq *devq;
167 mpt_handler_t handler;
168 int maxq;
169 int error;
170
178 return (ENODEV);
179}
180
181int
182mpt_cam_attach(struct mpt_softc *mpt)
183{
184 struct cam_devq *devq;
185 mpt_handler_t handler;
186 int maxq;
187 int error;
188
171 MPTLOCK_2_CAMLOCK(mpt);
172 TAILQ_INIT(&mpt->request_timeout_list);
189 TAILQ_INIT(&mpt->request_timeout_list);
173 mpt->bus = 0;
174 maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt))?
175 mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);
176
177 handler.reply_handler = mpt_scsi_reply_handler;
178 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
179 &scsi_io_handler_id);
190 maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt))?
191 mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);
192
193 handler.reply_handler = mpt_scsi_reply_handler;
194 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
195 &scsi_io_handler_id);
180 if (error != 0)
196 if (error != 0) {
181 goto cleanup;
197 goto cleanup;
198 }
199
182 handler.reply_handler = mpt_scsi_tmf_reply_handler;
183 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
184 &scsi_tmf_handler_id);
200 handler.reply_handler = mpt_scsi_tmf_reply_handler;
201 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
202 &scsi_tmf_handler_id);
185 if (error != 0)
203 if (error != 0) {
186 goto cleanup;
204 goto cleanup;
205 }
187
188 /*
206
207 /*
208 * We keep two requests reserved for ELS replies/responses
209 * if we're Fibre Channel and in target mode.
210 */
211 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
212 request_t *req;
213 int i;
214
215 handler.reply_handler = mpt_fc_els_reply_handler;
216 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
217 &fc_els_handler_id);
218 if (error != 0) {
219 goto cleanup;
220 }
221
222 /*
223 * Feed the chip some ELS buffer resources
224 */
225 for (i = 0; i < MPT_MAX_ELS; i++) {
226 req = mpt_get_request(mpt, FALSE);
227 if (req == NULL) {
228 break;
229 }
230 mpt_fc_add_els(mpt, req);
231 }
232 if (i == 0) {
233 mpt_prt(mpt, "Unable to add ELS buffer resources\n");
234 goto cleanup;
235 }
236 maxq -= i;
237 }
238
239 /*
240 * If we're in target mode, register a reply
241 * handler for it and add some commands.
242 */
243 if ((mpt->role & MPT_ROLE_TARGET) != 0) {
244 handler.reply_handler = mpt_scsi_tgt_reply_handler;
245 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
246 &mpt->scsi_tgt_handler_id);
247 if (error != 0) {
248 goto cleanup;
249 }
250
251 /*
252 * Add some target command resources
253 */
254 mpt_add_target_commands(mpt);
255 }
256
257 /*
189 * We keep one request reserved for timeout TMF requests.
190 */
191 mpt->tmf_req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
192 if (mpt->tmf_req == NULL) {
193 mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
194 error = ENOMEM;
195 goto cleanup;
196 }

--- 50 unchanged lines hidden ---

247 error = ENOMEM;
248 goto cleanup;
249 }
250
251 /*
252 * Only register a second bus for RAID physical
253 * devices if the controller supports RAID.
254 */
258 * We keep one request reserved for timeout TMF requests.
259 */
260 mpt->tmf_req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
261 if (mpt->tmf_req == NULL) {
262 mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
263 error = ENOMEM;
264 goto cleanup;
265 }

--- 50 unchanged lines hidden ---

316 error = ENOMEM;
317 goto cleanup;
318 }
319
320 /*
321 * Only register a second bus for RAID physical
322 * devices if the controller supports RAID.
323 */
255 if (mpt->ioc_page2 == NULL
256 || mpt->ioc_page2->MaxPhysDisks == 0)
324 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
257 return (0);
325 return (0);
326 }
258
259 /*
260 * Create a "bus" to export all hidden disks to CAM.
261 */
262 mpt->phydisk_sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
263 mpt->unit, 1, maxq, devq);
264 if (mpt->phydisk_sim == NULL) {
265 mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");

--- 12 unchanged lines hidden ---

278
279 if (xpt_create_path(&mpt->phydisk_path, NULL,
280 cam_sim_path(mpt->phydisk_sim),
281 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
282 mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
283 error = ENOMEM;
284 goto cleanup;
285 }
327
328 /*
329 * Create a "bus" to export all hidden disks to CAM.
330 */
331 mpt->phydisk_sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
332 mpt->unit, 1, maxq, devq);
333 if (mpt->phydisk_sim == NULL) {
334 mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");

--- 12 unchanged lines hidden ---

347
348 if (xpt_create_path(&mpt->phydisk_path, NULL,
349 cam_sim_path(mpt->phydisk_sim),
350 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
351 mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
352 error = ENOMEM;
353 goto cleanup;
354 }
286
287 CAMLOCK_2_MPTLOCK(mpt);
288 return (0);
289cleanup:
355 return (0);
356cleanup:
290 CAMLOCK_2_MPTLOCK(mpt);
291 mpt_cam_detach(mpt);
292 return (error);
293}
294
357 mpt_cam_detach(mpt);
358 return (error);
359}
360
361/*
362 * Read FC configuration information
363 */
364static int
365mpt_read_config_info_fc(struct mpt_softc *mpt)
366{
367 char *topology = NULL;
368 int rv, speed = 0;
369
370 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
371 0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
372 if (rv) {
373 return (-1);
374 }
375 mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
376 mpt->mpt_fcport_page0.Header.PageVersion,
377 mpt->mpt_fcport_page0.Header.PageLength,
378 mpt->mpt_fcport_page0.Header.PageNumber,
379 mpt->mpt_fcport_page0.Header.PageType);
380
381
382 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
383 sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
384 if (rv) {
385 mpt_prt(mpt, "failed to read FC Port Page 0\n");
386 return (-1);
387 }
388
389 speed = mpt->mpt_fcport_page0.CurrentSpeed;
390
391 switch (mpt->mpt_fcport_page0.Flags &
392 MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
393 case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
394 speed = 0;
395 topology = "<NO LOOP>";
396 break;
397 case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
398 topology = "N-Port";
399 break;
400 case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
401 topology = "NL-Port";
402 break;
403 case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
404 topology = "F-Port";
405 break;
406 case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
407 topology = "FL-Port";
408 break;
409 default:
410 speed = 0;
411 topology = "?";
412 break;
413 }
414
415 mpt_lprt(mpt, MPT_PRT_INFO,
416 "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
417 "Speed %u-Gbit\n", topology,
418 mpt->mpt_fcport_page0.WWNN.High,
419 mpt->mpt_fcport_page0.WWNN.Low,
420 mpt->mpt_fcport_page0.WWPN.High,
421 mpt->mpt_fcport_page0.WWPN.Low,
422 speed);
423
424 return (0);
425}
426
427/*
428 * Set FC configuration information.
429 */
430static int
431mpt_set_initial_config_fc(struct mpt_softc *mpt)
432{
433#if 0
434 CONFIG_PAGE_FC_PORT_1 fc;
435 U32 fl;
436 int r, doit = 0;
437
438 r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
439 &fc.Header, FALSE, 5000);
440 if (r) {
441 return (mpt_fc_reset_link(mpt, 1));
442 }
443
444 r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 0,
445 &fc.Header, sizeof (fc), FALSE, 5000);
446 if (r) {
447 return (mpt_fc_reset_link(mpt, 1));
448 }
449
450 fl = le32toh(fc.Flags);
451 if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
452 fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
453 doit = 1;
454 }
455 if ((fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) &&
456 (mpt->role & MPT_ROLE_INITIATOR) == 0) {
457 fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
458 doit = 1;
459 }
460 if ((fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) &&
461 (mpt->role & MPT_ROLE_TARGET) == 0) {
462 fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
463 doit = 1;
464 }
465 if (doit) {
466 const char *cc;
467
468 mpt_lprt(mpt, MPT_PRT_INFO,
469 "FC Port Page 1: New Flags %x \n", fl);
470 fc.Flags = htole32(fl);
471 r = mpt_write_cfg_page(mpt,
472 MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT, 0, &fc.Header,
473 sizeof(fc), FALSE, 5000);
474 if (r != 0) {
475 cc = "FC PORT PAGE1 UPDATE: FAILED\n";
476 } else {
477 cc = "FC PORT PAGE1 UPDATED: SYSTEM NEEDS RESET\n";
478 }
479 mpt_prt(mpt, cc);
480 }
481#endif
482 return (mpt_fc_reset_link(mpt, 1));
483}
484
485/*
486 * Read SAS configuration information. Nothing to do yet.
487 */
488static int
489mpt_read_config_info_sas(struct mpt_softc *mpt)
490{
491 return (0);
492}
493
494/*
495 * Set SAS configuration information. Nothing to do yet.
496 */
497static int
498mpt_set_initial_config_sas(struct mpt_softc *mpt)
499{
500 return (0);
501}
502
503/*
504 * Read SCSI configuration information
505 */
506static int
507mpt_read_config_info_spi(struct mpt_softc *mpt)
508{
509 int rv, i;
510
511 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0,
512 0, &mpt->mpt_port_page0.Header,
513 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
514 if (rv)
515 return (-1);
516 mpt_lprt(mpt, MPT_PRT_DEBUG,
517 "SPI Port Page 0 Header: %x %x %x %x\n",
518 mpt->mpt_port_page0.Header.PageVersion,
519 mpt->mpt_port_page0.Header.PageLength,
520 mpt->mpt_port_page0.Header.PageNumber,
521 mpt->mpt_port_page0.Header.PageType);
522
523 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1,
524 0, &mpt->mpt_port_page1.Header,
525 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
526 if (rv)
527 return (-1);
528
529 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
530 mpt->mpt_port_page1.Header.PageVersion,
531 mpt->mpt_port_page1.Header.PageLength,
532 mpt->mpt_port_page1.Header.PageNumber,
533 mpt->mpt_port_page1.Header.PageType);
534
535 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2,
536 /*PageAddress*/0, &mpt->mpt_port_page2.Header,
537 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
538 if (rv)
539 return (-1);
540
541 mpt_lprt(mpt, MPT_PRT_DEBUG,
542 "SPI Port Page 2 Header: %x %x %x %x\n",
543 mpt->mpt_port_page2.Header.PageVersion,
544 mpt->mpt_port_page2.Header.PageLength,
545 mpt->mpt_port_page2.Header.PageNumber,
546 mpt->mpt_port_page2.Header.PageType);
547
548 for (i = 0; i < 16; i++) {
549 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
550 0, i, &mpt->mpt_dev_page0[i].Header,
551 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
552 if (rv)
553 return (-1);
554
555 mpt_lprt(mpt, MPT_PRT_DEBUG,
556 "SPI Target %d Device Page 0 Header: %x %x %x %x\n",
557 i, mpt->mpt_dev_page0[i].Header.PageVersion,
558 mpt->mpt_dev_page0[i].Header.PageLength,
559 mpt->mpt_dev_page0[i].Header.PageNumber,
560 mpt->mpt_dev_page0[i].Header.PageType);
561
562 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
563 1, i, &mpt->mpt_dev_page1[i].Header,
564 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
565 if (rv)
566 return (-1);
567
568 mpt_lprt(mpt, MPT_PRT_DEBUG,
569 "SPI Target %d Device Page 1 Header: %x %x %x %x\n",
570 i, mpt->mpt_dev_page1[i].Header.PageVersion,
571 mpt->mpt_dev_page1[i].Header.PageLength,
572 mpt->mpt_dev_page1[i].Header.PageNumber,
573 mpt->mpt_dev_page1[i].Header.PageType);
574 }
575
576 /*
577 * At this point, we don't *have* to fail. As long as we have
578 * valid config header information, we can (barely) lurch
579 * along.
580 */
581
582 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
583 &mpt->mpt_port_page0.Header,
584 sizeof(mpt->mpt_port_page0),
585 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
586 if (rv) {
587 mpt_prt(mpt, "failed to read SPI Port Page 0\n");
588 } else {
589 mpt_lprt(mpt, MPT_PRT_DEBUG,
590 "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
591 mpt->mpt_port_page0.Capabilities,
592 mpt->mpt_port_page0.PhysicalInterface);
593 }
594
595 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
596 &mpt->mpt_port_page1.Header,
597 sizeof(mpt->mpt_port_page1),
598 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
599 if (rv) {
600 mpt_prt(mpt, "failed to read SPI Port Page 1\n");
601 } else {
602 mpt_lprt(mpt, MPT_PRT_DEBUG,
603 "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
604 mpt->mpt_port_page1.Configuration,
605 mpt->mpt_port_page1.OnBusTimerValue);
606 }
607
608 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
609 &mpt->mpt_port_page2.Header,
610 sizeof(mpt->mpt_port_page2),
611 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
612 if (rv) {
613 mpt_prt(mpt, "failed to read SPI Port Page 2\n");
614 } else {
615 mpt_lprt(mpt, MPT_PRT_DEBUG,
616 "SPI Port Page 2: Flags %x Settings %x\n",
617 mpt->mpt_port_page2.PortFlags,
618 mpt->mpt_port_page2.PortSettings);
619 for (i = 0; i < 16; i++) {
620 mpt_lprt(mpt, MPT_PRT_DEBUG,
621 "SPI Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
622 i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
623 mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
624 mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
625 }
626 }
627
628 for (i = 0; i < 16; i++) {
629 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/i,
630 &mpt->mpt_dev_page0[i].Header,
631 sizeof(*mpt->mpt_dev_page0),
632 /*sleep_ok*/FALSE,
633 /*timeout_ms*/5000);
634 if (rv) {
635 mpt_prt(mpt,
636 "cannot read SPI Tgt %d Device Page 0\n", i);
637 continue;
638 }
639 mpt_lprt(mpt, MPT_PRT_DEBUG,
640 "SPI Tgt %d Page 0: NParms %x Information %x",
641 i, mpt->mpt_dev_page0[i].NegotiatedParameters,
642 mpt->mpt_dev_page0[i].Information);
643
644 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/i,
645 &mpt->mpt_dev_page1[i].Header,
646 sizeof(*mpt->mpt_dev_page1),
647 /*sleep_ok*/FALSE,
648 /*timeout_ms*/5000);
649 if (rv) {
650 mpt_prt(mpt,
651 "cannot read SPI Tgt %d Device Page 1\n", i);
652 continue;
653 }
654 mpt_lprt(mpt, MPT_PRT_DEBUG,
655 "SPI Tgt %d Page 1: RParms %x Configuration %x\n",
656 i, mpt->mpt_dev_page1[i].RequestedParameters,
657 mpt->mpt_dev_page1[i].Configuration);
658 }
659 return (0);
660}
661
662/*
663 * Validate SPI configuration information.
664 *
665 * In particular, validate SPI Port Page 1.
666 */
667static int
668mpt_set_initial_config_spi(struct mpt_softc *mpt)
669{
670 int i, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id;
671 int error;
672
673 mpt->mpt_disc_enable = 0xff;
674 mpt->mpt_tag_enable = 0;
675
676 if (mpt->mpt_port_page1.Configuration != pp1val) {
677 CONFIG_PAGE_SCSI_PORT_1 tmp;
678
679 mpt_prt(mpt,
680 "SPI Port Page 1 Config value bad (%x)- should be %x\n",
681 mpt->mpt_port_page1.Configuration, pp1val);
682 tmp = mpt->mpt_port_page1;
683 tmp.Configuration = pp1val;
684 error = mpt_write_cur_cfg_page(mpt, /*PageAddress*/0,
685 &tmp.Header, sizeof(tmp),
686 /*sleep_ok*/FALSE,
687 /*timeout_ms*/5000);
688 if (error)
689 return (-1);
690 error = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
691 &tmp.Header, sizeof(tmp),
692 /*sleep_ok*/FALSE,
693 /*timeout_ms*/5000);
694 if (error)
695 return (-1);
696 if (tmp.Configuration != pp1val) {
697 mpt_prt(mpt,
698 "failed to reset SPI Port Page 1 Config value\n");
699 return (-1);
700 }
701 mpt->mpt_port_page1 = tmp;
702 }
703
704 for (i = 0; i < 16; i++) {
705 CONFIG_PAGE_SCSI_DEVICE_1 tmp;
706 tmp = mpt->mpt_dev_page1[i];
707 tmp.RequestedParameters = 0;
708 tmp.Configuration = 0;
709 mpt_lprt(mpt, MPT_PRT_DEBUG,
710 "Set Tgt %d SPI DevicePage 1 values to %x 0 %x\n",
711 i, tmp.RequestedParameters, tmp.Configuration);
712 error = mpt_write_cur_cfg_page(mpt, /*PageAddress*/i,
713 &tmp.Header, sizeof(tmp),
714 /*sleep_ok*/FALSE,
715 /*timeout_ms*/5000);
716 if (error)
717 return (-1);
718 error = mpt_read_cur_cfg_page(mpt, /*PageAddress*/i,
719 &tmp.Header, sizeof(tmp),
720 /*sleep_ok*/FALSE,
721 /*timeout_ms*/5000);
722 if (error)
723 return (-1);
724 mpt->mpt_dev_page1[i] = tmp;
725 mpt_lprt(mpt, MPT_PRT_DEBUG,
726 "SPI Tgt %d Page 1: RParm %x Configuration %x\n", i,
727 mpt->mpt_dev_page1[i].RequestedParameters,
728 mpt->mpt_dev_page1[i].Configuration);
729 }
730 return (0);
731}
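
The pp1val test above packs the initiator ID into the Port Page 1 Configuration word twice over: a one-hot ID bitmask in the upper 16 bits and the raw ID in the low bits. A worked example, assuming a hypothetical initiator ID of 7:

/*
 * pp1val = ((1 << 7) << 16) | 7
 *        = (0x0080 << 16) | 0x0007
 *        = 0x00800007
 * i.e. bit (16 + 7) set for the ID bitmask, plus the ID in the low bits.
 */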
732
733int
734mpt_cam_enable(struct mpt_softc *mpt)
735{
736 if (mpt->is_fc) {
737 if (mpt_read_config_info_fc(mpt)) {
738 return (EIO);
739 }
740 if (mpt_set_initial_config_fc(mpt)) {
741 return (EIO);
742 }
743 } else if (mpt->is_sas) {
744 if (mpt_read_config_info_sas(mpt)) {
745 return (EIO);
746 }
747 if (mpt_set_initial_config_sas(mpt)) {
748 return (EIO);
749 }
750 } else {
751 if (mpt_read_config_info_spi(mpt)) {
752 return (EIO);
753 }
754 if (mpt_set_initial_config_spi(mpt)) {
755 return (EIO);
756 }
757 }
758 return (0);
759}
760
295void
296mpt_cam_detach(struct mpt_softc *mpt)
297{
298 mpt_handler_t handler;
299
300 mpt_terminate_recovery_thread(mpt);
301
302 handler.reply_handler = mpt_scsi_reply_handler;
303 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
304 scsi_io_handler_id);
305 handler.reply_handler = mpt_scsi_tmf_reply_handler;
306 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
307 scsi_tmf_handler_id);
761void
762mpt_cam_detach(struct mpt_softc *mpt)
763{
764 mpt_handler_t handler;
765
766 mpt_terminate_recovery_thread(mpt);
767
768 handler.reply_handler = mpt_scsi_reply_handler;
769 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
770 scsi_io_handler_id);
771 handler.reply_handler = mpt_scsi_tmf_reply_handler;
772 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
773 scsi_tmf_handler_id);
774 handler.reply_handler = mpt_fc_els_reply_handler;
775 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
776 fc_els_handler_id);
777 handler.reply_handler = mpt_scsi_tgt_reply_handler;
778 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
779 mpt->scsi_tgt_handler_id);
308
309 if (mpt->tmf_req != NULL) {
310 mpt_free_request(mpt, mpt->tmf_req);
311 mpt->tmf_req = NULL;
312 }
313
314 if (mpt->sim != NULL) {
315 xpt_free_path(mpt->path);

--- 5 unchanged lines hidden ---

321 if (mpt->phydisk_sim != NULL) {
322 xpt_free_path(mpt->phydisk_path);
323 xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
324 cam_sim_free(mpt->phydisk_sim, TRUE);
325 mpt->phydisk_sim = NULL;
326 }
327}
328
780
781 if (mpt->tmf_req != NULL) {
782 mpt_free_request(mpt, mpt->tmf_req);
783 mpt->tmf_req = NULL;
784 }
785
786 if (mpt->sim != NULL) {
787 xpt_free_path(mpt->path);

--- 5 unchanged lines hidden ---

793 if (mpt->phydisk_sim != NULL) {
794 xpt_free_path(mpt->phydisk_path);
795 xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
796 cam_sim_free(mpt->phydisk_sim, TRUE);
797 mpt->phydisk_sim = NULL;
798 }
799}
800
329/* This routine is used after a system crash to dump core onto the
330 * swap device.
801/* This routine is used after a system crash to dump core onto the swap device.
331 */
332static void
333mpt_poll(struct cam_sim *sim)
334{
335 struct mpt_softc *mpt;
336
337 mpt = (struct mpt_softc *)cam_sim_softc(sim);
338 MPT_LOCK(mpt);

--- 17 unchanged lines hidden ---

356 if (mpt == NULL)
357 return;
358#else
359 mpt = ccb->ccb_h.ccb_mpt_ptr;
360#endif
361
362 MPT_LOCK(mpt);
363 req = ccb->ccb_h.ccb_req_ptr;
802 */
803static void
804mpt_poll(struct cam_sim *sim)
805{
806 struct mpt_softc *mpt;
807
808 mpt = (struct mpt_softc *)cam_sim_softc(sim);
809 MPT_LOCK(mpt);

--- 17 unchanged lines hidden ---

827 if (mpt == NULL)
828 return;
829#else
830 mpt = ccb->ccb_h.ccb_mpt_ptr;
831#endif
832
833 MPT_LOCK(mpt);
834 req = ccb->ccb_h.ccb_req_ptr;
364 mpt_prt(mpt, "Request %p:serno Timed out.\n", req, req->serno);
835 mpt_prt(mpt, "Request %p Timed out.\n", req);
365 if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
366 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
367 TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
368 req->state |= REQ_STATE_TIMEDOUT;
369 mpt_wakeup_recovery_thread(mpt);
370 }
371 MPT_UNLOCK(mpt);
372}
373
374/*
375 * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
376 *
377 * Takes a list of physical segments and builds the SGL for the SCSI IO
378 * command, then forwards the command to the IOC after one last check that
379 * CAM has not aborted the transaction.
380 */
381static void
836 if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
837 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
838 TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
839 req->state |= REQ_STATE_TIMEDOUT;
840 mpt_wakeup_recovery_thread(mpt);
841 }
842 MPT_UNLOCK(mpt);
843}
844
845/*
846 * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
847 *
848 * Takes a list of physical segments and builds the SGL for the SCSI IO
849 * command, then forwards the command to the IOC after one last check that
850 * CAM has not aborted the transaction.
851 */
852static void
382mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
853mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
383{
384 request_t *req, *trq;
385 char *mpt_off;
386 union ccb *ccb;
387 struct mpt_softc *mpt;
388 int seg, first_lim;
389 uint32_t flags, nxt_off;
854{
855 request_t *req, *trq;
856 char *mpt_off;
857 union ccb *ccb;
858 struct mpt_softc *mpt;
859 int seg, first_lim;
860 uint32_t flags, nxt_off;
390 bus_dmasync_op_t op;
391 MSG_SCSI_IO_REQUEST *mpt_req;
861 void *sglp;
862 MSG_REQUEST_HEADER *hdrp;
392 SGE_SIMPLE64 *se;
393 SGE_CHAIN64 *ce;
394
395 req = (request_t *)arg;
396 ccb = req->ccb;
397
398 mpt = ccb->ccb_h.ccb_mpt_ptr;
399 req = ccb->ccb_h.ccb_req_ptr;
863 SGE_SIMPLE64 *se;
864 SGE_CHAIN64 *ce;
865
866 req = (request_t *)arg;
867 ccb = req->ccb;
868
869 mpt = ccb->ccb_h.ccb_mpt_ptr;
870 req = ccb->ccb_h.ccb_req_ptr;
400 mpt_req = req->req_vbuf;
871
872 hdrp = req->req_vbuf;
401 mpt_off = req->req_vbuf;
402
873 mpt_off = req->req_vbuf;
874
875 if (hdrp->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
876 sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
877 } else /* if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) */ {
878 sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
879 }
880
881
403 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
404 error = EFBIG;
405 }
406
407bad:
408 if (error != 0) {
882 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
883 error = EFBIG;
884 }
885
886bad:
887 if (error != 0) {
409 if (error != EFBIG && error != ENOMEM)
888 if (error != EFBIG && error != ENOMEM) {
410 mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
889 mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
411 if (ccb->ccb_h.status == CAM_REQ_INPROG) {
412 xpt_freeze_devq(ccb->ccb_h.path, 1);
413 ccb->ccb_h.status = CAM_DEV_QFRZN;
890 }
891 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
892 cam_status status;
893 mpt_freeze_ccb(ccb);
414 if (error == EFBIG) {
894 if (error == EFBIG) {
415 ccb->ccb_h.status |= CAM_REQ_TOO_BIG;
895 status = CAM_REQ_TOO_BIG;
416 } else if (error == ENOMEM) {
417 if (mpt->outofbeer == 0) {
418 mpt->outofbeer = 1;
419 xpt_freeze_simq(mpt->sim, 1);
420 mpt_lprt(mpt, MPT_PRT_DEBUG,
421 "FREEZEQ\n");
422 }
896 } else if (error == ENOMEM) {
897 if (mpt->outofbeer == 0) {
898 mpt->outofbeer = 1;
899 xpt_freeze_simq(mpt->sim, 1);
900 mpt_lprt(mpt, MPT_PRT_DEBUG,
901 "FREEZEQ\n");
902 }
423 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
424 } else
425 ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
903 status = CAM_REQUEUE_REQ;
904 } else {
905 status = CAM_REQ_CMP_ERR;
906 }
907 mpt_set_ccb_status(ccb, status);
426 }
908 }
909 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
910 request_t *cmd_req =
911 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
912 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
913 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
914 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
915 }
427 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
916 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
917 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
428 xpt_done(ccb);
429 CAMLOCK_2_MPTLOCK(mpt);
430 mpt_free_request(mpt, req);
431 MPTLOCK_2_CAMLOCK(mpt);
432 return;
433 }
434
435 /*
436 * No data to transfer?
437 * Just make a single simple SGL with zero length.
438 */
439
440 if (mpt->verbose >= MPT_PRT_DEBUG) {
918 xpt_done(ccb);
919 CAMLOCK_2_MPTLOCK(mpt);
920 mpt_free_request(mpt, req);
921 MPTLOCK_2_CAMLOCK(mpt);
922 return;
923 }
924
925 /*
926 * No data to transfer?
927 * Just make a single simple SGL with zero length.
928 */
929
930 if (mpt->verbose >= MPT_PRT_DEBUG) {
441 int tidx = ((char *)&mpt_req->SGL) - mpt_off;
931 int tidx = ((char *)sglp) - mpt_off;
442 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
443 }
444
445 if (nseg == 0) {
932 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
933 }
934
935 if (nseg == 0) {
446 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) &mpt_req->SGL;
936 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
447 MPI_pSGE_SET_FLAGS(se1,
448 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
449 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
450 goto out;
451 }
452
937 MPI_pSGE_SET_FLAGS(se1,
938 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
939 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
940 goto out;
941 }
942
453 mpt_req->DataLength = ccb->csio.dxfer_len;
454 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
455 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
456 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
457
943
458 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
459 op = BUS_DMASYNC_PREREAD;
944 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
945 if (hdrp->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
946 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
947 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
948 }
460 } else {
949 } else {
461 op = BUS_DMASYNC_PREWRITE;
950 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
951 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
952 }
462 }
953 }
954
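/*
 * Note the mirrored test in the else (TARGET_ASSIST) branch above: in
 * target mode CAM_DIR_IN means data flows toward the remote initiator,
 * which is host-to-IOC from this adapter's side, so HOST_TO_IOC is set
 * on CAM_DIR_IN rather than CAM_DIR_OUT.
 */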
463 if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
955 if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
956 bus_dmasync_op_t op;
957 if (hdrp->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
958 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
959 op = BUS_DMASYNC_PREREAD;
960 } else {
961 op = BUS_DMASYNC_PREWRITE;
962 }
963 } else {
964 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
965 op = BUS_DMASYNC_PREWRITE;
966 } else {
967 op = BUS_DMASYNC_PREREAD;
968 }
969 }
464 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
465 }
466
467 /*
468 * Okay, fill in what we can at the end of the command frame.
469 * If we have up to MPT_NSGL_FIRST, we can fit them all into
470 * the command frame.
471 *

--- 6 unchanged lines hidden ---

478 first_lim = nseg;
479 } else {
480 /*
481 * Leave room for CHAIN element
482 */
483 first_lim = MPT_NSGL_FIRST(mpt) - 1;
484 }
485
970 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
971 }
972
973 /*
974 * Okay, fill in what we can at the end of the command frame.
975 * If we have up to MPT_NSGL_FIRST, we can fit them all into
976 * the command frame.
977 *

--- 6 unchanged lines hidden ---

984 first_lim = nseg;
985 } else {
986 /*
987 * Leave room for CHAIN element
988 */
989 first_lim = MPT_NSGL_FIRST(mpt) - 1;
990 }
991
486 se = (SGE_SIMPLE64 *) &mpt_req->SGL;
992 se = (SGE_SIMPLE64 *) sglp;
487 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
488 uint32_t tf;
489
490 bzero(se, sizeof (*se));
491 se->Address.Low = dm_segs->ds_addr;
492 if (sizeof(bus_addr_t) > 4) {
493 se->Address.High = ((uint64_t) dm_segs->ds_addr) >> 32;
494 }

--- 11 unchanged lines hidden ---

506
507 if (seg == nseg) {
508 goto out;
509 }
510
511 /*
512 * Tell the IOC where to find the first chain element.
513 */
993 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
994 uint32_t tf;
995
996 bzero(se, sizeof (*se));
997 se->Address.Low = dm_segs->ds_addr;
998 if (sizeof(bus_addr_t) > 4) {
999 se->Address.High = ((uint64_t) dm_segs->ds_addr) >> 32;
1000 }

--- 11 unchanged lines hidden (view full) ---

1012
1013 if (seg == nseg) {
1014 goto out;
1015 }
1016
1017 /*
1018 * Tell the IOC where to find the first chain element.
1019 */
514 mpt_req->ChainOffset = ((char *)se - (char *)mpt_req) >> 2;
1020 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
515 nxt_off = MPT_RQSL(mpt);
516 trq = req;
517
518 /*
519 * Make up the rest of the data segments out of a chain element
520 * (contained in the current request frame) which points to
521 * SIMPLE64 elements in the next request frame, possibly ending
522 * with *another* chain element (if there's more).

--- 122 unchanged lines hidden ---

645 } else {
646 while (trq->chain != NULL) {
647 trq = trq->chain;
648 }
649 trq->chain = nrq;
650 }
651 trq = nrq;
652 mpt_off = trq->req_vbuf;
1021 nxt_off = MPT_RQSL(mpt);
1022 trq = req;
1023
1024 /*
1025 * Make up the rest of the data segments out of a chain element
1026 * (contained in the current request frame) which points to
1027 * SIMPLE64 elements in the next request frame, possibly ending
1028 * with *another* chain element (if there's more).

--- 122 unchanged lines hidden ---

1151 } else {
1152 while (trq->chain != NULL) {
1153 trq = trq->chain;
1154 }
1155 trq->chain = nrq;
1156 }
1157 trq = nrq;
1158 mpt_off = trq->req_vbuf;
653 mpt_req = trq->req_vbuf;
654 if (mpt->verbose >= MPT_PRT_DEBUG) {
655 memset(mpt_off, 0xff, MPT_REQUEST_AREA);
656 }
657 nxt_off = 0;
658 }
659 }
660out:
661
662 /*
663 * One last check whether this CCB needs to be aborted.
664 */
1159 if (mpt->verbose >= MPT_PRT_DEBUG) {
1160 memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1161 }
1162 nxt_off = 0;
1163 }
1164 }
1165out:
1166
1167 /*
1168 * One last check whether this CCB needs to be aborted.
1169 */
665 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
666 if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0)
1170 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1171 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1172 request_t *cmd_req =
1173 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1174 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1175 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1176 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1177 }
1178 mpt_prt(mpt,
1179 "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
1180 ccb->ccb_h.status & CAM_STATUS_MASK);
1181 if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
667 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1182 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1183 }
1184 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1185 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1186 xpt_done(ccb);
668 CAMLOCK_2_MPTLOCK(mpt);
669 mpt_free_request(mpt, req);
670 MPTLOCK_2_CAMLOCK(mpt);
1187 CAMLOCK_2_MPTLOCK(mpt);
1188 mpt_free_request(mpt, req);
1189 MPTLOCK_2_CAMLOCK(mpt);
671 xpt_done(ccb);
672 return;
673 }
674
675 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1190 return;
1191 }
1192
1193 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1194 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1195 ccb->ccb_h.timeout_ch =
1196 timeout(mpt_timeout, (caddr_t)ccb,
1197 (ccb->ccb_h.timeout * hz) / 1000);
1198 } else {
1199 callout_handle_init(&ccb->ccb_h.timeout_ch);
1200 }
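/*
 * (timeout * hz) / 1000 converts the CCB timeout from milliseconds to
 * clock ticks; e.g. a 30000ms timeout on a hz=100 kernel arms a
 * 3000-tick callout (hz value illustrative).
 */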
1201 if (mpt->verbose >= MPT_PRT_DEBUG) {
1202 int nc = 0;
1203 mpt_print_request(req->req_vbuf);
1204 for (trq = req->chain; trq; trq = trq->chain) {
1205 printf(" Additional Chain Area %d\n", nc++);
1206 mpt_dump_sgl(trq->req_vbuf, 0);
1207 }
1208 }
1209 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1210 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1211 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1212#ifdef WE_TRUST_AUTO_GOOD_STATUS
1213 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1214 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1215 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1216 } else {
1217 tgt->state = TGT_STATE_MOVING_DATA;
1218 }
1219#else
1220 tgt->state = TGT_STATE_MOVING_DATA;
1221#endif
1222 }
676 CAMLOCK_2_MPTLOCK(mpt);
1223 CAMLOCK_2_MPTLOCK(mpt);
1224 mpt_send_cmd(mpt, req);
1225 MPTLOCK_2_CAMLOCK(mpt);
1226}
1227
1228static void
1229mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1230{
1231 request_t *req, *trq;
1232 char *mpt_off;
1233 union ccb *ccb;
1234 struct mpt_softc *mpt;
1235 int seg, first_lim;
1236 uint32_t flags, nxt_off;
1237 void *sglp;
1238 MSG_REQUEST_HEADER *hdrp;
1239 SGE_SIMPLE32 *se;
1240 SGE_CHAIN32 *ce;
1241
1242 req = (request_t *)arg;
1243 ccb = req->ccb;
1244
1245 mpt = ccb->ccb_h.ccb_mpt_ptr;
1246 req = ccb->ccb_h.ccb_req_ptr;
1247
1248 hdrp = req->req_vbuf;
1249 mpt_off = req->req_vbuf;
1250
1251
1252 if (hdrp->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
1253 sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1254 } else /* if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) */ {
1255 sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1256 }
1257
1258
1259 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1260 error = EFBIG;
1261 mpt_prt(mpt, "segment count %d too large (max %u)\n",
1262 nseg, mpt->max_seg_cnt);
1263 }
1264
1265bad:
1266 if (error != 0) {
1267 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1268 request_t *cmd_req =
1269 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1270 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1271 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1272 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1273 }
1274 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1275 cam_status status;
1276 mpt_freeze_ccb(ccb);
1277 if (error == EFBIG) {
1278 status = CAM_REQ_TOO_BIG;
1279 } else if (error == ENOMEM) {
1280 if (mpt->outofbeer == 0) {
1281 mpt->outofbeer = 1;
1282 xpt_freeze_simq(mpt->sim, 1);
1283 mpt_lprt(mpt, MPT_PRT_DEBUG,
1284 "FREEZEQ\n");
1285 }
1286 status = CAM_REQUEUE_REQ;
1287 } else {
1288 status = CAM_REQ_CMP_ERR;
1289 }
1290 mpt_set_ccb_status(ccb, status);
1291 }
1292 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1293 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1294 xpt_done(ccb);
1295 CAMLOCK_2_MPTLOCK(mpt);
1296 mpt_free_request(mpt, req);
1297 MPTLOCK_2_CAMLOCK(mpt);
1298 return;
1299 }
1300
1301 /*
1302 * No data to transfer?
1303 * Just make a single simple SGL with zero length.
1304 */
1305
1306 if (mpt->verbose >= MPT_PRT_DEBUG) {
1307 int tidx = ((char *)sglp) - mpt_off;
1308 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1309 }
1310
1311 if (nseg == 0) {
1312 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1313 MPI_pSGE_SET_FLAGS(se1,
1314 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1315 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1316 goto out;
1317 }
1318
1319
1320 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1321 if (hdrp->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
1322 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1323 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1324 }
1325 } else {
1326 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1327 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1328 }
1329 }
1330
1331 if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1332 bus_dmasync_op_t op;
1333 if (hdrp->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
1334 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1335 op = BUS_DMASYNC_PREREAD;
1336 } else {
1337 op = BUS_DMASYNC_PREWRITE;
1338 }
1339 } else {
1340 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1341 op = BUS_DMASYNC_PREWRITE;
1342 } else {
1343 op = BUS_DMASYNC_PREREAD;
1344 }
1345 }
1346 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1347 }
1348
1349 /*
1350 * Okay, fill in what we can at the end of the command frame.
1351 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1352 * the command frame.
1353 *
1354 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1355 * SIMPLE32 pointers and start doing CHAIN32 entries after
1356 * that.
1357 */
1358
1359 if (nseg < MPT_NSGL_FIRST(mpt)) {
1360 first_lim = nseg;
1361 } else {
1362 /*
1363 * Leave room for CHAIN element
1364 */
1365 first_lim = MPT_NSGL_FIRST(mpt) - 1;
1366 }
1367
1368 se = (SGE_SIMPLE32 *) sglp;
1369 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1370 uint32_t tf;
1371
1372 bzero(se, sizeof (*se));
1373 se->Address = dm_segs->ds_addr;
1374 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1375 tf = flags;
1376 if (seg == first_lim - 1) {
1377 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1378 }
1379 if (seg == nseg - 1) {
1380 tf |= MPI_SGE_FLAGS_END_OF_LIST |
1381 MPI_SGE_FLAGS_END_OF_BUFFER;
1382 }
1383 MPI_pSGE_SET_FLAGS(se, tf);
1384 }
1385
1386 if (seg == nseg) {
1387 goto out;
1388 }
1389
1390 /*
1391 * Tell the IOC where to find the first chain element.
1392 */
1393 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
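/* ChainOffset is counted in 32-bit words, hence the >> 2. */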
1394 nxt_off = MPT_RQSL(mpt);
1395 trq = req;
1396
1397 /*
1398 * Make up the rest of the data segments out of a chain element
1399 * (contained in the current request frame) which points to
1400 * SIMPLE32 elements in the next request frame, possibly ending
1401 * with *another* chain element (if there's more).
1402 */
1403 while (seg < nseg) {
1404 int this_seg_lim;
1405 uint32_t tf, cur_off;
1406 bus_addr_t chain_list_addr;
1407
1408 /*
1409 * Point to the chain descriptor. Note that the chain
1410 * descriptor is at the end of the *previous* list (whether
1411 * chain or simple).
1412 */
1413 ce = (SGE_CHAIN32 *) se;
1414
1415 /*
1416 * Before we change our current pointer, make sure we won't
1417 * overflow the request area with this frame. Note that we
1418 * test against 'greater than' here as it's okay in this case
1419 * to have next offset be just outside the request area.
1420 */
1421 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1422 nxt_off = MPT_REQUEST_AREA;
1423 goto next_chain;
1424 }
1425
1426 /*
1427 * Set our SGE element pointer to the beginning of the chain
1428 * list and update our next chain list offset.
1429 */
1430 se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1431 cur_off = nxt_off;
1432 nxt_off += MPT_RQSL(mpt);
1433
1434 /*
1435 * Now initialize the chain descriptor.
1436 */
1437 bzero(ce, sizeof (SGE_CHAIN32));
1438
1439 /*
1440 * Get the physical address of the chain list.
1441 */
1442 chain_list_addr = trq->req_pbuf;
1443 chain_list_addr += cur_off;
1444 ce->Address = chain_list_addr;
1445 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1446
1447 /*
1448 * If we have more than a frame's worth of segments left,
1449 * set up the chain list to have the last element be another
1450 * chain descriptor.
1451 */
1452 if ((nseg - seg) > MPT_NSGL(mpt)) {
1453 this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1454 /*
1455 * The length of the chain is the length in bytes of the
1456 * number of segments plus the next chain element.
1457 *
1458 * The next chain descriptor offset is the length,
1459 * in words, of the number of segments.
1460 */
1461 ce->Length = (this_seg_lim - seg) *
1462 sizeof (SGE_SIMPLE32);
1463 ce->NextChainOffset = ce->Length >> 2;
1464 ce->Length += sizeof (SGE_CHAIN32);
1465 } else {
1466 this_seg_lim = nseg;
1467 ce->Length = (this_seg_lim - seg) *
1468 sizeof (SGE_SIMPLE32);
1469 }
1470
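/*
 * Worked example, assuming the usual MPI element sizes of 8 bytes for
 * both SGE_SIMPLE32 and SGE_CHAIN32: with 7 segments in this chain
 * list ahead of a further chain element, Length = 7 * 8 = 56 bytes,
 * NextChainOffset = 56 >> 2 = 14 words, and the final Length becomes
 * 56 + 8 = 64 bytes including the trailing chain element.
 */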
1471 /*
1472 * Fill in the chain list SGE elements with our segment data.
1473 *
1474 * If we're the last element in this chain list, set the last
1475 * element flag. If we're the completely last element period,
1476 * set the end of list and end of buffer flags.
1477 */
1478 while (seg < this_seg_lim) {
1479 bzero(se, sizeof (*se));
1480 se->Address = dm_segs->ds_addr;
1481 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1482 tf = flags;
1483 if (seg == this_seg_lim - 1) {
1484 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1485 }
1486 if (seg == nseg - 1) {
1487 tf |= MPI_SGE_FLAGS_END_OF_LIST |
1488 MPI_SGE_FLAGS_END_OF_BUFFER;
1489 }
1490 MPI_pSGE_SET_FLAGS(se, tf);
1491 se++;
1492 seg++;
1493 dm_segs++;
1494 }
1495
1496 next_chain:
1497 /*
1498 * If we have more segments to do and we've used up all of
1499 * the space in a request area, go allocate another one
1500 * and chain to that.
1501 */
1502 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1503 request_t *nrq = mpt_get_request(mpt, FALSE);
1504
1505 if (nrq == NULL) {
1506 error = ENOMEM;
1507 goto bad;
1508 }
1509
1510 /*
1511 * Append the new request area on the tail of our list.
1512 */
1513 if ((trq = req->chain) == NULL) {
1514 req->chain = nrq;
1515 } else {
1516 while (trq->chain != NULL) {
1517 trq = trq->chain;
1518 }
1519 trq->chain = nrq;
1520 }
1521 trq = nrq;
1522 mpt_off = trq->req_vbuf;
1523 if (mpt->verbose >= MPT_PRT_DEBUG) {
1524 memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1525 }
1526 nxt_off = 0;
1527 }
1528 }
1529out:
1530
1531 /*
1532 * One last check whether this CCB needs to be aborted.
1533 */
1534 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1535 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1536 request_t *cmd_req =
1537 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1538 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1539 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1540 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1541 }
1542 mpt_prt(mpt, "mpt_execute_req: I/O cancelled (status 0x%x)\n",
1543 ccb->ccb_h.status & CAM_STATUS_MASK);
1544 if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1545 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1546 }
1547 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1548 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1549 xpt_done(ccb);
1550 CAMLOCK_2_MPTLOCK(mpt);
1551 mpt_free_request(mpt, req);
1552 MPTLOCK_2_CAMLOCK(mpt);
1553 return;
1554 }
1555
1556 ccb->ccb_h.status |= CAM_SIM_QUEUED;
677 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
678 ccb->ccb_h.timeout_ch =
679 timeout(mpt_timeout, (caddr_t)ccb,
680 (ccb->ccb_h.timeout * hz) / 1000);
681 } else {
682 callout_handle_init(&ccb->ccb_h.timeout_ch);
683 }
684 if (mpt->verbose >= MPT_PRT_DEBUG) {
685 int nc = 0;
1557 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1558 ccb->ccb_h.timeout_ch =
1559 timeout(mpt_timeout, (caddr_t)ccb,
1560 (ccb->ccb_h.timeout * hz) / 1000);
1561 } else {
1562 callout_handle_init(&ccb->ccb_h.timeout_ch);
1563 }
1564 if (mpt->verbose >= MPT_PRT_DEBUG) {
1565 int nc = 0;
686 mpt_print_scsi_io_request(req->req_vbuf);
1566 mpt_print_request(req->req_vbuf);
687 for (trq = req->chain; trq; trq = trq->chain) {
688 printf(" Additional Chain Area %d\n", nc++);
689 mpt_dump_sgl(trq->req_vbuf, 0);
690 }
691 }
1567 for (trq = req->chain; trq; trq = trq->chain) {
1568 printf(" Additional Chain Area %d\n", nc++);
1569 mpt_dump_sgl(trq->req_vbuf, 0);
1570 }
1571 }
1572 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1573 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1574 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1575#ifdef WE_TRUST_AUTO_GOOD_STATUS
1576 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1577 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1578 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1579 } else {
1580 tgt->state = TGT_STATE_MOVING_DATA;
1581 }
1582#else
1583 tgt->state = TGT_STATE_MOVING_DATA;
1584#endif
1585 }
1586 CAMLOCK_2_MPTLOCK(mpt);
692 mpt_send_cmd(mpt, req);
693 MPTLOCK_2_CAMLOCK(mpt);
694}
695
696static void
697mpt_start(struct cam_sim *sim, union ccb *ccb)
698{
699 request_t *req;
700 struct mpt_softc *mpt;
701 MSG_SCSI_IO_REQUEST *mpt_req;
702 struct ccb_scsiio *csio = &ccb->csio;
703 struct ccb_hdr *ccbh = &ccb->ccb_h;
1587 mpt_send_cmd(mpt, req);
1588 MPTLOCK_2_CAMLOCK(mpt);
1589}
1590
1591static void
1592mpt_start(struct cam_sim *sim, union ccb *ccb)
1593{
1594 request_t *req;
1595 struct mpt_softc *mpt;
1596 MSG_SCSI_IO_REQUEST *mpt_req;
1597 struct ccb_scsiio *csio = &ccb->csio;
1598 struct ccb_hdr *ccbh = &ccb->ccb_h;
1599 bus_dmamap_callback_t *cb;
704 int raid_passthru;
705
706 /* Get the pointer for the physical adapter */
707 mpt = ccb->ccb_h.ccb_mpt_ptr;
708 raid_passthru = (sim == mpt->phydisk_sim);
709
710 CAMLOCK_2_MPTLOCK(mpt);
711 /* Get a request structure off the free list */
712 if ((req = mpt_get_request(mpt, /*sleep_ok*/FALSE)) == NULL) {
713 if (mpt->outofbeer == 0) {
714 mpt->outofbeer = 1;
715 xpt_freeze_simq(mpt->sim, 1);
716 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
717 }
1600 int raid_passthru;
1601
1602 /* Get the pointer for the physical adapter */
1603 mpt = ccb->ccb_h.ccb_mpt_ptr;
1604 raid_passthru = (sim == mpt->phydisk_sim);
1605
1606 CAMLOCK_2_MPTLOCK(mpt);
1607 /* Get a request structure off the free list */
1608 if ((req = mpt_get_request(mpt, /*sleep_ok*/FALSE)) == NULL) {
1609 if (mpt->outofbeer == 0) {
1610 mpt->outofbeer = 1;
1611 xpt_freeze_simq(mpt->sim, 1);
1612 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
1613 }
1614 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1615 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
718 MPTLOCK_2_CAMLOCK(mpt);
1616 MPTLOCK_2_CAMLOCK(mpt);
719 ccb->ccb_h.status = CAM_REQUEUE_REQ;
720 xpt_done(ccb);
721 return;
722 }
1617 xpt_done(ccb);
1618 return;
1619 }
723
724 MPTLOCK_2_CAMLOCK(mpt);
725
1620 MPTLOCK_2_CAMLOCK(mpt);
1621
1622 if (sizeof (bus_addr_t) > 4) {
1623 cb = mpt_execute_req_a64;
1624 } else {
1625 cb = mpt_execute_req;
1626 }
1627
726#if 0
727 COWWWWW
728 if (raid_passthru) {
729 status = mpt_raid_quiesce_disk(mpt, mpt->raid_disks + ccb->ccb_h.target_id,
730 request_t *req)
731 }
732#endif
733

--- 4 unchanged lines hidden ---

738 req->ccb = ccb;
739 ccb->ccb_h.ccb_req_ptr = req;
740
741 /* Now we build the command for the IOC */
742 mpt_req = req->req_vbuf;
743 bzero(mpt_req, sizeof *mpt_req);
744
745 mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
1628#if 0
1629 COWWWWW
1630 if (raid_passthru) {
1631 status = mpt_raid_quiesce_disk(mpt, mpt->raid_disks + ccb->ccb_h.target_id,
1632 request_t *req)
1633 }
1634#endif
1635

--- 4 unchanged lines hidden ---

1640 req->ccb = ccb;
1641 ccb->ccb_h.ccb_req_ptr = req;
1642
1643 /* Now we build the command for the IOC */
1644 mpt_req = req->req_vbuf;
1645 bzero(mpt_req, sizeof *mpt_req);
1646
1647 mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
746 if (raid_passthru)
1648 if (raid_passthru) {
747 mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
1649 mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
1650 }
748
1651
749 mpt_req->Bus = mpt->bus;
1652 mpt_req->Bus = 0; /* we don't have multiport devices yet */
750
751 mpt_req->SenseBufferLength =
752 (csio->sense_len < MPT_SENSE_SIZE) ?
753 csio->sense_len : MPT_SENSE_SIZE;
754
755 /*
756 * We use the message context to find the request structure when we
757 * get the command completion interrupt from the IOC.
758 */
759 mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
760
761 /* Which physical device to do the I/O on */
762 mpt_req->TargetID = ccb->ccb_h.target_id;
1653
1654 mpt_req->SenseBufferLength =
1655 (csio->sense_len < MPT_SENSE_SIZE) ?
1656 csio->sense_len : MPT_SENSE_SIZE;
1657
1658 /*
1659 * We use the message context to find the request structure when we
1660 * get the command completion interrupt from the IOC.
1661 */
1662 mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
1663
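/*
 * The 32-bit message context carries both req->index and the handler
 * id that mpt_register_handler() handed back at attach time, so the
 * reply interrupt path can recover the dispatch routine and the
 * originating request from the context alone.
 */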
1664 /* Which physical device to do the I/O on */
1665 mpt_req->TargetID = ccb->ccb_h.target_id;
763 /*
764 * XXX Assumes Single level, Single byte, CAM LUN type.
765 */
766 mpt_req->LUN[1] = ccb->ccb_h.target_lun;
767
1666
1667 /* We assume a single level LUN type */
1668 if (ccb->ccb_h.target_lun >= 256) {
1669 mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
1670 mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
1671 } else {
1672 mpt_req->LUN[1] = ccb->ccb_h.target_lun;
1673 }
1674
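/*
 * Worked example of the flat-space encoding above: LUN 300 (0x012c)
 * becomes LUN[0] = 0x40 | 0x01 = 0x41 and LUN[1] = 0x2c, while LUN 5
 * stays single-byte as LUN[1] = 0x05.
 */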
768 /* Set the direction of the transfer */
769 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
770 mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
771 else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
772 mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
773 else
774 mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
775

--- 48 unchanged lines hidden ---

824 * We've been given a pointer to a single buffer.
825 */
826 if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
827 /*
828 * Virtual address that needs to translated into
829 * one or more physical address ranges.
830 */
831 int error;
1675 /* Set the direction of the transfer */
1676 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1677 mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
1678 else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1679 mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
1680 else
1681 mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
1682

--- 48 unchanged lines hidden ---

1731 * We've been given a pointer to a single buffer.
1732 */
1733 if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
1734 /*
1735 * Virtual address that needs to translated into
1736 * one or more physical address ranges.
1737 */
1738 int error;
832
1739 int s = splsoftvm();
833 error = bus_dmamap_load(mpt->buffer_dmat,
834 req->dmap, csio->data_ptr, csio->dxfer_len,
1740 error = bus_dmamap_load(mpt->buffer_dmat,
1741 req->dmap, csio->data_ptr, csio->dxfer_len,
835 mpt_execute_req, req, 0);
1742 cb, req, 0);
1743 splx(s);
836 if (error == EINPROGRESS) {
837 /*
838 * So as to maintain ordering,
839 * freeze the controller queue
840 * until our mapping is
841 * returned.
842 */
843 xpt_freeze_simq(mpt->sim, 1);
844 ccbh->status |= CAM_RELEASE_SIMQ;
845 }
846 } else {
847 /*
848 * We have been given a pointer to a single
849 * physical buffer.
850 */
851 struct bus_dma_segment seg;
852 seg.ds_addr =
853 (bus_addr_t)(vm_offset_t)csio->data_ptr;
854 seg.ds_len = csio->dxfer_len;
1744 if (error == EINPROGRESS) {
1745 /*
1746 * So as to maintain ordering,
1747 * freeze the controller queue
1748 * until our mapping is
1749 * returned.
1750 */
1751 xpt_freeze_simq(mpt->sim, 1);
1752 ccbh->status |= CAM_RELEASE_SIMQ;
1753 }
1754 } else {
1755 /*
1756 * We have been given a pointer to a single
1757 * physical buffer.
1758 */
1759 struct bus_dma_segment seg;
1760 seg.ds_addr =
1761 (bus_addr_t)(vm_offset_t)csio->data_ptr;
1762 seg.ds_len = csio->dxfer_len;
855 mpt_execute_req(req, &seg, 1, 0);
1763 (*cb)(req, &seg, 1, 0);
856 }
857 } else {
858 /*
859 * We have been given a list of addresses.
860 * This case could be easily supported but they are not
861 * currently generated by the CAM subsystem so there
862 * is no point in wasting the time right now.
863 */
864 struct bus_dma_segment *segs;
865 if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
1764 }
1765 } else {
1766 /*
1767 * We have been given a list of addresses.
1768 * This case could be easily supported but they are not
1769 * currently generated by the CAM subsystem so there
1770 * is no point in wasting the time right now.
1771 */
1772 struct bus_dma_segment *segs;
1773 if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
866 mpt_execute_req(req, NULL, 0, EFAULT);
1774 (*cb)(req, NULL, 0, EFAULT);
867 } else {
868 /* Just use the segments provided */
869 segs = (struct bus_dma_segment *)csio->data_ptr;
1775 } else {
1776 /* Just use the segments provided */
1777 segs = (struct bus_dma_segment *)csio->data_ptr;
870 mpt_execute_req(req, segs, csio->sglist_cnt, 0);
1778 (*cb)(req, segs, csio->sglist_cnt, 0);
871 }
872 }
873 } else {
1779 }
1780 }
1781 } else {
874 mpt_execute_req(req, NULL, 0, 0);
1782 (*cb)(req, NULL, 0, 0);
875 }
876}
877
878static int
879mpt_bus_reset(struct mpt_softc *mpt, int sleep_ok)
880{
881 int error;
882 u_int status;

--- 29 unchanged lines hidden (view full) ---

912 "Resetting controller.\n", status);
913 mpt_reset(mpt, /*reinit*/TRUE);
914 return (EIO);
915 }
916 return (0);
917}
918
919static int
1783 }
1784}
1785
1786static int
1787mpt_bus_reset(struct mpt_softc *mpt, int sleep_ok)
1788{
1789 int error;
1790 u_int status;

--- 29 unchanged lines hidden (view full) ---

1820 "Resetting controller.\n", status);
1821 mpt_reset(mpt, /*reinit*/TRUE);
1822 return (EIO);
1823 }
1824 return (0);
1825}
1826
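/*
 * Ask the IOC to reset the FC link by sending the RESET_LINK primitive.
 * If dowait is set, sleep (for up to a minute) until the send completes
 * and then release the request; otherwise the ELS reply handler cleans
 * up when the completion arrives.
 */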
1827static int
1828mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
1829{
1830 int r = 0;
1831 request_t *req;
1832 PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
1833
1834 req = mpt_get_request(mpt, FALSE);
1835 if (req == NULL) {
1836 return (ENOMEM);
1837 }
1838 fc = req->req_vbuf;
1839 memset(fc, 0, sizeof(*fc));
1840 fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
1841 fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
1842 fc->MsgContext = htole32(req->index | fc_els_handler_id);
1843 mpt_send_cmd(mpt, req);
1844 if (dowait) {
1845 r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
1846 REQ_STATE_DONE, FALSE, 60 * 1000);
1847 mpt_free_request(mpt, req);
1848 }
1849 return (r);
1850}
1851
1852static int
920mpt_cam_event(struct mpt_softc *mpt, request_t *req,
921 MSG_EVENT_NOTIFY_REPLY *msg)
922{
1853mpt_cam_event(struct mpt_softc *mpt, request_t *req,
1854 MSG_EVENT_NOTIFY_REPLY *msg)
1855{
923 mpt_lprt(mpt, MPT_PRT_ALWAYS, "mpt_cam_event: 0x%x\n",
924 msg->Event & 0xFF);
925 switch(msg->Event & 0xFF) {
926 case MPI_EVENT_UNIT_ATTENTION:
927 mpt_prt(mpt, "Bus: 0x%02x TargetID: 0x%02x\n",
928 (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
929 break;
930
931 case MPI_EVENT_IOC_BUS_RESET:
932 /* We generated a bus reset */
933 mpt_prt(mpt, "IOC Bus Reset Port: %d\n",
934 (msg->Data[0] >> 8) & 0xff);
935 xpt_async(AC_BUS_RESET, mpt->path, NULL);
936 break;
937
938 case MPI_EVENT_EXT_BUS_RESET:
939 /* Someone else generated a bus reset */
1856 switch(msg->Event & 0xFF) {
1857 case MPI_EVENT_UNIT_ATTENTION:
1858 mpt_prt(mpt, "Bus: 0x%02x TargetID: 0x%02x\n",
1859 (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
1860 break;
1861
1862 case MPI_EVENT_IOC_BUS_RESET:
1863 /* We generated a bus reset */
1864 mpt_prt(mpt, "IOC Bus Reset Port: %d\n",
1865 (msg->Data[0] >> 8) & 0xff);
1866 xpt_async(AC_BUS_RESET, mpt->path, NULL);
1867 break;
1868
1869 case MPI_EVENT_EXT_BUS_RESET:
1870 /* Someone else generated a bus reset */
940 mpt_prt(mpt, "Ext Bus Reset\n");
1871 mpt_prt(mpt, "External Bus Reset Detected\n");
941 /*
942 * These replies don't return EventData like the MPI
943 * spec says they do
944 */
945 xpt_async(AC_BUS_RESET, mpt->path, NULL);
946 break;
947
948 case MPI_EVENT_RESCAN:

--- 18 unchanged lines hidden (view full) ---

967 "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
968 "(Loop Initialization)\n",
969 (msg->Data[1] >> 8) & 0xff,
970 (msg->Data[0] >> 8) & 0xff,
971 (msg->Data[0] ) & 0xff);
972 switch ((msg->Data[0] >> 8) & 0xff) {
973 case 0xF7:
974 if ((msg->Data[0] & 0xff) == 0xF7) {
1872 /*
1873 * These replies don't return EventData like the MPI
1874 * spec says they do
1875 */
1876 xpt_async(AC_BUS_RESET, mpt->path, NULL);
1877 break;
1878
1879 case MPI_EVENT_RESCAN:

--- 18 unchanged lines hidden (view full) ---

1898 "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
1899 "(Loop Initialization)\n",
1900 (msg->Data[1] >> 8) & 0xff,
1901 (msg->Data[0] >> 8) & 0xff,
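			/*
			 * Decode the LIP: the high byte is F7 (loop
			 * initializing) or F8 (loop failure); the low byte
			 * is either the same code or the AL_PS of the
			 * originating port, as the messages below report.
			 */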
1902 (msg->Data[0] ) & 0xff);
1903 switch ((msg->Data[0] >> 8) & 0xff) {
1904 case 0xF7:
1905 if ((msg->Data[0] & 0xff) == 0xF7) {
975 printf("Device needs AL_PA\n");
1906 mpt_prt(mpt, "Device needs AL_PA\n");
976 } else {
1907 } else {
977 printf("Device %02x doesn't like "
1908 mpt_prt(mpt, "Device %02x doesn't like "
978 "FC performance\n",
979 msg->Data[0] & 0xFF);
980 }
981 break;
982 case 0xF8:
983 if ((msg->Data[0] & 0xff) == 0xF7) {
1909 "FC performance\n",
1910 msg->Data[0] & 0xFF);
1911 }
1912 break;
1913 case 0xF8:
1914 if ((msg->Data[0] & 0xff) == 0xF7) {
984 printf("Device had loop failure at its "
985 "receiver prior to acquiring "
986 "AL_PA\n");
1915 mpt_prt(mpt, "Device had loop failure "
1916 "at its receiver prior to acquiring"
1917 " AL_PA\n");
987 } else {
1918 } else {
988 printf("Device %02x detected loop "
989 "failure at its receiver\n",
1919 mpt_prt(mpt, "Device %02x detected loop"
1920 " failure at its receiver\n",
990 msg->Data[0] & 0xFF);
991 }
992 break;
993 default:
1921 msg->Data[0] & 0xFF);
1922 }
1923 break;
1924 default:
994 printf("Device %02x requests that device "
1925 mpt_prt(mpt, "Device %02x requests that device "
995 "%02x reset itself\n",
996 msg->Data[0] & 0xFF,
997 (msg->Data[0] >> 8) & 0xFF);
998 break;
999 }
1000 break;
1001 case 0x02:
1002 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "

--- 30 unchanged lines hidden (view full) ---

1033 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
1034 /*
1035 * Devices are attachin'.....
1036 */
1037 mpt_prt(mpt,
1038 "mpt_cam_event: MPI_EVENT_SAS_DEVICE_STATUS_CHANGE\n");
1039 break;
1040 default:
1926 "%02x reset itself\n",
1927 msg->Data[0] & 0xFF,
1928 (msg->Data[0] >> 8) & 0xFF);
1929 break;
1930 }
1931 break;
1932 case 0x02:
1933 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "

--- 30 unchanged lines hidden (view full) ---

1964 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
1965 /*
1966 * Devices are attachin'.....
1967 */
1968 mpt_prt(mpt,
1969 "mpt_cam_event: MPI_EVENT_SAS_DEVICE_STATUS_CHANGE\n");
1970 break;
1971 default:
1972 mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
1973 msg->Event & 0xFF);
1041 return (/*handled*/0);
1042 }
1043 return (/*handled*/1);
1044}
1045
1046/*
1047 * Reply path for all SCSI I/O requests, called from our
1048 * interrupt handler by extracting our handler index from
1049 * the MsgContext field of the reply from the IOC.
1050 *
1051 * This routine is optimized for the common case of a
1052 * completion without error. All exception handling is
1053 * offloaded to non-inlined helper routines to minimize
1054 * cache footprint.
1055 */
1056static int
1057mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
1974 return (/*handled*/0);
1975 }
1976 return (/*handled*/1);
1977}
1978
1979/*
1980 * Reply path for all SCSI I/O requests, called from our
1981 * interrupt handler by extracting our handler index from
1982 * the MsgContext field of the reply from the IOC.
1983 *
1984 * This routine is optimized for the common case of a
1985 * completion without error. All exception handling is
1986 * offloaded to non-inlined helper routines to minimize
1987 * cache footprint.
1988 */
1989static int
1990mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
1058 MSG_DEFAULT_REPLY *reply_frame)
1991 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
1059{
1060 MSG_SCSI_IO_REQUEST *scsi_req;
1061 union ccb *ccb;
1062
1063 scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
1064 ccb = req->ccb;
1065 if (ccb == NULL) {
1066 mpt_prt(mpt, "Completion without CCB. Flags %#x, Func %#x\n",

--- 12 unchanged lines hidden (view full) ---

1079 else
1080 op = BUS_DMASYNC_POSTWRITE;
1081 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1082 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1083 }
1084
1085 if (reply_frame == NULL) {
1086 /*
1992{
1993 MSG_SCSI_IO_REQUEST *scsi_req;
1994 union ccb *ccb;
1995
1996 scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
1997 ccb = req->ccb;
1998 if (ccb == NULL) {
1999 mpt_prt(mpt, "Completion without CCB. Flags %#x, Func %#x\n",

--- 12 unchanged lines hidden (view full) ---

2012 else
2013 op = BUS_DMASYNC_POSTWRITE;
2014 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2015 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2016 }
2017
2018 if (reply_frame == NULL) {
2019 /*
1087 * Context only reply, completion
1088 * without error status.
2020 * Context only reply, completion without error status.
1089 */
1090 ccb->csio.resid = 0;
1091 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
1092 ccb->csio.scsi_status = SCSI_STATUS_OK;
1093 } else {
1094 mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
1095 }
1096
1097 if (mpt->outofbeer) {
1098 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1099 mpt->outofbeer = 0;
1100 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
1101 }
2021 */
2022 ccb->csio.resid = 0;
2023 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2024 ccb->csio.scsi_status = SCSI_STATUS_OK;
2025 } else {
2026 mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2027 }
2028
2029 if (mpt->outofbeer) {
2030 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2031 mpt->outofbeer = 0;
2032 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2033 }
1102 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1103 MPTLOCK_2_CAMLOCK(mpt);
1104 if (scsi_req->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH
1105 && scsi_req->CDB[0] == INQUIRY
1106 && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2034 if (scsi_req->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH &&
2035 scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
1107 struct scsi_inquiry_data *inq;
2036 struct scsi_inquiry_data *inq;
1108
1109 /*
1110 * Fake out the device type so that only the
1111 * pass-thru device will attach.
1112 */
1113 inq = (struct scsi_inquiry_data *)ccb->csio.data_ptr;
1114 inq->device &= ~0x1F;
1115 inq->device |= T_NODEVICE;
1116 }
2037 /*
2038 * Fake out the device type so that only the
2039 * pass-thru device will attach.
2040 */
2041 inq = (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2042 inq->device &= ~0x1F;
2043 inq->device |= T_NODEVICE;
2044 }
2045 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2046 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2047 MPTLOCK_2_CAMLOCK(mpt);
1117 xpt_done(ccb);
1118 CAMLOCK_2_MPTLOCK(mpt);
2048 xpt_done(ccb);
2049 CAMLOCK_2_MPTLOCK(mpt);
1119 if ((req->state & REQ_STATE_TIMEDOUT) == 0)
2050 if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
1120 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2051 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
1121 else
2052 } else {
1122 TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2053 TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
1123
2054 }
1124 if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
1125 mpt_free_request(mpt, req);
1126 return (/*free_reply*/TRUE);
1127 }
1128 req->state &= ~REQ_STATE_QUEUED;
1129 req->state |= REQ_STATE_DONE;
1130 wakeup(req);
1131 return (/*free_reply*/TRUE);
1132}
1133
1134static int
1135mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2055 if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2056 mpt_free_request(mpt, req);
2057 return (/*free_reply*/TRUE);
2058 }
2059 req->state &= ~REQ_STATE_QUEUED;
2060 req->state |= REQ_STATE_DONE;
2061 wakeup(req);
2062 return (/*free_reply*/TRUE);
2063}
2064
2065static int
2066mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
1136 MSG_DEFAULT_REPLY *reply_frame)
2067 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
1137{
1138 MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
1139 uint16_t status;
1140
1141 KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
1142
1143 tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
1144

--- 8 unchanged lines hidden (view full) ---

1153 req->state |= REQ_STATE_DONE;
1154 wakeup(req);
1155 } else
1156 mpt->tmf_req->state = REQ_STATE_FREE;
1157
1158 return (/*free_reply*/TRUE);
1159}
1160
2068{
2069 MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2070 uint16_t status;
2071
2072 KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2073
2074 tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2075

--- 8 unchanged lines hidden (view full) ---

2084 req->state |= REQ_STATE_DONE;
2085 wakeup(req);
2086 } else
2087 mpt->tmf_req->state = REQ_STATE_FREE;
2088
2089 return (/*free_reply*/TRUE);
2090}
2091
2092
1161/*
2093/*
2094 * XXX: Move to definitions file
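 * The first group are FC frame R_CTL values (extended link service,
 * FC-4 link service, and the basic link service ABTS and BA_ACC); the
 * second group are standard ELS command codes.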
2095 */
2096#define ELS 0x22
2097#define FC4LS 0x32
2098#define ABTS 0x81
2099#define BA_ACC 0x84
2100
2101#define LS_RJT 0x01
2102#define LS_ACC 0x02
2103#define PLOGI 0x03
2104#define LOGO 0x05
2105#define SRR 0x14
2106#define PRLI 0x20
2107#define PRLO 0x21
2108#define ADISC 0x52
2109#define RSCN 0x61
2110
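/*
 * Send a response to an incoming ELS, reusing the request that carried
 * the original payload; the caller has already staged the response bytes
 * just past the request frame in the same DMA area.
 */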
2111static void
2112mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2113 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2114{
2115 MSG_LINK_SERVICE_RSP_REQUEST tmp;
2116 PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2117
2118 /*
2119 * We are going to reuse the ELS request to send this response back.
2120 */
2121 rsp = &tmp;
2122 memset(rsp, 0, sizeof(*rsp));
2123
2124#ifdef USE_IMMEDIATE_LINK_DATA
2125 /*
2126 * Apparently the IMMEDIATE stuff doesn't work.
2127 */
2128 rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2129#endif
2130 rsp->RspLength = length;
2131 rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2132 rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2133
2134 /*
2135 * Copy over information from the original reply frame to
2136 * its correct place in the response.
2137 */
2138 memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2139
2140 /*
2141 * And now copy back the temporary area to the original frame.
2142 */
2143 memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2144 rsp = req->req_vbuf;
2145
2146#ifdef USE_IMMEDIATE_LINK_DATA
2147 memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2148#else
2149{
2150 PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2151 bus_addr_t paddr = req->req_pbuf;
2152 paddr += MPT_RQSL(mpt);
2153
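	/*
	 * Point a single simple 32-bit SGE at the response payload staged
	 * just past the request frame, marking it as the last element and
	 * the end of the buffer and list.
	 */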
2154 se->FlagsLength =
2155 MPI_SGE_FLAGS_HOST_TO_IOC |
2156 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
2157 MPI_SGE_FLAGS_LAST_ELEMENT |
2158 MPI_SGE_FLAGS_END_OF_LIST |
2159 MPI_SGE_FLAGS_END_OF_BUFFER;
2160 se->FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
2161 se->FlagsLength |= (length);
2162 se->Address = (uint32_t) paddr;
2163}
2164#endif
2165
2166 /*
2167 * Send it on...
2168 */
2169 mpt_send_cmd(mpt, req);
2170}
2171
2172static int
2173mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2174 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2175{
2176 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2177 (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2178 U8 rctl;
2179 U8 type;
2180 U8 cmd;
2181 U16 status = le16toh(reply_frame->IOCStatus);
2182 U32 *elsbuf;
2183 int do_refresh = TRUE;
2184
2185 mpt_lprt(mpt, MPT_PRT_DEBUG, "FC_ELS Complete: req %p:%u, reply %p\n",
2186 req, req->serno, reply_frame);
2187
2188 if (status != MPI_IOCSTATUS_SUCCESS) {
2189 mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2190 status, reply_frame->Function);
2191 if (status == MPI_IOCSTATUS_INVALID_STATE) {
2192 /*
2193 * XXX: to get around shutdown issue
2194 */
2195 mpt->disabled = 1;
2196 return (TRUE);
2197 }
2198 return (TRUE);
2199 }
2200
2201 /*
2202 * If this is the completion of a link service response, we recycle
2203 * the request as a buffer post for the next incoming link service.
2204 */
2205 if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2206 mpt_fc_add_els(mpt, req);
2207 return (TRUE);
2208 }
2209
2210 if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2211 req->state &= ~REQ_STATE_QUEUED;
2212 req->state |= REQ_STATE_DONE;
2213 if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2214 mpt_lprt(mpt, MPT_PRT_DEBUG,
2215 "Async Primitive Send Complete\n");
2216 mpt_free_request(mpt, req);
2217 } else {
2218 mpt_lprt(mpt, MPT_PRT_DEBUG,
2219 "Sync Primitive Send Complete\n");
2220 wakeup(req);
2221 }
2222 return (TRUE);
2223 }
2224
2225 if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2226 mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2227 "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2228 rp->MsgLength, rp->MsgFlags);
2229 return (TRUE);
2230 }
2231
2232 if (rp->MsgLength <= 5) {
2233 /*
2234 * This is just an ack of an original ELS buffer post
2235 */
2236 mpt_lprt(mpt, MPT_PRT_DEBUG,
2237 "Recv'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2238 return (TRUE);
2239 }
2240
2241
2242 rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2243 type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2244
2245 elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2246 cmd = be32toh(elsbuf[0]) >> 24;
2247
2248 if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2249 mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2250 return (TRUE);
2251 }
2252
2253
2254 if (rctl == ELS && type == 1) {
2255 switch (cmd) {
2256 case PRLI:
2257 /*
2258 * Send back a PRLI ACC
2259 */
2260 mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2261 le32toh(rp->Wwn.PortNameHigh),
2262 le32toh(rp->Wwn.PortNameLow));
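			/*
			 * Build the accept in place: 0x02100014 is LS_ACC
			 * with a page length of 0x10 and a payload length
			 * of 0x14 (20 bytes); word 1 gets the 'request
			 * executed' response code and word 4 the FCP
			 * service parameter bits (0x10 target, 0x20
			 * initiator) matching our role. The 0x02 bit in
			 * word 4 appears to be READ XFER_RDY DISABLED.
			 */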
2263 elsbuf[0] = htobe32(0x02100014);
2264 elsbuf[1] |= htobe32(0x00000100);
2265 elsbuf[4] = htobe32(0x00000002);
2266 if (mpt->role & MPT_ROLE_TARGET)
2267 elsbuf[4] |= htobe32(0x00000010);
2268 if (mpt->role & MPT_ROLE_INITIATOR)
2269 elsbuf[4] |= htobe32(0x00000020);
2270 mpt_fc_els_send_response(mpt, req, rp, 20);
2271 do_refresh = FALSE;
2272 break;
2273 case PRLO:
2274 memset(elsbuf, 0, 5 * (sizeof (U32)));
2275 elsbuf[0] = htobe32(0x02100014);
2276 elsbuf[1] = htobe32(0x08000100);
2277 mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2278 le32toh(rp->Wwn.PortNameHigh),
2279 le32toh(rp->Wwn.PortNameLow));
2280 mpt_fc_els_send_response(mpt, req, rp, 20);
2281 do_refresh = FALSE;
2282 break;
2283 default:
2284 mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2285 break;
2286 }
2287 } else if (rctl == ABTS && type == 0) {
2288 uint16_t rx_id = le16toh(rp->Rxid);
2289 uint16_t ox_id = le16toh(rp->Oxid);
2290 request_t *tgt_req = NULL;
2291
2292 mpt_prt(mpt,
2293 "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2294 ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2295 le32toh(rp->Wwn.PortNameLow));
2296 if (rx_id >= mpt->mpt_max_tgtcmds) {
2297 mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
2298 } else if (mpt->tgt_cmd_ptrs == NULL) {
2299 mpt_prt(mpt, "No TGT CMD PTRS\n");
2300 } else {
2301 tgt_req = mpt->tgt_cmd_ptrs[rx_id];
2302 }
2303 if (tgt_req) {
2304 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
2305 uint8_t *vbuf;
2306 union ccb *ccb = tgt->ccb;
2307 cam_status cs;
2308 uint32_t ct_id;
2309
2310 vbuf = tgt_req->req_vbuf;
2311 vbuf += MPT_RQSL(mpt);
2312
2313 /*
2314 * Check to make sure we have the correct command:
2315 * the reply descriptor in the target state should
2316 * contain an IoIndex that matches the
2317 * RX_ID.
2318 *
2319 * It'd be nice to have OX_ID to crosscheck with
2320 * as well.
2321 */
2322 ct_id = GET_IO_INDEX(tgt->reply_desc);
2323
2324 if (ct_id != rx_id) {
2325 mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2326 "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
2327 rx_id, ct_id);
2328 goto skip;
2329 }
2330
2331 ccb = tgt->ccb;
2332 if (ccb) {
2333 mpt_prt(mpt,
2334 "CCB (%p): lun %u flags %x status %x\n",
2335 ccb, ccb->ccb_h.target_lun,
2336 ccb->ccb_h.flags, ccb->ccb_h.status);
2337 }
2338 mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
2339 "%x nxfers %x flags %x\n", tgt->state,
2340 tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
2341 tgt->nxfers, tgt->flags);
2342 skip:
2343 cs = mpt_abort_target_cmd(mpt, tgt_req);
2344 if (cs != CAM_REQ_INPROG) {
2345 mpt_prt(mpt, "unable to do TargetAbort (%x)\n",
2346 cs);
2347 }
2348 } else {
2349 mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
2350 }
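		/*
		 * Build a BA_ACC payload: word 1 carries the OX_ID and
		 * RX_ID of the aborted exchange, word 2 the low (0x0000)
		 * and high (0xffff) SEQ_CNT.
		 */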
2351 memset(elsbuf, 0, 5 * (sizeof (U32)));
2352 elsbuf[0] = htobe32(0);
2353 elsbuf[1] = htobe32((ox_id << 16) | rx_id);
2354 elsbuf[2] = htobe32(0x0000ffff);
2355 /*
2356 * Dork with the reply frame so that the response to it
2357 * will be correct.
2358 */
2359 rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
2360 mpt_fc_els_send_response(mpt, req, rp, 12);
2361 do_refresh = FALSE;
2362 } else {
2363 mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
2364 }
2365 if (do_refresh == TRUE) {
2366 mpt_fc_add_els(mpt, req);
2367 }
2368 return (TRUE);
2369}
2370
2371/*
2372 * WE_TRUST_AUTO_GOOD_STATUS: I've found that setting
2373 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
2374 * FC929 to set bogus FC_RSP fields (nonzero residuals
2375 * but w/o RESID fields set). This causes QLogic initiators
2376 * to think that a frame may have been lost.
2377 *
2378 * WE_CAN_USE_AUTO_REPOST: we can't use AUTO_REPOST because
2379 * we use allocated requests to do TARGET_ASSIST and we
2380 * need to know when to release them.
2381 */
2382
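/*
 * Queue a TARGET_STATUS_SEND for a command: allocate a fresh request,
 * build the FC, SAS, or SPI flavored status payload just past the new
 * request frame, and either let the chip synthesize good status or
 * point an SGE at the payload we built.
 */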
2383static void
2384mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
2385 uint8_t status, uint8_t const *sense_data)
2386{
2387 uint8_t *cmd_vbuf;
2388 mpt_tgt_state_t *tgt;
2389 PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
2390 request_t *req;
2391 bus_addr_t paddr;
2392 int resplen = 0;
2393
2394 cmd_vbuf = cmd_req->req_vbuf;
2395 cmd_vbuf += MPT_RQSL(mpt);
2396 tgt = MPT_TGT_STATE(mpt, cmd_req);
2397
2398 req = mpt_get_request(mpt, FALSE);
2399 if (req == NULL) {
2400 if (ccb) {
2401 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2402 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
2403 MPTLOCK_2_CAMLOCK(mpt);
2404 xpt_done(ccb);
2405 CAMLOCK_2_MPTLOCK(mpt);
2406 } else {
2407 /*
2408 * XXX: put in deferred processing if we cannot allocate
2409 */
2410 mpt_prt(mpt,
2411 "XXXX could not allocate status req- dropping\n");
2412 }
2413 return;
2414 }
2415 req->ccb = ccb;
2416 if (ccb) {
2417 ccb->ccb_h.ccb_mpt_ptr = mpt;
2418 ccb->ccb_h.ccb_req_ptr = req;
2419 }
2420
2421 /*
2422 * Record the currently active ccb, if any, and the
2423 * request for it in our target state area.
2424 */
2425 tgt->ccb = ccb;
2426 tgt->req = req;
2427 tgt->state = TGT_STATE_SENDING_STATUS;
2428
2429 tp = req->req_vbuf;
2430 paddr = req->req_pbuf;
2431 paddr += MPT_RQSL(mpt);
2432
2433 memset(tp, 0, sizeof (*tp));
2434 tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
2435 if (mpt->is_fc) {
2436 PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
2437 (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
2438 uint8_t *sts_vbuf;
2439 uint32_t *rsp;
2440
2441 sts_vbuf = req->req_vbuf;
2442 sts_vbuf += MPT_RQSL(mpt);
2443 rsp = (uint32_t *) sts_vbuf;
2444 memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
2445
2446 /*
2447 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
2448 * It has to be big-endian in memory and is organized
2449 * in 32-bit words, which are much easier to deal with
2450 * when swizzled as needed.
2451 *
2452 * All we're filling here is the FC_RSP payload.
2453 * We may just have the chip synthesize it if
2454 * we have no residual and an OK status.
2455 *
2456 */
2457 memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
2458
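		/*
		 * Word 2 of the FCP_RSP carries the flags byte and SCSI
		 * status: 0x800 flags an underrun residual (FCP_RESID_UNDER,
		 * making the residual field valid) and 0x200 flags valid
		 * sense length and data (FCP_SNS_LEN_VALID).
		 */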
2459 rsp[2] = status;
2460 if (tgt->resid) {
2461 rsp[2] |= 0x800;
2462 rsp[3] = htobe32(tgt->resid);
2463#ifdef WE_TRUST_AUTO_GOOD_STATUS
2464 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
2465#endif
2466 }
2467 if (status == SCSI_STATUS_CHECK_COND) {
2468 int i;
2469
2470 rsp[2] |= 0x200;
2471 rsp[4] = htobe32(MPT_SENSE_SIZE);
2472 memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
2473 for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
2474 rsp[i] = htobe32(rsp[i]);
2475 }
2476#ifdef WE_TRUST_AUTO_GOOD_STATUS
2477 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
2478#endif
2479 }
2480#ifndef WE_TRUST_AUTO_GOOD_STATUS
2481 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
2482#endif
2483 rsp[2] = htobe32(rsp[2]);
2484 } else if (mpt->is_sas) {
2485 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
2486 (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
2487 memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
2488 } else {
2489 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
2490 (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
2491 tp->StatusCode = status;
2492 tp->QueueTag = htole16(sp->Tag);
2493 memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
2494 }
2495
2496 tp->ReplyWord = htole32(tgt->reply_desc);
2497 tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
2498
2499#ifdef WE_CAN_USE_AUTO_REPOST
2500 tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
2501#endif
2502 if (status == SCSI_STATUS_OK && resplen == 0) {
2503 tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
2504 } else {
2505 tp->StatusDataSGE.u.Address32 = (uint32_t) paddr;
2506 tp->StatusDataSGE.FlagsLength =
2507 MPI_SGE_FLAGS_HOST_TO_IOC |
2508 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
2509 MPI_SGE_FLAGS_LAST_ELEMENT |
2510 MPI_SGE_FLAGS_END_OF_LIST |
2511 MPI_SGE_FLAGS_END_OF_BUFFER;
2512 tp->StatusDataSGE.FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
2513 tp->StatusDataSGE.FlagsLength |= resplen;
2514 }
2515
2516 mpt_lprt(mpt, MPT_PRT_DEBUG,
2517 "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
2518 ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req,
2519 req->serno, tgt->resid);
2520 if (ccb) {
2521 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
2522 ccb->ccb_h.timeout_ch = timeout(mpt_timeout, (caddr_t)ccb, hz);
2523 }
2524 mpt_send_cmd(mpt, req);
2525}
2526
2527static void
2528mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
2529 tgt_resource_t *trtp, int init_id)
2530{
2531 struct ccb_immed_notify *inot;
2532 mpt_tgt_state_t *tgt;
2533
2534 tgt = MPT_TGT_STATE(mpt, req);
2535 inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
2536 if (inot == NULL) {
2537 mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n");
2538 mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
2539 return;
2540 }
2541 STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
2542 mpt_lprt(mpt, MPT_PRT_DEBUG1,
2543 "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);
2544
2545 memset(&inot->sense_data, 0, sizeof (inot->sense_data));
2546 inot->sense_len = 0;
2547 memset(inot->message_args, 0, sizeof (inot->message_args));
2548 inot->initiator_id = init_id; /* XXX */
2549
2550 /*
2551 * This is a somewhat grotesque attempt to map from task management
2552 * to old style SCSI messages. God help us all.
2553 */
2554 switch (fc) {
2555 case MPT_ABORT_TASK_SET:
2556 inot->message_args[0] = MSG_ABORT_TAG;
2557 break;
2558 case MPT_CLEAR_TASK_SET:
2559 inot->message_args[0] = MSG_CLEAR_TASK_SET;
2560 break;
2561 case MPT_TARGET_RESET:
2562 inot->message_args[0] = MSG_TARGET_RESET;
2563 break;
2564 case MPT_CLEAR_ACA:
2565 inot->message_args[0] = MSG_CLEAR_ACA;
2566 break;
2567 case MPT_TERMINATE_TASK:
2568 inot->message_args[0] = MSG_ABORT_TAG;
2569 break;
2570 default:
2571 inot->message_args[0] = MSG_NOOP;
2572 break;
2573 }
2574 tgt->ccb = (union ccb *) inot;
2575 inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
2576 MPTLOCK_2_CAMLOCK(mpt);
2577 xpt_done((union ccb *)inot);
2578 CAMLOCK_2_MPTLOCK(mpt);
2579}
2580
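/*
 * A new command has arrived in one of our posted buffers: decode the
 * protocol specific command frame, map it to a LUN and tag action, and
 * hand it to CAM as an ATIO (or route task management requests through
 * an immediate notify).
 */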
2581static void
2582mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
2583{
2584 struct ccb_accept_tio *atiop;
2585 lun_id_t lun;
2586 int tag_action = 0;
2587 mpt_tgt_state_t *tgt;
2588 tgt_resource_t *trtp;
2589 U8 *lunptr;
2590 U8 *vbuf;
2591 U16 itag;
2592 U16 ioindex;
2593 mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
2594 uint8_t *cdbp;
2595
2596 /*
2597 * First, DMA sync the received command, which is in the *request*
2598 * phys area.
2599 * XXX: We could optimize this to sync only a range
2600 */
2601 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
2602 BUS_DMASYNC_POSTREAD);
2603
2604 /*
2605 * Stash info for the current command where we can get at it later.
2606 */
2607 vbuf = req->req_vbuf;
2608 vbuf += MPT_RQSL(mpt);
2609
2610 /*
2611 * Get our state pointer set up.
2612 */
2613 tgt = MPT_TGT_STATE(mpt, req);
2614 KASSERT(tgt->state == TGT_STATE_LOADED,
2615 ("bad target state %x in mpt_scsi_tgt_atio for req %p\n",
2616 tgt->state, req));
2617 memset(tgt, 0, sizeof (mpt_tgt_state_t));
2618 tgt->state = TGT_STATE_IN_CAM;
2619 tgt->reply_desc = reply_desc;
2620 ioindex = GET_IO_INDEX(reply_desc);
2621
2622 if (mpt->is_fc) {
2623 PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
2624 fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
2625 if (fc->FcpCntl[2]) {
2626 /*
2627 * Task Management Request
2628 */
2629 switch (fc->FcpCntl[2]) {
2630 case 0x2:
2631 fct = MPT_ABORT_TASK_SET;
2632 break;
2633 case 0x4:
2634 fct = MPT_CLEAR_TASK_SET;
2635 break;
2636 case 0x20:
2637 fct = MPT_TARGET_RESET;
2638 break;
2639 case 0x40:
2640 fct = MPT_CLEAR_ACA;
2641 break;
2642 case 0x80:
2643 fct = MPT_TERMINATE_TASK;
2644 break;
2645 default:
2646 mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
2647 fc->FcpCntl[2]);
2648 mpt_scsi_tgt_status(mpt, 0, req,
2649 SCSI_STATUS_OK, 0);
2650 return;
2651 }
2652 return;
2653 }
2654 switch (fc->FcpCntl[1]) {
2655 case 0:
2656 tag_action = MSG_SIMPLE_Q_TAG;
2657 break;
2658 case 1:
2659 tag_action = MSG_HEAD_OF_Q_TAG;
2660 break;
2661 case 2:
2662 tag_action = MSG_ORDERED_Q_TAG;
2663 break;
2664 default:
2665 /*
2666 * Bah. Ignore Untagged Queuing and ACA
2667 */
2668 tag_action = MSG_SIMPLE_Q_TAG;
2669 break;
2670 }
2671 tgt->resid = be32toh(fc->FcpDl);
2672 cdbp = fc->FcpCdb;
2673 lunptr = fc->FcpLun;
2674 itag = be16toh(fc->OptionalOxid);
2675 } else if (mpt->is_sas) {
2676 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
2677 ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
2678 cdbp = ssp->CDB;
2679 lunptr = ssp->LogicalUnitNumber;
2680 itag = ssp->InitiatorTag;
2681 } else {
2682 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
2683 sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
2684 cdbp = sp->CDB;
2685 lunptr = sp->LogicalUnitNumber;
2686 itag = sp->Tag;
2687 }
2688
2689 /*
2690 * Generate a simple LUN: byte 0 selects peripheral (00b) or flat (01b) addressing
2691 */
2692 switch (lunptr[0] & 0xc0) {
2693 case 0x40:
2694 lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
2695 break;
2696 case 0:
2697 lun = lunptr[1];
2698 break;
2699 default:
2700 mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n");
2701 lun = 0xffff;
2702 break;
2703 }
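	/*
	 * For example, a flat-format LUN of { 0x40, 0x7f } decodes to
	 * lun 0x7f; formats we do not handle get the 0xffff sentinel
	 * and are treated as bad luns below.
	 */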
2704
2705 /*
2706 * Deal with non-enabled or bad luns here.
2707 */
2708 if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
2709 mpt->trt[lun].enabled == 0) {
2710 if (mpt->twildcard) {
2711 trtp = &mpt->trt_wildcard;
2712 } else {
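			/*
			 * Canned fixed-format sense data: current error
			 * (0xf0), ILLEGAL REQUEST (key 0x5), ASC 0x25
			 * (LOGICAL UNIT NOT SUPPORTED).
			 */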
2713 const uint8_t sp[MPT_SENSE_SIZE] = {
2714 0xf0, 0, 0x5, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0x25
2715 };
2716 mpt_scsi_tgt_status(mpt, NULL, req,
2717 SCSI_STATUS_CHECK_COND, sp);
2718 return;
2719 }
2720 } else {
2721 trtp = &mpt->trt[lun];
2722 }
2723
2724 if (fct != MPT_NIL_TMT_VALUE) {
2725 /* undo any tgt residual settings */
2726 tgt->resid = 0;
2727 mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
2728 GET_INITIATOR_INDEX(reply_desc));
2729 return;
2730 }
2731
2732 atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
2733 if (atiop == NULL) {
2734 mpt_lprt(mpt, MPT_PRT_WARN,
2735 "no ATIOs for lun %u- sending back %s\n", lun,
2736 mpt->tenabled? "QUEUE FULL" : "BUSY");
2737 mpt_scsi_tgt_status(mpt, NULL, req,
2738 mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
2739 NULL);
2740 return;
2741 }
2742 STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
2743 mpt_lprt(mpt, MPT_PRT_DEBUG1,
2744 "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
2745 atiop->ccb_h.ccb_mpt_ptr = mpt;
2746 atiop->ccb_h.status = CAM_CDB_RECVD;
2747 atiop->ccb_h.target_lun = lun;
2748 atiop->sense_len = 0;
2749 atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
2750 atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
2751 memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
2752
2753 /*
2754 * The tag we construct here allows us to find the
2755 * original request that the command came in with.
2756 *
2757 * This way we don't have to depend on anything but the
2758 * tag to find things when CCBs show back up from CAM.
2759 */
2760 atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
2761 if (tag_action) {
2762 atiop->tag_action = tag_action;
2763 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
2764 }
2765 if (mpt->verbose >= MPT_PRT_DEBUG) {
2766 int i;
2767 mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
2768 atiop->ccb_h.target_lun);
2769 for (i = 0; i < atiop->cdb_len; i++) {
2770 mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
2771 (i == (atiop->cdb_len - 1))? '>' : ' ');
2772 }
2773 mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
2774 itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
2775 }
2776 tgt->ccb = (union ccb *) atiop;
2777
2778 MPTLOCK_2_CAMLOCK(mpt);
2779 xpt_done((union ccb *)atiop);
2780 CAMLOCK_2_MPTLOCK(mpt);
2781}
2782
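/*
 * Reply handler for target mode requests. A NULL reply frame is a
 * context only completion, interpreted according to the per-command
 * target state; a real reply frame signals an error or an explicit
 * reply to a buffer post, target assist, status send, or abort.
 */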
2783static int
2784mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
2785 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2786{
2787 int dbg;
2788 union ccb *ccb;
2789 U16 status;
2790
2791 if (reply_frame == NULL) {
2792 /*
2793 * Figure out if this is a new command or a target assist
2794 * completing.
2795 */
2796 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
2797 char serno[8];
2798
2799 if (tgt->req) {
2800 snprintf(serno, 8, "%u", tgt->req->serno);
2801 } else {
2802 strncpy(serno, "??", 8);
2803 }
2804
2805 switch(tgt->state) {
2806 case TGT_STATE_LOADED:
2807 mpt_scsi_tgt_atio(mpt, req, reply_desc);
2808 break;
2809 case TGT_STATE_MOVING_DATA:
2810 {
2811 uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
2812
2813 ccb = tgt->ccb;
2814 tgt->ccb = NULL;
2815 tgt->nxfers++;
2816 untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);
2817 mpt_lprt(mpt, MPT_PRT_DEBUG,
2818 "TARGET_ASSIST %p (req %p:%s) done tag 0x%x\n",
2819 ccb, tgt->req, serno, ccb->csio.tag_id);
2820 /*
2821 * Free the Target Assist Request
2822 */
2823 KASSERT(tgt->req && tgt->req->ccb == ccb,
2824 ("tgt->req %p:%s tgt->req->ccb %p", tgt->req,
2825 serno, tgt->req? tgt->req->ccb : NULL));
2826 mpt_free_request(mpt, tgt->req);
2827 tgt->req = NULL;
2828 /*
2829 * Do we need to send status now? That is, are
2830 * we done with all our data transfers?
2831 */
2832 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
2833 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2834 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2835 KASSERT(ccb->ccb_h.status,
2836 ("zero ccb sts at %d\n", __LINE__));
2837 tgt->state = TGT_STATE_IN_CAM;
2838 MPTLOCK_2_CAMLOCK(mpt);
2839 xpt_done(ccb);
2840 CAMLOCK_2_MPTLOCK(mpt);
2841 break;
2842 }
2843 if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
2844 sp = sense;
2845 memcpy(sp, &ccb->csio.sense_data,
2846 min(ccb->csio.sense_len, MPT_SENSE_SIZE));
2847 }
2848 mpt_scsi_tgt_status(mpt, ccb, req,
2849 ccb->csio.scsi_status, sp);
2850 break;
2851 }
2852 case TGT_STATE_SENDING_STATUS:
2853 case TGT_STATE_MOVING_DATA_AND_STATUS:
2854 {
2855 int ioindex;
2856 ccb = tgt->ccb;
2857
2858 if (ccb) {
2859 tgt->ccb = NULL;
2860 tgt->nxfers++;
2861 untimeout(mpt_timeout, ccb,
2862 ccb->ccb_h.timeout_ch);
2863 if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
2864 ccb->ccb_h.status |= CAM_SENT_SENSE;
2865 }
2866 mpt_lprt(mpt, MPT_PRT_DEBUG,
2867 "TARGET_STATUS tag %x sts %x flgs %x req "
2868 "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
2869 ccb->ccb_h.flags, tgt->req);
2870 /*
2871 * Free the Target Send Status Request
2872 */
2873 KASSERT(tgt->req && tgt->req->ccb == ccb,
2874 ("tgt->req %p:%s tgt->req->ccb %p",
2875 tgt->req, serno,
2876 tgt->req? tgt->req->ccb : NULL));
2877 /*
2878 * Notify CAM that we're done
2879 */
2880 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2881 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2882 KASSERT(ccb->ccb_h.status,
2883 ("ZERO ccb sts at %d\n", __LINE__));
2884 tgt->ccb = NULL;
2885 } else {
2886 mpt_lprt(mpt, MPT_PRT_DEBUG,
2887 "TARGET_STATUS non-CAM for req %p:%s\n",
2888 tgt->req, serno);
2889 }
2890 mpt_free_request(mpt, tgt->req);
2891 tgt->req = NULL;
2892
2893 /*
2894 * And re-post the Command Buffer.
2895 */
2896 ioindex = GET_IO_INDEX(reply_desc);
2897 mpt_post_target_command(mpt, req, ioindex);
2898
2899 /*
2900 * And post a done for anyone who cares
2901 */
2902 if (ccb) {
2903 MPTLOCK_2_CAMLOCK(mpt);
2904 xpt_done(ccb);
2905 CAMLOCK_2_MPTLOCK(mpt);
2906 }
2907 break;
2908 }
2909 case TGT_STATE_NIL: /* XXX This Never Happens XXX */
2910 tgt->state = TGT_STATE_LOADED;
2911 break;
2912 default:
2913 mpt_prt(mpt, "Unknown Target State 0x%x in Context "
2914 "Reply Function\n", tgt->state);
2915 }
2916 return (TRUE);
2917 }
2918
2919 status = le16toh(reply_frame->IOCStatus);
2920 if (status != MPI_IOCSTATUS_SUCCESS) {
2921 dbg = MPT_PRT_ERROR;
2922 } else {
2923 dbg = MPT_PRT_DEBUG1;
2924 }
2925
2926 mpt_lprt(mpt, dbg,
2927 "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
2928 req, req->serno, reply_frame, reply_frame->Function, status);
2929
2930 switch (reply_frame->Function) {
2931 case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
2932 KASSERT(MPT_TGT_STATE(mpt,
2933 req)->state == TGT_STATE_NIL,
2934 ("bad state %x on reply to buffer post\n",
2935 MPT_TGT_STATE(mpt, req)->state));
2936 MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADED;
2937 break;
2938 case MPI_FUNCTION_TARGET_ASSIST:
2939 mpt_prt(mpt,
2940 "TARGET_ASSIST err for request %p:%u (%x): status 0x%x\n",
2941 req, req->serno, req->index, status);
2942 mpt_free_request(mpt, req);
2943 break;
2944 case MPI_FUNCTION_TARGET_STATUS_SEND:
2945 mpt_prt(mpt,
2946 "TARGET_STATUS_SEND error for request %p:%u(%x): status "
2947 "0x%x\n", req, req->serno, req->index, status);
2948 mpt_free_request(mpt, req);
2949 break;
2950 case MPI_FUNCTION_TARGET_MODE_ABORT:
2951 {
2952 PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
2953 (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
2954 PTR_MSG_TARGET_MODE_ABORT abtp =
2955 (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
2956 uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
2957 mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
2958 cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
2959 mpt_free_request(mpt, req);
2960 break;
2961 }
2962 default:
2963 mpt_prt(mpt, "Unknown Target Address Reply Function code: "
2964 "0x%x\n", reply_frame->Function);
2965 break;
2966 }
2967 return (TRUE);
2968}
2969
2970/*
1162 * Clean up all SCSI Initiator personality state in response
1163 * to a controller reset.
1164 */
1165static void
1166mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
1167{
1168 /*
1169 * The pending list is already run down by

--- 179 unchanged lines hidden (view full) ---

1349 int raid_passthru;
1350
1351 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
1352
1353 mpt = (struct mpt_softc *)cam_sim_softc(sim);
1354 raid_passthru = (sim == mpt->phydisk_sim);
1355
1356 tgt = ccb->ccb_h.target_id;
2971 * Clean up all SCSI Initiator personality state in response
2972 * to a controller reset.
2973 */
2974static void
2975mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
2976{
2977 /*
2978 * The pending list is already run down by

--- 179 unchanged lines hidden (view full) ---

3158 int raid_passthru;
3159
3160 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
3161
3162 mpt = (struct mpt_softc *)cam_sim_softc(sim);
3163 raid_passthru = (sim == mpt->phydisk_sim);
3164
3165 tgt = ccb->ccb_h.target_id;
1357 if (raid_passthru
1358 && ccb->ccb_h.func_code != XPT_PATH_INQ
1359 && ccb->ccb_h.func_code != XPT_RESET_BUS) {
3166 if (raid_passthru && ccb->ccb_h.func_code != XPT_PATH_INQ &&
3167 ccb->ccb_h.func_code != XPT_RESET_BUS) {
1360 CAMLOCK_2_MPTLOCK(mpt);
1361 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
3168 CAMLOCK_2_MPTLOCK(mpt);
3169 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
1362 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3170 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3171 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
1363 MPTLOCK_2_CAMLOCK(mpt);
1364 xpt_done(ccb);
1365 return;
1366 }
1367 MPTLOCK_2_CAMLOCK(mpt);
1368 }
1369
1370 ccb->ccb_h.ccb_mpt_ptr = mpt;
1371
1372 switch (ccb->ccb_h.func_code) {
1373 case XPT_SCSI_IO: /* Execute the requested I/O operation */
1374 /*
1375 * Do a couple of preliminary checks...
1376 */
1377 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
1378 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
3172 MPTLOCK_2_CAMLOCK(mpt);
3173 xpt_done(ccb);
3174 return;
3175 }
3176 MPTLOCK_2_CAMLOCK(mpt);
3177 }
3178
3179 ccb->ccb_h.ccb_mpt_ptr = mpt;
3180
3181 switch (ccb->ccb_h.func_code) {
3182 case XPT_SCSI_IO: /* Execute the requested I/O operation */
3183 /*
3184 * Do a couple of preliminary checks...
3185 */
3186 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
3187 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
1379 ccb->ccb_h.status = CAM_REQ_INVALID;
3188 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3189 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
1380 xpt_done(ccb);
1381 break;
1382 }
1383 }
1384 /* Max supported CDB length is 16 bytes */
1385 /* XXX Unless we implement the new 32byte message type */
1386 if (ccb->csio.cdb_len >
1387 sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
3190 xpt_done(ccb);
3191 break;
3192 }
3193 }
3194 /* Max supported CDB length is 16 bytes */
3195 /* XXX Unless we implement the new 32byte message type */
3196 if (ccb->csio.cdb_len >
3197 sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
1388 ccb->ccb_h.status = CAM_REQ_INVALID;
3198 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3199 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
1389 xpt_done(ccb);
1390 return;
1391 }
1392 ccb->csio.scsi_status = SCSI_STATUS_OK;
1393 mpt_start(sim, ccb);
1394 break;
1395
1396 case XPT_RESET_BUS:
1397 mpt_lprt(mpt, MPT_PRT_DEBUG, "XPT_RESET_BUS\n");
1398 if (!raid_passthru) {
1399 CAMLOCK_2_MPTLOCK(mpt);
1400 (void)mpt_bus_reset(mpt, /*sleep_ok*/FALSE);
1401 MPTLOCK_2_CAMLOCK(mpt);
1402 }
1403 /*
1404 * mpt_bus_reset is always successful in that it
1405 * will fall back to a hard reset should a bus
1406 * reset attempt fail.
1407 */
3200 xpt_done(ccb);
3201 return;
3202 }
3203 ccb->csio.scsi_status = SCSI_STATUS_OK;
3204 mpt_start(sim, ccb);
3205 break;
3206
3207 case XPT_RESET_BUS:
3208 mpt_lprt(mpt, MPT_PRT_DEBUG, "XPT_RESET_BUS\n");
3209 if (!raid_passthru) {
3210 CAMLOCK_2_MPTLOCK(mpt);
3211 (void)mpt_bus_reset(mpt, /*sleep_ok*/FALSE);
3212 MPTLOCK_2_CAMLOCK(mpt);
3213 }
3214 /*
3215 * mpt_bus_reset is always successful in that it
3216 * will fall back to a hard reset should a bus
3217 * reset attempt fail.
3218 */
3219 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1408 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
1409 xpt_done(ccb);
1410 break;
1411
1412 case XPT_ABORT:
3220 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3221 xpt_done(ccb);
3222 break;
3223
3224 case XPT_ABORT:
1413 /*
1414 * XXX: Need to implement
1415 */
1416 ccb->ccb_h.status = CAM_UA_ABORT;
3225 {
3226 union ccb *accb = ccb->cab.abort_ccb;
3227 CAMLOCK_2_MPTLOCK(mpt);
3228 switch (accb->ccb_h.func_code) {
3229 case XPT_ACCEPT_TARGET_IO:
3230 case XPT_IMMED_NOTIFY:
3231 ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
3232 break;
3233 case XPT_CONT_TARGET_IO:
3234 mpt_prt(mpt, "cannot abort active CTIOs yet\n");
3235 ccb->ccb_h.status = CAM_UA_ABORT;
3236 break;
3237 case XPT_SCSI_IO:
3238 ccb->ccb_h.status = CAM_UA_ABORT;
3239 break;
3240 default:
3241 ccb->ccb_h.status = CAM_REQ_INVALID;
3242 break;
3243 }
3244 MPTLOCK_2_CAMLOCK(mpt);
1417 xpt_done(ccb);
1418 break;
3245 xpt_done(ccb);
3246 break;
3247 }
1419
1420#ifdef CAM_NEW_TRAN_CODE
1421#define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS)
1422#else
1423#define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS)
1424#endif
1425#define DP_DISC_ENABLE 0x1
1426#define DP_DISC_DISABL 0x2

--- 8 unchanged lines hidden (view full) ---

1435#define DP_WIDTH (DP_WIDE|DP_NARROW)
1436
1437#define DP_SYNC 0x40
1438
1439 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
1440 cts = &ccb->cts;
1441 if (!IS_CURRENT_SETTINGS(cts)) {
1442 mpt_prt(mpt, "Attempt to set User settings\n");
3248
3249#ifdef CAM_NEW_TRAN_CODE
3250#define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS)
3251#else
3252#define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS)
3253#endif
3254#define DP_DISC_ENABLE 0x1
3255#define DP_DISC_DISABL 0x2

--- 8 unchanged lines hidden (view full) ---

3264#define DP_WIDTH (DP_WIDE|DP_NARROW)
3265
3266#define DP_SYNC 0x40
3267
3268 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
3269 cts = &ccb->cts;
3270 if (!IS_CURRENT_SETTINGS(cts)) {
3271 mpt_prt(mpt, "Attempt to set User settings\n");
1443 ccb->ccb_h.status = CAM_REQ_INVALID;
3272 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3273 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
1444 xpt_done(ccb);
1445 break;
1446 }
1447 if (mpt->is_fc == 0 && mpt->is_sas == 0) {
1448 uint8_t dval = 0;
1449 u_int period = 0, offset = 0;
1450#ifndef CAM_NEW_TRAN_CODE
1451 if (cts->valid & CCB_TRANS_DISC_VALID) {

--- 64 unchanged lines hidden (view full) ---

1516 }
1517 if (dval & DP_TQING_ENABLE) {
1518 mpt->mpt_tag_enable |= (1 << tgt);
1519 } else if (dval & DP_TQING_DISABL) {
1520 mpt->mpt_tag_enable &= ~(1 << tgt);
1521 }
1522 if (dval & DP_WIDTH) {
1523 if (mpt_setwidth(mpt, tgt, dval & DP_WIDE)) {
3274 xpt_done(ccb);
3275 break;
3276 }
3277 if (mpt->is_fc == 0 && mpt->is_sas == 0) {
3278 uint8_t dval = 0;
3279 u_int period = 0, offset = 0;
3280#ifndef CAM_NEW_TRAN_CODE
3281 if (cts->valid & CCB_TRANS_DISC_VALID) {

--- 64 unchanged lines hidden (view full) ---

3346 }
3347 if (dval & DP_TQING_ENABLE) {
3348 mpt->mpt_tag_enable |= (1 << tgt);
3349 } else if (dval & DP_TQING_DISABL) {
3350 mpt->mpt_tag_enable &= ~(1 << tgt);
3351 }
3352 if (dval & DP_WIDTH) {
3353 if (mpt_setwidth(mpt, tgt, dval & DP_WIDE)) {
1524mpt_prt(mpt, "Set width Failed!\n");
1525 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3354 mpt_prt(mpt, "Set width Failed!\n");
3355 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3356 mpt_set_ccb_status(ccb,
3357 CAM_REQ_CMP_ERR);
1526 MPTLOCK_2_CAMLOCK(mpt);
1527 xpt_done(ccb);
1528 break;
1529 }
1530 }
1531 if (dval & DP_SYNC) {
1532 if (mpt_setsync(mpt, tgt, period, offset)) {
3358 MPTLOCK_2_CAMLOCK(mpt);
3359 xpt_done(ccb);
3360 break;
3361 }
3362 }
3363 if (dval & DP_SYNC) {
3364 if (mpt_setsync(mpt, tgt, period, offset)) {
1533mpt_prt(mpt, "Set sync Failed!\n");
1534 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3365 mpt_prt(mpt, "Set sync Failed!\n");
3366 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3367 mpt_set_ccb_status(ccb,
3368 CAM_REQ_CMP_ERR);
1535 MPTLOCK_2_CAMLOCK(mpt);
1536 xpt_done(ccb);
1537 break;
1538 }
1539 }
1540 MPTLOCK_2_CAMLOCK(mpt);
1541 mpt_lprt(mpt, MPT_PRT_DEBUG,
1542 "SET tgt %d flags %x period %x off %x\n",
1543 tgt, dval, period, offset);
1544 }
3369 MPTLOCK_2_CAMLOCK(mpt);
3370 xpt_done(ccb);
3371 break;
3372 }
3373 }
3374 MPTLOCK_2_CAMLOCK(mpt);
3375 mpt_lprt(mpt, MPT_PRT_DEBUG,
3376 "SET tgt %d flags %x period %x off %x\n",
3377 tgt, dval, period, offset);
3378 }
1545 ccb->ccb_h.status = CAM_REQ_CMP;
3379 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3380 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
1546 xpt_done(ccb);
1547 break;
1548
1549 case XPT_GET_TRAN_SETTINGS:
1550 cts = &ccb->cts;
1551 if (mpt->is_fc) {
1552#ifndef CAM_NEW_TRAN_CODE
1553 /*

--- 163 unchanged lines hidden (view full) ---

1717 }
1718#endif
1719 mpt_lprt(mpt, MPT_PRT_DEBUG,
1720 "GET %s tgt %d flags %x period %x offset %x\n",
1721 IS_CURRENT_SETTINGS(cts)
1722 ? "ACTIVE" : "NVRAM",
1723 tgt, dval, pval, oval);
1724 }
3381 xpt_done(ccb);
3382 break;
3383
3384 case XPT_GET_TRAN_SETTINGS:
3385 cts = &ccb->cts;
3386 if (mpt->is_fc) {
3387#ifndef CAM_NEW_TRAN_CODE
3388 /*

--- 163 unchanged lines hidden (view full) ---

3552 }
3553#endif
3554 mpt_lprt(mpt, MPT_PRT_DEBUG,
3555 "GET %s tgt %d flags %x period %x offset %x\n",
3556 IS_CURRENT_SETTINGS(cts)
3557 ? "ACTIVE" : "NVRAM",
3558 tgt, dval, pval, oval);
3559 }
1725 ccb->ccb_h.status = CAM_REQ_CMP;
3560 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3561 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
1726 xpt_done(ccb);
1727 break;
1728
1729 case XPT_CALC_GEOMETRY:
1730 {
1731 struct ccb_calc_geometry *ccg;
1732
1733 ccg = &ccb->ccg;
1734 if (ccg->block_size == 0) {
3562 xpt_done(ccb);
3563 break;
3564
3565 case XPT_CALC_GEOMETRY:
3566 {
3567 struct ccb_calc_geometry *ccg;
3568
3569 ccg = &ccb->ccg;
3570 if (ccg->block_size == 0) {
1735 ccb->ccb_h.status = CAM_REQ_INVALID;
3571 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3572 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
1736 xpt_done(ccb);
1737 break;
1738 }
3573 xpt_done(ccb);
3574 break;
3575 }
1739
1740 mpt_calc_geometry(ccg, /*extended*/1);
3576 mpt_calc_geometry(ccg, /*extended*/1);
3577 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1741 xpt_done(ccb);
1742 break;
1743 }
1744 case XPT_PATH_INQ: /* Path routing inquiry */
1745 {
1746 struct ccb_pathinq *cpi = &ccb->cpi;
1747
1748 cpi->version_num = 1;

--- 12 unchanged lines hidden (view full) ---

1761 } else if (mpt->is_sas) {
1762 cpi->base_transfer_speed = 300000;
1763 } else {
1764 cpi->base_transfer_speed = 3300;
1765 cpi->hba_inquiry |=
1766 PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
1767 }
1768 } else if (mpt->is_fc) {
3578 xpt_done(ccb);
3579 break;
3580 }
3581 case XPT_PATH_INQ: /* Path routing inquiry */
3582 {
3583 struct ccb_pathinq *cpi = &ccb->cpi;
3584
3585 cpi->version_num = 1;

--- 12 unchanged lines hidden (view full) ---

3598 } else if (mpt->is_sas) {
3599 cpi->base_transfer_speed = 300000;
3600 } else {
3601 cpi->base_transfer_speed = 3300;
3602 cpi->hba_inquiry |=
3603 PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3604 }
3605 } else if (mpt->is_fc) {
1769/* XXX SHOULD BE BASED UPON IOC FACTS XXX */
3606 /* XXX SHOULD BE BASED UPON IOC FACTS XXX */
1770 cpi->max_target = 255;
1771 cpi->hba_misc = PIM_NOBUSRESET;
3607 cpi->max_target = 255;
3608 cpi->hba_misc = PIM_NOBUSRESET;
1772 cpi->initiator_id = cpi->max_target + 1;
3609 cpi->initiator_id = mpt->mpt_ini_id;
1773 cpi->base_transfer_speed = 100000;
1774 cpi->hba_inquiry = PI_TAG_ABLE;
1775 } else if (mpt->is_sas) {
1776 cpi->max_target = 63; /* XXX */
1777 cpi->hba_misc = PIM_NOBUSRESET;
3610 cpi->base_transfer_speed = 100000;
3611 cpi->hba_inquiry = PI_TAG_ABLE;
3612 } else if (mpt->is_sas) {
3613 cpi->max_target = 63; /* XXX */
3614 cpi->hba_misc = PIM_NOBUSRESET;
1778 cpi->initiator_id = cpi->max_target;
3615 cpi->initiator_id = mpt->mpt_ini_id;
1779 cpi->base_transfer_speed = 300000;
1780 cpi->hba_inquiry = PI_TAG_ABLE;
1781 } else {
1782 cpi->initiator_id = mpt->mpt_ini_id;
1783 cpi->base_transfer_speed = 3300;
1784 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
1785 cpi->hba_misc = 0;
1786 cpi->max_target = 15;
1787 }
1788
3616 cpi->base_transfer_speed = 300000;
3617 cpi->hba_inquiry = PI_TAG_ABLE;
3618 } else {
3619 cpi->initiator_id = mpt->mpt_ini_id;
3620 cpi->base_transfer_speed = 3300;
3621 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3622 cpi->hba_misc = 0;
3623 cpi->max_target = 15;
3624 }
3625
3626 if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3627 cpi->hba_misc |= PIM_NOINITIATOR;
3628 }
3629 if ((mpt->role & MPT_ROLE_TARGET) != 0) {
3630 cpi->target_sprt =
3631 PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3632 } else {
3633 cpi->target_sprt = 0;
3634 }
1789 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1790 strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
1791 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1792 cpi->unit_number = cam_sim_unit(sim);
1793 cpi->ccb_h.status = CAM_REQ_CMP;
1794 xpt_done(ccb);
1795 break;
1796 }
3635 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3636 strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3637 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3638 cpi->unit_number = cam_sim_unit(sim);
3639 cpi->ccb_h.status = CAM_REQ_CMP;
3640 xpt_done(ccb);
3641 break;
3642 }
3643 case XPT_EN_LUN: /* Enable LUN as a target */
3644 {
3645 int result;
3646
3647 CAMLOCK_2_MPTLOCK(mpt);
3648 if (ccb->cel.enable)
3649 result = mpt_enable_lun(mpt,
3650 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3651 else
3652 result = mpt_disable_lun(mpt,
3653 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3654 MPTLOCK_2_CAMLOCK(mpt);
3655 if (result == 0) {
3656 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3657 } else {
3658 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3659 }
3660 xpt_done(ccb);
3661 break;
3662 }
3663 case XPT_NOTIFY_ACK: /* recycle notify ack */
3664 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */
3665 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
3666 {
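		/*
		 * Target mode resources: ATIOs and INOTs are queued on the
		 * per-lun (or wildcard) resource lists so that incoming
		 * commands can claim them; a recycled NOTIFY_ACK is simply
		 * noted and left in progress.
		 */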
3667 tgt_resource_t *trtp;
3668 lun_id_t lun = ccb->ccb_h.target_lun;
3669 ccb->ccb_h.sim_priv.entries[0].field = 0;
3670 ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3671 ccb->ccb_h.flags = 0;
3672
3673 if (lun == CAM_LUN_WILDCARD) {
3674 if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3675 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3676 xpt_done(ccb);
3677 break;
3678 }
3679 trtp = &mpt->trt_wildcard;
3680 } else if (lun >= MPT_MAX_LUNS) {
3681 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3682 xpt_done(ccb);
3683 break;
3684 } else {
3685 trtp = &mpt->trt[lun];
3686 }
3687 CAMLOCK_2_MPTLOCK(mpt);
3688 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3689 mpt_lprt(mpt, MPT_PRT_DEBUG1,
3690 "Put FREE ATIO %p lun %d\n", ccb, lun);
3691 STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3692 sim_links.stqe);
3693 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
3694 mpt_lprt(mpt, MPT_PRT_DEBUG1,
3695 "Put FREE INOT lun %d\n", lun);
3696 STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3697 sim_links.stqe);
3698 } else {
3699 mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3700 }
3701 mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3702 MPTLOCK_2_CAMLOCK(mpt);
3703 break;
3704 }
3705 case XPT_CONT_TARGET_IO:
3706 CAMLOCK_2_MPTLOCK(mpt);
3707 mpt_target_start_io(mpt, ccb);
3708 MPTLOCK_2_CAMLOCK(mpt);
3709 break;
1797 default:
1798 ccb->ccb_h.status = CAM_REQ_INVALID;
1799 xpt_done(ccb);
1800 break;
1801 }
1802}
1803
1804static int

--- 82 unchanged lines hidden (view full) ---

1887mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
1888{
1889#if __FreeBSD_version >= 500000
1890 cam_calc_geometry(ccg, extended);
1891#else
1892 uint32_t size_mb;
1893 uint32_t secs_per_cylinder;
1894
3710 default:
3711 ccb->ccb_h.status = CAM_REQ_INVALID;
3712 xpt_done(ccb);
3713 break;
3714 }
3715}
3716
3717static int

--- 82 unchanged lines hidden (view full) ---

3800mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
3801{
3802#if __FreeBSD_version >= 500000
3803 cam_calc_geometry(ccg, extended);
3804#else
3805 uint32_t size_mb;
3806 uint32_t secs_per_cylinder;
3807
3808 if (ccg->block_size == 0) {
3809 ccg->ccb_h.status = CAM_REQ_INVALID;
3810 return;
3811 }
1895 size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
1896 if (size_mb > 1024 && extended) {
1897 ccg->heads = 255;
1898 ccg->secs_per_track = 63;
1899 } else {
1900 ccg->heads = 64;
1901 ccg->secs_per_track = 32;
1902 }

--- 10 unchanged lines hidden (view full) ---

1913 int error;
1914
1915 error = mpt_kthread_create(mpt_recovery_thread, mpt,
1916 &mpt->recovery_thread, /*flags*/0,
1917 /*altstack*/0, "mpt_recovery%d", mpt->unit);
1918 return (error);
1919}
1920
3812 size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
3813 if (size_mb > 1024 && extended) {
3814 ccg->heads = 255;
3815 ccg->secs_per_track = 63;
3816 } else {
3817 ccg->heads = 64;
3818 ccg->secs_per_track = 32;
3819 }

--- 10 unchanged lines hidden (view full) ---

3830 int error;
3831
3832 error = mpt_kthread_create(mpt_recovery_thread, mpt,
3833 &mpt->recovery_thread, /*flags*/0,
3834 /*altstack*/0, "mpt_recovery%d", mpt->unit);
3835 return (error);
3836}
3837
1921/*
1922 * Lock is not held on entry.
1923 */
1924static void
1925mpt_terminate_recovery_thread(struct mpt_softc *mpt)
1926{
3838static void
3839mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3840{
1927
1928 MPT_LOCK(mpt);
1929 if (mpt->recovery_thread == NULL) {
3841 if (mpt->recovery_thread == NULL) {
1930 MPT_UNLOCK(mpt);
1931 return;
1932 }
1933 mpt->shutdwn_recovery = 1;
1934 wakeup(mpt);
1935 /*
1936 * Sleep on a slightly different location
1937 * for this interlock just for added safety.
1938 */
1939 mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
3842 return;
3843 }
3844 mpt->shutdwn_recovery = 1;
3845 wakeup(mpt);
3846 /*
3847 * Sleep on a slightly different location
3848 * for this interlock just for added safety.
3849 */
3850 mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
1940 MPT_UNLOCK(mpt);
1941}
1942
1943static void
1944mpt_recovery_thread(void *arg)
1945{
1946 struct mpt_softc *mpt;
1947
1948#if __FreeBSD_version >= 500000
1949 mtx_lock(&Giant);
1950#endif
1951 mpt = (struct mpt_softc *)arg;
1952 MPT_LOCK(mpt);
1953 for (;;) {
1954
1955 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0
1956 && mpt->shutdwn_recovery == 0)
1957 mpt_sleep(mpt, mpt, PUSER, "idle", 0);
1958
3851}
3852
3853static void
3854mpt_recovery_thread(void *arg)
3855{
3856 struct mpt_softc *mpt;
3857
3858#if __FreeBSD_version >= 500000
3859 mtx_lock(&Giant);
3860#endif
3861 mpt = (struct mpt_softc *)arg;
3862 MPT_LOCK(mpt);
3863 for (;;) {
3864
3865 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0
3866 && mpt->shutdwn_recovery == 0)
3867 mpt_sleep(mpt, mpt, PUSER, "idle", 0);
3868
1959 if (mpt->shutdwn_recovery != 0)
3869 if (mpt->shutdwn_recovery != 0) {
1960 break;
3870 break;
1961
1962 MPT_UNLOCK(mpt);
3871 }
1963 mpt_recover_commands(mpt);
3872 mpt_recover_commands(mpt);
1964 MPT_LOCK(mpt);
1965 }
1966 mpt->recovery_thread = NULL;
1967 wakeup(&mpt->recovery_thread);
1968 MPT_UNLOCK(mpt);
1969#if __FreeBSD_version >= 500000
1970 mtx_unlock(&Giant);
1971#endif
1972 kthread_exit(0);

--- 38 unchanged lines hidden ---

2011 tmf_req->TaskMsgContext = abort_ctx;
2012
2013 mpt_lprt(mpt, MPT_PRT_INFO,
2014 "Issuing TMF %p with MsgContext of 0x%x\n", tmf_req,
2015 tmf_req->MsgContext);
2016 if (mpt->verbose > MPT_PRT_DEBUG)
2017 mpt_print_request(tmf_req);
2018
3873 }
3874 mpt->recovery_thread = NULL;
3875 wakeup(&mpt->recovery_thread);
3876 MPT_UNLOCK(mpt);
3877#if __FreeBSD_version >= 500000
3878 mtx_unlock(&Giant);
3879#endif
3880 kthread_exit(0);

--- 38 unchanged lines hidden ---

3919 tmf_req->TaskMsgContext = abort_ctx;
3920
3921 mpt_lprt(mpt, MPT_PRT_INFO,
3922 "Issuing TMF %p with MsgContext of 0x%x\n", tmf_req,
3923 tmf_req->MsgContext);
3924 if (mpt->verbose > MPT_PRT_DEBUG)
3925 mpt_print_request(tmf_req);
3926
2019 error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
2020 if (error != 0)
3927 if (mpt->is_fc || mpt->is_sas) {
3928 mpt_send_cmd(mpt, mpt->tmf_req);
3929 error = MPT_OK;
3930 } else {
3931 error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
3932 }
3933 if (error != MPT_OK) {
2021 mpt_reset(mpt, /*reinit*/TRUE);
3934 mpt_reset(mpt, /*reinit*/TRUE);
3935 }
2022 return (error);
2023}
2024
3936 return (error);
3937}
3938
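/*
 * Editor's note on the branch above: FC and SAS IOCs accept task
 * management frames through the ordinary request queue (mpt_send_cmd),
 * while parallel SCSI IOCs take them via the slower doorbell handshake
 * (mpt_send_handshake_cmd). A sketch of a caller, mirroring the use in
 * mpt_recover_commands() below (argument values hypothetical):
 *
 *	error = mpt_scsi_send_tmf(mpt,
 *	    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
 *	    0, 0, tgt_id, lun, abort_ctx, TRUE);
 *	if (error != MPT_OK) {
 *		... mpt_scsi_send_tmf() has already reset the controller ...
 *	}
 */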
3939static void
3940mpt_fc_add_els(struct mpt_softc *mpt, request_t *req)
3941{
3942 MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
3943 PTR_SGE_TRANSACTION32 tep;
3944 PTR_SGE_SIMPLE32 se;
3945 bus_addr_t paddr;
3946
3947 paddr = req->req_pbuf;
3948 paddr += MPT_RQSL(mpt);
3949
3950 fc = req->req_vbuf;
3951 memset(fc, 0, MPT_REQUEST_AREA);
3952 fc->BufferCount = 1;
3953 fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
3954 fc->MsgContext = htole32(req->index | fc_els_handler_id);
3955
3956 /*
3957	 * Okay, set up the ELS buffer pointers. These consist of a
3958	 * TE SGL element (with a detail length of zero), followed
3959	 * by a SIMPLE SGL element which holds the address of the
3960	 * buffer.
3961 */
3962
3963 tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
3964
3965 tep->ContextSize = 4;
3966 tep->Flags = 0;
3967 tep->TransactionContext[0] = htole32(req->index | fc_els_handler_id);
3968
3969 se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
3970 se->FlagsLength =
3971 MPI_SGE_FLAGS_HOST_TO_IOC |
3972 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
3973 MPI_SGE_FLAGS_LAST_ELEMENT |
3974 MPI_SGE_FLAGS_END_OF_LIST |
3975 MPI_SGE_FLAGS_END_OF_BUFFER;
3976 se->FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
3977 se->FlagsLength |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
3978 se->Address = (uint32_t) paddr;
3979 mpt_check_doorbell(mpt);
3980 mpt_send_cmd(mpt, req);
3981}
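
/*
 * Editor's sketch of the layout built above (32-bit SGEs assumed, as
 * this routine uses SGE_TRANSACTION32/SGE_SIMPLE32):
 *
 *	req->req_pbuf + 0:             LINK_SERVICE_BUFFER_POST request
 *	    TRANSACTION32 element      (ContextSize 4, zero detail length)
 *	    SIMPLE32 element           address = req->req_pbuf + MPT_RQSL(mpt)
 *	                               length  = MPT_NRFM(mpt) - MPT_RQSL(mpt)
 *	req->req_pbuf + MPT_RQSL(mpt): ELS payload buffer
 */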
3982
3983static void
3984mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
3985{
3986 PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
3987 PTR_CMD_BUFFER_DESCRIPTOR cb;
3988 bus_addr_t paddr;
3989
3990 paddr = req->req_pbuf;
3991 paddr += MPT_RQSL(mpt);
3992 memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
3993
3994 fc = req->req_vbuf;
3995 fc->BufferCount = 1;
3996 fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
3997 fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
3998
3999 cb = &fc->Buffer[0];
4000 cb->IoIndex = htole16(ioindex);
4001 cb->u.PhysicalAddress32 = (U32) paddr;
4002
4003 mpt_check_doorbell(mpt);
4004 mpt_send_cmd(mpt, req);
4005}
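
/*
 * Editor's note: each buffer posted above gives the IOC one IoIndex-tagged
 * landing area (at req->req_pbuf + MPT_RQSL(mpt)) into which an incoming
 * command can be DMA'd; the completion is then matched back up via the
 * scsi_tgt_handler_id/request-index pair encoded in MsgContext.
 */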
4006
4007static void
4008mpt_add_target_commands(struct mpt_softc *mpt)
4009{
4010 int i, max;
4011
4012 if (mpt->tgt_cmd_ptrs) {
4013 return;
4014 }
4015
4016 max = MPT_MAX_REQUESTS(mpt) >> 1;
4017 if (max > mpt->mpt_max_tgtcmds) {
4018 max = mpt->mpt_max_tgtcmds;
4019 }
4020 mpt->tgt_cmd_ptrs =
4021 malloc(max * sizeof (void *), M_DEVBUF, M_NOWAIT | M_ZERO);
4022 if (mpt->tgt_cmd_ptrs == NULL) {
4023 mpt_prt(mpt, "could not allocate cmdptrs\n");
4024 return;
4025 }
4026 mpt->tgt_cmds_allocated = max;
4027
4028 for (i = 0; i < max; i++) {
4029 request_t *req;
4030
4031 req = mpt_get_request(mpt, FALSE);
4032 if (req == NULL) {
4033 break;
4034 }
4035 mpt->tgt_cmd_ptrs[i] = req;
4036 mpt_post_target_command(mpt, req, i);
4037 }
4038
4039 if (i == 0) {
4040 mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
4041 free(mpt->tgt_cmd_ptrs, M_DEVBUF);
4042 mpt->tgt_cmd_ptrs = NULL;
4043 mpt->tgt_cmds_allocated = 0;
4044 } else if (i < max) {
4045 mpt_lprt(mpt, MPT_PRT_WARN, "added %d of %d target bufs\n",
4046 i, max);
4047 }
4048}
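
/*
 * Editor's note, worked sizing example with hypothetical values: if
 * MPT_MAX_REQUESTS(mpt) is 256 and mpt->mpt_max_tgtcmds is 64, half of
 * the request pool (128) exceeds the cap, so 64 requests are reserved
 * and posted as target command buffers.
 */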
4049
4050static int
4051mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4052{
4053 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4054 mpt->twildcard = 1;
4055 } else if (lun >= MPT_MAX_LUNS) {
4056 return (EINVAL);
4057 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4058 return (EINVAL);
4059 }
4060 if (mpt->tenabled == 0) {
4061#if 0
4062 if (mpt->is_fc) {
4063 (void) mpt_fc_reset_link(mpt, 0);
4064 }
4065#endif
4066 mpt->tenabled = 1;
4067 }
4068 if (lun == CAM_LUN_WILDCARD) {
4069 mpt->trt_wildcard.enabled = 1;
4070 } else {
4071 mpt->trt[lun].enabled = 1;
4072 }
4073 return (0);
4074}
4075
4076static int
4077mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4078{
4079 int i;
4080 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4081 mpt->twildcard = 0;
4082 } else if (lun >= MPT_MAX_LUNS) {
4083 return (EINVAL);
4084 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4085 return (EINVAL);
4086 }
4087 if (lun == CAM_LUN_WILDCARD) {
4088 mpt->trt_wildcard.enabled = 0;
4089 } else {
4090 mpt->trt[lun].enabled = 0;
4091 }
4092 for (i = 0; i < MPT_MAX_LUNS; i++) {
4093		if (mpt->trt[i].enabled) {
4094 break;
4095 }
4096 }
4097 if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
4098 mpt->tenabled = 0;
4099#if 0
4100 if (mpt->is_fc) {
4101 (void) mpt_fc_reset_link(mpt, 0);
4102 }
4103#endif
4104 }
4105 return (0);
4106}
4107
2025/*
4108/*
4109 * Called with MPT lock held
4110 */
4111static void
4112mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
4113{
4114 struct ccb_scsiio *csio = &ccb->csio;
4115 request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
4116 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
4117
4118
4119 if (tgt->state != TGT_STATE_IN_CAM) {
4120 mpt_prt(mpt, "tag 0x%08x in state %x when starting I/O\n",
4121 csio->tag_id, tgt->state);
4122 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4123 MPTLOCK_2_CAMLOCK(mpt);
4124 xpt_done(ccb);
4125 CAMLOCK_2_MPTLOCK(mpt);
4126 return;
4127 }
4128
4129 if (csio->dxfer_len) {
4130 bus_dmamap_callback_t *cb;
4131 PTR_MSG_TARGET_ASSIST_REQUEST ta;
4132 request_t *req;
4133
4134 KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
4135 ("dxfer_len %u but direction is NONE\n", csio->dxfer_len));
4136
4137 req = mpt_get_request(mpt, FALSE);
4138 if (req == NULL) {
4139 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4140 MPTLOCK_2_CAMLOCK(mpt);
4141 xpt_done(ccb);
4142 CAMLOCK_2_MPTLOCK(mpt);
4143 return;
4144 }
4145
4146 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4147 if (sizeof (bus_addr_t) > 4) {
4148 cb = mpt_execute_req_a64;
4149 } else {
4150 cb = mpt_execute_req;
4151 }
4152
4153 req->ccb = ccb;
4154 ccb->ccb_h.ccb_req_ptr = req;
4155
4156 /*
4157 * Record the currently active ccb and the
4158 * request for it in our target state area.
4159 */
4160 tgt->ccb = ccb;
4161 tgt->req = req;
4162
4163 memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4164 ta = req->req_vbuf;
4165
4166 if (mpt->is_fc) {
4167 ;
4168		} else if (mpt->is_sas) {
4169 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4170 cmd_req->req_vbuf;
4171 ta->QueueTag = ssp->InitiatorTag;
4172 } else {
4173 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4174 cmd_req->req_vbuf;
4175 ta->QueueTag = sp->Tag;
4176 }
4177 ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4178 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4179 ta->ReplyWord = htole32(tgt->reply_desc);
4180		if (csio->ccb_h.target_lun >= 256) {
4181 ta->LUN[0] =
4182 0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
4183 ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
4184 } else {
4185 ta->LUN[1] = csio->ccb_h.target_lun;
4186 }
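		/*
		 * Editor's note: the encoding above follows SAM-2 LUN
		 * addressing. Worked example: LUN 0x1234 becomes
		 * LUN[0] = 0x40 | 0x12 = 0x52, LUN[1] = 0x34; a small
		 * LUN such as 5 stays single-level: LUN[0] = 0x00,
		 * LUN[1] = 0x05.
		 */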
4187
4188 ta->RelativeOffset = tgt->bytes_xfered;
4189 ta->DataLength = ccb->csio.dxfer_len;
4190 if (ta->DataLength > tgt->resid) {
4191 ta->DataLength = tgt->resid;
4192 }
4193
4194 /*
4195 * XXX Should be done after data transfer completes?
4196 */
4197 tgt->resid -= csio->dxfer_len;
4198 tgt->bytes_xfered += csio->dxfer_len;
4199
4200 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
4201 ta->TargetAssistFlags |=
4202 TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4203 }
4204
4205#ifdef WE_TRUST_AUTO_GOOD_STATUS
4206 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
4207 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
4208 ta->TargetAssistFlags |=
4209 TARGET_ASSIST_FLAGS_AUTO_STATUS;
4210 }
4211#endif
4212 tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
4213
4214 mpt_lprt(mpt, MPT_PRT_DEBUG,
4215 "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
4216 "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
4217 tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
4218
4219 MPTLOCK_2_CAMLOCK(mpt);
4220 if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
4221 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
4222 int error;
4223 int s = splsoftvm();
4224 error = bus_dmamap_load(mpt->buffer_dmat,
4225 req->dmap, csio->data_ptr, csio->dxfer_len,
4226 cb, req, 0);
4227 splx(s);
4228 if (error == EINPROGRESS) {
4229 xpt_freeze_simq(mpt->sim, 1);
4230 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4231 }
4232 } else {
4233 /*
4234 * We have been given a pointer to single
4235 * physical buffer.
4236 */
4237 struct bus_dma_segment seg;
4238 seg.ds_addr = (bus_addr_t)
4239 (vm_offset_t)csio->data_ptr;
4240 seg.ds_len = csio->dxfer_len;
4241 (*cb)(req, &seg, 1, 0);
4242 }
4243 } else {
4244 /*
4245 * We have been given a list of addresses.
4246 * This case could be easily supported but they are not
4247 * currently generated by the CAM subsystem so there
4248 * is no point in wasting the time right now.
4249 */
4250 struct bus_dma_segment *sgs;
4251 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
4252 (*cb)(req, NULL, 0, EFAULT);
4253 } else {
4254 /* Just use the segments provided */
4255 sgs = (struct bus_dma_segment *)csio->data_ptr;
4256 (*cb)(req, sgs, csio->sglist_cnt, 0);
4257 }
4258 }
4259 CAMLOCK_2_MPTLOCK(mpt);
4260 } else {
4261 uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4262
4263 /*
4264 * XXX: I don't know why this seems to happen, but
4265 * XXX: completing the CCB seems to make things happy.
4266 * XXX: This seems to happen if the initiator requests
4267 * XXX: enough data that we have to do multiple CTIOs.
4268 */
4269 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4270 mpt_lprt(mpt, MPT_PRT_DEBUG,
4271 "Meaningless STATUS CCB (%p): flags %x status %x "
4272 "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
4273 ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
4274 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4275 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4276 tgt->flags |= BOGUS_JO;
4277 MPTLOCK_2_CAMLOCK(mpt);
4278 xpt_done(ccb);
4279 CAMLOCK_2_MPTLOCK(mpt);
4280 return;
4281 }
4282 if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4283 sp = sense;
4284 memcpy(sp, &csio->sense_data,
4285 min(csio->sense_len, MPT_SENSE_SIZE));
4286 }
4287 mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
4288 }
4289}
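
/*
 * Editor's sketch of the flow implemented above: an ATIO handed up to CAM
 * comes back as one or more XPT_CONT_TARGET_IO CCBs. Each data-bearing
 * CTIO becomes a TARGET_ASSIST request (scatter/gather set up via
 * mpt_execute_req or mpt_execute_req_a64), and a status-only CTIO is
 * completed through mpt_scsi_tgt_status().
 */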
4290
4291/*
4292 * Abort queued up CCBs
4293 */
4294static cam_status
4295mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
4296{
4297 struct mpt_hdr_stailq *lp;
4298 struct ccb_hdr *srch;
4299 int found = 0;
4300 union ccb *accb = ccb->cab.abort_ccb;
4301 tgt_resource_t *trtp;
4302
4303 mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
4304
4305 if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
4306 trtp = &mpt->trt_wildcard;
4307 } else {
4308 trtp = &mpt->trt[ccb->ccb_h.target_lun];
4309 }
4310
4311 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
4312 lp = &trtp->atios;
4313 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
4314 lp = &trtp->inots;
4315 } else {
4316 return (CAM_REQ_INVALID);
4317 }
4318
4319 STAILQ_FOREACH(srch, lp, sim_links.stqe) {
4320 if (srch == &accb->ccb_h) {
4321 found = 1;
4322 STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
4323 break;
4324 }
4325 }
4326 if (found) {
4327 accb->ccb_h.status = CAM_REQ_ABORTED;
4328 xpt_done(accb);
4329 return (CAM_REQ_CMP);
4330 }
4331	mpt_prt(mpt, "mpt_abort_target_ccb: CCB %p not found\n", accb);
4332 return (CAM_PATH_INVALID);
4333}
4334
4335/*
4336 * Ask the MPT to abort the current target command
4337 */
4338static cam_status
4339mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
4340{
4341 int error;
4342 request_t *req;
4343 PTR_MSG_TARGET_MODE_ABORT abtp;
4344
4345 req = mpt_get_request(mpt, FALSE);
4346 if (req == NULL) {
4347 return (CAM_RESRC_UNAVAIL);
4348 }
4349 abtp = req->req_vbuf;
4350 memset(abtp, 0, sizeof (*abtp));
4351
4352 abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4353 abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
4354 abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
4355 abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
4356 if (mpt->is_fc || mpt->is_sas) {
4357 mpt_send_cmd(mpt, req);
4358 } else {
4359		error = mpt_send_handshake_cmd(mpt, sizeof (*abtp), abtp);
4360 }
4361 return (CAM_REQ_INPROG);
4362}
4363
4364/*
2026 * When a command times out, it is placed on the request_timeout_list
2027 * and we wake our recovery thread. The MPT-Fusion architecture supports
2028 * only a single TMF operation at a time, so we serially abort/BDR, etc.,
2029 * the timed-out transactions. The next TMF is issued either by the
2030 * completion handler of the current TMF waking our recovery thread,
2031 * or by the TMF timeout handler causing a hard reset sequence.
2032 */
2033static void
2034mpt_recover_commands(struct mpt_softc *mpt)
2035{
2036 request_t *req;
2037 union ccb *ccb;
2038 int error;
2039
4365 * When a command times out, it is placed on the request_timeout_list
4366 * and we wake our recovery thread. The MPT-Fusion architecture supports
4367 * only a single TMF operation at a time, so we serially abort/BDR, etc.,
4368 * the timed-out transactions. The next TMF is issued either by the
4369 * completion handler of the current TMF waking our recovery thread,
4370 * or by the TMF timeout handler causing a hard reset sequence.
4371 */
4372static void
4373mpt_recover_commands(struct mpt_softc *mpt)
4374{
4375 request_t *req;
4376 union ccb *ccb;
4377 int error;
4378
2040 MPT_LOCK(mpt);
2041 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
2042 /*
2043		 * No work to do; leave.
2044 */
2045 mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
4379 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4380 /*
4381		 * No work to do; leave.
4382 */
4383 mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
2046 MPT_UNLOCK(mpt);
2047 return;
2048 }
2049
2050 /*
2051 * Flush any commands whose completion coincides with their timeout.
2052 */
2053 mpt_intr(mpt);
2054

--- 4 unchanged lines hidden ---

2059 * that either the timeout value was on
2060 * the hairy edge of what the device
2061 * requires or - more likely - interrupts
2062 * are not happening.
2063 */
2064 mpt_prt(mpt, "Timedout requests already complete. "
2065 "Interrupts may not be functioning.\n");
2066 mpt_enable_ints(mpt);
4384 return;
4385 }
4386
4387 /*
4388 * Flush any commands whose completion coincides with their timeout.
4389 */
4390 mpt_intr(mpt);
4391

--- 4 unchanged lines hidden ---

4396 * that either the timeout value was on
4397 * the hairy edge of what the device
4398 * requires or - more likely - interrupts
4399 * are not happening.
4400 */
4401 mpt_prt(mpt, "Timedout requests already complete. "
4402 "Interrupts may not be functioning.\n");
4403 mpt_enable_ints(mpt);
2067 MPT_UNLOCK(mpt);
2068 return;
2069 }
2070
2071 /*
2072 * We have no visibility into the current state of the
2073 * controller, so attempt to abort the commands in the
2074	 * order they timed out.
2075 */
2076 while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
2077 u_int status;
4404 return;
4405 }
4406
4407 /*
4408 * We have no visibility into the current state of the
4409 * controller, so attempt to abort the commands in the
4410	 * order they timed out.
4411 */
4412 while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
4413 u_int status;
2078 u_int32_t serno = req->serno;
2079
4414
2080 mpt_prt(mpt, "Attempting to Abort Req %p:%u\n", req, serno);
4415 mpt_prt(mpt, "Attempting to Abort Req %p\n", req);
4416
2081 ccb = req->ccb;
2082 mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
2083 error = mpt_scsi_send_tmf(mpt,
2084 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4417 ccb = req->ccb;
4418 mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
4419 error = mpt_scsi_send_tmf(mpt,
4420 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
2085 /*MsgFlags*/0, mpt->bus, ccb->ccb_h.target_id,
4421 /*MsgFlags*/0, /*Bus*/0, ccb->ccb_h.target_id,
2086 ccb->ccb_h.target_lun,
2087 htole32(req->index | scsi_io_handler_id), /*sleep_ok*/TRUE);
2088
2089 if (error != 0) {
4422 ccb->ccb_h.target_lun,
4423 htole32(req->index | scsi_io_handler_id), /*sleep_ok*/TRUE);
4424
4425 if (error != 0) {
2090 mpt_prt(mpt, "Abort Req %p:%u failed to start TMF\n",
2091 req, serno);
2092 /*
2093 * mpt_scsi_send_tmf hard resets on failure, so no
2094 * need to do so here. Our queue should be emptied
2095 * by the hard reset.
2096 */
2097 continue;
2098 }
2099
2100 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
2101 REQ_STATE_DONE, /*sleep_ok*/TRUE, /*time_ms*/500);
2102
2103 status = mpt->tmf_req->IOCStatus;
2104 if (error != 0) {
2105
2106 /*
2107 * If we've errored out and the transaction is still
2108 * pending, reset the controller.
2109 */
4426 /*
4427 * mpt_scsi_send_tmf hard resets on failure, so no
4428 * need to do so here. Our queue should be emptied
4429 * by the hard reset.
4430 */
4431 continue;
4432 }
4433
4434 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
4435 REQ_STATE_DONE, /*sleep_ok*/TRUE, /*time_ms*/500);
4436
4437 status = mpt->tmf_req->IOCStatus;
4438 if (error != 0) {
4439
4440 /*
4441 * If we've errored out and the transaction is still
4442 * pending, reset the controller.
4443 */
2110 mpt_prt(mpt, "Abort Req %p:%d timed-out. "
2111 "Resetting controller\n", req, serno);
4444 mpt_prt(mpt, "mpt_recover_commands: Abort timed-out. "
4445 "Resetting controller\n");
2112 mpt_reset(mpt, /*reinit*/TRUE);
2113 continue;
2114 }
2115
2116 /*
2117 * TMF is complete.
2118 */
2119 TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2120 mpt->tmf_req->state = REQ_STATE_FREE;
2121		if ((status & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
2122 continue;
2123
2124 mpt_lprt(mpt, MPT_PRT_DEBUG,
4446 mpt_reset(mpt, /*reinit*/TRUE);
4447 continue;
4448 }
4449
4450 /*
4451 * TMF is complete.
4452 */
4453 TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
4454 mpt->tmf_req->state = REQ_STATE_FREE;
4455		if ((status & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
4456 continue;
4457
4458 mpt_lprt(mpt, MPT_PRT_DEBUG,
2125		    "Abort Req %p:%u failed "
2126		    "with status 0x%x. Resetting bus.\n",
2127 req, serno, status);
4459		    "mpt_recover_commands: Abort failed "
4460		    "with status 0x%x. Resetting bus.\n", status);
2128
2129 /*
2130 * If the abort attempt fails for any reason, reset the bus.
2131 * We should find all of the timed-out commands on our
2132 * list are in the done state after this completes.
2133 */
2134 mpt_bus_reset(mpt, /*sleep_ok*/TRUE);
2135 }
4461
4462 /*
4463 * If the abort attempt fails for any reason, reset the bus.
4464 * We should find all of the timed-out commands on our
4465 * list are in the done state after this completes.
4466 */
4467 mpt_bus_reset(mpt, /*sleep_ok*/TRUE);
4468 }
2136
2137 MPT_UNLOCK(mpt);
2138}
4469}