mpt_cam.c: revision 168831 (deleted lines) vs. revision 169293 (added lines)
1/*-
2 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
3 * FreeBSD Version.
4 *
5 * Copyright (c) 2000, 2001 by Greg Ansley
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions

--- 80 unchanged lines hidden ---

89 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
90 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
91 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
92 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
93 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
94 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
95 */
96#include <sys/cdefs.h>
1/*-
2 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
3 * FreeBSD Version.
4 *
5 * Copyright (c) 2000, 2001 by Greg Ansley
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions

--- 80 unchanged lines hidden ---

89 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
90 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
91 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
92 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
93 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
94 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
95 */
96#include <sys/cdefs.h>
97__FBSDID("$FreeBSD: head/sys/dev/mpt/mpt_cam.c 168831 2007-04-18 04:58:53Z scottl $");
97__FBSDID("$FreeBSD: head/sys/dev/mpt/mpt_cam.c 169293 2007-05-05 20:18:24Z mjacob $");
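
The bulk of this revision converts the driver's CAM entry points from the Giant-era CAMLOCK_2_MPTLOCK()/MPTLOCK_2_CAMLOCK() transitions to explicit MPT_LOCK()/MPT_UNLOCK() calls, as the mpt_cam_attach() hunks below show. The lock macros live in mpt.h rather than in this file; a minimal sketch of what they presumably expand to, assuming a per-softc mutex (the field name mpt_lock is an assumption, not something this diff shows):

/*
 * Hypothetical sketch of the per-softc lock macros; the authoritative
 * definitions are in sys/dev/mpt/mpt.h.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#define	MPT_LOCK(mpt)		mtx_lock(&(mpt)->mpt_lock)
#define	MPT_UNLOCK(mpt)		mtx_unlock(&(mpt)->mpt_lock)
#define	MPT_OWNED(mpt)		mtx_owned(&(mpt)->mpt_lock)
#define	MPT_LOCK_ASSERT(mpt)	mtx_assert(&(mpt)->mpt_lock, MA_OWNED)

Under that scheme the error paths in mpt_cam_attach() must unlock before jumping to cleanup, because mpt_cam_detach() now takes the lock itself (see its hunk further down); the old cleanup0 label, which existed only for paths still inside the MPT lock domain, goes away.
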
98
99#include <dev/mpt/mpt.h>
100#include <dev/mpt/mpt_cam.h>
101#include <dev/mpt/mpt_raid.h>
102
103#include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
104#include "dev/mpt/mpilib/mpi_init.h"
105#include "dev/mpt/mpilib/mpi_targ.h"

--- 106 unchanged lines hidden ---

212int
213mpt_cam_attach(struct mpt_softc *mpt)
214{
215 struct cam_devq *devq;
216 mpt_handler_t handler;
217 int maxq;
218 int error;
219
98
99#include <dev/mpt/mpt.h>
100#include <dev/mpt/mpt_cam.h>
101#include <dev/mpt/mpt_raid.h>
102
103#include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
104#include "dev/mpt/mpilib/mpi_init.h"
105#include "dev/mpt/mpilib/mpi_targ.h"

--- 106 unchanged lines hidden ---

212int
213mpt_cam_attach(struct mpt_softc *mpt)
214{
215 struct cam_devq *devq;
216 mpt_handler_t handler;
217 int maxq;
218 int error;
219
220 MPT_LOCK(mpt);
220 TAILQ_INIT(&mpt->request_timeout_list);
221 maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
222 mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);
223
224 handler.reply_handler = mpt_scsi_reply_handler;
225 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
226 &scsi_io_handler_id);
227 if (error != 0) {
221 TAILQ_INIT(&mpt->request_timeout_list);
222 maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
223 mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);
224
225 handler.reply_handler = mpt_scsi_reply_handler;
226 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
227 &scsi_io_handler_id);
228 if (error != 0) {
228 goto cleanup0;
229 MPT_UNLOCK(mpt);
230 goto cleanup;
229 }
230
231 handler.reply_handler = mpt_scsi_tmf_reply_handler;
232 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
233 &scsi_tmf_handler_id);
234 if (error != 0) {
231 }
232
233 handler.reply_handler = mpt_scsi_tmf_reply_handler;
234 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
235 &scsi_tmf_handler_id);
236 if (error != 0) {
235 goto cleanup0;
237 MPT_UNLOCK(mpt);
238 goto cleanup;
236 }
237
238 /*
239 * If we're fibre channel and could support target mode, we register
240 * an ELS reply handler and give it resources.
241 */
242 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
243 handler.reply_handler = mpt_fc_els_reply_handler;
244 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
245 &fc_els_handler_id);
246 if (error != 0) {
239 }
240
241 /*
242 * If we're fibre channel and could support target mode, we register
243 * an ELS reply handler and give it resources.
244 */
245 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
246 handler.reply_handler = mpt_fc_els_reply_handler;
247 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
248 &fc_els_handler_id);
249 if (error != 0) {
247 goto cleanup0;
250 MPT_UNLOCK(mpt);
251 goto cleanup;
248 }
249 if (mpt_add_els_buffers(mpt) == FALSE) {
250 error = ENOMEM;
252 }
253 if (mpt_add_els_buffers(mpt) == FALSE) {
254 error = ENOMEM;
251 goto cleanup0;
255 MPT_UNLOCK(mpt);
256 goto cleanup;
252 }
253 maxq -= mpt->els_cmds_allocated;
254 }
255
256 /*
257 * If we support target mode, we register a reply handler for it,
258 * but don't add command resources until we actually enable target
259 * mode.
260 */
261 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
262 handler.reply_handler = mpt_scsi_tgt_reply_handler;
263 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
264 &mpt->scsi_tgt_handler_id);
265 if (error != 0) {
257 }
258 maxq -= mpt->els_cmds_allocated;
259 }
260
261 /*
262 * If we support target mode, we register a reply handler for it,
263 * but don't add command resources until we actually enable target
264 * mode.
265 */
266 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
267 handler.reply_handler = mpt_scsi_tgt_reply_handler;
268 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
269 &mpt->scsi_tgt_handler_id);
270 if (error != 0) {
266 goto cleanup0;
271 MPT_UNLOCK(mpt);
272 goto cleanup;
267 }
268 }
269
270 /*
271 * We keep one request reserved for timeout TMF requests.
272 */
273 mpt->tmf_req = mpt_get_request(mpt, FALSE);
274 if (mpt->tmf_req == NULL) {
275 mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
276 error = ENOMEM;
273 }
274 }
275
276 /*
277 * We keep one request reserved for timeout TMF requests.
278 */
279 mpt->tmf_req = mpt_get_request(mpt, FALSE);
280 if (mpt->tmf_req == NULL) {
281 mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
282 error = ENOMEM;
277 goto cleanup0;
283 MPT_UNLOCK(mpt);
284 goto cleanup;
278 }
279
280 /*
281 * Mark the request as free even though not on the free list.
282 * There is only one TMF request allowed to be outstanding at
283 * a time and the TMF routines perform their own allocation
284 * tracking using the standard state flags.
285 */
286 mpt->tmf_req->state = REQ_STATE_FREE;
287 maxq--;
288
285 }
286
287 /*
288 * Mark the request as free even though not on the free list.
289 * There is only one TMF request allowed to be outstanding at
290 * a time and the TMF routines perform their own allocation
291 * tracking using the standard state flags.
292 */
293 mpt->tmf_req->state = REQ_STATE_FREE;
294 maxq--;
295
296 /*
297 * The rest of this is CAM foo, for which we need to drop our lock
298 */
299 MPT_UNLOCK(mpt);
300
289 if (mpt_spawn_recovery_thread(mpt) != 0) {
290 mpt_prt(mpt, "Unable to spawn recovery thread!\n");
291 error = ENOMEM;
301 if (mpt_spawn_recovery_thread(mpt) != 0) {
302 mpt_prt(mpt, "Unable to spawn recovery thread!\n");
303 error = ENOMEM;
292 goto cleanup0;
304 goto cleanup;
293 }
294
295 /*
305 }
306
307 /*
296 * The rest of this is CAM foo, for which we need to drop our lock
297 */
298 MPTLOCK_2_CAMLOCK(mpt);
299
300 /*
301 * Create the device queue for our SIM(s).
302 */
303 devq = cam_simq_alloc(maxq);
304 if (devq == NULL) {
305 mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
306 error = ENOMEM;
307 goto cleanup;
308 }
309
310 /*
311 * Construct our SIM entry.
312 */
308 * Create the device queue for our SIM(s).
309 */
310 devq = cam_simq_alloc(maxq);
311 if (devq == NULL) {
312 mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
313 error = ENOMEM;
314 goto cleanup;
315 }
316
317 /*
318 * Construct our SIM entry.
319 */
313 mpt->sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
314 mpt->unit, &Giant, 1, maxq, devq);
320 mpt->sim =
321 mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
315 if (mpt->sim == NULL) {
316 mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
317 cam_simq_free(devq);
318 error = ENOMEM;
319 goto cleanup;
320 }
321
322 /*
323 * Register exactly this bus.
324 */
322 if (mpt->sim == NULL) {
323 mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
324 cam_simq_free(devq);
325 error = ENOMEM;
326 goto cleanup;
327 }
328
329 /*
330 * Register exactly this bus.
331 */
332 MPT_LOCK(mpt);
325 if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) {
326 mpt_prt(mpt, "Bus registration Failed!\n");
327 error = ENOMEM;
333 if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) {
334 mpt_prt(mpt, "Bus registration Failed!\n");
335 error = ENOMEM;
336 MPT_UNLOCK(mpt);
328 goto cleanup;
329 }
330
331 if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
332 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
333 mpt_prt(mpt, "Unable to allocate Path!\n");
334 error = ENOMEM;
337 goto cleanup;
338 }
339
340 if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
341 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
342 mpt_prt(mpt, "Unable to allocate Path!\n");
343 error = ENOMEM;
344 MPT_UNLOCK(mpt);
335 goto cleanup;
336 }
345 goto cleanup;
346 }
347 MPT_UNLOCK(mpt);
337
338 /*
339 * Only register a second bus for RAID physical
340 * devices if the controller supports RAID.
341 */
342 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
348
349 /*
350 * Only register a second bus for RAID physical
351 * devices if the controller supports RAID.
352 */
353 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
343 CAMLOCK_2_MPTLOCK(mpt);
344 return (0);
345 }
346
347 /*
348 * Create a "bus" to export all hidden disks to CAM.
349 */
354 return (0);
355 }
356
357 /*
358 * Create a "bus" to export all hidden disks to CAM.
359 */
350 mpt->phydisk_sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
351 mpt->unit, &Giant, 1, maxq, devq);
360 mpt->phydisk_sim =
361 mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
352 if (mpt->phydisk_sim == NULL) {
353 mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
354 error = ENOMEM;
355 goto cleanup;
356 }
357
358 /*
359 * Register this bus.
360 */
362 if (mpt->phydisk_sim == NULL) {
363 mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
364 error = ENOMEM;
365 goto cleanup;
366 }
367
368 /*
369 * Register this bus.
370 */
371 MPT_LOCK(mpt);
361 if (xpt_bus_register(mpt->phydisk_sim, 1) != CAM_SUCCESS) {
362 mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
363 error = ENOMEM;
372 if (xpt_bus_register(mpt->phydisk_sim, 1) != CAM_SUCCESS) {
373 mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
374 error = ENOMEM;
375 MPT_UNLOCK(mpt);
364 goto cleanup;
365 }
366
367 if (xpt_create_path(&mpt->phydisk_path, NULL,
368 cam_sim_path(mpt->phydisk_sim),
369 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
370 mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
371 error = ENOMEM;
376 goto cleanup;
377 }
378
379 if (xpt_create_path(&mpt->phydisk_path, NULL,
380 cam_sim_path(mpt->phydisk_sim),
381 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
382 mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
383 error = ENOMEM;
384 MPT_UNLOCK(mpt);
372 goto cleanup;
373 }
385 goto cleanup;
386 }
374 CAMLOCK_2_MPTLOCK(mpt);
387 MPT_UNLOCK(mpt);
375 mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
376 return (0);
377
378cleanup:
388 mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
389 return (0);
390
391cleanup:
379 CAMLOCK_2_MPTLOCK(mpt);
380cleanup0:
381 mpt_cam_detach(mpt);
382 return (error);
383}
384
385/*
386 * Read FC configuration information
387 */
388static int

--- 51 unchanged lines hidden ---

440 "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
441 "Speed %u-Gbit\n", topology,
442 mpt->mpt_fcport_page0.WWNN.High,
443 mpt->mpt_fcport_page0.WWNN.Low,
444 mpt->mpt_fcport_page0.WWPN.High,
445 mpt->mpt_fcport_page0.WWPN.Low,
446 mpt->mpt_fcport_speed);
447#if __FreeBSD_version >= 500000
392 mpt_cam_detach(mpt);
393 return (error);
394}
395
396/*
397 * Read FC configuration information
398 */
399static int

--- 51 unchanged lines hidden ---

451 "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
452 "Speed %u-Gbit\n", topology,
453 mpt->mpt_fcport_page0.WWNN.High,
454 mpt->mpt_fcport_page0.WWNN.Low,
455 mpt->mpt_fcport_page0.WWPN.High,
456 mpt->mpt_fcport_page0.WWPN.Low,
457 mpt->mpt_fcport_speed);
458#if __FreeBSD_version >= 500000
459 MPT_UNLOCK(mpt);
448 {
449 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
450 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
451
452 snprintf(mpt->scinfo.fc.wwnn,
453 sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
454 mpt->mpt_fcport_page0.WWNN.High,
455 mpt->mpt_fcport_page0.WWNN.Low);

--- 7 unchanged lines hidden (view full) ---

463 "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
464 "World Wide Node Name");
465
466 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
467 "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
468 "World Wide Port Name");
469
470 }
460 {
461 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
462 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
463
464 snprintf(mpt->scinfo.fc.wwnn,
465 sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
466 mpt->mpt_fcport_page0.WWNN.High,
467 mpt->mpt_fcport_page0.WWNN.Low);

--- 7 unchanged lines hidden (view full) ---

475 "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
476 "World Wide Node Name");
477
478 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
479 "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
480 "World Wide Port Name");
481
482 }
483 MPT_LOCK(mpt);
471#endif
472 return (0);
473}
474
475/*
476 * Set FC configuration information.
477 */
478static int

--- 316 unchanged lines hidden ---

795 }
796 }
797 return (0);
798}
799
800int
801mpt_cam_enable(struct mpt_softc *mpt)
802{
484#endif
485 return (0);
486}
487
488/*
489 * Set FC configuration information.
490 */
491static int

--- 316 unchanged lines hidden ---

808 }
809 }
810 return (0);
811}
812
813int
814mpt_cam_enable(struct mpt_softc *mpt)
815{
816 int error;
817
818 MPT_LOCK(mpt);
819
820 error = EIO;
803 if (mpt->is_fc) {
804 if (mpt_read_config_info_fc(mpt)) {
821 if (mpt->is_fc) {
822 if (mpt_read_config_info_fc(mpt)) {
805 return (EIO);
823 goto out;
806 }
807 if (mpt_set_initial_config_fc(mpt)) {
824 }
825 if (mpt_set_initial_config_fc(mpt)) {
808 return (EIO);
826 goto out;
809 }
810 } else if (mpt->is_sas) {
811 if (mpt_read_config_info_sas(mpt)) {
827 }
828 } else if (mpt->is_sas) {
829 if (mpt_read_config_info_sas(mpt)) {
812 return (EIO);
830 goto out;
813 }
814 if (mpt_set_initial_config_sas(mpt)) {
831 }
832 if (mpt_set_initial_config_sas(mpt)) {
815 return (EIO);
833 goto out;
816 }
817 } else if (mpt->is_spi) {
818 if (mpt_read_config_info_spi(mpt)) {
834 }
835 } else if (mpt->is_spi) {
836 if (mpt_read_config_info_spi(mpt)) {
819 return (EIO);
837 goto out;
820 }
821 if (mpt_set_initial_config_spi(mpt)) {
838 }
839 if (mpt_set_initial_config_spi(mpt)) {
822 return (EIO);
840 goto out;
823 }
824 }
841 }
842 }
825 return (0);
843 error = 0;
844
845out:
846 MPT_UNLOCK(mpt);
847 return (error);
826}
827
828void
829mpt_cam_ready(struct mpt_softc *mpt)
830{
831 /*
832 * If we're in target mode, hang out resources now
833 * so we don't cause the world to hang talking to us.

--- 11 unchanged lines hidden ---

845 mpt->ready = 1;
846}
847
848void
849mpt_cam_detach(struct mpt_softc *mpt)
850{
851 mpt_handler_t handler;
852
848}
849
850void
851mpt_cam_ready(struct mpt_softc *mpt)
852{
853 /*
854 * If we're in target mode, hang out resources now
855 * so we don't cause the world to hang talking to us.

--- 11 unchanged lines hidden ---

867 mpt->ready = 1;
868}
869
870void
871mpt_cam_detach(struct mpt_softc *mpt)
872{
873 mpt_handler_t handler;
874
875 MPT_LOCK(mpt);
853 mpt->ready = 0;
854 mpt_terminate_recovery_thread(mpt);
855
856 handler.reply_handler = mpt_scsi_reply_handler;
857 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
858 scsi_io_handler_id);
859 handler.reply_handler = mpt_scsi_tmf_reply_handler;
860 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,

--- 5 unchanged lines hidden ---

866 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
867 mpt->scsi_tgt_handler_id);
868
869 if (mpt->tmf_req != NULL) {
870 mpt->tmf_req->state = REQ_STATE_ALLOCATED;
871 mpt_free_request(mpt, mpt->tmf_req);
872 mpt->tmf_req = NULL;
873 }
876 mpt->ready = 0;
877 mpt_terminate_recovery_thread(mpt);
878
879 handler.reply_handler = mpt_scsi_reply_handler;
880 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
881 scsi_io_handler_id);
882 handler.reply_handler = mpt_scsi_tmf_reply_handler;
883 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,

--- 5 unchanged lines hidden ---

889 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
890 mpt->scsi_tgt_handler_id);
891
892 if (mpt->tmf_req != NULL) {
893 mpt->tmf_req->state = REQ_STATE_ALLOCATED;
894 mpt_free_request(mpt, mpt->tmf_req);
895 mpt->tmf_req = NULL;
896 }
897 MPT_UNLOCK(mpt);
874
875 if (mpt->sim != NULL) {
898
899 if (mpt->sim != NULL) {
876 MPTLOCK_2_CAMLOCK(mpt);
877 xpt_free_path(mpt->path);
878 xpt_bus_deregister(cam_sim_path(mpt->sim));
879 cam_sim_free(mpt->sim, TRUE);
880 mpt->sim = NULL;
900 xpt_free_path(mpt->path);
901 xpt_bus_deregister(cam_sim_path(mpt->sim));
902 cam_sim_free(mpt->sim, TRUE);
903 mpt->sim = NULL;
881 CAMLOCK_2_MPTLOCK(mpt);
882 }
883
884 if (mpt->phydisk_sim != NULL) {
904 }
905
906 if (mpt->phydisk_sim != NULL) {
885 MPTLOCK_2_CAMLOCK(mpt);
886 xpt_free_path(mpt->phydisk_path);
887 xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
888 cam_sim_free(mpt->phydisk_sim, TRUE);
889 mpt->phydisk_sim = NULL;
907 xpt_free_path(mpt->phydisk_path);
908 xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
909 cam_sim_free(mpt->phydisk_sim, TRUE);
910 mpt->phydisk_sim = NULL;
890 CAMLOCK_2_MPTLOCK(mpt);
891 }
892}
893
894/* This routine is used after a system crash to dump core onto the swap device.
895 */
896static void
897mpt_poll(struct cam_sim *sim)
898{
899 struct mpt_softc *mpt;
900
901 mpt = (struct mpt_softc *)cam_sim_softc(sim);
911 }
912}
913
914/* This routine is used after a system crash to dump core onto the swap device.
915 */
916static void
917mpt_poll(struct cam_sim *sim)
918{
919 struct mpt_softc *mpt;
920
921 mpt = (struct mpt_softc *)cam_sim_softc(sim);
902 MPT_LOCK(mpt);
903 mpt_intr(mpt);
922 mpt_intr(mpt);
904 MPT_UNLOCK(mpt);
905}
906
907/*
908 * Watchdog timeout routine for SCSI requests.
909 */
910static void
911mpt_timeout(void *arg)
912{

--- 389 unchanged lines hidden ---

1302 CAMLOCK_2_MPTLOCK(mpt);
1303 mpt_free_request(mpt, req);
1304 MPTLOCK_2_CAMLOCK(mpt);
1305 return;
1306 }
1307
1308 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1309 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
923}
924
925/*
926 * Watchdog timeout routine for SCSI requests.
927 */
928static void
929mpt_timeout(void *arg)
930{

--- 389 unchanged lines hidden ---

1320 CAMLOCK_2_MPTLOCK(mpt);
1321 mpt_free_request(mpt, req);
1322 MPTLOCK_2_CAMLOCK(mpt);
1323 return;
1324 }
1325
1326 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1327 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1310 ccb->ccb_h.timeout_ch =
1311 timeout(mpt_timeout, (caddr_t)ccb,
1312 (ccb->ccb_h.timeout * hz) / 1000);
1328 mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
1329 mpt_timeout, ccb);
1313 } else {
1330 } else {
1314 callout_handle_init(&ccb->ccb_h.timeout_ch);
1331 mpt_req_timeout_init(req);
1315 }
1316 if (mpt->verbose > MPT_PRT_DEBUG) {
1317 int nc = 0;
1318 mpt_print_request(req->req_vbuf);
1319 for (trq = req->chain; trq; trq = trq->chain) {
1320 printf(" Additional Chain Area %d\n", nc++);
1321 mpt_dump_sgl(trq->req_vbuf, 0);
1322 }

--- 381 unchanged lines hidden ---

1704 CAMLOCK_2_MPTLOCK(mpt);
1705 mpt_free_request(mpt, req);
1706 MPTLOCK_2_CAMLOCK(mpt);
1707 return;
1708 }
1709
1710 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1711 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1332 }
1333 if (mpt->verbose > MPT_PRT_DEBUG) {
1334 int nc = 0;
1335 mpt_print_request(req->req_vbuf);
1336 for (trq = req->chain; trq; trq = trq->chain) {
1337 printf(" Additional Chain Area %d\n", nc++);
1338 mpt_dump_sgl(trq->req_vbuf, 0);
1339 }

--- 381 unchanged lines hidden ---

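The hunk above, repeated for the second request-start path and the target-mode completion paths further down, retires the legacy timeout(9)/untimeout(9) watchdog whose handle lived in ccb->ccb_h.timeout_ch in favour of per-request helpers. Their definitions are in mpt.h, not in this diff; a minimal sketch under the assumption that each request_t embeds a callout(9) timer (the field name callout is an assumption):

/*
 * Assumed sketch of the per-request timeout helpers built on callout(9);
 * the real macros live in sys/dev/mpt/mpt.h.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>

#define	mpt_req_timeout(req, ticks, func, arg)	\
	callout_reset(&(req)->callout, (ticks), (func), (arg))
#define	mpt_req_untimeout(req, func, arg)	\
	callout_stop(&(req)->callout)		/* func/arg unused here */
#define	mpt_req_timeout_init(req)	\
	callout_init(&(req)->callout, 0)	/* mpsafe flag is a guess */

The visible effect is that the watchdog now travels with the request rather than with the CCB, so the paths shown here no longer touch the CCB's timeout_ch handle at all.
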
1721 CAMLOCK_2_MPTLOCK(mpt);
1722 mpt_free_request(mpt, req);
1723 MPTLOCK_2_CAMLOCK(mpt);
1724 return;
1725 }
1726
1727 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1728 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1712 ccb->ccb_h.timeout_ch =
1713 timeout(mpt_timeout, (caddr_t)ccb,
1714 (ccb->ccb_h.timeout * hz) / 1000);
1729 mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
1730 mpt_timeout, ccb);
1715 } else {
1731 } else {
1716 callout_handle_init(&ccb->ccb_h.timeout_ch);
1732 mpt_req_timeout_init(req);
1717 }
1718 if (mpt->verbose > MPT_PRT_DEBUG) {
1719 int nc = 0;
1720 mpt_print_request(req->req_vbuf);
1721 for (trq = req->chain; trq; trq = trq->chain) {
1722 printf(" Additional Chain Area %d\n", nc++);
1723 mpt_dump_sgl(trq->req_vbuf, 0);
1724 }

--- 551 unchanged lines hidden ---

2276 ccb = req->ccb;
2277 if (ccb == NULL) {
2278 mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2279 req, req->serno);
2280 return (TRUE);
2281 }
2282
2283 tgt = scsi_req->TargetID;
1733 }
1734 if (mpt->verbose > MPT_PRT_DEBUG) {
1735 int nc = 0;
1736 mpt_print_request(req->req_vbuf);
1737 for (trq = req->chain; trq; trq = trq->chain) {
1738 printf(" Additional Chain Area %d\n", nc++);
1739 mpt_dump_sgl(trq->req_vbuf, 0);
1740 }

--- 551 unchanged lines hidden ---

2292 ccb = req->ccb;
2293 if (ccb == NULL) {
2294 mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2295 req, req->serno);
2296 return (TRUE);
2297 }
2298
2299 tgt = scsi_req->TargetID;
2284 untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);
2300 mpt_req_untimeout(req, mpt_timeout, ccb);
2285 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2286
2287 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2288 bus_dmasync_op_t op;
2289
2290 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2291 op = BUS_DMASYNC_POSTREAD;
2292 else

--- 606 unchanged lines hidden ---

2899 struct ccb_trans_settings *cts;
2900 target_id_t tgt;
2901 lun_id_t lun;
2902 int raid_passthru;
2903
2904 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
2905
2906 mpt = (struct mpt_softc *)cam_sim_softc(sim);
2301 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2302
2303 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2304 bus_dmasync_op_t op;
2305
2306 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2307 op = BUS_DMASYNC_POSTREAD;
2308 else

--- 606 unchanged lines hidden (view full) ---

2915 struct ccb_trans_settings *cts;
2916 target_id_t tgt;
2917 lun_id_t lun;
2918 int raid_passthru;
2919
2920 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
2921
2922 mpt = (struct mpt_softc *)cam_sim_softc(sim);
2907 KASSERT(MPT_OWNED(mpt) == 0, ("mpt owned on entrance to mpt_action"));
2908 raid_passthru = (sim == mpt->phydisk_sim);
2923 raid_passthru = (sim == mpt->phydisk_sim);
2924 MPT_LOCK_ASSERT(mpt);
2909
2910 tgt = ccb->ccb_h.target_id;
2911 lun = ccb->ccb_h.target_lun;
2912 if (raid_passthru &&
2913 ccb->ccb_h.func_code != XPT_PATH_INQ &&
2914 ccb->ccb_h.func_code != XPT_RESET_BUS &&
2915 ccb->ccb_h.func_code != XPT_RESET_DEV) {
2916 CAMLOCK_2_MPTLOCK(mpt);

--- 730 unchanged lines hidden ---

3647 mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
3648}
3649
3650static void
3651mpt_recovery_thread(void *arg)
3652{
3653 struct mpt_softc *mpt;
3654
2925
2926 tgt = ccb->ccb_h.target_id;
2927 lun = ccb->ccb_h.target_lun;
2928 if (raid_passthru &&
2929 ccb->ccb_h.func_code != XPT_PATH_INQ &&
2930 ccb->ccb_h.func_code != XPT_RESET_BUS &&
2931 ccb->ccb_h.func_code != XPT_RESET_DEV) {
2932 CAMLOCK_2_MPTLOCK(mpt);

--- 730 unchanged lines hidden ---

3663 mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
3664}
3665
3666static void
3667mpt_recovery_thread(void *arg)
3668{
3669 struct mpt_softc *mpt;
3670
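
The lines that follow drop the explicit Giant bracket that used to surround the recovery thread body. That is only safe if mpt_sleep() interlocks with the driver's own mutex rather than with Giant; a plausible sketch of the wrapper, assuming it is a thin msleep(9) shim (the real definition is in mpt.h):

/*
 * Assumed sketch: mpt_sleep() atomically releases and reacquires the
 * per-softc lock around the wait, so the thread needs no Giant at all.
 */
#include <sys/param.h>
#include <sys/systm.h>

#define	mpt_sleep(mpt, ident, priority, wmesg, timo)	\
	msleep((ident), &(mpt)->mpt_lock, (priority), (wmesg), (timo))
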
3655#if __FreeBSD_version >= 500000
3656 mtx_lock(&Giant);
3657#endif
3658 mpt = (struct mpt_softc *)arg;
3659 MPT_LOCK(mpt);
3660 for (;;) {
3661 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3662 if (mpt->shutdwn_recovery == 0) {
3663 mpt_sleep(mpt, mpt, PUSER, "idle", 0);
3664 }
3665 }
3666 if (mpt->shutdwn_recovery != 0) {
3667 break;
3668 }
3669 mpt_recover_commands(mpt);
3670 }
3671 mpt->recovery_thread = NULL;
3672 wakeup(&mpt->recovery_thread);
3673 MPT_UNLOCK(mpt);
3671 mpt = (struct mpt_softc *)arg;
3672 MPT_LOCK(mpt);
3673 for (;;) {
3674 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3675 if (mpt->shutdwn_recovery == 0) {
3676 mpt_sleep(mpt, mpt, PUSER, "idle", 0);
3677 }
3678 }
3679 if (mpt->shutdwn_recovery != 0) {
3680 break;
3681 }
3682 mpt_recover_commands(mpt);
3683 }
3684 mpt->recovery_thread = NULL;
3685 wakeup(&mpt->recovery_thread);
3686 MPT_UNLOCK(mpt);
3674#if __FreeBSD_version >= 500000
3675 mtx_unlock(&Giant);
3676#endif
3677 kthread_exit(0);
3678}
3679
3680static int
3681mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
3682 u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
3683{
3684 MSG_SCSI_TASK_MGMT *tmf_req;

--- 923 unchanged lines hidden ---

4608 }
4609
4610 mpt_lprt(mpt, MPT_PRT_DEBUG,
4611 "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
4612 ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req,
4613 req->serno, tgt->resid);
4614 if (ccb) {
4615 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
3687 kthread_exit(0);
3688}
3689
3690static int
3691mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
3692 u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
3693{
3694 MSG_SCSI_TASK_MGMT *tmf_req;

--- 923 unchanged lines hidden (view full) ---

4618 }
4619
4620 mpt_lprt(mpt, MPT_PRT_DEBUG,
4621 "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
4622 ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req,
4623 req->serno, tgt->resid);
4624 if (ccb) {
4625 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4616 ccb->ccb_h.timeout_ch = timeout(mpt_timeout, ccb, 60 * hz);
4626 mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb);
4617 }
4618 mpt_send_cmd(mpt, req);
4619}
4620
4621static void
4622mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
4623 tgt_resource_t *trtp, int init_id)
4624{

--- 398 unchanged lines hidden ---

5023 mpt_free_request(mpt, tgt->req);
5024 tgt->req = NULL;
5025 mpt_scsi_tgt_status(mpt, NULL, req,
5026 0, NULL);
5027 return (TRUE);
5028 }
5029 tgt->ccb = NULL;
5030 tgt->nxfers++;
4627 }
4628 mpt_send_cmd(mpt, req);
4629}
4630
4631static void
4632mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
4633 tgt_resource_t *trtp, int init_id)
4634{

--- 398 unchanged lines hidden ---

5033 mpt_free_request(mpt, tgt->req);
5034 tgt->req = NULL;
5035 mpt_scsi_tgt_status(mpt, NULL, req,
5036 0, NULL);
5037 return (TRUE);
5038 }
5039 tgt->ccb = NULL;
5040 tgt->nxfers++;
5031 untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);
5041 mpt_req_untimeout(req, mpt_timeout, ccb);
5032 mpt_lprt(mpt, MPT_PRT_DEBUG,
5033 "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
5034 ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
5035 /*
5036 * Free the Target Assist Request
5037 */
5038 KASSERT(tgt->req->ccb == ccb,
5039 ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,

--- 48 unchanged lines hidden ---

5088 }
5089
5090 if (ccb) {
5091 tgt->ccb = NULL;
5092 if (tgt->state ==
5093 TGT_STATE_MOVING_DATA_AND_STATUS) {
5094 tgt->nxfers++;
5095 }
5042 mpt_lprt(mpt, MPT_PRT_DEBUG,
5043 "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
5044 ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
5045 /*
5046 * Free the Target Assist Request
5047 */
5048 KASSERT(tgt->req->ccb == ccb,
5049 ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,

--- 48 unchanged lines hidden ---

5098 }
5099
5100 if (ccb) {
5101 tgt->ccb = NULL;
5102 if (tgt->state ==
5103 TGT_STATE_MOVING_DATA_AND_STATUS) {
5104 tgt->nxfers++;
5105 }
5096 untimeout(mpt_timeout, ccb,
5097 ccb->ccb_h.timeout_ch);
5106 mpt_req_untimeout(req, mpt_timeout, ccb);
5098 if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
5099 ccb->ccb_h.status |= CAM_SENT_SENSE;
5100 }
5101 mpt_lprt(mpt, MPT_PRT_DEBUG,
5102 "TARGET_STATUS tag %x sts %x flgs %x req "
5103 "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
5104 ccb->ccb_h.flags, tgt->req);
5105 /*

--- 127 unchanged lines hidden ---
5107 if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
5108 ccb->ccb_h.status |= CAM_SENT_SENSE;
5109 }
5110 mpt_lprt(mpt, MPT_PRT_DEBUG,
5111 "TARGET_STATUS tag %x sts %x flgs %x req "
5112 "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
5113 ccb->ccb_h.flags, tgt->req);
5114 /*

--- 127 unchanged lines hidden ---
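
One further helper worth noting: in the mpt_cam_attach() hunks near the top, cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, mpt->unit, &Giant, 1, maxq, devq) becomes mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq), dropping the explicit unit and lock arguments. The wrapper is defined in mpt.h; a plausible sketch, assuming it simply fills in the softc's unit number and the lock handed to CAM (whether that is the per-softc mutex or still Giant is a build-time detail this diff does not show):

/*
 * Assumed sketch of mpt_sim_alloc(); the authoritative definition is in
 * sys/dev/mpt/mpt.h and may pass a different lock to CAM.
 */
#include <cam/cam.h>
#include <cam/cam_sim.h>

#define	mpt_sim_alloc(func, poll, name, mpt, max_dev, max_tag, devq)	\
	cam_sim_alloc((func), (poll), (name), (mpt), (mpt)->unit,	\
	    &(mpt)->mpt_lock, (max_dev), (max_tag), (devq))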