ctl.c (288731) | ctl.c (288732) |
---|---|
1/*- 2 * Copyright (c) 2003-2009 Silicon Graphics International Corp. 3 * Copyright (c) 2012 The FreeBSD Foundation | 1/*- 2 * Copyright (c) 2003-2009 Silicon Graphics International Corp. 3 * Copyright (c) 2012 The FreeBSD Foundation |
4 * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org> |
|
4 * All rights reserved. 5 * 6 * Portions of this software were developed by Edward Tomasz Napierala 7 * under sponsorship from the FreeBSD Foundation. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: --- 25 unchanged lines hidden (view full) --- 37 * CAM Target Layer, a SCSI device emulation subsystem. 38 * 39 * Author: Ken Merry <ken@FreeBSD.org> 40 */ 41 42#define _CTL_C 43 44#include <sys/cdefs.h> | 5 * All rights reserved. 6 * 7 * Portions of this software were developed by Edward Tomasz Napierala 8 * under sponsorship from the FreeBSD Foundation. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: --- 25 unchanged lines hidden (view full) --- 38 * CAM Target Layer, a SCSI device emulation subsystem. 39 * 40 * Author: Ken Merry <ken@FreeBSD.org> 41 */ 42 43#define _CTL_C 44 45#include <sys/cdefs.h> |
45__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl.c 288731 2015-10-05 08:55:59Z mav $"); | 46__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl.c 288732 2015-10-05 08:57:16Z mav $"); |
46 47#include <sys/param.h> 48#include <sys/systm.h> 49#include <sys/ctype.h> 50#include <sys/kernel.h> 51#include <sys/types.h> 52#include <sys/kthread.h> 53#include <sys/bio.h> --- 25 unchanged lines hidden (view full) --- 79#include <cam/ctl/ctl_private.h> 80#include <cam/ctl/ctl_debug.h> 81#include <cam/ctl/ctl_scsi_all.h> 82#include <cam/ctl/ctl_error.h> 83 84struct ctl_softc *control_softc = NULL; 85 86/* | 47 48#include <sys/param.h> 49#include <sys/systm.h> 50#include <sys/ctype.h> 51#include <sys/kernel.h> 52#include <sys/types.h> 53#include <sys/kthread.h> 54#include <sys/bio.h> --- 25 unchanged lines hidden (view full) --- 80#include <cam/ctl/ctl_private.h> 81#include <cam/ctl/ctl_debug.h> 82#include <cam/ctl/ctl_scsi_all.h> 83#include <cam/ctl/ctl_error.h> 84 85struct ctl_softc *control_softc = NULL; 86 87/* |
87 * Size and alignment macros needed for Copan-specific HA hardware. These 88 * can go away when the HA code is re-written, and uses busdma for any 89 * hardware. 90 */ 91#define CTL_ALIGN_8B(target, source, type) \ 92 if (((uint32_t)source & 0x7) != 0) \ 93 target = (type)(source + (0x8 - ((uint32_t)source & 0x7)));\ 94 else \ 95 target = (type)source; 96 97#define CTL_SIZE_8B(target, size) \ 98 if ((size & 0x7) != 0) \ 99 target = size + (0x8 - (size & 0x7)); \ 100 else \ 101 target = size; 102 103#define CTL_ALIGN_8B_MARGIN 16 104 105/* | |
106 * Template mode pages. 107 */ 108 109/* 110 * Note that these are default values only. The actual values will be 111 * filled in when the user does a mode sense. 112 */ 113const static struct copan_debugconf_subpage debugconf_page_default = { --- 232 unchanged lines hidden (view full) --- 346 /*count*/{0, 0, 0, 0}}, 347 {/*flags*/0, 348 /*resource*/0, 349 /*reserved*/{0, 0}, 350 /*count*/{0, 0, 0, 0}} 351 } 352}; 353 | 88 * Template mode pages. 89 */ 90 91/* 92 * Note that these are default values only. The actual values will be 93 * filled in when the user does a mode sense. 94 */ 95const static struct copan_debugconf_subpage debugconf_page_default = { --- 232 unchanged lines hidden (view full) --- 328 /*count*/{0, 0, 0, 0}}, 329 {/*flags*/0, 330 /*resource*/0, 331 /*reserved*/{0, 0}, 332 /*count*/{0, 0, 0, 0}} 333 } 334}; 335 |
354/* 355 * XXX KDM move these into the softc. 356 */ 357static int rcv_sync_msg; 358static uint8_t ctl_pause_rtr; 359 | |
360SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer"); 361static int worker_threads = -1; 362TUNABLE_INT("kern.cam.ctl.worker_threads", &worker_threads); 363SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN, 364 &worker_threads, 1, "Number of worker threads"); 365static int ctl_debug = CTL_DEBUG_NONE; 366TUNABLE_INT("kern.cam.ctl.debug", &ctl_debug); 367SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN, 368 &ctl_debug, 0, "Enabled debug flags"); 369 370/* 371 * Supported pages (0x00), Serial number (0x80), Device ID (0x83), 372 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87), 373 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0), 374 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2) 375 */ 376#define SCSI_EVPD_NUM_SUPPORTED_PAGES 10 377 | 336SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer"); 337static int worker_threads = -1; 338TUNABLE_INT("kern.cam.ctl.worker_threads", &worker_threads); 339SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN, 340 &worker_threads, 1, "Number of worker threads"); 341static int ctl_debug = CTL_DEBUG_NONE; 342TUNABLE_INT("kern.cam.ctl.debug", &ctl_debug); 343SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN, 344 &ctl_debug, 0, "Enabled debug flags"); 345 346/* 347 * Supported pages (0x00), Serial number (0x80), Device ID (0x83), 348 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87), 349 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0), 350 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2) 351 */ 352#define SCSI_EVPD_NUM_SUPPORTED_PAGES 10 353 |
378#ifdef notyet | |
379static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event, 380 int param); 381static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest); | 354static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event, 355 int param); 356static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest); |
382#endif | 357static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest); |
383static int ctl_init(void); 384void ctl_shutdown(void); 385static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td); 386static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td); 387static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio); 388static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 389 struct ctl_ooa *ooa_hdr, 390 struct ctl_ooa_entry *kern_entries); 391static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 392 struct thread *td); 393static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun, 394 struct ctl_be_lun *be_lun); 395static int ctl_free_lun(struct ctl_lun *lun); 396static void ctl_create_lun(struct ctl_be_lun *be_lun); 397static struct ctl_port * ctl_io_port(struct ctl_io_hdr *io_hdr); | 358static int ctl_init(void); 359void ctl_shutdown(void); 360static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td); 361static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td); 362static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio); 363static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 364 struct ctl_ooa *ooa_hdr, 365 struct ctl_ooa_entry *kern_entries); 366static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 367 struct thread *td); 368static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun, 369 struct ctl_be_lun *be_lun); 370static int ctl_free_lun(struct ctl_lun *lun); 371static void ctl_create_lun(struct ctl_be_lun *be_lun); 372static struct ctl_port * ctl_io_port(struct ctl_io_hdr *io_hdr); |
398/** 399static void ctl_failover_change_pages(struct ctl_softc *softc, 400 struct ctl_scsiio *ctsio, int master); 401**/ | |
402 403static int ctl_do_mode_select(union ctl_io *io); 404static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, 405 uint64_t res_key, uint64_t sa_res_key, 406 uint8_t type, uint32_t residx, 407 struct ctl_scsiio *ctsio, 408 struct scsi_per_res_out *cdb, 409 struct scsi_per_res_out_parms* param); --- 20 unchanged lines hidden (view full) --- 430static ctl_action ctl_check_for_blockage(struct ctl_lun *lun, 431 union ctl_io *pending_io, union ctl_io *ooa_io); 432static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 433 union ctl_io *starting_io); 434static int ctl_check_blocked(struct ctl_lun *lun); 435static int ctl_scsiio_lun_check(struct ctl_lun *lun, 436 const struct ctl_cmd_entry *entry, 437 struct ctl_scsiio *ctsio); | 373 374static int ctl_do_mode_select(union ctl_io *io); 375static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, 376 uint64_t res_key, uint64_t sa_res_key, 377 uint8_t type, uint32_t residx, 378 struct ctl_scsiio *ctsio, 379 struct scsi_per_res_out *cdb, 380 struct scsi_per_res_out_parms* param); --- 20 unchanged lines hidden (view full) --- 401static ctl_action ctl_check_for_blockage(struct ctl_lun *lun, 402 union ctl_io *pending_io, union ctl_io *ooa_io); 403static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 404 union ctl_io *starting_io); 405static int ctl_check_blocked(struct ctl_lun *lun); 406static int ctl_scsiio_lun_check(struct ctl_lun *lun, 407 const struct ctl_cmd_entry *entry, 408 struct ctl_scsiio *ctsio); |
438//static int ctl_check_rtr(union ctl_io *pending_io, struct ctl_softc *softc); 439#ifdef notyet 440static void ctl_failover(void); 441#endif | 409static void ctl_failover_lun(struct ctl_lun *lun); 410static void ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua); 411static void ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua); 412static void ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua); 413static void ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua); |
442static void ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx, 443 ctl_ua_type ua_type); 444static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc, 445 struct ctl_scsiio *ctsio); 446static int ctl_scsiio(struct ctl_scsiio *ctsio); 447 448static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io); 449static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io, --- 22 unchanged lines hidden (view full) --- 472static void ctl_datamove_remote(union ctl_io *io); 473static int ctl_process_done(union ctl_io *io); 474static void ctl_lun_thread(void *arg); 475static void ctl_thresh_thread(void *arg); 476static void ctl_work_thread(void *arg); 477static void ctl_enqueue_incoming(union ctl_io *io); 478static void ctl_enqueue_rtr(union ctl_io *io); 479static void ctl_enqueue_done(union ctl_io *io); | 414static void ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx, 415 ctl_ua_type ua_type); 416static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc, 417 struct ctl_scsiio *ctsio); 418static int ctl_scsiio(struct ctl_scsiio *ctsio); 419 420static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io); 421static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io, --- 22 unchanged lines hidden (view full) --- 444static void ctl_datamove_remote(union ctl_io *io); 445static int ctl_process_done(union ctl_io *io); 446static void ctl_lun_thread(void *arg); 447static void ctl_thresh_thread(void *arg); 448static void ctl_work_thread(void *arg); 449static void ctl_enqueue_incoming(union ctl_io *io); 450static void ctl_enqueue_rtr(union ctl_io *io); 451static void ctl_enqueue_done(union ctl_io *io); |
480#ifdef notyet | |
481static void ctl_enqueue_isc(union ctl_io *io); | 452static void ctl_enqueue_isc(union ctl_io *io); |
482#endif | |
483static const struct ctl_cmd_entry * 484 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa); 485static const struct ctl_cmd_entry * 486 ctl_validate_command(struct ctl_scsiio *ctsio); 487static int ctl_cmd_applicable(uint8_t lun_type, 488 const struct ctl_cmd_entry *entry); 489 | 453static const struct ctl_cmd_entry * 454 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa); 455static const struct ctl_cmd_entry * 456 ctl_validate_command(struct ctl_scsiio *ctsio); 457static int ctl_cmd_applicable(uint8_t lun_type, 458 const struct ctl_cmd_entry *entry); 459 |
460static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx); 461static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx); 462static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx); 463static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key); 464 |
|
490/* 491 * Load the serialization table. This isn't very pretty, but is probably 492 * the easiest way to do it. 493 */ 494#include "ctl_ser_table.c" 495 496/* 497 * We only need to define open, close and ioctl routines for this driver. --- 16 unchanged lines hidden (view full) --- 514 "ctl", 515 ctl_module_event_handler, 516 NULL 517}; 518 519DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD); 520MODULE_VERSION(ctl, 1); 521 | 465/* 466 * Load the serialization table. This isn't very pretty, but is probably 467 * the easiest way to do it. 468 */ 469#include "ctl_ser_table.c" 470 471/* 472 * We only need to define open, close and ioctl routines for this driver. --- 16 unchanged lines hidden (view full) --- 489 "ctl", 490 ctl_module_event_handler, 491 NULL 492}; 493 494DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD); 495MODULE_VERSION(ctl, 1); 496 |
522#ifdef notyet | 497static struct ctl_frontend ha_frontend = 498{ 499 .name = "ha", 500}; 501 |
523static void 524ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc, 525 union ctl_ha_msg *msg_info) 526{ 527 struct ctl_scsiio *ctsio; 528 529 if (msg_info->hdr.original_sc == NULL) { 530 printf("%s: original_sc == NULL!\n", __func__); --- 5 unchanged lines hidden (view full) --- 536 ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 537 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 538 ctsio->io_hdr.status = msg_info->hdr.status; 539 ctsio->scsi_status = msg_info->scsi.scsi_status; 540 ctsio->sense_len = msg_info->scsi.sense_len; 541 ctsio->sense_residual = msg_info->scsi.sense_residual; 542 ctsio->residual = msg_info->scsi.residual; 543 memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data, | 502static void 503ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc, 504 union ctl_ha_msg *msg_info) 505{ 506 struct ctl_scsiio *ctsio; 507 508 if (msg_info->hdr.original_sc == NULL) { 509 printf("%s: original_sc == NULL!\n", __func__); --- 5 unchanged lines hidden (view full) --- 515 ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 516 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 517 ctsio->io_hdr.status = msg_info->hdr.status; 518 ctsio->scsi_status = msg_info->scsi.scsi_status; 519 ctsio->sense_len = msg_info->scsi.sense_len; 520 ctsio->sense_residual = msg_info->scsi.sense_residual; 521 ctsio->residual = msg_info->scsi.residual; 522 memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data, |
544 sizeof(ctsio->sense_data)); | 523 msg_info->scsi.sense_len); |
545 memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 546 &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen)); 547 ctl_enqueue_isc((union ctl_io *)ctsio); 548} 549 550static void 551ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc, 552 union ctl_ha_msg *msg_info) 553{ 554 struct ctl_scsiio *ctsio; 555 556 if (msg_info->hdr.serializing_sc == NULL) { 557 printf("%s: serializing_sc == NULL!\n", __func__); 558 /* XXX KDM now what? */ 559 return; 560 } 561 562 ctsio = &msg_info->hdr.serializing_sc->scsiio; | 524 memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 525 &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen)); 526 ctl_enqueue_isc((union ctl_io *)ctsio); 527} 528 529static void 530ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc, 531 union ctl_ha_msg *msg_info) 532{ 533 struct ctl_scsiio *ctsio; 534 535 if (msg_info->hdr.serializing_sc == NULL) { 536 printf("%s: serializing_sc == NULL!\n", __func__); 537 /* XXX KDM now what? */ 538 return; 539 } 540 541 ctsio = &msg_info->hdr.serializing_sc->scsiio; |
563#if 0 564 /* 565 * Attempt to catch the situation where an I/O has 566 * been freed, and we're using it again. 567 */ 568 if (ctsio->io_hdr.io_type == 0xff) { 569 union ctl_io *tmp_io; 570 tmp_io = (union ctl_io *)ctsio; 571 printf("%s: %p use after free!\n", __func__, 572 ctsio); 573 printf("%s: type %d msg %d cdb %x iptl: " 574 "%u:%u:%u tag 0x%04x " 575 "flag %#x status %x\n", 576 __func__, 577 tmp_io->io_hdr.io_type, 578 tmp_io->io_hdr.msg_type, 579 tmp_io->scsiio.cdb[0], 580 tmp_io->io_hdr.nexus.initid, 581 tmp_io->io_hdr.nexus.targ_port, 582 tmp_io->io_hdr.nexus.targ_lun, 583 (tmp_io->io_hdr.io_type == 584 CTL_IO_TASK) ? 585 tmp_io->taskio.tag_num : 586 tmp_io->scsiio.tag_num, 587 tmp_io->io_hdr.flags, 588 tmp_io->io_hdr.status); 589 } 590#endif | |
591 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 592 ctl_enqueue_isc((union ctl_io *)ctsio); 593} 594 | 542 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 543 ctl_enqueue_isc((union ctl_io *)ctsio); 544} 545 |
546void 547ctl_isc_announce_lun(struct ctl_lun *lun) 548{ 549 struct ctl_softc *softc = lun->ctl_softc; 550 union ctl_ha_msg *msg; 551 struct ctl_ha_msg_lun_pr_key pr_key; 552 int i, k; 553 554 if (softc->ha_link != CTL_HA_LINK_ONLINE) 555 return; 556 mtx_lock(&lun->lun_lock); 557 i = sizeof(msg->lun); 558 if (lun->lun_devid) 559 i += lun->lun_devid->len; 560 i += sizeof(pr_key) * lun->pr_key_count; 561alloc: 562 mtx_unlock(&lun->lun_lock); 563 msg = malloc(i, M_CTL, M_WAITOK); 564 mtx_lock(&lun->lun_lock); 565 k = sizeof(msg->lun); 566 if (lun->lun_devid) 567 k += lun->lun_devid->len; 568 k += sizeof(pr_key) * lun->pr_key_count; 569 if (i < k) { 570 free(msg, M_CTL); 571 i = k; 572 goto alloc; 573 } 574 bzero(&msg->lun, sizeof(msg->lun)); 575 msg->hdr.msg_type = CTL_MSG_LUN_SYNC; 576 msg->hdr.nexus.targ_lun = lun->lun; 577 msg->hdr.nexus.targ_mapped_lun = lun->lun; 578 msg->lun.flags = lun->flags; 579 msg->lun.pr_generation = lun->PRGeneration; 580 msg->lun.pr_res_idx = lun->pr_res_idx; 581 msg->lun.pr_res_type = lun->res_type; 582 msg->lun.pr_key_count = lun->pr_key_count; 583 i = 0; 584 if (lun->lun_devid) { 585 msg->lun.lun_devid_len = lun->lun_devid->len; 586 memcpy(&msg->lun.data[i], lun->lun_devid->data, 587 msg->lun.lun_devid_len); 588 i += msg->lun.lun_devid_len; 589 } 590 for (k = 0; k < CTL_MAX_INITIATORS; k++) { 591 if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0) 592 continue; 593 pr_key.pr_iid = k; 594 memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key)); 595 i += sizeof(pr_key); 596 } 597 mtx_unlock(&lun->lun_lock); 598 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, 599 M_WAITOK); 600 free(msg, M_CTL); 601} 602 603void 604ctl_isc_announce_port(struct ctl_port *port) 605{ 606 struct ctl_softc *softc = control_softc; 607 union ctl_ha_msg *msg; 608 int i; 609 610 if (port->targ_port < softc->port_min || 611 port->targ_port >= softc->port_max || 612 softc->ha_link != CTL_HA_LINK_ONLINE) 613 return; 614 i = sizeof(msg->port) + 
strlen(port->port_name) + 1; 615 if (port->lun_map) 616 i += sizeof(uint32_t) * CTL_MAX_LUNS; 617 if (port->port_devid) 618 i += port->port_devid->len; 619 if (port->target_devid) 620 i += port->target_devid->len; 621 msg = malloc(i, M_CTL, M_WAITOK); 622 bzero(&msg->port, sizeof(msg->port)); 623 msg->hdr.msg_type = CTL_MSG_PORT_SYNC; 624 msg->hdr.nexus.targ_port = port->targ_port; 625 msg->port.port_type = port->port_type; 626 msg->port.physical_port = port->physical_port; 627 msg->port.virtual_port = port->virtual_port; 628 msg->port.status = port->status; 629 i = 0; 630 msg->port.name_len = sprintf(&msg->port.data[i], 631 "%d:%s", softc->ha_id, port->port_name) + 1; 632 i += msg->port.name_len; 633 if (port->lun_map) { 634 msg->port.lun_map_len = sizeof(uint32_t) * CTL_MAX_LUNS; 635 memcpy(&msg->port.data[i], port->lun_map, 636 msg->port.lun_map_len); 637 i += msg->port.lun_map_len; 638 } 639 if (port->port_devid) { 640 msg->port.port_devid_len = port->port_devid->len; 641 memcpy(&msg->port.data[i], port->port_devid->data, 642 msg->port.port_devid_len); 643 i += msg->port.port_devid_len; 644 } 645 if (port->target_devid) { 646 msg->port.target_devid_len = port->target_devid->len; 647 memcpy(&msg->port.data[i], port->target_devid->data, 648 msg->port.target_devid_len); 649 i += msg->port.target_devid_len; 650 } 651 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, 652 M_WAITOK); 653 free(msg, M_CTL); 654} 655 656static void 657ctl_isc_ha_link_up(struct ctl_softc *softc) 658{ 659 struct ctl_port *port; 660 struct ctl_lun *lun; 661 662 STAILQ_FOREACH(port, &softc->port_list, links) 663 ctl_isc_announce_port(port); 664 STAILQ_FOREACH(lun, &softc->lun_list, links) 665 ctl_isc_announce_lun(lun); 666} 667 668static void 669ctl_isc_ha_link_down(struct ctl_softc *softc) 670{ 671 struct ctl_port *port; 672 struct ctl_lun *lun; 673 union ctl_io *io; 674 675 mtx_lock(&softc->ctl_lock); 676 STAILQ_FOREACH(lun, &softc->lun_list, links) { 677 
mtx_lock(&lun->lun_lock); 678 lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; 679 mtx_unlock(&lun->lun_lock); 680 681 mtx_unlock(&softc->ctl_lock); 682 io = ctl_alloc_io(softc->othersc_pool); 683 mtx_lock(&softc->ctl_lock); 684 ctl_zero_io(io); 685 io->io_hdr.msg_type = CTL_MSG_FAILOVER; 686 io->io_hdr.nexus.targ_mapped_lun = lun->lun; 687 ctl_enqueue_isc(io); 688 } 689 690 STAILQ_FOREACH(port, &softc->port_list, links) { 691 if (port->targ_port >= softc->port_min && 692 port->targ_port < softc->port_max) 693 continue; 694 port->status &= ~CTL_PORT_STATUS_ONLINE; 695 } 696 mtx_unlock(&softc->ctl_lock); 697} 698 699static void 700ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 701{ 702 struct ctl_lun *lun; 703 uint32_t iid = ctl_get_initindex(&msg->hdr.nexus); 704 705 if (msg->hdr.nexus.targ_lun < CTL_MAX_LUNS && 706 (lun = softc->ctl_luns[msg->hdr.nexus.targ_lun]) != NULL) { 707 if (msg->ua.ua_all) { 708 if (msg->ua.ua_set) 709 ctl_est_ua_all(lun, iid, msg->ua.ua_type); 710 else 711 ctl_clr_ua_all(lun, iid, msg->ua.ua_type); 712 } else { 713 if (msg->ua.ua_set) 714 ctl_est_ua(lun, iid, msg->ua.ua_type); 715 else 716 ctl_clr_ua(lun, iid, msg->ua.ua_type); 717 } 718 } 719} 720 721static void 722ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 723{ 724 struct ctl_lun *lun; 725 struct ctl_ha_msg_lun_pr_key pr_key; 726 int i, k; 727 728 lun = softc->ctl_luns[msg->hdr.nexus.targ_lun]; 729 if (lun == NULL) { 730 CTL_DEBUG_PRINT(("%s: Unknown LUN %d\n", __func__, 731 msg->hdr.nexus.targ_lun)); 732 } else { 733 mtx_lock(&lun->lun_lock); 734 i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0; 735 if (msg->lun.lun_devid_len != i || (i > 0 && 736 memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) { 737 mtx_unlock(&lun->lun_lock); 738 printf("%s: Received conflicting HA LUN %d\n", 739 __func__, msg->hdr.nexus.targ_lun); 740 return; 741 } else { 742 /* Record whether peer is primary. 
*/ 743 if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) && 744 (msg->lun.flags & CTL_LUN_DISABLED) == 0) 745 lun->flags |= CTL_LUN_PEER_SC_PRIMARY; 746 else 747 lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; 748 749 /* If peer is primary and we are not -- use data */ 750 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 751 (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) { 752 lun->PRGeneration = msg->lun.pr_generation; 753 lun->pr_res_idx = msg->lun.pr_res_idx; 754 lun->res_type = msg->lun.pr_res_type; 755 lun->pr_key_count = msg->lun.pr_key_count; 756 for (k = 0; k < CTL_MAX_INITIATORS; k++) 757 ctl_clr_prkey(lun, k); 758 for (k = 0; k < msg->lun.pr_key_count; k++) { 759 memcpy(&pr_key, &msg->lun.data[i], 760 sizeof(pr_key)); 761 ctl_alloc_prkey(lun, pr_key.pr_iid); 762 ctl_set_prkey(lun, pr_key.pr_iid, 763 pr_key.pr_key); 764 i += sizeof(pr_key); 765 } 766 } 767 768 mtx_unlock(&lun->lun_lock); 769 CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n", 770 __func__, msg->hdr.nexus.targ_lun, 771 (msg->lun.flags & CTL_LUN_PRIMARY_SC) ? 
772 "primary" : "secondary")); 773 774 /* If we are primary but peer doesn't know -- notify */ 775 if ((lun->flags & CTL_LUN_PRIMARY_SC) && 776 (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0) 777 ctl_isc_announce_lun(lun); 778 } 779 } 780} 781 782static void 783ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 784{ 785 struct ctl_port *port; 786 int i, new; 787 788 port = softc->ctl_ports[msg->hdr.nexus.targ_port]; 789 if (port == NULL) { 790 CTL_DEBUG_PRINT(("%s: New port %d\n", __func__, 791 msg->hdr.nexus.targ_port)); 792 new = 1; 793 port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO); 794 port->frontend = &ha_frontend; 795 port->targ_port = msg->hdr.nexus.targ_port; 796 } else if (port->frontend == &ha_frontend) { 797 CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__, 798 msg->hdr.nexus.targ_port)); 799 new = 0; 800 } else { 801 printf("%s: Received conflicting HA port %d\n", 802 __func__, msg->hdr.nexus.targ_port); 803 return; 804 } 805 port->port_type = msg->port.port_type; 806 port->physical_port = msg->port.physical_port; 807 port->virtual_port = msg->port.virtual_port; 808 port->status = msg->port.status; 809 i = 0; 810 free(port->port_name, M_CTL); 811 port->port_name = strndup(&msg->port.data[i], msg->port.name_len, 812 M_CTL); 813 i += msg->port.name_len; 814 if (msg->port.lun_map_len != 0) { 815 if (port->lun_map == NULL) 816 port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS, 817 M_CTL, M_WAITOK); 818 memcpy(port->lun_map, &msg->port.data[i], 819 sizeof(uint32_t) * CTL_MAX_LUNS); 820 i += msg->port.lun_map_len; 821 } else { 822 free(port->lun_map, M_CTL); 823 port->lun_map = NULL; 824 } 825 if (msg->port.port_devid_len != 0) { 826 if (port->port_devid == NULL || 827 port->port_devid->len != msg->port.port_devid_len) { 828 free(port->port_devid, M_CTL); 829 port->port_devid = malloc(sizeof(struct ctl_devid) + 830 msg->port.port_devid_len, M_CTL, M_WAITOK); 831 } 832 memcpy(port->port_devid->data, &msg->port.data[i], 
833 msg->port.port_devid_len); 834 port->port_devid->len = msg->port.port_devid_len; 835 i += msg->port.port_devid_len; 836 } else { 837 free(port->port_devid, M_CTL); 838 port->port_devid = NULL; 839 } 840 if (msg->port.target_devid_len != 0) { 841 if (port->target_devid == NULL || 842 port->target_devid->len != msg->port.target_devid_len) { 843 free(port->target_devid, M_CTL); 844 port->target_devid = malloc(sizeof(struct ctl_devid) + 845 msg->port.target_devid_len, M_CTL, M_WAITOK); 846 } 847 memcpy(port->target_devid->data, &msg->port.data[i], 848 msg->port.target_devid_len); 849 port->target_devid->len = msg->port.target_devid_len; 850 i += msg->port.target_devid_len; 851 } else { 852 free(port->port_devid, M_CTL); 853 port->port_devid = NULL; 854 } 855 if (new) { 856 if (ctl_port_register(port) != 0) { 857 printf("%s: ctl_port_register() failed with error\n", 858 __func__); 859 } 860 } 861} 862 |
|
595/* 596 * ISC (Inter Shelf Communication) event handler. Events from the HA 597 * subsystem come in here. 598 */ 599static void 600ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param) 601{ 602 struct ctl_softc *softc; 603 union ctl_io *io; 604 struct ctl_prio *presio; 605 ctl_ha_status isc_status; 606 607 softc = control_softc; | 863/* 864 * ISC (Inter Shelf Communication) event handler. Events from the HA 865 * subsystem come in here. 866 */ 867static void 868ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param) 869{ 870 struct ctl_softc *softc; 871 union ctl_io *io; 872 struct ctl_prio *presio; 873 ctl_ha_status isc_status; 874 875 softc = control_softc; |
608 io = NULL; 609 610 611#if 0 612 printf("CTL: Isc Msg event %d\n", event); 613#endif | 876 CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event)); |
614 if (event == CTL_HA_EVT_MSG_RECV) { | 877 if (event == CTL_HA_EVT_MSG_RECV) { |
615 union ctl_ha_msg msg_info; | 878 union ctl_ha_msg *msg, msgbuf; |
616 | 879 |
617 isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info, 618 sizeof(msg_info), /*wait*/ 0); 619#if 0 620 printf("CTL: msg_type %d\n", msg_info.msg_type); 621#endif 622 if (isc_status != 0) { 623 printf("Error receiving message, status = %d\n", 624 isc_status); | 880 if (param > sizeof(msgbuf)) 881 msg = malloc(param, M_CTL, M_WAITOK); 882 else 883 msg = &msgbuf; 884 isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param, 885 M_WAITOK); 886 if (isc_status != CTL_HA_STATUS_SUCCESS) { 887 printf("%s: Error receiving message: %d\n", 888 __func__, isc_status); 889 if (msg != &msgbuf) 890 free(msg, M_CTL); |
625 return; 626 } 627 | 891 return; 892 } 893 |
628 switch (msg_info.hdr.msg_type) { | 894 CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->msg_type)); 895 switch (msg->hdr.msg_type) { |
629 case CTL_MSG_SERIALIZE: | 896 case CTL_MSG_SERIALIZE: |
630#if 0 631 printf("Serialize\n"); 632#endif 633 io = ctl_alloc_io_nowait(softc->othersc_pool); 634 if (io == NULL) { 635 printf("ctl_isc_event_handler: can't allocate " 636 "ctl_io!\n"); 637 /* Bad Juju */ 638 /* Need to set busy and send msg back */ 639 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 640 msg_info.hdr.status = CTL_SCSI_ERROR; 641 msg_info.scsi.scsi_status = SCSI_STATUS_BUSY; 642 msg_info.scsi.sense_len = 0; 643 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 644 sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS){ 645 } 646 goto bailout; 647 } | 897 io = ctl_alloc_io(softc->othersc_pool); |
648 ctl_zero_io(io); | 898 ctl_zero_io(io); |
649 // populate ctsio from msg_info | 899 // populate ctsio from msg |
650 io->io_hdr.io_type = CTL_IO_SCSI; 651 io->io_hdr.msg_type = CTL_MSG_SERIALIZE; | 900 io->io_hdr.io_type = CTL_IO_SCSI; 901 io->io_hdr.msg_type = CTL_MSG_SERIALIZE; |
652 io->io_hdr.original_sc = msg_info.hdr.original_sc; 653#if 0 654 printf("pOrig %x\n", (int)msg_info.original_sc); 655#endif | 902 io->io_hdr.original_sc = msg->hdr.original_sc; |
656 io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC | 657 CTL_FLAG_IO_ACTIVE; 658 /* 659 * If we're in serialization-only mode, we don't 660 * want to go through full done processing. Thus 661 * the COPY flag. 662 * 663 * XXX KDM add another flag that is more specific. 664 */ | 903 io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC | 904 CTL_FLAG_IO_ACTIVE; 905 /* 906 * If we're in serialization-only mode, we don't 907 * want to go through full done processing. Thus 908 * the COPY flag. 909 * 910 * XXX KDM add another flag that is more specific. 911 */ |
665 if (softc->ha_mode == CTL_HA_MODE_SER_ONLY) | 912 if (softc->ha_mode != CTL_HA_MODE_XFER) |
666 io->io_hdr.flags |= CTL_FLAG_INT_COPY; | 913 io->io_hdr.flags |= CTL_FLAG_INT_COPY; |
667 io->io_hdr.nexus = msg_info.hdr.nexus; | 914 io->io_hdr.nexus = msg->hdr.nexus; |
668#if 0 669 printf("port %u, iid %u, lun %u\n", 670 io->io_hdr.nexus.targ_port, 671 io->io_hdr.nexus.initid, 672 io->io_hdr.nexus.targ_lun); 673#endif | 915#if 0 916 printf("port %u, iid %u, lun %u\n", 917 io->io_hdr.nexus.targ_port, 918 io->io_hdr.nexus.initid, 919 io->io_hdr.nexus.targ_lun); 920#endif |
674 io->scsiio.tag_num = msg_info.scsi.tag_num; 675 io->scsiio.tag_type = msg_info.scsi.tag_type; 676 memcpy(io->scsiio.cdb, msg_info.scsi.cdb, | 921 io->scsiio.tag_num = msg->scsi.tag_num; 922 io->scsiio.tag_type = msg->scsi.tag_type; 923#ifdef CTL_TIME_IO 924 io->io_hdr.start_time = time_uptime; 925 getbintime(&io->io_hdr.start_bt); 926#endif /* CTL_TIME_IO */ 927 io->scsiio.cdb_len = msg->scsi.cdb_len; 928 memcpy(io->scsiio.cdb, msg->scsi.cdb, |
677 CTL_MAX_CDBLEN); 678 if (softc->ha_mode == CTL_HA_MODE_XFER) { 679 const struct ctl_cmd_entry *entry; 680 681 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 682 io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 683 io->io_hdr.flags |= 684 entry->flags & CTL_FLAG_DATA_MASK; 685 } 686 ctl_enqueue_isc(io); 687 break; 688 689 /* Performed on the Originating SC, XFER mode only */ 690 case CTL_MSG_DATAMOVE: { 691 struct ctl_sg_entry *sgl; 692 int i, j; 693 | 929 CTL_MAX_CDBLEN); 930 if (softc->ha_mode == CTL_HA_MODE_XFER) { 931 const struct ctl_cmd_entry *entry; 932 933 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 934 io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 935 io->io_hdr.flags |= 936 entry->flags & CTL_FLAG_DATA_MASK; 937 } 938 ctl_enqueue_isc(io); 939 break; 940 941 /* Performed on the Originating SC, XFER mode only */ 942 case CTL_MSG_DATAMOVE: { 943 struct ctl_sg_entry *sgl; 944 int i, j; 945 |
694 io = msg_info.hdr.original_sc; | 946 io = msg->hdr.original_sc; |
695 if (io == NULL) { 696 printf("%s: original_sc == NULL!\n", __func__); 697 /* XXX KDM do something here */ 698 break; 699 } 700 io->io_hdr.msg_type = CTL_MSG_DATAMOVE; 701 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 702 /* 703 * Keep track of this, we need to send it back over 704 * when the datamove is complete. 705 */ | 947 if (io == NULL) { 948 printf("%s: original_sc == NULL!\n", __func__); 949 /* XXX KDM do something here */ 950 break; 951 } 952 io->io_hdr.msg_type = CTL_MSG_DATAMOVE; 953 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 954 /* 955 * Keep track of this, we need to send it back over 956 * when the datamove is complete. 957 */ |
706 io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc; | 958 io->io_hdr.serializing_sc = msg->hdr.serializing_sc; |
707 | 959 |
708 if (msg_info.dt.sg_sequence == 0) { 709 /* 710 * XXX KDM we use the preallocated S/G list 711 * here, but we'll need to change this to 712 * dynamic allocation if we need larger S/G 713 * lists. 714 */ 715 if (msg_info.dt.kern_sg_entries > 716 sizeof(io->io_hdr.remote_sglist) / 717 sizeof(io->io_hdr.remote_sglist[0])) { 718 printf("%s: number of S/G entries " 719 "needed %u > allocated num %zd\n", 720 __func__, 721 msg_info.dt.kern_sg_entries, 722 sizeof(io->io_hdr.remote_sglist)/ 723 sizeof(io->io_hdr.remote_sglist[0])); 724 725 /* 726 * XXX KDM send a message back to 727 * the other side to shut down the 728 * DMA. The error will come back 729 * through via the normal channel. 730 */ 731 break; 732 } 733 sgl = io->io_hdr.remote_sglist; 734 memset(sgl, 0, 735 sizeof(io->io_hdr.remote_sglist)); | 960 if (msg->dt.sg_sequence == 0) { 961 i = msg->dt.kern_sg_entries + 962 io->scsiio.kern_data_len / 963 CTL_HA_DATAMOVE_SEGMENT + 1; 964 sgl = malloc(sizeof(*sgl) * i, M_CTL, 965 M_WAITOK | M_ZERO); 966 io->io_hdr.remote_sglist = sgl; 967 io->io_hdr.local_sglist = 968 &sgl[msg->dt.kern_sg_entries]; |
736 737 io->scsiio.kern_data_ptr = (uint8_t *)sgl; 738 739 io->scsiio.kern_sg_entries = | 969 970 io->scsiio.kern_data_ptr = (uint8_t *)sgl; 971 972 io->scsiio.kern_sg_entries = |
740 msg_info.dt.kern_sg_entries; | 973 msg->dt.kern_sg_entries; |
741 io->scsiio.rem_sg_entries = | 974 io->scsiio.rem_sg_entries = |
742 msg_info.dt.kern_sg_entries; | 975 msg->dt.kern_sg_entries; |
743 io->scsiio.kern_data_len = | 976 io->scsiio.kern_data_len = |
744 msg_info.dt.kern_data_len; | 977 msg->dt.kern_data_len; |
745 io->scsiio.kern_total_len = | 978 io->scsiio.kern_total_len = |
746 msg_info.dt.kern_total_len; | 979 msg->dt.kern_total_len; |
747 io->scsiio.kern_data_resid = | 980 io->scsiio.kern_data_resid = |
748 msg_info.dt.kern_data_resid; | 981 msg->dt.kern_data_resid; |
749 io->scsiio.kern_rel_offset = | 982 io->scsiio.kern_rel_offset = |
750 msg_info.dt.kern_rel_offset; 751 /* 752 * Clear out per-DMA flags. 753 */ 754 io->io_hdr.flags &= ~CTL_FLAG_RDMA_MASK; 755 /* 756 * Add per-DMA flags that are set for this 757 * particular DMA request. 758 */ 759 io->io_hdr.flags |= msg_info.dt.flags & 760 CTL_FLAG_RDMA_MASK; | 983 msg->dt.kern_rel_offset; 984 io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR; 985 io->io_hdr.flags |= msg->dt.flags & 986 CTL_FLAG_BUS_ADDR; |
761 } else 762 sgl = (struct ctl_sg_entry *) 763 io->scsiio.kern_data_ptr; 764 | 987 } else 988 sgl = (struct ctl_sg_entry *) 989 io->scsiio.kern_data_ptr; 990 |
765 for (i = msg_info.dt.sent_sg_entries, j = 0; 766 i < (msg_info.dt.sent_sg_entries + 767 msg_info.dt.cur_sg_entries); i++, j++) { 768 sgl[i].addr = msg_info.dt.sg_list[j].addr; 769 sgl[i].len = msg_info.dt.sg_list[j].len; | 991 for (i = msg->dt.sent_sg_entries, j = 0; 992 i < (msg->dt.sent_sg_entries + 993 msg->dt.cur_sg_entries); i++, j++) { 994 sgl[i].addr = msg->dt.sg_list[j].addr; 995 sgl[i].len = msg->dt.sg_list[j].len; |
770 771#if 0 772 printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n", 773 __func__, | 996 997#if 0 998 printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n", 999 __func__, |
774 msg_info.dt.sg_list[j].addr, 775 msg_info.dt.sg_list[j].len, | 1000 msg->dt.sg_list[j].addr, 1001 msg->dt.sg_list[j].len, |
776 sgl[i].addr, sgl[i].len, j, i); 777#endif 778 } | 1002 sgl[i].addr, sgl[i].len, j, i); 1003#endif 1004 } |
779#if 0 780 memcpy(&sgl[msg_info.dt.sent_sg_entries], 781 msg_info.dt.sg_list, 782 sizeof(*sgl) * msg_info.dt.cur_sg_entries); 783#endif | |
784 785 /* 786 * If this is the last piece of the I/O, we've got 787 * the full S/G list. Queue processing in the thread. 788 * Otherwise wait for the next piece. 789 */ | 1005 1006 /* 1007 * If this is the last piece of the I/O, we've got 1008 * the full S/G list. Queue processing in the thread. 1009 * Otherwise wait for the next piece. 1010 */ |
790 if (msg_info.dt.sg_last != 0) | 1011 if (msg->dt.sg_last != 0) |
791 ctl_enqueue_isc(io); 792 break; 793 } 794 /* Performed on the Serializing (primary) SC, XFER mode only */ 795 case CTL_MSG_DATAMOVE_DONE: { | 1012 ctl_enqueue_isc(io); 1013 break; 1014 } 1015 /* Performed on the Serializing (primary) SC, XFER mode only */ 1016 case CTL_MSG_DATAMOVE_DONE: { |
796 if (msg_info.hdr.serializing_sc == NULL) { | 1017 if (msg->hdr.serializing_sc == NULL) { |
797 printf("%s: serializing_sc == NULL!\n", 798 __func__); 799 /* XXX KDM now what? */ 800 break; 801 } 802 /* 803 * We grab the sense information here in case 804 * there was a failure, so we can return status 805 * back to the initiator. 806 */ | 1018 printf("%s: serializing_sc == NULL!\n", 1019 __func__); 1020 /* XXX KDM now what? */ 1021 break; 1022 } 1023 /* 1024 * We grab the sense information here in case 1025 * there was a failure, so we can return status 1026 * back to the initiator. 1027 */ |
807 io = msg_info.hdr.serializing_sc; | 1028 io = msg->hdr.serializing_sc; |
808 io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE; | 1029 io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE; |
809 io->io_hdr.status = msg_info.hdr.status; 810 io->scsiio.scsi_status = msg_info.scsi.scsi_status; 811 io->scsiio.sense_len = msg_info.scsi.sense_len; 812 io->scsiio.sense_residual =msg_info.scsi.sense_residual; 813 io->io_hdr.port_status = msg_info.scsi.fetd_status; 814 io->scsiio.residual = msg_info.scsi.residual; 815 memcpy(&io->scsiio.sense_data,&msg_info.scsi.sense_data, 816 sizeof(io->scsiio.sense_data)); | 1030 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1031 io->io_hdr.port_status = msg->scsi.fetd_status; 1032 io->scsiio.residual = msg->scsi.residual; 1033 if (msg->hdr.status != CTL_STATUS_NONE) { 1034 io->io_hdr.status = msg->hdr.status; 1035 io->scsiio.scsi_status = msg->scsi.scsi_status; 1036 io->scsiio.sense_len = msg->scsi.sense_len; 1037 io->scsiio.sense_residual =msg->scsi.sense_residual; 1038 memcpy(&io->scsiio.sense_data, 1039 &msg->scsi.sense_data, 1040 msg->scsi.sense_len); 1041 } |
817 ctl_enqueue_isc(io); 818 break; 819 } 820 821 /* Preformed on Originating SC, SER_ONLY mode */ 822 case CTL_MSG_R2R: | 1042 ctl_enqueue_isc(io); 1043 break; 1044 } 1045 1046 /* Preformed on Originating SC, SER_ONLY mode */ 1047 case CTL_MSG_R2R: |
823 io = msg_info.hdr.original_sc; | 1048 io = msg->hdr.original_sc; |
824 if (io == NULL) { | 1049 if (io == NULL) { |
825 printf("%s: Major Bummer\n", __func__); 826 return; 827 } else { 828#if 0 829 printf("pOrig %x\n",(int) ctsio); 830#endif | 1050 printf("%s: original_sc == NULL!\n", 1051 __func__); 1052 break; |
831 } | 1053 } |
1054 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; |
|
832 io->io_hdr.msg_type = CTL_MSG_R2R; | 1055 io->io_hdr.msg_type = CTL_MSG_R2R; |
833 io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc; | 1056 io->io_hdr.serializing_sc = msg->hdr.serializing_sc; |
834 ctl_enqueue_isc(io); 835 break; 836 837 /* 838 * Performed on Serializing(i.e. primary SC) SC in SER_ONLY 839 * mode. 840 * Performed on the Originating (i.e. secondary) SC in XFER 841 * mode 842 */ 843 case CTL_MSG_FINISH_IO: 844 if (softc->ha_mode == CTL_HA_MODE_XFER) | 1057 ctl_enqueue_isc(io); 1058 break; 1059 1060 /* 1061 * Performed on Serializing(i.e. primary SC) SC in SER_ONLY 1062 * mode. 1063 * Performed on the Originating (i.e. secondary) SC in XFER 1064 * mode 1065 */ 1066 case CTL_MSG_FINISH_IO: 1067 if (softc->ha_mode == CTL_HA_MODE_XFER) |
845 ctl_isc_handler_finish_xfer(softc, 846 &msg_info); | 1068 ctl_isc_handler_finish_xfer(softc, msg); |
847 else | 1069 else |
848 ctl_isc_handler_finish_ser_only(softc, 849 &msg_info); | 1070 ctl_isc_handler_finish_ser_only(softc, msg); |
850 break; 851 852 /* Preformed on Originating SC */ 853 case CTL_MSG_BAD_JUJU: | 1071 break; 1072 1073 /* Preformed on Originating SC */ 1074 case CTL_MSG_BAD_JUJU: |
854 io = msg_info.hdr.original_sc; | 1075 io = msg->hdr.original_sc; |
855 if (io == NULL) { 856 printf("%s: Bad JUJU!, original_sc is NULL!\n", 857 __func__); 858 break; 859 } | 1076 if (io == NULL) { 1077 printf("%s: Bad JUJU!, original_sc is NULL!\n", 1078 __func__); 1079 break; 1080 } |
860 ctl_copy_sense_data(&msg_info, io); | 1081 ctl_copy_sense_data(msg, io); |
861 /* 862 * IO should have already been cleaned up on other 863 * SC so clear this flag so we won't send a message 864 * back to finish the IO there. 865 */ 866 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 867 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 868 | 1082 /* 1083 * IO should have already been cleaned up on other 1084 * SC so clear this flag so we won't send a message 1085 * back to finish the IO there. 1086 */ 1087 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 1088 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1089 |
869 /* io = msg_info.hdr.serializing_sc; */ | 1090 /* io = msg->hdr.serializing_sc; */ |
870 io->io_hdr.msg_type = CTL_MSG_BAD_JUJU; 871 ctl_enqueue_isc(io); 872 break; 873 874 /* Handle resets sent from the other side */ 875 case CTL_MSG_MANAGE_TASKS: { 876 struct ctl_taskio *taskio; | 1091 io->io_hdr.msg_type = CTL_MSG_BAD_JUJU; 1092 ctl_enqueue_isc(io); 1093 break; 1094 1095 /* Handle resets sent from the other side */ 1096 case CTL_MSG_MANAGE_TASKS: { 1097 struct ctl_taskio *taskio; |
877 taskio = (struct ctl_taskio *)ctl_alloc_io_nowait( | 1098 taskio = (struct ctl_taskio *)ctl_alloc_io( |
878 softc->othersc_pool); | 1099 softc->othersc_pool); |
879 if (taskio == NULL) { 880 printf("ctl_isc_event_handler: can't allocate " 881 "ctl_io!\n"); 882 /* Bad Juju */ 883 /* should I just call the proper reset func 884 here??? */ 885 goto bailout; 886 } | |
887 ctl_zero_io((union ctl_io *)taskio); 888 taskio->io_hdr.io_type = CTL_IO_TASK; 889 taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; | 1100 ctl_zero_io((union ctl_io *)taskio); 1101 taskio->io_hdr.io_type = CTL_IO_TASK; 1102 taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; |
890 taskio->io_hdr.nexus = msg_info.hdr.nexus; 891 taskio->task_action = msg_info.task.task_action; 892 taskio->tag_num = msg_info.task.tag_num; 893 taskio->tag_type = msg_info.task.tag_type; | 1103 taskio->io_hdr.nexus = msg->hdr.nexus; 1104 taskio->task_action = msg->task.task_action; 1105 taskio->tag_num = msg->task.tag_num; 1106 taskio->tag_type = msg->task.tag_type; |
894#ifdef CTL_TIME_IO 895 taskio->io_hdr.start_time = time_uptime; 896 getbintime(&taskio->io_hdr.start_bt); | 1107#ifdef CTL_TIME_IO 1108 taskio->io_hdr.start_time = time_uptime; 1109 getbintime(&taskio->io_hdr.start_bt); |
897#if 0 898 cs_prof_gettime(&taskio->io_hdr.start_ticks); 899#endif | |
900#endif /* CTL_TIME_IO */ 901 ctl_run_task((union ctl_io *)taskio); 902 break; 903 } 904 /* Persistent Reserve action which needs attention */ 905 case CTL_MSG_PERS_ACTION: | 1110#endif /* CTL_TIME_IO */ 1111 ctl_run_task((union ctl_io *)taskio); 1112 break; 1113 } 1114 /* Persistent Reserve action which needs attention */ 1115 case CTL_MSG_PERS_ACTION: |
906 presio = (struct ctl_prio *)ctl_alloc_io_nowait( | 1116 presio = (struct ctl_prio *)ctl_alloc_io( |
907 softc->othersc_pool); | 1117 softc->othersc_pool); |
908 if (presio == NULL) { 909 printf("ctl_isc_event_handler: can't allocate " 910 "ctl_io!\n"); 911 /* Bad Juju */ 912 /* Need to set busy and send msg back */ 913 goto bailout; 914 } | |
915 ctl_zero_io((union ctl_io *)presio); 916 presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION; | 1118 ctl_zero_io((union ctl_io *)presio); 1119 presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION; |
917 presio->pr_msg = msg_info.pr; | 1120 presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 1121 presio->io_hdr.nexus = msg->hdr.nexus; 1122 presio->pr_msg = msg->pr; |
918 ctl_enqueue_isc((union ctl_io *)presio); 919 break; | 1123 ctl_enqueue_isc((union ctl_io *)presio); 1124 break; |
920 case CTL_MSG_SYNC_FE: 921 rcv_sync_msg = 1; | 1125 case CTL_MSG_UA: 1126 ctl_isc_ua(softc, msg, param); |
922 break; | 1127 break; |
1128 case CTL_MSG_PORT_SYNC: 1129 ctl_isc_port_sync(softc, msg, param); 1130 break; 1131 case CTL_MSG_LUN_SYNC: 1132 ctl_isc_lun_sync(softc, msg, param); 1133 break; |
|
923 default: | 1134 default: |
924 printf("How did I get here?\n"); | 1135 printf("Received HA message of unknown type %d\n", 1136 msg->hdr.msg_type); 1137 break; |
925 } | 1138 } |
926 } else if (event == CTL_HA_EVT_MSG_SENT) { 927 if (param != CTL_HA_STATUS_SUCCESS) { 928 printf("Bad status from ctl_ha_msg_send status %d\n", 929 param); | 1139 if (msg != &msgbuf) 1140 free(msg, M_CTL); 1141 } else if (event == CTL_HA_EVT_LINK_CHANGE) { 1142 printf("CTL: HA link status changed from %d to %d\n", 1143 softc->ha_link, param); 1144 if (param == softc->ha_link) 1145 return; 1146 if (softc->ha_link == CTL_HA_LINK_ONLINE) { 1147 softc->ha_link = param; 1148 ctl_isc_ha_link_down(softc); 1149 } else { 1150 softc->ha_link = param; 1151 if (softc->ha_link == CTL_HA_LINK_ONLINE) 1152 ctl_isc_ha_link_up(softc); |
930 } 931 return; | 1153 } 1154 return; |
932 } else if (event == CTL_HA_EVT_DISCONNECT) { 933 printf("CTL: Got a disconnect from Isc\n"); 934 return; | |
935 } else { 936 printf("ctl_isc_event_handler: Unknown event %d\n", event); 937 return; 938 } | 1155 } else { 1156 printf("ctl_isc_event_handler: Unknown event %d\n", event); 1157 return; 1158 } |
939 940bailout: 941 return; | |
942} 943 944static void 945ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest) 946{ | 1159} 1160 1161static void 1162ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest) 1163{ |
947 struct scsi_sense_data *sense; | |
948 | 1164 |
949 sense = &dest->scsiio.sense_data; 950 bcopy(&src->scsi.sense_data, sense, sizeof(*sense)); | 1165 memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data, 1166 src->scsi.sense_len); |
951 dest->scsiio.scsi_status = src->scsi.scsi_status; 952 dest->scsiio.sense_len = src->scsi.sense_len; 953 dest->io_hdr.status = src->hdr.status; 954} | 1167 dest->scsiio.scsi_status = src->scsi.scsi_status; 1168 dest->scsiio.sense_len = src->scsi.sense_len; 1169 dest->io_hdr.status = src->hdr.status; 1170} |
955#endif | |
956 957static void | 1171 1172static void |
1173ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest) 1174{ 1175 1176 memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data, 1177 src->scsiio.sense_len); 1178 dest->scsi.scsi_status = src->scsiio.scsi_status; 1179 dest->scsi.sense_len = src->scsiio.sense_len; 1180 dest->hdr.status = src->io_hdr.status; 1181} 1182 1183static void |
|
958ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 959{ | 1184ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 1185{ |
1186 struct ctl_softc *softc = lun->ctl_softc; |
|
960 ctl_ua_type *pu; 961 | 1187 ctl_ua_type *pu; 1188 |
1189 if (initidx < softc->init_min || initidx >= softc->init_max) 1190 return; |
|
962 mtx_assert(&lun->lun_lock, MA_OWNED); 963 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 964 if (pu == NULL) 965 return; 966 pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua; 967} 968 969static void 970ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 971{ | 1191 mtx_assert(&lun->lun_lock, MA_OWNED); 1192 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1193 if (pu == NULL) 1194 return; 1195 pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua; 1196} 1197 1198static void 1199ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1200{ |
1201 struct ctl_softc *softc = lun->ctl_softc; |
|
972 int i, j; 973 974 mtx_assert(&lun->lun_lock, MA_OWNED); | 1202 int i, j; 1203 1204 mtx_assert(&lun->lun_lock, MA_OWNED); |
975 for (i = 0; i < CTL_MAX_PORTS; i++) { | 1205 for (i = softc->port_min; i < softc->port_max; i++) { |
976 if (lun->pending_ua[i] == NULL) 977 continue; 978 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 979 if (i * CTL_MAX_INIT_PER_PORT + j == except) 980 continue; 981 lun->pending_ua[i][j] |= ua; 982 } 983 } 984} 985 986static void 987ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 988{ | 1206 if (lun->pending_ua[i] == NULL) 1207 continue; 1208 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 1209 if (i * CTL_MAX_INIT_PER_PORT + j == except) 1210 continue; 1211 lun->pending_ua[i][j] |= ua; 1212 } 1213 } 1214} 1215 1216static void 1217ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 1218{ |
1219 struct ctl_softc *softc = lun->ctl_softc; |
|
989 ctl_ua_type *pu; 990 | 1220 ctl_ua_type *pu; 1221 |
1222 if (initidx < softc->init_min || initidx >= softc->init_max) 1223 return; |
|
991 mtx_assert(&lun->lun_lock, MA_OWNED); 992 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 993 if (pu == NULL) 994 return; 995 pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua; 996} 997 998static void 999ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1000{ | 1224 mtx_assert(&lun->lun_lock, MA_OWNED); 1225 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1226 if (pu == NULL) 1227 return; 1228 pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua; 1229} 1230 1231static void 1232ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1233{ |
1234 struct ctl_softc *softc = lun->ctl_softc; |
|
1001 int i, j; 1002 1003 mtx_assert(&lun->lun_lock, MA_OWNED); | 1235 int i, j; 1236 1237 mtx_assert(&lun->lun_lock, MA_OWNED); |
1004 for (i = 0; i < CTL_MAX_PORTS; i++) { | 1238 for (i = softc->port_min; i < softc->port_max; i++) { |
1005 if (lun->pending_ua[i] == NULL) 1006 continue; 1007 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 1008 if (i * CTL_MAX_INIT_PER_PORT + j == except) 1009 continue; 1010 lun->pending_ua[i][j] &= ~ua; 1011 } 1012 } --- 9 unchanged lines hidden (view full) --- 1022 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) { 1023 mtx_lock(&lun->lun_lock); 1024 ctl_clr_ua(lun, initidx, ua_type); 1025 mtx_unlock(&lun->lun_lock); 1026 } 1027} 1028 1029static int | 1239 if (lun->pending_ua[i] == NULL) 1240 continue; 1241 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 1242 if (i * CTL_MAX_INIT_PER_PORT + j == except) 1243 continue; 1244 lun->pending_ua[i][j] &= ~ua; 1245 } 1246 } --- 9 unchanged lines hidden (view full) --- 1256 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) { 1257 mtx_lock(&lun->lun_lock); 1258 ctl_clr_ua(lun, initidx, ua_type); 1259 mtx_unlock(&lun->lun_lock); 1260 } 1261} 1262 1263static int |
1030ctl_ha_state_sysctl(SYSCTL_HANDLER_ARGS) | 1264ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS) |
1031{ 1032 struct ctl_softc *softc = (struct ctl_softc *)arg1; 1033 struct ctl_lun *lun; | 1265{ 1266 struct ctl_softc *softc = (struct ctl_softc *)arg1; 1267 struct ctl_lun *lun; |
1268 struct ctl_lun_req ireq; |
|
1034 int error, value; 1035 | 1269 int error, value; 1270 |
1036 if (softc->flags & CTL_FLAG_ACTIVE_SHELF) 1037 value = 0; 1038 else 1039 value = 1; 1040 | 1271 value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 0 : 1; |
1041 error = sysctl_handle_int(oidp, &value, 0, req); 1042 if ((error != 0) || (req->newptr == NULL)) 1043 return (error); 1044 1045 mtx_lock(&softc->ctl_lock); 1046 if (value == 0) 1047 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1048 else 1049 softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; 1050 STAILQ_FOREACH(lun, &softc->lun_list, links) { | 1272 error = sysctl_handle_int(oidp, &value, 0, req); 1273 if ((error != 0) || (req->newptr == NULL)) 1274 return (error); 1275 1276 mtx_lock(&softc->ctl_lock); 1277 if (value == 0) 1278 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1279 else 1280 softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; 1281 STAILQ_FOREACH(lun, &softc->lun_list, links) { |
1051 mtx_lock(&lun->lun_lock); 1052 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 1053 mtx_unlock(&lun->lun_lock); | 1282 mtx_unlock(&softc->ctl_lock); 1283 bzero(&ireq, sizeof(ireq)); 1284 ireq.reqtype = CTL_LUNREQ_MODIFY; 1285 ireq.reqdata.modify.lun_id = lun->lun; 1286 lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0, 1287 curthread); 1288 if (ireq.status != CTL_LUN_OK) { 1289 printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n", 1290 __func__, ireq.status, ireq.error_str); 1291 } 1292 mtx_lock(&softc->ctl_lock); |
1054 } 1055 mtx_unlock(&softc->ctl_lock); 1056 return (0); 1057} 1058 1059static int 1060ctl_init(void) 1061{ 1062 struct ctl_softc *softc; 1063 void *other_pool; 1064 int i, error, retval; | 1293 } 1294 mtx_unlock(&softc->ctl_lock); 1295 return (0); 1296} 1297 1298static int 1299ctl_init(void) 1300{ 1301 struct ctl_softc *softc; 1302 void *other_pool; 1303 int i, error, retval; |
1065 //int isc_retval; | |
1066 1067 retval = 0; | 1304 1305 retval = 0; |
1068 ctl_pause_rtr = 0; 1069 rcv_sync_msg = 0; 1070 | |
1071 control_softc = malloc(sizeof(*control_softc), M_DEVBUF, 1072 M_WAITOK | M_ZERO); 1073 softc = control_softc; 1074 1075 softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, 1076 "cam/ctl"); 1077 1078 softc->dev->si_drv1 = softc; 1079 | 1306 control_softc = malloc(sizeof(*control_softc), M_DEVBUF, 1307 M_WAITOK | M_ZERO); 1308 softc = control_softc; 1309 1310 softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, 1311 "cam/ctl"); 1312 1313 softc->dev->si_drv1 = softc; 1314 |
1080 /* 1081 * By default, return a "bad LUN" peripheral qualifier for unknown 1082 * LUNs. The user can override this default using the tunable or 1083 * sysctl. See the comment in ctl_inquiry_std() for more details. 1084 */ 1085 softc->inquiry_pq_no_lun = 1; 1086 TUNABLE_INT_FETCH("kern.cam.ctl.inquiry_pq_no_lun", 1087 &softc->inquiry_pq_no_lun); | |
1088 sysctl_ctx_init(&softc->sysctl_ctx); 1089 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, 1090 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", 1091 CTLFLAG_RD, 0, "CAM Target Layer"); 1092 1093 if (softc->sysctl_tree == NULL) { 1094 printf("%s: unable to allocate sysctl tree\n", __func__); 1095 destroy_dev(softc->dev); 1096 free(control_softc, M_DEVBUF); 1097 control_softc = NULL; 1098 return (ENOMEM); 1099 } 1100 | 1315 sysctl_ctx_init(&softc->sysctl_ctx); 1316 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, 1317 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", 1318 CTLFLAG_RD, 0, "CAM Target Layer"); 1319 1320 if (softc->sysctl_tree == NULL) { 1321 printf("%s: unable to allocate sysctl tree\n", __func__); 1322 destroy_dev(softc->dev); 1323 free(control_softc, M_DEVBUF); 1324 control_softc = NULL; 1325 return (ENOMEM); 1326 } 1327 |
1101 SYSCTL_ADD_INT(&softc->sysctl_ctx, 1102 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, 1103 "inquiry_pq_no_lun", CTLFLAG_RW, 1104 &softc->inquiry_pq_no_lun, 0, 1105 "Report no lun possible for invalid LUNs"); 1106 | |
1107 mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); 1108 softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), 1109 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 1110 softc->open_count = 0; 1111 1112 /* 1113 * Default to actually sending a SYNCHRONIZE CACHE command down to 1114 * the drive. 1115 */ 1116 softc->flags = CTL_FLAG_REAL_SYNC; 1117 | 1328 mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); 1329 softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), 1330 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 1331 softc->open_count = 0; 1332 1333 /* 1334 * Default to actually sending a SYNCHRONIZE CACHE command down to 1335 * the drive. 1336 */ 1337 softc->flags = CTL_FLAG_REAL_SYNC; 1338 |
1339 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1340 OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0, 1341 "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)"); 1342 |
|
1118 /* 1119 * In Copan's HA scheme, the "master" and "slave" roles are 1120 * figured out through the slot the controller is in. Although it 1121 * is an active/active system, someone has to be in charge. 1122 */ 1123 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1124 OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, 1125 "HA head ID (0 - no HA)"); | 1343 /* 1344 * In Copan's HA scheme, the "master" and "slave" roles are 1345 * figured out through the slot the controller is in. Although it 1346 * is an active/active system, someone has to be in charge. 1347 */ 1348 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1349 OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, 1350 "HA head ID (0 - no HA)"); |
1126 if (softc->ha_id == 0) { | 1351 if (softc->ha_id == 0 || softc->ha_id > NUM_TARGET_PORT_GROUPS) { |
1127 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1128 softc->is_single = 1; | 1352 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1353 softc->is_single = 1; |
1129 softc->port_offset = 0; 1130 } else 1131 softc->port_offset = (softc->ha_id - 1) * CTL_MAX_PORTS; 1132 softc->persis_offset = softc->port_offset * CTL_MAX_INIT_PER_PORT; | 1354 softc->port_cnt = CTL_MAX_PORTS; 1355 softc->port_min = 0; 1356 } else { 1357 softc->port_cnt = CTL_MAX_PORTS / NUM_TARGET_PORT_GROUPS; 1358 softc->port_min = (softc->ha_id - 1) * softc->port_cnt; 1359 } 1360 softc->port_max = softc->port_min + softc->port_cnt; 1361 softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT; 1362 softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT; |
1133 | 1363 |
1364 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1365 OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0, 1366 "HA link state (0 - offline, 1 - unknown, 2 - online)"); 1367 |
|
1134 STAILQ_INIT(&softc->lun_list); 1135 STAILQ_INIT(&softc->pending_lun_queue); 1136 STAILQ_INIT(&softc->fe_list); 1137 STAILQ_INIT(&softc->port_list); 1138 STAILQ_INIT(&softc->be_list); 1139 ctl_tpc_init(softc); 1140 1141 if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC, --- 39 unchanged lines hidden (view full) --- 1181 &softc->ctl_proc, NULL, 0, 0, "ctl", "thresh"); 1182 if (error != 0) { 1183 printf("error creating CTL threshold thread!\n"); 1184 ctl_pool_free(other_pool); 1185 return (error); 1186 } 1187 1188 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), | 1368 STAILQ_INIT(&softc->lun_list); 1369 STAILQ_INIT(&softc->pending_lun_queue); 1370 STAILQ_INIT(&softc->fe_list); 1371 STAILQ_INIT(&softc->port_list); 1372 STAILQ_INIT(&softc->be_list); 1373 ctl_tpc_init(softc); 1374 1375 if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC, --- 39 unchanged lines hidden (view full) --- 1415 &softc->ctl_proc, NULL, 0, 0, "ctl", "thresh"); 1416 if (error != 0) { 1417 printf("error creating CTL threshold thread!\n"); 1418 ctl_pool_free(other_pool); 1419 return (error); 1420 } 1421 1422 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), |
1189 OID_AUTO, "ha_state", CTLTYPE_INT | CTLFLAG_RWTUN, 1190 softc, 0, ctl_ha_state_sysctl, "I", "HA state for this head"); | 1423 OID_AUTO, "ha_role", CTLTYPE_INT | CTLFLAG_RWTUN, 1424 softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head"); 1425 1426 if (softc->is_single == 0) { 1427 ctl_frontend_register(&ha_frontend); 1428 if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) { 1429 printf("ctl_init: ctl_ha_msg_init failed.\n"); 1430 softc->is_single = 1; 1431 } else 1432 if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler) 1433 != CTL_HA_STATUS_SUCCESS) { 1434 printf("ctl_init: ctl_ha_msg_register failed.\n"); 1435 softc->is_single = 1; 1436 } 1437 } |
1191 return (0); 1192} 1193 1194void 1195ctl_shutdown(void) 1196{ 1197 struct ctl_softc *softc; 1198 struct ctl_lun *lun, *next_lun; 1199 1200 softc = (struct ctl_softc *)control_softc; 1201 | 1438 return (0); 1439} 1440 1441void 1442ctl_shutdown(void) 1443{ 1444 struct ctl_softc *softc; 1445 struct ctl_lun *lun, *next_lun; 1446 1447 softc = (struct ctl_softc *)control_softc; 1448 |
1449 if (softc->is_single == 0) { 1450 if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) 1451 != CTL_HA_STATUS_SUCCESS) { 1452 printf("ctl_shutdown: ctl_ha_msg_deregister failed.\n"); 1453 } 1454 if (ctl_ha_msg_shutdown(softc) != CTL_HA_STATUS_SUCCESS) { 1455 printf("ctl_shutdown: ctl_ha_msg_shutdown failed.\n"); 1456 } 1457 ctl_frontend_deregister(&ha_frontend); 1458 } 1459 |
|
1202 mtx_lock(&softc->ctl_lock); 1203 1204 /* 1205 * Free up each LUN. 1206 */ 1207 for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){ 1208 next_lun = STAILQ_NEXT(lun, links); 1209 ctl_free_lun(lun); --- 43 unchanged lines hidden (view full) --- 1253} 1254 1255static int 1256ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) 1257{ 1258 return (0); 1259} 1260 | 1460 mtx_lock(&softc->ctl_lock); 1461 1462 /* 1463 * Free up each LUN. 1464 */ 1465 for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){ 1466 next_lun = STAILQ_NEXT(lun, links); 1467 ctl_free_lun(lun); --- 43 unchanged lines hidden (view full) --- 1511} 1512 1513static int 1514ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) 1515{ 1516 return (0); 1517} 1518 |
1261int 1262ctl_port_enable(ctl_port_type port_type) 1263{ 1264 struct ctl_softc *softc = control_softc; 1265 struct ctl_port *port; 1266 1267 if (softc->is_single == 0) { 1268 union ctl_ha_msg msg_info; 1269 int isc_retval; 1270 1271#if 0 1272 printf("%s: HA mode, synchronizing frontend enable\n", 1273 __func__); 1274#endif 1275 msg_info.hdr.msg_type = CTL_MSG_SYNC_FE; 1276 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1277 sizeof(msg_info), 1 )) > CTL_HA_STATUS_SUCCESS) { 1278 printf("Sync msg send error retval %d\n", isc_retval); 1279 } 1280 if (!rcv_sync_msg) { 1281 isc_retval=ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info, 1282 sizeof(msg_info), 1); 1283 } 1284#if 0 1285 printf("CTL:Frontend Enable\n"); 1286 } else { 1287 printf("%s: single mode, skipping frontend synchronization\n", 1288 __func__); 1289#endif 1290 } 1291 1292 STAILQ_FOREACH(port, &softc->port_list, links) { 1293 if (port_type & port->port_type) 1294 { 1295#if 0 1296 printf("port %d\n", port->targ_port); 1297#endif 1298 ctl_port_online(port); 1299 } 1300 } 1301 1302 return (0); 1303} 1304 1305int 1306ctl_port_disable(ctl_port_type port_type) 1307{ 1308 struct ctl_softc *softc; 1309 struct ctl_port *port; 1310 1311 softc = control_softc; 1312 1313 STAILQ_FOREACH(port, &softc->port_list, links) { 1314 if (port_type & port->port_type) 1315 ctl_port_offline(port); 1316 } 1317 1318 return (0); 1319} 1320 | |
1321/* | 1519/* |
1322 * Returns 0 for success, 1 for failure. 1323 * Currently the only failure mode is if there aren't enough entries 1324 * allocated. So, in case of a failure, look at num_entries_dropped, 1325 * reallocate and try again. 1326 */ 1327int 1328ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced, 1329 int *num_entries_filled, int *num_entries_dropped, 1330 ctl_port_type port_type, int no_virtual) 1331{ 1332 struct ctl_softc *softc; 1333 struct ctl_port *port; 1334 int entries_dropped, entries_filled; 1335 int retval; 1336 int i; 1337 1338 softc = control_softc; 1339 1340 retval = 0; 1341 entries_filled = 0; 1342 entries_dropped = 0; 1343 1344 i = 0; 1345 mtx_lock(&softc->ctl_lock); 1346 STAILQ_FOREACH(port, &softc->port_list, links) { 1347 struct ctl_port_entry *entry; 1348 1349 if ((port->port_type & port_type) == 0) 1350 continue; 1351 1352 if ((no_virtual != 0) 1353 && (port->virtual_port != 0)) 1354 continue; 1355 1356 if (entries_filled >= num_entries_alloced) { 1357 entries_dropped++; 1358 continue; 1359 } 1360 entry = &entries[i]; 1361 1362 entry->port_type = port->port_type; 1363 strlcpy(entry->port_name, port->port_name, 1364 sizeof(entry->port_name)); 1365 entry->physical_port = port->physical_port; 1366 entry->virtual_port = port->virtual_port; 1367 entry->wwnn = port->wwnn; 1368 entry->wwpn = port->wwpn; 1369 1370 i++; 1371 entries_filled++; 1372 } 1373 1374 mtx_unlock(&softc->ctl_lock); 1375 1376 if (entries_dropped > 0) 1377 retval = 1; 1378 1379 *num_entries_dropped = entries_dropped; 1380 *num_entries_filled = entries_filled; 1381 1382 return (retval); 1383} 1384 1385/* | |
1386 * Remove an initiator by port number and initiator ID. 1387 * Returns 0 for success, -1 for failure. 1388 */ 1389int 1390ctl_remove_initiator(struct ctl_port *port, int iid) 1391{ 1392 struct ctl_softc *softc = control_softc; 1393 --- 192 unchanged lines hidden (view full) --- 1586 * (SER_ONLY mode). 1587 */ 1588static int 1589ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio) 1590{ 1591 struct ctl_softc *softc; 1592 union ctl_ha_msg msg_info; 1593 struct ctl_lun *lun; | 1520 * Remove an initiator by port number and initiator ID. 1521 * Returns 0 for success, -1 for failure. 1522 */ 1523int 1524ctl_remove_initiator(struct ctl_port *port, int iid) 1525{ 1526 struct ctl_softc *softc = control_softc; 1527 --- 192 unchanged lines hidden (view full) --- 1720 * (SER_ONLY mode). 1721 */ 1722static int 1723ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio) 1724{ 1725 struct ctl_softc *softc; 1726 union ctl_ha_msg msg_info; 1727 struct ctl_lun *lun; |
1728 const struct ctl_cmd_entry *entry; |
|
1594 int retval = 0; 1595 uint32_t targ_lun; 1596 1597 softc = control_softc; 1598 1599 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; | 1729 int retval = 0; 1730 uint32_t targ_lun; 1731 1732 softc = control_softc; 1733 1734 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; |
1600 lun = softc->ctl_luns[targ_lun]; 1601 if (lun==NULL) 1602 { | 1735 if ((targ_lun < CTL_MAX_LUNS) && 1736 ((lun = softc->ctl_luns[targ_lun]) != NULL)) { |
1603 /* | 1737 /* |
1738 * If the LUN is invalid, pretend that it doesn't exist. 1739 * It will go away as soon as all pending I/O has been 1740 * completed. 1741 */ 1742 mtx_lock(&lun->lun_lock); 1743 if (lun->flags & CTL_LUN_DISABLED) { 1744 mtx_unlock(&lun->lun_lock); 1745 lun = NULL; 1746 } 1747 } else 1748 lun = NULL; 1749 if (lun == NULL) { 1750 /* |
|
1604 * Why isn't LUN defined? The other side wouldn't 1605 * send a cmd if the LUN is undefined. 1606 */ 1607 printf("%s: Bad JUJU!, LUN is NULL!\n", __func__); 1608 | 1751 * Why isn't LUN defined? The other side wouldn't 1752 * send a cmd if the LUN is undefined. 1753 */ 1754 printf("%s: Bad JUJU!, LUN is NULL!\n", __func__); 1755 |
1609 /* "Logical unit not supported" */ 1610 ctl_set_sense_data(&msg_info.scsi.sense_data, 1611 lun, 1612 /*sense_format*/SSD_TYPE_NONE, 1613 /*current_error*/ 1, 1614 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1615 /*asc*/ 0x25, 1616 /*ascq*/ 0x00, 1617 SSD_ELEM_NONE); 1618 1619 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1620 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1621 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; | 1756 ctl_set_unsupported_lun(ctsio); 1757 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); |
1622 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1623 msg_info.hdr.serializing_sc = NULL; 1624 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; | 1758 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1759 msg_info.hdr.serializing_sc = NULL; 1760 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; |
1625 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1626 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1627 } | 1761 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1762 sizeof(msg_info.scsi), M_WAITOK); |
1628 return(1); | 1763 return(1); |
1764 } |
|
1629 | 1765 |
1766 entry = ctl_get_cmd_entry(ctsio, NULL); 1767 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 1768 mtx_unlock(&lun->lun_lock); 1769 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); 1770 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1771 msg_info.hdr.serializing_sc = NULL; 1772 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1773 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1774 sizeof(msg_info.scsi), M_WAITOK); 1775 return(1); |
|
1630 } 1631 | 1776 } 1777 |
1632 mtx_lock(&lun->lun_lock); 1633 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); | 1778 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun; 1779 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = lun->be_lun; |
1634 | 1780 |
1781 /* 1782 * Every I/O goes into the OOA queue for a 1783 * particular LUN, and stays there until completion. 1784 */ 1785#ifdef CTL_TIME_IO 1786 if (TAILQ_EMPTY(&lun->ooa_queue)) 1787 lun->idle_time += getsbinuptime() - lun->last_busy; 1788#endif 1789 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1790 |
|
1635 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 1636 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, 1637 ooa_links))) { 1638 case CTL_ACTION_BLOCK: 1639 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 1640 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 1641 blocked_links); | 1791 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 1792 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, 1793 ooa_links))) { 1794 case CTL_ACTION_BLOCK: 1795 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 1796 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 1797 blocked_links); |
1798 mtx_unlock(&lun->lun_lock); |
|
1642 break; 1643 case CTL_ACTION_PASS: 1644 case CTL_ACTION_SKIP: 1645 if (softc->ha_mode == CTL_HA_MODE_XFER) { 1646 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 1647 ctl_enqueue_rtr((union ctl_io *)ctsio); | 1799 break; 1800 case CTL_ACTION_PASS: 1801 case CTL_ACTION_SKIP: 1802 if (softc->ha_mode == CTL_HA_MODE_XFER) { 1803 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 1804 ctl_enqueue_rtr((union ctl_io *)ctsio); |
1805 mtx_unlock(&lun->lun_lock); |
|
1648 } else { | 1806 } else { |
1807 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 1808 mtx_unlock(&lun->lun_lock); |
|
1649 1650 /* send msg back to other side */ 1651 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1652 msg_info.hdr.serializing_sc = (union ctl_io *)ctsio; 1653 msg_info.hdr.msg_type = CTL_MSG_R2R; | 1809 1810 /* send msg back to other side */ 1811 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1812 msg_info.hdr.serializing_sc = (union ctl_io *)ctsio; 1813 msg_info.hdr.msg_type = CTL_MSG_R2R; |
1654#if 0 1655 printf("2. pOrig %x\n", (int)msg_info.hdr.original_sc); 1656#endif 1657 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1658 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1659 } | 1814 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1815 sizeof(msg_info.hdr), M_WAITOK); |
1660 } 1661 break; 1662 case CTL_ACTION_OVERLAP: | 1816 } 1817 break; 1818 case CTL_ACTION_OVERLAP: |
1663 /* OVERLAPPED COMMANDS ATTEMPTED */ 1664 ctl_set_sense_data(&msg_info.scsi.sense_data, 1665 lun, 1666 /*sense_format*/SSD_TYPE_NONE, 1667 /*current_error*/ 1, 1668 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1669 /*asc*/ 0x4E, 1670 /*ascq*/ 0x00, 1671 SSD_ELEM_NONE); | 1819 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1820 mtx_unlock(&lun->lun_lock); 1821 retval = 1; |
1672 | 1822 |
1673 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1674 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1675 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; | 1823 ctl_set_overlapped_cmd(ctsio); 1824 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); |
1676 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1677 msg_info.hdr.serializing_sc = NULL; 1678 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; | 1825 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1826 msg_info.hdr.serializing_sc = NULL; 1827 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; |
1679#if 0 1680 printf("BAD JUJU:Major Bummer Overlap\n"); 1681#endif 1682 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1683 retval = 1; 1684 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1685 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1686 } | 1828 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1829 sizeof(msg_info.scsi), M_WAITOK); |
1687 break; 1688 case CTL_ACTION_OVERLAP_TAG: | 1830 break; 1831 case CTL_ACTION_OVERLAP_TAG: |
1689 /* TAGGED OVERLAPPED COMMANDS (NN = QUEUE TAG) */ 1690 ctl_set_sense_data(&msg_info.scsi.sense_data, 1691 lun, 1692 /*sense_format*/SSD_TYPE_NONE, 1693 /*current_error*/ 1, 1694 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1695 /*asc*/ 0x4D, 1696 /*ascq*/ ctsio->tag_num & 0xff, 1697 SSD_ELEM_NONE); 1698 1699 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1700 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1701 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; | 1832 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1833 mtx_unlock(&lun->lun_lock); 1834 retval = 1; 1835 ctl_set_overlapped_tag(ctsio, ctsio->tag_num); 1836 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); |
1702 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1703 msg_info.hdr.serializing_sc = NULL; 1704 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; | 1837 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1838 msg_info.hdr.serializing_sc = NULL; 1839 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; |
1705#if 0 1706 printf("BAD JUJU:Major Bummer Overlap Tag\n"); 1707#endif 1708 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1709 retval = 1; 1710 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1711 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1712 } | 1840 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1841 sizeof(msg_info.scsi), M_WAITOK); |
1713 break; 1714 case CTL_ACTION_ERROR: 1715 default: | 1842 break; 1843 case CTL_ACTION_ERROR: 1844 default: |
1716 /* "Internal target failure" */ 1717 ctl_set_sense_data(&msg_info.scsi.sense_data, 1718 lun, 1719 /*sense_format*/SSD_TYPE_NONE, 1720 /*current_error*/ 1, 1721 /*sense_key*/ SSD_KEY_HARDWARE_ERROR, 1722 /*asc*/ 0x44, 1723 /*ascq*/ 0x00, 1724 SSD_ELEM_NONE); | 1845 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1846 mtx_unlock(&lun->lun_lock); 1847 retval = 1; |
1725 | 1848 |
1726 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1727 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1728 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; | 1849 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, 1850 /*retry_count*/ 0); 1851 ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); |
1729 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1730 msg_info.hdr.serializing_sc = NULL; 1731 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; | 1852 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1853 msg_info.hdr.serializing_sc = NULL; 1854 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; |
1732#if 0 1733 printf("BAD JUJU:Major Bummer HW Error\n"); 1734#endif 1735 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1736 retval = 1; 1737 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1738 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1739 } | 1855 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1856 sizeof(msg_info.scsi), M_WAITOK); |
1740 break; 1741 } | 1857 break; 1858 } |
1742 mtx_unlock(&lun->lun_lock); | |
1743 return (retval); 1744} 1745 1746/* 1747 * Returns 0 for success, errno for failure. 1748 */ 1749static int 1750ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, --- 244 unchanged lines hidden (view full) --- 1995 struct ctl_port_entry *entry; 1996 1997 entry = (struct ctl_port_entry *)addr; 1998 1999 mtx_lock(&softc->ctl_lock); 2000 STAILQ_FOREACH(port, &softc->port_list, links) { 2001 int action, done; 2002 | 1859 return (retval); 1860} 1861 1862/* 1863 * Returns 0 for success, errno for failure. 1864 */ 1865static int 1866ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, --- 244 unchanged lines hidden (view full) --- 2111 struct ctl_port_entry *entry; 2112 2113 entry = (struct ctl_port_entry *)addr; 2114 2115 mtx_lock(&softc->ctl_lock); 2116 STAILQ_FOREACH(port, &softc->port_list, links) { 2117 int action, done; 2118 |
2119 if (port->targ_port < softc->port_min || 2120 port->targ_port >= softc->port_max) 2121 continue; 2122 |
|
2003 action = 0; 2004 done = 0; | 2123 action = 0; 2124 done = 0; |
2005 | |
2006 if ((entry->port_type == CTL_PORT_NONE) 2007 && (entry->targ_port == port->targ_port)) { 2008 /* 2009 * If the user only wants to enable or 2010 * disable or set WWNs on a specific port, 2011 * do the operation and we're done. 2012 */ 2013 action = 1; --- 13 unchanged lines hidden (view full) --- 2027 */ 2028 if (cmd == CTL_SET_PORT_WWNS) { 2029 printf("%s: Can't set WWNs on " 2030 "multiple ports\n", __func__); 2031 retval = EINVAL; 2032 break; 2033 } 2034 } | 2125 if ((entry->port_type == CTL_PORT_NONE) 2126 && (entry->targ_port == port->targ_port)) { 2127 /* 2128 * If the user only wants to enable or 2129 * disable or set WWNs on a specific port, 2130 * do the operation and we're done. 2131 */ 2132 action = 1; --- 13 unchanged lines hidden (view full) --- 2146 */ 2147 if (cmd == CTL_SET_PORT_WWNS) { 2148 printf("%s: Can't set WWNs on " 2149 "multiple ports\n", __func__); 2150 retval = EINVAL; 2151 break; 2152 } 2153 } |
2035 if (action != 0) { 2036 /* 2037 * XXX KDM we have to drop the lock here, 2038 * because the online/offline operations 2039 * can potentially block. We need to 2040 * reference count the frontends so they 2041 * can't go away, 2042 */ 2043 mtx_unlock(&softc->ctl_lock); | 2154 if (action == 0) 2155 continue; |
2044 | 2156 |
2045 if (cmd == CTL_ENABLE_PORT) { 2046 ctl_port_online(port); 2047 } else if (cmd == CTL_DISABLE_PORT) { 2048 ctl_port_offline(port); 2049 } 2050 | 2157 /* 2158 * XXX KDM we have to drop the lock here, because 2159 * the online/offline operations can potentially 2160 * block. We need to reference count the frontends 2161 * so they can't go away, 2162 */ 2163 if (cmd == CTL_ENABLE_PORT) { 2164 mtx_unlock(&softc->ctl_lock); 2165 ctl_port_online(port); |
2051 mtx_lock(&softc->ctl_lock); | 2166 mtx_lock(&softc->ctl_lock); |
2052 2053 if (cmd == CTL_SET_PORT_WWNS) 2054 ctl_port_set_wwns(port, 2055 (entry->flags & CTL_PORT_WWNN_VALID) ? 2056 1 : 0, entry->wwnn, 2057 (entry->flags & CTL_PORT_WWPN_VALID) ? 2058 1 : 0, entry->wwpn); | 2167 } else if (cmd == CTL_DISABLE_PORT) { 2168 mtx_unlock(&softc->ctl_lock); 2169 ctl_port_offline(port); 2170 mtx_lock(&softc->ctl_lock); 2171 } else if (cmd == CTL_SET_PORT_WWNS) { 2172 ctl_port_set_wwns(port, 2173 (entry->flags & CTL_PORT_WWNN_VALID) ? 2174 1 : 0, entry->wwnn, 2175 (entry->flags & CTL_PORT_WWPN_VALID) ? 2176 1 : 0, entry->wwpn); |
2059 } 2060 if (done != 0) 2061 break; 2062 } 2063 mtx_unlock(&softc->ctl_lock); 2064 break; 2065 } 2066 case CTL_GET_PORT_LIST: { --- 481 unchanged lines hidden (view full) --- 2548 struct ctl_lun *lun; 2549 2550 lun = softc->ctl_luns[i]; 2551 2552 if ((lun == NULL) 2553 || ((lun->flags & CTL_LUN_DISABLED) != 0)) 2554 continue; 2555 | 2177 } 2178 if (done != 0) 2179 break; 2180 } 2181 mtx_unlock(&softc->ctl_lock); 2182 break; 2183 } 2184 case CTL_GET_PORT_LIST: { --- 481 unchanged lines hidden (view full) --- 2666 struct ctl_lun *lun; 2667 2668 lun = softc->ctl_luns[i]; 2669 2670 if ((lun == NULL) 2671 || ((lun->flags & CTL_LUN_DISABLED) != 0)) 2672 continue; 2673 |
2556 for (j = 0; j < (CTL_MAX_PORTS * 2); j++) { | 2674 for (j = 0; j < CTL_MAX_PORTS; j++) { |
2557 if (lun->pr_keys[j] == NULL) 2558 continue; 2559 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2560 if (lun->pr_keys[j][k] == 0) 2561 continue; 2562 printf(" LUN %d port %d iid %d key " 2563 "%#jx\n", i, j, k, 2564 (uintmax_t)lun->pr_keys[j][k]); --- 275 unchanged lines hidden (view full) --- 2840 req->kern_args = ctl_copyin_args(req->num_args, 2841 req->args, req->error_str, sizeof(req->error_str)); 2842 if (req->kern_args == NULL) { 2843 req->status = CTL_LUN_ERROR; 2844 break; 2845 } 2846 } 2847 | 2675 if (lun->pr_keys[j] == NULL) 2676 continue; 2677 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2678 if (lun->pr_keys[j][k] == 0) 2679 continue; 2680 printf(" LUN %d port %d iid %d key " 2681 "%#jx\n", i, j, k, 2682 (uintmax_t)lun->pr_keys[j][k]); --- 275 unchanged lines hidden (view full) --- 2958 req->kern_args = ctl_copyin_args(req->num_args, 2959 req->args, req->error_str, sizeof(req->error_str)); 2960 if (req->kern_args == NULL) { 2961 req->status = CTL_LUN_ERROR; 2962 break; 2963 } 2964 } 2965 |
2848 retval = fe->ioctl(dev, cmd, addr, flag, td); | 2966 if (fe->ioctl) 2967 retval = fe->ioctl(dev, cmd, addr, flag, td); 2968 else 2969 retval = ENODEV; |
2849 2850 if (req->num_args > 0) { 2851 ctl_copyout_args(req->num_args, req->kern_args); 2852 ctl_free_args(req->num_args, req->kern_args); 2853 } 2854 break; 2855 } 2856 case CTL_PORT_LIST: { --- 142 unchanged lines hidden (view full) --- 2999 sbuf_delete(sb); 3000 break; 3001 } 3002 case CTL_LUN_MAP: { 3003 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; 3004 struct ctl_port *port; 3005 3006 mtx_lock(&softc->ctl_lock); | 2970 2971 if (req->num_args > 0) { 2972 ctl_copyout_args(req->num_args, req->kern_args); 2973 ctl_free_args(req->num_args, req->kern_args); 2974 } 2975 break; 2976 } 2977 case CTL_PORT_LIST: { --- 142 unchanged lines hidden (view full) --- 3120 sbuf_delete(sb); 3121 break; 3122 } 3123 case CTL_LUN_MAP: { 3124 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; 3125 struct ctl_port *port; 3126 3127 mtx_lock(&softc->ctl_lock); |
3007 if (lm->port >= CTL_MAX_PORTS || | 3128 if (lm->port < softc->port_min || 3129 lm->port >= softc->port_max || |
3008 (port = softc->ctl_ports[lm->port]) == NULL) { 3009 mtx_unlock(&softc->ctl_lock); 3010 return (ENXIO); 3011 } 3012 mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps 3013 if (lm->plun < CTL_MAX_LUNS) { 3014 if (lm->lun == UINT32_MAX) 3015 retval = ctl_lun_map_unset(port, lm->plun); --- 46 unchanged lines hidden (view full) --- 3062 } 3063 } 3064 return (retval); 3065} 3066 3067uint32_t 3068ctl_get_initindex(struct ctl_nexus *nexus) 3069{ | 3130 (port = softc->ctl_ports[lm->port]) == NULL) { 3131 mtx_unlock(&softc->ctl_lock); 3132 return (ENXIO); 3133 } 3134 mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps 3135 if (lm->plun < CTL_MAX_LUNS) { 3136 if (lm->lun == UINT32_MAX) 3137 retval = ctl_lun_map_unset(port, lm->plun); --- 46 unchanged lines hidden (view full) --- 3184 } 3185 } 3186 return (retval); 3187} 3188 3189uint32_t 3190ctl_get_initindex(struct ctl_nexus *nexus) 3191{ |
3070 if (nexus->targ_port < CTL_MAX_PORTS) 3071 return (nexus->initid + 3072 (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3073 else 3074 return (nexus->initid + 3075 ((nexus->targ_port - CTL_MAX_PORTS) * 3076 CTL_MAX_INIT_PER_PORT)); 3077} 3078 3079uint32_t 3080ctl_get_resindex(struct ctl_nexus *nexus) 3081{ | |
3082 return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3083} 3084 | 3192 return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3193} 3194 |
3085uint32_t 3086ctl_port_idx(int port_num) 3087{ 3088 if (port_num < CTL_MAX_PORTS) 3089 return(port_num); 3090 else 3091 return(port_num - CTL_MAX_PORTS); 3092} 3093 | |
3094int 3095ctl_lun_map_init(struct ctl_port *port) 3096{ 3097 struct ctl_softc *softc = control_softc; 3098 struct ctl_lun *lun; 3099 uint32_t i; 3100 3101 if (port->lun_map == NULL) 3102 port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS, 3103 M_CTL, M_NOWAIT); 3104 if (port->lun_map == NULL) 3105 return (ENOMEM); 3106 for (i = 0; i < CTL_MAX_LUNS; i++) 3107 port->lun_map[i] = UINT32_MAX; | 3195int 3196ctl_lun_map_init(struct ctl_port *port) 3197{ 3198 struct ctl_softc *softc = control_softc; 3199 struct ctl_lun *lun; 3200 uint32_t i; 3201 3202 if (port->lun_map == NULL) 3203 port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS, 3204 M_CTL, M_NOWAIT); 3205 if (port->lun_map == NULL) 3206 return (ENOMEM); 3207 for (i = 0; i < CTL_MAX_LUNS; i++) 3208 port->lun_map[i] = UINT32_MAX; |
3108 if (port->status & CTL_PORT_STATUS_ONLINE && 3109 port->lun_disable != NULL) { 3110 STAILQ_FOREACH(lun, &softc->lun_list, links) 3111 port->lun_disable(port->targ_lun_arg, lun->lun); | 3209 if (port->status & CTL_PORT_STATUS_ONLINE) { 3210 if (port->lun_disable != NULL) { 3211 STAILQ_FOREACH(lun, &softc->lun_list, links) 3212 port->lun_disable(port->targ_lun_arg, lun->lun); 3213 } 3214 ctl_isc_announce_port(port); |
3112 } 3113 return (0); 3114} 3115 3116int 3117ctl_lun_map_deinit(struct ctl_port *port) 3118{ 3119 struct ctl_softc *softc = control_softc; 3120 struct ctl_lun *lun; 3121 3122 if (port->lun_map == NULL) 3123 return (0); 3124 free(port->lun_map, M_CTL); 3125 port->lun_map = NULL; | 3215 } 3216 return (0); 3217} 3218 3219int 3220ctl_lun_map_deinit(struct ctl_port *port) 3221{ 3222 struct ctl_softc *softc = control_softc; 3223 struct ctl_lun *lun; 3224 3225 if (port->lun_map == NULL) 3226 return (0); 3227 free(port->lun_map, M_CTL); 3228 port->lun_map = NULL; |
3126 if (port->status & CTL_PORT_STATUS_ONLINE && 3127 port->lun_enable != NULL) { 3128 STAILQ_FOREACH(lun, &softc->lun_list, links) 3129 port->lun_enable(port->targ_lun_arg, lun->lun); | 3229 if (port->status & CTL_PORT_STATUS_ONLINE) { 3230 if (port->lun_enable != NULL) { 3231 STAILQ_FOREACH(lun, &softc->lun_list, links) 3232 port->lun_enable(port->targ_lun_arg, lun->lun); 3233 } 3234 ctl_isc_announce_port(port); |
3130 } 3131 return (0); 3132} 3133 3134int 3135ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) 3136{ 3137 int status; 3138 uint32_t old; 3139 3140 if (port->lun_map == NULL) { 3141 status = ctl_lun_map_init(port); 3142 if (status != 0) 3143 return (status); 3144 } 3145 old = port->lun_map[plun]; 3146 port->lun_map[plun] = glun; | 3235 } 3236 return (0); 3237} 3238 3239int 3240ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) 3241{ 3242 int status; 3243 uint32_t old; 3244 3245 if (port->lun_map == NULL) { 3246 status = ctl_lun_map_init(port); 3247 if (status != 0) 3248 return (status); 3249 } 3250 old = port->lun_map[plun]; 3251 port->lun_map[plun] = glun; |
3147 if ((port->status & CTL_PORT_STATUS_ONLINE) && old >= CTL_MAX_LUNS && 3148 port->lun_enable != NULL) 3149 port->lun_enable(port->targ_lun_arg, plun); | 3252 if ((port->status & CTL_PORT_STATUS_ONLINE) && old >= CTL_MAX_LUNS) { 3253 if (port->lun_enable != NULL) 3254 port->lun_enable(port->targ_lun_arg, plun); 3255 ctl_isc_announce_port(port); 3256 } |
3150 return (0); 3151} 3152 3153int 3154ctl_lun_map_unset(struct ctl_port *port, uint32_t plun) 3155{ 3156 uint32_t old; 3157 3158 if (port->lun_map == NULL) 3159 return (0); 3160 old = port->lun_map[plun]; 3161 port->lun_map[plun] = UINT32_MAX; | 3257 return (0); 3258} 3259 3260int 3261ctl_lun_map_unset(struct ctl_port *port, uint32_t plun) 3262{ 3263 uint32_t old; 3264 3265 if (port->lun_map == NULL) 3266 return (0); 3267 old = port->lun_map[plun]; 3268 port->lun_map[plun] = UINT32_MAX; |
3162 if ((port->status & CTL_PORT_STATUS_ONLINE) && old < CTL_MAX_LUNS && 3163 port->lun_disable != NULL) 3164 port->lun_disable(port->targ_lun_arg, plun); | 3269 if ((port->status & CTL_PORT_STATUS_ONLINE) && old < CTL_MAX_LUNS) { 3270 if (port->lun_disable != NULL) 3271 port->lun_disable(port->targ_lun_arg, plun); 3272 ctl_isc_announce_port(port); 3273 } |
3165 return (0); 3166} 3167 3168uint32_t 3169ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id) 3170{ 3171 3172 if (port == NULL) --- 17 unchanged lines hidden (view full) --- 3190 return (i); 3191 } 3192 return (UINT32_MAX); 3193} 3194 3195static struct ctl_port * 3196ctl_io_port(struct ctl_io_hdr *io_hdr) 3197{ | 3274 return (0); 3275} 3276 3277uint32_t 3278ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id) 3279{ 3280 3281 if (port == NULL) --- 17 unchanged lines hidden (view full) --- 3299 return (i); 3300 } 3301 return (UINT32_MAX); 3302} 3303 3304static struct ctl_port * 3305ctl_io_port(struct ctl_io_hdr *io_hdr) 3306{ |
3198 int port_num; | |
3199 | 3307 |
3200 port_num = io_hdr->nexus.targ_port; 3201 return (control_softc->ctl_ports[ctl_port_idx(port_num)]); | 3308 return (control_softc->ctl_ports[io_hdr->nexus.targ_port]); |
3202} 3203 | 3309} 3310 |
/*
 * Find the index of the first clear (zero) bit within the first "size"
 * bits of the bitmask "mask", or return -1 if all of them are set.
 *
 * Unlike the previous implementation — which only worked for bitmask
 * sizes that were a power of 2 and at least 32 — this scans every bit
 * up to "size":  the old code examined only the low MIN(32, size) bits
 * of each 32-bit chunk and never looked at a trailing partial chunk
 * (e.g. bits 32-39 of a 40-bit mask were silently skipped).
 */
int
ctl_ffz(uint32_t *mask, uint32_t size)
{
	uint32_t i;

	for (i = 0; i < size; i++) {
		if ((mask[i / 32] & (1u << (i % 32))) == 0)
			return (i);
	}
	return (-1);
}
4124 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, CTL_MAX_LUNS); | 4218 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, CTL_MAX_LUNS); |
4125 if (lun_number == -1) { 4126 mtx_unlock(&ctl_softc->ctl_lock); 4127 printf("ctl: can't allocate LUN, out of LUNs\n"); 4128 if (lun->flags & CTL_LUN_MALLOCED) 4129 free(lun, M_CTL); 4130 be_lun->lun_config_status(be_lun->be_lun, 4131 CTL_LUN_CONFIG_FAILURE); 4132 return (ENOSPC); --- 106 unchanged lines hidden (view full) --- 4239 atomic_subtract_int(&lun->be_lun->be->num_luns, 1); 4240 lun->be_lun->lun_shutdown(lun->be_lun->be_lun); 4241 4242 ctl_tpc_lun_shutdown(lun); 4243 mtx_destroy(&lun->lun_lock); 4244 free(lun->lun_devid, M_CTL); 4245 for (i = 0; i < CTL_MAX_PORTS; i++) 4246 free(lun->pending_ua[i], M_CTL); | 4219 if (lun_number == -1) { 4220 mtx_unlock(&ctl_softc->ctl_lock); 4221 printf("ctl: can't allocate LUN, out of LUNs\n"); 4222 if (lun->flags & CTL_LUN_MALLOCED) 4223 free(lun, M_CTL); 4224 be_lun->lun_config_status(be_lun->be_lun, 4225 CTL_LUN_CONFIG_FAILURE); 4226 return (ENOSPC); --- 106 unchanged lines hidden (view full) --- 4333 atomic_subtract_int(&lun->be_lun->be->num_luns, 1); 4334 lun->be_lun->lun_shutdown(lun->be_lun->be_lun); 4335 4336 ctl_tpc_lun_shutdown(lun); 4337 mtx_destroy(&lun->lun_lock); 4338 free(lun->lun_devid, M_CTL); 4339 for (i = 0; i < CTL_MAX_PORTS; i++) 4340 free(lun->pending_ua[i], M_CTL); |
4247 for (i = 0; i < 2 * CTL_MAX_PORTS; i++) | 4341 for (i = 0; i < CTL_MAX_PORTS; i++) |
4248 free(lun->pr_keys[i], M_CTL); 4249 free(lun->write_buffer, M_CTL); 4250 if (lun->flags & CTL_LUN_MALLOCED) 4251 free(lun, M_CTL); 4252 4253 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4254 mtx_lock(&nlun->lun_lock); 4255 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); --- 72 unchanged lines hidden (view full) --- 4328 printf("%s: FETD %s port %d returned error " 4329 "%d for lun_enable on lun %jd\n", 4330 __func__, port->port_name, port->targ_port, 4331 retval, (intmax_t)lun->lun); 4332 } 4333 } 4334 4335 mtx_unlock(&softc->ctl_lock); | 4342 free(lun->pr_keys[i], M_CTL); 4343 free(lun->write_buffer, M_CTL); 4344 if (lun->flags & CTL_LUN_MALLOCED) 4345 free(lun, M_CTL); 4346 4347 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4348 mtx_lock(&nlun->lun_lock); 4349 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); --- 72 unchanged lines hidden (view full) --- 4422 printf("%s: FETD %s port %d returned error " 4423 "%d for lun_enable on lun %jd\n", 4424 __func__, port->port_name, port->targ_port, 4425 retval, (intmax_t)lun->lun); 4426 } 4427 } 4428 4429 mtx_unlock(&softc->ctl_lock); |
4430 ctl_isc_announce_lun(lun); |
|
4336 4337 return (0); 4338} 4339 4340int 4341ctl_disable_lun(struct ctl_be_lun *be_lun) 4342{ 4343 struct ctl_softc *softc; --- 33 unchanged lines hidden (view full) --- 4377 printf("%s: FETD %s port %d returned error " 4378 "%d for lun_disable on lun %jd\n", 4379 __func__, port->port_name, port->targ_port, 4380 retval, (intmax_t)lun->lun); 4381 } 4382 } 4383 4384 mtx_unlock(&softc->ctl_lock); | 4431 4432 return (0); 4433} 4434 4435int 4436ctl_disable_lun(struct ctl_be_lun *be_lun) 4437{ 4438 struct ctl_softc *softc; --- 33 unchanged lines hidden (view full) --- 4472 printf("%s: FETD %s port %d returned error " 4473 "%d for lun_disable on lun %jd\n", 4474 __func__, port->port_name, port->targ_port, 4475 retval, (intmax_t)lun->lun); 4476 } 4477 } 4478 4479 mtx_unlock(&softc->ctl_lock); |
4480 ctl_isc_announce_lun(lun); |
|
4385 4386 return (0); 4387} 4388 4389int 4390ctl_start_lun(struct ctl_be_lun *be_lun) 4391{ 4392 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; --- 33 unchanged lines hidden (view full) --- 4426 4427 mtx_lock(&lun->lun_lock); 4428 lun->flags &= ~CTL_LUN_OFFLINE; 4429 mtx_unlock(&lun->lun_lock); 4430 return (0); 4431} 4432 4433int | 4481 4482 return (0); 4483} 4484 4485int 4486ctl_start_lun(struct ctl_be_lun *be_lun) 4487{ 4488 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; --- 33 unchanged lines hidden (view full) --- 4522 4523 mtx_lock(&lun->lun_lock); 4524 lun->flags &= ~CTL_LUN_OFFLINE; 4525 mtx_unlock(&lun->lun_lock); 4526 return (0); 4527} 4528 4529int |
4530ctl_lun_primary(struct ctl_be_lun *be_lun) 4531{ 4532 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4533 4534 mtx_lock(&lun->lun_lock); 4535 lun->flags |= CTL_LUN_PRIMARY_SC; 4536 mtx_unlock(&lun->lun_lock); 4537 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 4538 ctl_isc_announce_lun(lun); 4539 return (0); 4540} 4541 4542int 4543ctl_lun_secondary(struct ctl_be_lun *be_lun) 4544{ 4545 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4546 4547 mtx_lock(&lun->lun_lock); 4548 lun->flags &= ~CTL_LUN_PRIMARY_SC; 4549 mtx_unlock(&lun->lun_lock); 4550 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 4551 ctl_isc_announce_lun(lun); 4552 return (0); 4553} 4554 4555int |
|
4434ctl_invalidate_lun(struct ctl_be_lun *be_lun) 4435{ 4436 struct ctl_softc *softc; 4437 struct ctl_lun *lun; 4438 4439 lun = (struct ctl_lun *)be_lun->ctl_lun; 4440 softc = lun->ctl_softc; 4441 --- 48 unchanged lines hidden (view full) --- 4490 mtx_unlock(&lun->lun_lock); 4491 return (0); 4492} 4493 4494void 4495ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 4496{ 4497 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; | 4556ctl_invalidate_lun(struct ctl_be_lun *be_lun) 4557{ 4558 struct ctl_softc *softc; 4559 struct ctl_lun *lun; 4560 4561 lun = (struct ctl_lun *)be_lun->ctl_lun; 4562 softc = lun->ctl_softc; 4563 --- 48 unchanged lines hidden (view full) --- 4612 mtx_unlock(&lun->lun_lock); 4613 return (0); 4614} 4615 4616void 4617ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 4618{ 4619 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; |
4620 union ctl_ha_msg msg; |
|
4498 4499 mtx_lock(&lun->lun_lock); 4500 ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGED); 4501 mtx_unlock(&lun->lun_lock); | 4621 4622 mtx_lock(&lun->lun_lock); 4623 ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGED); 4624 mtx_unlock(&lun->lun_lock); |
4625 if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 4626 /* Send msg to other side. */ 4627 bzero(&msg.ua, sizeof(msg.ua)); 4628 msg.hdr.msg_type = CTL_MSG_UA; 4629 msg.hdr.nexus.initid = -1; 4630 msg.hdr.nexus.targ_port = -1; 4631 msg.hdr.nexus.targ_lun = lun->lun; 4632 msg.hdr.nexus.targ_mapped_lun = lun->lun; 4633 msg.ua.ua_all = 1; 4634 msg.ua.ua_set = 1; 4635 msg.ua.ua_type = CTL_UA_CAPACITY_CHANGED; 4636 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 4637 M_WAITOK); 4638 } |
|
4502} 4503 4504/* 4505 * Backend "memory move is complete" callback for requests that never 4506 * make it down to say RAIDCore's configuration code. 4507 */ 4508int 4509ctl_config_move_done(union ctl_io *io) --- 16 unchanged lines hidden (view full) --- 4526 * all the space we have in the sks field. 4527 */ 4528 ctl_set_internal_failure(&io->scsiio, 4529 /*sks_valid*/ 1, 4530 /*retry_count*/ 4531 io->io_hdr.port_status); 4532 } 4533 | 4639} 4640 4641/* 4642 * Backend "memory move is complete" callback for requests that never 4643 * make it down to say RAIDCore's configuration code. 4644 */ 4645int 4646ctl_config_move_done(union ctl_io *io) --- 16 unchanged lines hidden (view full) --- 4663 * all the space we have in the sks field. 4664 */ 4665 ctl_set_internal_failure(&io->scsiio, 4666 /*sks_valid*/ 1, 4667 /*retry_count*/ 4668 io->io_hdr.port_status); 4669 } 4670 |
4671 if (ctl_debug & CTL_DEBUG_CDB_DATA) 4672 ctl_data_print(io); |
|
4534 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || 4535 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 4536 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || 4537 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 4538 /* 4539 * XXX KDM just assuming a single pointer here, and not a 4540 * S/G list. If we start using S/G lists for config data, 4541 * we'll need to know how to clean them up here as well. --- 10 unchanged lines hidden (view full) --- 4552 * writes, because for those at least we know ahead of 4553 * time where the write will go and how long it is. For 4554 * config writes, though, that information is largely 4555 * contained within the write itself, thus we need to 4556 * parse out the data again. 4557 * 4558 * - Call some other function once the data is in? 4559 */ | 4673 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || 4674 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 4675 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || 4676 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 4677 /* 4678 * XXX KDM just assuming a single pointer here, and not a 4679 * S/G list. If we start using S/G lists for config data, 4680 * we'll need to know how to clean them up here as well. --- 10 unchanged lines hidden (view full) --- 4691 * writes, because for those at least we know ahead of 4692 * time where the write will go and how long it is. For 4693 * config writes, though, that information is largely 4694 * contained within the write itself, thus we need to 4695 * parse out the data again. 4696 * 4697 * - Call some other function once the data is in? 4698 */ |
4560 if (ctl_debug & CTL_DEBUG_CDB_DATA) 4561 ctl_data_print(io); | |
4562 4563 /* 4564 * XXX KDM call ctl_scsiio() again for now, and check flag 4565 * bits to see whether we're allocated or not. 4566 */ 4567 retval = ctl_scsiio(&io->scsiio); 4568 } 4569 return (retval); --- 106 unchanged lines hidden (view full) --- 4676 struct ctl_lun *lun; 4677 uint32_t residx; 4678 4679 length = 0; 4680 resv_id = 0; 4681 4682 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 4683 | 4699 4700 /* 4701 * XXX KDM call ctl_scsiio() again for now, and check flag 4702 * bits to see whether we're allocated or not. 4703 */ 4704 retval = ctl_scsiio(&io->scsiio); 4705 } 4706 return (retval); --- 106 unchanged lines hidden (view full) --- 4813 struct ctl_lun *lun; 4814 uint32_t residx; 4815 4816 length = 0; 4817 resv_id = 0; 4818 4819 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 4820 |
4684 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); | 4821 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); |
4685 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 4686 4687 switch (ctsio->cdb[0]) { 4688 case RELEASE_10: { 4689 struct scsi_release_10 *cdb; 4690 4691 cdb = (struct scsi_release_10 *)ctsio->cdb; 4692 --- 73 unchanged lines hidden (view full) --- 4766 thirdparty = 0; 4767 longid = 0; 4768 resv_id = 0; 4769 length = 0; 4770 thirdparty_id = 0; 4771 4772 CTL_DEBUG_PRINT(("ctl_reserve\n")); 4773 | 4822 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 4823 4824 switch (ctsio->cdb[0]) { 4825 case RELEASE_10: { 4826 struct scsi_release_10 *cdb; 4827 4828 cdb = (struct scsi_release_10 *)ctsio->cdb; 4829 --- 73 unchanged lines hidden (view full) --- 4903 thirdparty = 0; 4904 longid = 0; 4905 resv_id = 0; 4906 length = 0; 4907 thirdparty_id = 0; 4908 4909 CTL_DEBUG_PRINT(("ctl_reserve\n")); 4910 |
4774 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); | 4911 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); |
4775 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 4776 4777 switch (ctsio->cdb[0]) { 4778 case RESERVE_10: { 4779 struct scsi_reserve_10 *cdb; 4780 4781 cdb = (struct scsi_reserve_10 *)ctsio->cdb; 4782 --- 99 unchanged lines hidden (view full) --- 4882 ctl_done((union ctl_io *)ctsio); 4883 return (CTL_RETVAL_COMPLETE); 4884 } 4885 4886 if ((lun->flags & CTL_LUN_PR_RESERVED) 4887 && ((cdb->how & SSS_START)==0)) { 4888 uint32_t residx; 4889 | 4912 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 4913 4914 switch (ctsio->cdb[0]) { 4915 case RESERVE_10: { 4916 struct scsi_reserve_10 *cdb; 4917 4918 cdb = (struct scsi_reserve_10 *)ctsio->cdb; 4919 --- 99 unchanged lines hidden (view full) --- 5019 ctl_done((union ctl_io *)ctsio); 5020 return (CTL_RETVAL_COMPLETE); 5021 } 5022 5023 if ((lun->flags & CTL_LUN_PR_RESERVED) 5024 && ((cdb->how & SSS_START)==0)) { 5025 uint32_t residx; 5026 |
4890 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); | 5027 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); |
4891 if (ctl_get_prkey(lun, residx) == 0 4892 || (lun->pr_res_idx!=residx && lun->res_type < 4)) { 4893 4894 ctl_set_reservation_conflict(ctsio); 4895 ctl_done((union ctl_io *)ctsio); 4896 return (CTL_RETVAL_COMPLETE); 4897 } 4898 } --- 1949 unchanged lines hidden (view full) --- 6848 return (CTL_RETVAL_COMPLETE); 6849} 6850 6851int 6852ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) 6853{ 6854 struct scsi_maintenance_in *cdb; 6855 int retval; | 5028 if (ctl_get_prkey(lun, residx) == 0 5029 || (lun->pr_res_idx!=residx && lun->res_type < 4)) { 5030 5031 ctl_set_reservation_conflict(ctsio); 5032 ctl_done((union ctl_io *)ctsio); 5033 return (CTL_RETVAL_COMPLETE); 5034 } 5035 } --- 1949 unchanged lines hidden (view full) --- 6985 return (CTL_RETVAL_COMPLETE); 6986} 6987 6988int 6989ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) 6990{ 6991 struct scsi_maintenance_in *cdb; 6992 int retval; |
6856 int alloc_len, ext, total_len = 0, g, p, pc, pg, gs, os; | 6993 int alloc_len, ext, total_len = 0, g, pc, pg, gs, os; |
6857 int num_target_port_groups, num_target_ports; 6858 struct ctl_lun *lun; 6859 struct ctl_softc *softc; 6860 struct ctl_port *port; 6861 struct scsi_target_group_data *rtg_ptr; 6862 struct scsi_target_group_data_extended *rtg_ext_ptr; 6863 struct scsi_target_port_group_descriptor *tpg_desc; 6864 --- 39 unchanged lines hidden (view full) --- 6904 mtx_unlock(&softc->ctl_lock); 6905 6906 if (ext) 6907 total_len = sizeof(struct scsi_target_group_data_extended); 6908 else 6909 total_len = sizeof(struct scsi_target_group_data); 6910 total_len += sizeof(struct scsi_target_port_group_descriptor) * 6911 num_target_port_groups + | 6994 int num_target_port_groups, num_target_ports; 6995 struct ctl_lun *lun; 6996 struct ctl_softc *softc; 6997 struct ctl_port *port; 6998 struct scsi_target_group_data *rtg_ptr; 6999 struct scsi_target_group_data_extended *rtg_ext_ptr; 7000 struct scsi_target_port_group_descriptor *tpg_desc; 7001 --- 39 unchanged lines hidden (view full) --- 7041 mtx_unlock(&softc->ctl_lock); 7042 7043 if (ext) 7044 total_len = sizeof(struct scsi_target_group_data_extended); 7045 else 7046 total_len = sizeof(struct scsi_target_group_data); 7047 total_len += sizeof(struct scsi_target_port_group_descriptor) * 7048 num_target_port_groups + |
6912 sizeof(struct scsi_target_port_descriptor) * 6913 num_target_ports * num_target_port_groups; | 7049 sizeof(struct scsi_target_port_descriptor) * num_target_ports; |
6914 6915 alloc_len = scsi_4btoul(cdb->length); 6916 6917 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6918 6919 ctsio->kern_sg_entries = 0; 6920 6921 if (total_len < alloc_len) { --- 18 unchanged lines hidden (view full) --- 6940 } else { 6941 rtg_ptr = (struct scsi_target_group_data *) 6942 ctsio->kern_data_ptr; 6943 scsi_ulto4b(total_len - 4, rtg_ptr->length); 6944 tpg_desc = &rtg_ptr->groups[0]; 6945 } 6946 6947 mtx_lock(&softc->ctl_lock); | 7050 7051 alloc_len = scsi_4btoul(cdb->length); 7052 7053 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7054 7055 ctsio->kern_sg_entries = 0; 7056 7057 if (total_len < alloc_len) { --- 18 unchanged lines hidden (view full) --- 7076 } else { 7077 rtg_ptr = (struct scsi_target_group_data *) 7078 ctsio->kern_data_ptr; 7079 scsi_ulto4b(total_len - 4, rtg_ptr->length); 7080 tpg_desc = &rtg_ptr->groups[0]; 7081 } 7082 7083 mtx_lock(&softc->ctl_lock); |
6948 pg = softc->port_offset / CTL_MAX_PORTS; 6949 if (softc->flags & CTL_FLAG_ACTIVE_SHELF) { 6950 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) { 6951 gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 6952 os = TPG_ASYMMETRIC_ACCESS_STANDBY; 6953 } else if (lun->flags & CTL_LUN_PRIMARY_SC) { 6954 gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 6955 os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 6956 } else { 6957 gs = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 6958 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 6959 } 6960 } else { | 7084 pg = softc->port_min / softc->port_cnt; 7085 if (softc->ha_link == CTL_HA_LINK_OFFLINE) 7086 gs = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7087 else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) 7088 gs = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7089 else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) |
6961 gs = TPG_ASYMMETRIC_ACCESS_STANDBY; | 7090 gs = TPG_ASYMMETRIC_ACCESS_STANDBY; |
7091 else 7092 gs = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7093 if (lun->flags & CTL_LUN_PRIMARY_SC) { 7094 os = gs; 7095 gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7096 } else |
|
6962 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; | 7097 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; |
6963 } | |
6964 for (g = 0; g < num_target_port_groups; g++) { 6965 tpg_desc->pref_state = (g == pg) ? gs : os; | 7098 for (g = 0; g < num_target_port_groups; g++) { 7099 tpg_desc->pref_state = (g == pg) ? gs : os; |
6966 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP; | 7100 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7101 TPG_U_SUP | TPG_T_SUP; |
6967 scsi_ulto2b(g + 1, tpg_desc->target_port_group); 6968 tpg_desc->status = TPG_IMPLICIT; 6969 pc = 0; 6970 STAILQ_FOREACH(port, &softc->port_list, links) { | 7102 scsi_ulto2b(g + 1, tpg_desc->target_port_group); 7103 tpg_desc->status = TPG_IMPLICIT; 7104 pc = 0; 7105 STAILQ_FOREACH(port, &softc->port_list, links) { |
7106 if (port->targ_port < g * softc->port_cnt || 7107 port->targ_port >= (g + 1) * softc->port_cnt) 7108 continue; |
|
6971 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 6972 continue; 6973 if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 6974 continue; | 7109 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7110 continue; 7111 if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 7112 continue; |
6975 p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS; 6976 scsi_ulto2b(p, tpg_desc->descriptors[pc]. | 7113 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. |
6977 relative_target_port_identifier); 6978 pc++; 6979 } 6980 tpg_desc->target_port_count = pc; 6981 tpg_desc = (struct scsi_target_port_group_descriptor *) 6982 &tpg_desc->descriptors[pc]; 6983 } 6984 mtx_unlock(&softc->ctl_lock); --- 352 unchanged lines hidden (view full) --- 7337 goto retry; 7338 } 7339 7340 scsi_ulto4b(lun->PRGeneration, res_keys->header.generation); 7341 7342 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7343 lun->pr_key_count, res_keys->header.length); 7344 | 7114 relative_target_port_identifier); 7115 pc++; 7116 } 7117 tpg_desc->target_port_count = pc; 7118 tpg_desc = (struct scsi_target_port_group_descriptor *) 7119 &tpg_desc->descriptors[pc]; 7120 } 7121 mtx_unlock(&softc->ctl_lock); --- 352 unchanged lines hidden (view full) --- 7474 goto retry; 7475 } 7476 7477 scsi_ulto4b(lun->PRGeneration, res_keys->header.generation); 7478 7479 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7480 lun->pr_key_count, res_keys->header.length); 7481 |
7345 for (i = 0, key_count = 0; i < 2*CTL_MAX_INITIATORS; i++) { | 7482 for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) { |
7346 if ((key = ctl_get_prkey(lun, i)) == 0) 7347 continue; 7348 7349 /* 7350 * We used lun->pr_key_count to calculate the 7351 * size to allocate. If it turns out the number of 7352 * initiators with the registered flag set is 7353 * larger than that (i.e. they haven't been kept in --- 112 unchanged lines hidden (view full) --- 7466 printf("%s: reservation length changed, retrying\n", 7467 __func__); 7468 goto retry; 7469 } 7470 7471 scsi_ulto4b(lun->PRGeneration, res_status->header.generation); 7472 7473 res_desc = &res_status->desc[0]; | 7483 if ((key = ctl_get_prkey(lun, i)) == 0) 7484 continue; 7485 7486 /* 7487 * We used lun->pr_key_count to calculate the 7488 * size to allocate. If it turns out the number of 7489 * initiators with the registered flag set is 7490 * larger than that (i.e. they haven't been kept in --- 112 unchanged lines hidden (view full) --- 7603 printf("%s: reservation length changed, retrying\n", 7604 __func__); 7605 goto retry; 7606 } 7607 7608 scsi_ulto4b(lun->PRGeneration, res_status->header.generation); 7609 7610 res_desc = &res_status->desc[0]; |
7474 for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) { | 7611 for (i = 0; i < CTL_MAX_INITIATORS; i++) { |
7475 if ((key = ctl_get_prkey(lun, i)) == 0) 7476 continue; 7477 7478 scsi_u64to8b(key, res_desc->res_key.key); 7479 if ((lun->flags & CTL_LUN_PR_RESERVED) && 7480 (lun->pr_res_idx == i || 7481 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { 7482 res_desc->flags = SPRI_FULL_R_HOLDER; 7483 res_desc->scopetype = lun->res_type; 7484 } 7485 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, 7486 res_desc->rel_trgt_port_id); 7487 len = 0; | 7612 if ((key = ctl_get_prkey(lun, i)) == 0) 7613 continue; 7614 7615 scsi_u64to8b(key, res_desc->res_key.key); 7616 if ((lun->flags & CTL_LUN_PR_RESERVED) && 7617 (lun->pr_res_idx == i || 7618 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { 7619 res_desc->flags = SPRI_FULL_R_HOLDER; 7620 res_desc->scopetype = lun->res_type; 7621 } 7622 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, 7623 res_desc->rel_trgt_port_id); 7624 len = 0; |
7488 port = softc->ctl_ports[ 7489 ctl_port_idx(i / CTL_MAX_INIT_PER_PORT)]; | 7625 port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT]; |
7490 if (port != NULL) 7491 len = ctl_create_iid(port, 7492 i % CTL_MAX_INIT_PER_PORT, 7493 res_desc->transport_id); 7494 scsi_ulto4b(len, res_desc->additional_length); 7495 res_desc = (struct scsi_per_res_in_full_desc *) 7496 &res_desc->transport_id[len]; 7497 } --- 13 unchanged lines hidden (view full) --- 7511 7512 ctl_set_success(ctsio); 7513 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7514 ctsio->be_move_done = ctl_config_move_done; 7515 ctl_datamove((union ctl_io *)ctsio); 7516 return (CTL_RETVAL_COMPLETE); 7517} 7518 | 7626 if (port != NULL) 7627 len = ctl_create_iid(port, 7628 i % CTL_MAX_INIT_PER_PORT, 7629 res_desc->transport_id); 7630 scsi_ulto4b(len, res_desc->additional_length); 7631 res_desc = (struct scsi_per_res_in_full_desc *) 7632 &res_desc->transport_id[len]; 7633 } --- 13 unchanged lines hidden (view full) --- 7647 7648 ctl_set_success(ctsio); 7649 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7650 ctsio->be_move_done = ctl_config_move_done; 7651 ctl_datamove((union ctl_io *)ctsio); 7652 return (CTL_RETVAL_COMPLETE); 7653} 7654 |
7519static void 7520ctl_est_res_ua(struct ctl_lun *lun, uint32_t residx, ctl_ua_type ua) 7521{ 7522 int off = lun->ctl_softc->persis_offset; 7523 7524 if (residx >= off && residx < off + CTL_MAX_INITIATORS) 7525 ctl_est_ua(lun, residx - off, ua); 7526} 7527 | |
7528/* 7529 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7530 * it should return. 7531 */ 7532static int 7533ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7534 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7535 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7536 struct scsi_per_res_out_parms* param) 7537{ 7538 union ctl_ha_msg persis_io; | 7655/* 7656 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7657 * it should return. 7658 */ 7659static int 7660ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7661 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7662 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7663 struct scsi_per_res_out_parms* param) 7664{ 7665 union ctl_ha_msg persis_io; |
7539 int retval, i; 7540 int isc_retval; | 7666 int i; |
7541 | 7667 |
7542 retval = 0; 7543 | |
7544 mtx_lock(&lun->lun_lock); 7545 if (sa_res_key == 0) { 7546 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7547 /* validate scope and type */ 7548 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7549 SPR_LU_SCOPE) { 7550 mtx_unlock(&lun->lun_lock); 7551 ctl_set_invalid_field(/*ctsio*/ ctsio, --- 17 unchanged lines hidden (view full) --- 7569 ctl_done((union ctl_io *)ctsio); 7570 return (1); 7571 } 7572 7573 /* 7574 * Unregister everybody else and build UA for 7575 * them 7576 */ | 7668 mtx_lock(&lun->lun_lock); 7669 if (sa_res_key == 0) { 7670 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7671 /* validate scope and type */ 7672 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7673 SPR_LU_SCOPE) { 7674 mtx_unlock(&lun->lun_lock); 7675 ctl_set_invalid_field(/*ctsio*/ ctsio, --- 17 unchanged lines hidden (view full) --- 7693 ctl_done((union ctl_io *)ctsio); 7694 return (1); 7695 } 7696 7697 /* 7698 * Unregister everybody else and build UA for 7699 * them 7700 */ |
7577 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { | 7701 for(i = 0; i < CTL_MAX_INITIATORS; i++) { |
7578 if (i == residx || ctl_get_prkey(lun, i) == 0) 7579 continue; 7580 7581 ctl_clr_prkey(lun, i); | 7702 if (i == residx || ctl_get_prkey(lun, i) == 0) 7703 continue; 7704 7705 ctl_clr_prkey(lun, i); |
7582 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); | 7706 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); |
7583 } 7584 lun->pr_key_count = 1; 7585 lun->res_type = type; 7586 if (lun->res_type != SPR_TYPE_WR_EX_AR 7587 && lun->res_type != SPR_TYPE_EX_AC_AR) 7588 lun->pr_res_idx = residx; | 7707 } 7708 lun->pr_key_count = 1; 7709 lun->res_type = type; 7710 if (lun->res_type != SPR_TYPE_WR_EX_AR 7711 && lun->res_type != SPR_TYPE_EX_AC_AR) 7712 lun->pr_res_idx = residx; |
7713 lun->PRGeneration++; 7714 mtx_unlock(&lun->lun_lock); |
|
7589 7590 /* send msg to other side */ 7591 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7592 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7593 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7594 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7595 persis_io.pr.pr_info.res_type = type; 7596 memcpy(persis_io.pr.pr_info.sa_res_key, 7597 param->serv_act_res_key, 7598 sizeof(param->serv_act_res_key)); | 7715 7716 /* send msg to other side */ 7717 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7718 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7719 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7720 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7721 persis_io.pr.pr_info.res_type = type; 7722 memcpy(persis_io.pr.pr_info.sa_res_key, 7723 param->serv_act_res_key, 7724 sizeof(param->serv_act_res_key)); |
7599 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7600 &persis_io, sizeof(persis_io), 0)) > 7601 CTL_HA_STATUS_SUCCESS) { 7602 printf("CTL:Persis Out error returned " 7603 "from ctl_ha_msg_send %d\n", 7604 isc_retval); 7605 } | 7725 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7726 sizeof(persis_io.pr), M_WAITOK); |
7606 } else { 7607 /* not all registrants */ 7608 mtx_unlock(&lun->lun_lock); 7609 free(ctsio->kern_data_ptr, M_CTL); 7610 ctl_set_invalid_field(ctsio, 7611 /*sks_valid*/ 1, 7612 /*command*/ 0, 7613 /*field*/ 8, --- 24 unchanged lines hidden (view full) --- 7638 /*command*/ 0, 7639 /*field*/ 8, 7640 /*bit_valid*/ 0, 7641 /*bit*/ 0); 7642 ctl_done((union ctl_io *)ctsio); 7643 return (1); 7644 } 7645 | 7727 } else { 7728 /* not all registrants */ 7729 mtx_unlock(&lun->lun_lock); 7730 free(ctsio->kern_data_ptr, M_CTL); 7731 ctl_set_invalid_field(ctsio, 7732 /*sks_valid*/ 1, 7733 /*command*/ 0, 7734 /*field*/ 8, --- 24 unchanged lines hidden (view full) --- 7759 /*command*/ 0, 7760 /*field*/ 8, 7761 /*bit_valid*/ 0, 7762 /*bit*/ 0); 7763 ctl_done((union ctl_io *)ctsio); 7764 return (1); 7765 } 7766 |
7646 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { | 7767 for (i = 0; i < CTL_MAX_INITIATORS; i++) { |
7647 if (ctl_get_prkey(lun, i) != sa_res_key) 7648 continue; 7649 7650 found = 1; 7651 ctl_clr_prkey(lun, i); 7652 lun->pr_key_count--; | 7768 if (ctl_get_prkey(lun, i) != sa_res_key) 7769 continue; 7770 7771 found = 1; 7772 ctl_clr_prkey(lun, i); 7773 lun->pr_key_count--; |
7653 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); | 7774 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); |
7654 } 7655 if (!found) { 7656 mtx_unlock(&lun->lun_lock); 7657 free(ctsio->kern_data_ptr, M_CTL); 7658 ctl_set_reservation_conflict(ctsio); 7659 ctl_done((union ctl_io *)ctsio); 7660 return (CTL_RETVAL_COMPLETE); 7661 } | 7775 } 7776 if (!found) { 7777 mtx_unlock(&lun->lun_lock); 7778 free(ctsio->kern_data_ptr, M_CTL); 7779 ctl_set_reservation_conflict(ctsio); 7780 ctl_done((union ctl_io *)ctsio); 7781 return (CTL_RETVAL_COMPLETE); 7782 } |
7783 lun->PRGeneration++; 7784 mtx_unlock(&lun->lun_lock); 7785 |
|
7662 /* send msg to other side */ 7663 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7664 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7665 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7666 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7667 persis_io.pr.pr_info.res_type = type; 7668 memcpy(persis_io.pr.pr_info.sa_res_key, 7669 param->serv_act_res_key, 7670 sizeof(param->serv_act_res_key)); | 7786 /* send msg to other side */ 7787 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7788 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7789 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7790 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7791 persis_io.pr.pr_info.res_type = type; 7792 memcpy(persis_io.pr.pr_info.sa_res_key, 7793 param->serv_act_res_key, 7794 sizeof(param->serv_act_res_key)); |
7671 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7672 &persis_io, sizeof(persis_io), 0)) > 7673 CTL_HA_STATUS_SUCCESS) { 7674 printf("CTL:Persis Out error returned from " 7675 "ctl_ha_msg_send %d\n", isc_retval); 7676 } | 7795 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7796 sizeof(persis_io.pr), M_WAITOK); |
7677 } else { 7678 /* Reserved but not all registrants */ 7679 /* sa_res_key is res holder */ 7680 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 7681 /* validate scope and type */ 7682 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7683 SPR_LU_SCOPE) { 7684 mtx_unlock(&lun->lun_lock); --- 28 unchanged lines hidden (view full) --- 7713 * reservation generate UA(Reservations 7714 * Preempted) for all other registered nexuses 7715 * if the type has changed. Establish the new 7716 * reservation and holder. If res_key and 7717 * sa_res_key are the same do the above 7718 * except don't unregister the res holder. 7719 */ 7720 | 7797 } else { 7798 /* Reserved but not all registrants */ 7799 /* sa_res_key is res holder */ 7800 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 7801 /* validate scope and type */ 7802 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7803 SPR_LU_SCOPE) { 7804 mtx_unlock(&lun->lun_lock); --- 28 unchanged lines hidden (view full) --- 7833 * reservation generate UA(Reservations 7834 * Preempted) for all other registered nexuses 7835 * if the type has changed. Establish the new 7836 * reservation and holder. If res_key and 7837 * sa_res_key are the same do the above 7838 * except don't unregister the res holder. 7839 */ 7840 |
7721 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { | 7841 for(i = 0; i < CTL_MAX_INITIATORS; i++) { |
7722 if (i == residx || ctl_get_prkey(lun, i) == 0) 7723 continue; 7724 7725 if (sa_res_key == ctl_get_prkey(lun, i)) { 7726 ctl_clr_prkey(lun, i); 7727 lun->pr_key_count--; | 7842 if (i == residx || ctl_get_prkey(lun, i) == 0) 7843 continue; 7844 7845 if (sa_res_key == ctl_get_prkey(lun, i)) { 7846 ctl_clr_prkey(lun, i); 7847 lun->pr_key_count--; |
7728 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); | 7848 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); |
7729 } else if (type != lun->res_type 7730 && (lun->res_type == SPR_TYPE_WR_EX_RO 7731 || lun->res_type ==SPR_TYPE_EX_AC_RO)){ | 7849 } else if (type != lun->res_type 7850 && (lun->res_type == SPR_TYPE_WR_EX_RO 7851 || lun->res_type ==SPR_TYPE_EX_AC_RO)){ |
7732 ctl_est_res_ua(lun, i, CTL_UA_RES_RELEASE); | 7852 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); |
7733 } 7734 } 7735 lun->res_type = type; 7736 if (lun->res_type != SPR_TYPE_WR_EX_AR 7737 && lun->res_type != SPR_TYPE_EX_AC_AR) 7738 lun->pr_res_idx = residx; 7739 else 7740 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; | 7853 } 7854 } 7855 lun->res_type = type; 7856 if (lun->res_type != SPR_TYPE_WR_EX_AR 7857 && lun->res_type != SPR_TYPE_EX_AC_AR) 7858 lun->pr_res_idx = residx; 7859 else 7860 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; |
7861 lun->PRGeneration++; 7862 mtx_unlock(&lun->lun_lock); |
|
7741 7742 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7743 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7744 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7745 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7746 persis_io.pr.pr_info.res_type = type; 7747 memcpy(persis_io.pr.pr_info.sa_res_key, 7748 param->serv_act_res_key, 7749 sizeof(param->serv_act_res_key)); | 7863 7864 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7865 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7866 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7867 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7868 persis_io.pr.pr_info.res_type = type; 7869 memcpy(persis_io.pr.pr_info.sa_res_key, 7870 param->serv_act_res_key, 7871 sizeof(param->serv_act_res_key)); |
7750 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7751 &persis_io, sizeof(persis_io), 0)) > 7752 CTL_HA_STATUS_SUCCESS) { 7753 printf("CTL:Persis Out error returned " 7754 "from ctl_ha_msg_send %d\n", 7755 isc_retval); 7756 } | 7872 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7873 sizeof(persis_io.pr), M_WAITOK); |
7757 } else { 7758 /* 7759 * sa_res_key is not the res holder just 7760 * remove registrants 7761 */ 7762 int found=0; 7763 | 7874 } else { 7875 /* 7876 * sa_res_key is not the res holder just 7877 * remove registrants 7878 */ 7879 int found=0; 7880 |
7764 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { | 7881 for (i = 0; i < CTL_MAX_INITIATORS; i++) { |
7765 if (sa_res_key != ctl_get_prkey(lun, i)) 7766 continue; 7767 7768 found = 1; 7769 ctl_clr_prkey(lun, i); 7770 lun->pr_key_count--; | 7882 if (sa_res_key != ctl_get_prkey(lun, i)) 7883 continue; 7884 7885 found = 1; 7886 ctl_clr_prkey(lun, i); 7887 lun->pr_key_count--; |
7771 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); | 7888 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); |
7772 } 7773 7774 if (!found) { 7775 mtx_unlock(&lun->lun_lock); 7776 free(ctsio->kern_data_ptr, M_CTL); 7777 ctl_set_reservation_conflict(ctsio); 7778 ctl_done((union ctl_io *)ctsio); 7779 return (1); 7780 } | 7889 } 7890 7891 if (!found) { 7892 mtx_unlock(&lun->lun_lock); 7893 free(ctsio->kern_data_ptr, M_CTL); 7894 ctl_set_reservation_conflict(ctsio); 7895 ctl_done((union ctl_io *)ctsio); 7896 return (1); 7897 } |
7898 lun->PRGeneration++; 7899 mtx_unlock(&lun->lun_lock); 7900 |
|
7781 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7782 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7783 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7784 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7785 persis_io.pr.pr_info.res_type = type; 7786 memcpy(persis_io.pr.pr_info.sa_res_key, 7787 param->serv_act_res_key, 7788 sizeof(param->serv_act_res_key)); | 7901 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7902 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7903 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7904 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7905 persis_io.pr.pr_info.res_type = type; 7906 memcpy(persis_io.pr.pr_info.sa_res_key, 7907 param->serv_act_res_key, 7908 sizeof(param->serv_act_res_key)); |
7789 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7790 &persis_io, sizeof(persis_io), 0)) > 7791 CTL_HA_STATUS_SUCCESS) { 7792 printf("CTL:Persis Out error returned " 7793 "from ctl_ha_msg_send %d\n", 7794 isc_retval); 7795 } | 7909 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7910 sizeof(persis_io.pr), M_WAITOK); |
7796 } 7797 } | 7911 } 7912 } |
7798 7799 lun->PRGeneration++; 7800 mtx_unlock(&lun->lun_lock); 7801 7802 return (retval); | 7913 return (0); |
7803} 7804 7805static void 7806ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 7807{ 7808 uint64_t sa_res_key; 7809 int i; 7810 7811 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 7812 7813 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7814 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 7815 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 7816 if (sa_res_key == 0) { 7817 /* 7818 * Unregister everybody else and build UA for 7819 * them 7820 */ | 7914} 7915 7916static void 7917ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 7918{ 7919 uint64_t sa_res_key; 7920 int i; 7921 7922 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 7923 7924 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7925 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 7926 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 7927 if (sa_res_key == 0) { 7928 /* 7929 * Unregister everybody else and build UA for 7930 * them 7931 */ |
7821 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { | 7932 for(i = 0; i < CTL_MAX_INITIATORS; i++) { |
7822 if (i == msg->pr.pr_info.residx || 7823 ctl_get_prkey(lun, i) == 0) 7824 continue; 7825 7826 ctl_clr_prkey(lun, i); | 7933 if (i == msg->pr.pr_info.residx || 7934 ctl_get_prkey(lun, i) == 0) 7935 continue; 7936 7937 ctl_clr_prkey(lun, i); |
7827 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); | 7938 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); |
7828 } 7829 7830 lun->pr_key_count = 1; 7831 lun->res_type = msg->pr.pr_info.res_type; 7832 if (lun->res_type != SPR_TYPE_WR_EX_AR 7833 && lun->res_type != SPR_TYPE_EX_AC_AR) 7834 lun->pr_res_idx = msg->pr.pr_info.residx; 7835 } else { | 7939 } 7940 7941 lun->pr_key_count = 1; 7942 lun->res_type = msg->pr.pr_info.res_type; 7943 if (lun->res_type != SPR_TYPE_WR_EX_AR 7944 && lun->res_type != SPR_TYPE_EX_AC_AR) 7945 lun->pr_res_idx = msg->pr.pr_info.residx; 7946 } else { |
7836 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { | 7947 for (i = 0; i < CTL_MAX_INITIATORS; i++) { |
7837 if (sa_res_key == ctl_get_prkey(lun, i)) 7838 continue; 7839 7840 ctl_clr_prkey(lun, i); 7841 lun->pr_key_count--; | 7948 if (sa_res_key == ctl_get_prkey(lun, i)) 7949 continue; 7950 7951 ctl_clr_prkey(lun, i); 7952 lun->pr_key_count--; |
7842 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); | 7953 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); |
7843 } 7844 } 7845 } else { | 7954 } 7955 } 7956 } else { |
7846 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { | 7957 for (i = 0; i < CTL_MAX_INITIATORS; i++) { |
7847 if (i == msg->pr.pr_info.residx || 7848 ctl_get_prkey(lun, i) == 0) 7849 continue; 7850 7851 if (sa_res_key == ctl_get_prkey(lun, i)) { 7852 ctl_clr_prkey(lun, i); 7853 lun->pr_key_count--; | 7958 if (i == msg->pr.pr_info.residx || 7959 ctl_get_prkey(lun, i) == 0) 7960 continue; 7961 7962 if (sa_res_key == ctl_get_prkey(lun, i)) { 7963 ctl_clr_prkey(lun, i); 7964 lun->pr_key_count--; |
7854 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); | 7965 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); |
7855 } else if (msg->pr.pr_info.res_type != lun->res_type 7856 && (lun->res_type == SPR_TYPE_WR_EX_RO 7857 || lun->res_type == SPR_TYPE_EX_AC_RO)) { | 7966 } else if (msg->pr.pr_info.res_type != lun->res_type 7967 && (lun->res_type == SPR_TYPE_WR_EX_RO 7968 || lun->res_type == SPR_TYPE_EX_AC_RO)) { |
7858 ctl_est_res_ua(lun, i, CTL_UA_RES_RELEASE); | 7969 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); |
7859 } 7860 } 7861 lun->res_type = msg->pr.pr_info.res_type; 7862 if (lun->res_type != SPR_TYPE_WR_EX_AR 7863 && lun->res_type != SPR_TYPE_EX_AC_AR) 7864 lun->pr_res_idx = msg->pr.pr_info.residx; 7865 else 7866 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 7867 } 7868 lun->PRGeneration++; 7869 7870} 7871 7872 7873int 7874ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 7875{ 7876 int retval; | 7970 } 7971 } 7972 lun->res_type = msg->pr.pr_info.res_type; 7973 if (lun->res_type != SPR_TYPE_WR_EX_AR 7974 && lun->res_type != SPR_TYPE_EX_AC_AR) 7975 lun->pr_res_idx = msg->pr.pr_info.residx; 7976 else 7977 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 7978 } 7979 lun->PRGeneration++; 7980 7981} 7982 7983 7984int 7985ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 7986{ 7987 int retval; |
7877 int isc_retval; | |
7878 u_int32_t param_len; 7879 struct scsi_per_res_out *cdb; 7880 struct ctl_lun *lun; 7881 struct scsi_per_res_out_parms* param; 7882 struct ctl_softc *softc; 7883 uint32_t residx; 7884 uint64_t res_key, sa_res_key, key; 7885 uint8_t type; --- 53 unchanged lines hidden (view full) --- 7939 ctsio->be_move_done = ctl_config_move_done; 7940 ctl_datamove((union ctl_io *)ctsio); 7941 7942 return (CTL_RETVAL_COMPLETE); 7943 } 7944 7945 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 7946 | 7988 u_int32_t param_len; 7989 struct scsi_per_res_out *cdb; 7990 struct ctl_lun *lun; 7991 struct scsi_per_res_out_parms* param; 7992 struct ctl_softc *softc; 7993 uint32_t residx; 7994 uint64_t res_key, sa_res_key, key; 7995 uint8_t type; --- 53 unchanged lines hidden (view full) --- 8049 ctsio->be_move_done = ctl_config_move_done; 8050 ctl_datamove((union ctl_io *)ctsio); 8051 8052 return (CTL_RETVAL_COMPLETE); 8053 } 8054 8055 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 8056 |
7947 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); | 8057 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); |
7948 res_key = scsi_8btou64(param->res_key.key); 7949 sa_res_key = scsi_8btou64(param->serv_act_res_key); 7950 7951 /* 7952 * Validate the reservation key here except for SPRO_REG_IGNO 7953 * This must be done for all other service actions 7954 */ 7955 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { --- 98 unchanged lines hidden (view full) --- 8054 /* 8055 * If the reservation is a registrants 8056 * only type we need to generate a UA 8057 * for other registered inits. The 8058 * sense code should be RESERVATIONS 8059 * RELEASED 8060 */ 8061 | 8058 res_key = scsi_8btou64(param->res_key.key); 8059 sa_res_key = scsi_8btou64(param->serv_act_res_key); 8060 8061 /* 8062 * Validate the reservation key here except for SPRO_REG_IGNO 8063 * This must be done for all other service actions 8064 */ 8065 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { --- 98 unchanged lines hidden (view full) --- 8164 /* 8165 * If the reservation is a registrants 8166 * only type we need to generate a UA 8167 * for other registered inits. The 8168 * sense code should be RESERVATIONS 8169 * RELEASED 8170 */ 8171 |
8062 for (i = 0; i < CTL_MAX_INITIATORS;i++){ 8063 if (ctl_get_prkey(lun, i + 8064 softc->persis_offset) == 0) | 8172 for (i = softc->init_min; i < softc->init_max; i++){ 8173 if (ctl_get_prkey(lun, i) == 0) |
8065 continue; 8066 ctl_est_ua(lun, i, 8067 CTL_UA_RES_RELEASE); 8068 } 8069 } 8070 lun->res_type = 0; 8071 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8072 if (lun->pr_key_count==0) { 8073 lun->flags &= ~CTL_LUN_PR_RESERVED; 8074 lun->res_type = 0; 8075 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8076 } 8077 } | 8174 continue; 8175 ctl_est_ua(lun, i, 8176 CTL_UA_RES_RELEASE); 8177 } 8178 } 8179 lun->res_type = 0; 8180 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8181 if (lun->pr_key_count==0) { 8182 lun->flags &= ~CTL_LUN_PR_RESERVED; 8183 lun->res_type = 0; 8184 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8185 } 8186 } |
8187 lun->PRGeneration++; 8188 mtx_unlock(&lun->lun_lock); 8189 |
|
8078 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8079 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8080 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8081 persis_io.pr.pr_info.residx = residx; | 8190 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8191 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8192 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8193 persis_io.pr.pr_info.residx = residx; |
8082 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8083 &persis_io, sizeof(persis_io), 0 )) > 8084 CTL_HA_STATUS_SUCCESS) { 8085 printf("CTL:Persis Out error returned from " 8086 "ctl_ha_msg_send %d\n", isc_retval); 8087 } | 8194 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8195 sizeof(persis_io.pr), M_WAITOK); |
8088 } else /* sa_res_key != 0 */ { 8089 8090 /* 8091 * If we aren't registered currently then increment 8092 * the key count and set the registered flag. 8093 */ 8094 ctl_alloc_prkey(lun, residx); 8095 if (ctl_get_prkey(lun, residx) == 0) 8096 lun->pr_key_count++; 8097 ctl_set_prkey(lun, residx, sa_res_key); | 8196 } else /* sa_res_key != 0 */ { 8197 8198 /* 8199 * If we aren't registered currently then increment 8200 * the key count and set the registered flag. 8201 */ 8202 ctl_alloc_prkey(lun, residx); 8203 if (ctl_get_prkey(lun, residx) == 0) 8204 lun->pr_key_count++; 8205 ctl_set_prkey(lun, residx, sa_res_key); |
8206 lun->PRGeneration++; 8207 mtx_unlock(&lun->lun_lock); |
|
8098 8099 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8100 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8101 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8102 persis_io.pr.pr_info.residx = residx; 8103 memcpy(persis_io.pr.pr_info.sa_res_key, 8104 param->serv_act_res_key, 8105 sizeof(param->serv_act_res_key)); | 8208 8209 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8210 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8211 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8212 persis_io.pr.pr_info.residx = residx; 8213 memcpy(persis_io.pr.pr_info.sa_res_key, 8214 param->serv_act_res_key, 8215 sizeof(param->serv_act_res_key)); |
8106 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8107 &persis_io, sizeof(persis_io), 0)) > 8108 CTL_HA_STATUS_SUCCESS) { 8109 printf("CTL:Persis Out error returned from " 8110 "ctl_ha_msg_send %d\n", isc_retval); 8111 } | 8216 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8217 sizeof(persis_io.pr), M_WAITOK); |
8112 } | 8218 } |
8113 lun->PRGeneration++; 8114 mtx_unlock(&lun->lun_lock); | |
8115 8116 break; 8117 } 8118 case SPRO_RESERVE: 8119#if 0 8120 printf("Reserve executed type %d\n", type); 8121#endif 8122 mtx_lock(&lun->lun_lock); --- 30 unchanged lines hidden (view full) --- 8153 mtx_unlock(&lun->lun_lock); 8154 8155 /* send msg to other side */ 8156 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8157 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8158 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8159 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8160 persis_io.pr.pr_info.res_type = type; | 8219 8220 break; 8221 } 8222 case SPRO_RESERVE: 8223#if 0 8224 printf("Reserve executed type %d\n", type); 8225#endif 8226 mtx_lock(&lun->lun_lock); --- 30 unchanged lines hidden (view full) --- 8257 mtx_unlock(&lun->lun_lock); 8258 8259 /* send msg to other side */ 8260 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8261 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8262 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8263 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8264 persis_io.pr.pr_info.res_type = type; |
8161 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8162 &persis_io, sizeof(persis_io), 0)) > 8163 CTL_HA_STATUS_SUCCESS) { 8164 printf("CTL:Persis Out error returned from " 8165 "ctl_ha_msg_send %d\n", isc_retval); 8166 } | 8265 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8266 sizeof(persis_io.pr), M_WAITOK); |
8167 } 8168 break; 8169 8170 case SPRO_RELEASE: 8171 mtx_lock(&lun->lun_lock); 8172 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8173 /* No reservation exists return good status */ 8174 mtx_unlock(&lun->lun_lock); --- 27 unchanged lines hidden (view full) --- 8202 8203 /* 8204 * if this isn't an exclusive access 8205 * res generate UA for all other 8206 * registrants. 8207 */ 8208 if (type != SPR_TYPE_EX_AC 8209 && type != SPR_TYPE_WR_EX) { | 8267 } 8268 break; 8269 8270 case SPRO_RELEASE: 8271 mtx_lock(&lun->lun_lock); 8272 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8273 /* No reservation exists return good status */ 8274 mtx_unlock(&lun->lun_lock); --- 27 unchanged lines hidden (view full) --- 8302 8303 /* 8304 * if this isn't an exclusive access 8305 * res generate UA for all other 8306 * registrants. 8307 */ 8308 if (type != SPR_TYPE_EX_AC 8309 && type != SPR_TYPE_WR_EX) { |
8210 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8211 if (i == residx || 8212 ctl_get_prkey(lun, 8213 i + softc->persis_offset) == 0) | 8310 for (i = softc->init_min; i < softc->init_max; i++) { 8311 if (i == residx || ctl_get_prkey(lun, i) == 0) |
8214 continue; 8215 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8216 } 8217 } 8218 mtx_unlock(&lun->lun_lock); | 8312 continue; 8313 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8314 } 8315 } 8316 mtx_unlock(&lun->lun_lock); |
8317 |
|
8219 /* Send msg to other side */ 8220 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8221 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8222 persis_io.pr.pr_info.action = CTL_PR_RELEASE; | 8318 /* Send msg to other side */ 8319 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8320 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8321 persis_io.pr.pr_info.action = CTL_PR_RELEASE; |
8223 if ((isc_retval=ctl_ha_msg_send( CTL_HA_CHAN_CTL, &persis_io, 8224 sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { 8225 printf("CTL:Persis Out error returned from " 8226 "ctl_ha_msg_send %d\n", isc_retval); 8227 } | 8322 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8323 sizeof(persis_io.pr), M_WAITOK); |
8228 break; 8229 8230 case SPRO_CLEAR: 8231 /* send msg to other side */ 8232 8233 mtx_lock(&lun->lun_lock); 8234 lun->flags &= ~CTL_LUN_PR_RESERVED; 8235 lun->res_type = 0; 8236 lun->pr_key_count = 0; 8237 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8238 8239 ctl_clr_prkey(lun, residx); | 8324 break; 8325 8326 case SPRO_CLEAR: 8327 /* send msg to other side */ 8328 8329 mtx_lock(&lun->lun_lock); 8330 lun->flags &= ~CTL_LUN_PR_RESERVED; 8331 lun->res_type = 0; 8332 lun->pr_key_count = 0; 8333 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8334 8335 ctl_clr_prkey(lun, residx); |
8240 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) | 8336 for (i = 0; i < CTL_MAX_INITIATORS; i++) |
8241 if (ctl_get_prkey(lun, i) != 0) { 8242 ctl_clr_prkey(lun, i); | 8337 if (ctl_get_prkey(lun, i) != 0) { 8338 ctl_clr_prkey(lun, i); |
8243 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); | 8339 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); |
8244 } 8245 lun->PRGeneration++; 8246 mtx_unlock(&lun->lun_lock); | 8340 } 8341 lun->PRGeneration++; 8342 mtx_unlock(&lun->lun_lock); |
8343 |
|
8247 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8248 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8249 persis_io.pr.pr_info.action = CTL_PR_CLEAR; | 8344 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8345 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8346 persis_io.pr.pr_info.action = CTL_PR_CLEAR; |
8250 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8251 sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { 8252 printf("CTL:Persis Out error returned from " 8253 "ctl_ha_msg_send %d\n", isc_retval); 8254 } | 8347 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8348 sizeof(persis_io.pr), M_WAITOK); |
8255 break; 8256 8257 case SPRO_PREEMPT: 8258 case SPRO_PRE_ABO: { 8259 int nretval; 8260 8261 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 8262 residx, ctsio, cdb, param); --- 58 unchanged lines hidden (view full) --- 8321 /* 8322 * If the reservation is a registrants 8323 * only type we need to generate a UA 8324 * for other registered inits. The 8325 * sense code should be RESERVATIONS 8326 * RELEASED 8327 */ 8328 | 8349 break; 8350 8351 case SPRO_PREEMPT: 8352 case SPRO_PRE_ABO: { 8353 int nretval; 8354 8355 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 8356 residx, ctsio, cdb, param); --- 58 unchanged lines hidden (view full) --- 8415 /* 8416 * If the reservation is a registrants 8417 * only type we need to generate a UA 8418 * for other registered inits. The 8419 * sense code should be RESERVATIONS 8420 * RELEASED 8421 */ 8422 |
8329 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8330 if (ctl_get_prkey(lun, i + 8331 softc->persis_offset) == 0) | 8423 for (i = softc->init_min; i < softc->init_max; i++) { 8424 if (ctl_get_prkey(lun, i) == 0) |
8332 continue; 8333 8334 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8335 } 8336 } 8337 lun->res_type = 0; 8338 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8339 if (lun->pr_key_count==0) { --- 14 unchanged lines hidden (view full) --- 8354 8355 case CTL_PR_RELEASE: 8356 /* 8357 * if this isn't an exclusive access res generate UA for all 8358 * other registrants. 8359 */ 8360 if (lun->res_type != SPR_TYPE_EX_AC 8361 && lun->res_type != SPR_TYPE_WR_EX) { | 8425 continue; 8426 8427 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8428 } 8429 } 8430 lun->res_type = 0; 8431 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8432 if (lun->pr_key_count==0) { --- 14 unchanged lines hidden (view full) --- 8447 8448 case CTL_PR_RELEASE: 8449 /* 8450 * if this isn't an exclusive access res generate UA for all 8451 * other registrants. 8452 */ 8453 if (lun->res_type != SPR_TYPE_EX_AC 8454 && lun->res_type != SPR_TYPE_WR_EX) { |
8362 for (i = 0; i < CTL_MAX_INITIATORS; i++) 8363 if (ctl_get_prkey(lun, i + softc->persis_offset) != 0) | 8455 for (i = softc->init_min; i < softc->init_max; i++) 8456 if (ctl_get_prkey(lun, i) != 0) |
8364 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8365 } 8366 8367 lun->flags &= ~CTL_LUN_PR_RESERVED; 8368 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8369 lun->res_type = 0; 8370 break; 8371 8372 case CTL_PR_PREEMPT: 8373 ctl_pro_preempt_other(lun, msg); 8374 break; 8375 case CTL_PR_CLEAR: 8376 lun->flags &= ~CTL_LUN_PR_RESERVED; 8377 lun->res_type = 0; 8378 lun->pr_key_count = 0; 8379 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8380 | 8457 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8458 } 8459 8460 lun->flags &= ~CTL_LUN_PR_RESERVED; 8461 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8462 lun->res_type = 0; 8463 break; 8464 8465 case CTL_PR_PREEMPT: 8466 ctl_pro_preempt_other(lun, msg); 8467 break; 8468 case CTL_PR_CLEAR: 8469 lun->flags &= ~CTL_LUN_PR_RESERVED; 8470 lun->res_type = 0; 8471 lun->pr_key_count = 0; 8472 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8473 |
8381 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { | 8474 for (i=0; i < CTL_MAX_INITIATORS; i++) { |
8382 if (ctl_get_prkey(lun, i) == 0) 8383 continue; 8384 ctl_clr_prkey(lun, i); | 8475 if (ctl_get_prkey(lun, i) == 0) 8476 continue; 8477 ctl_clr_prkey(lun, i); |
8385 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); | 8478 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); |
8386 } 8387 lun->PRGeneration++; 8388 break; 8389 } 8390 8391 mtx_unlock(&lun->lun_lock); 8392} 8393 --- 755 unchanged lines hidden (view full) --- 9149 CTL_DEBUG_PRINT(("ctl_tur\n")); 9150 9151 ctl_set_success(ctsio); 9152 ctl_done((union ctl_io *)ctsio); 9153 9154 return (CTL_RETVAL_COMPLETE); 9155} 9156 | 8479 } 8480 lun->PRGeneration++; 8481 break; 8482 } 8483 8484 mtx_unlock(&lun->lun_lock); 8485} 8486 --- 755 unchanged lines hidden (view full) --- 9242 CTL_DEBUG_PRINT(("ctl_tur\n")); 9243 9244 ctl_set_success(ctsio); 9245 ctl_done((union ctl_io *)ctsio); 9246 9247 return (CTL_RETVAL_COMPLETE); 9248} 9249 |
9157#ifdef notyet 9158static int 9159ctl_cmddt_inquiry(struct ctl_scsiio *ctsio) 9160{ 9161 9162} 9163#endif 9164 | |
9165/* 9166 * SCSI VPD page 0x00, the Supported VPD Pages page. 9167 */ 9168static int 9169ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9170{ 9171 struct scsi_vpd_supported_pages *pages; 9172 int sup_page_size; --- 259 unchanged lines hidden (view full) --- 9432 struct ctl_softc *softc; 9433 struct ctl_lun *lun; 9434 struct ctl_port *port; 9435 int data_len; 9436 uint8_t proto; 9437 9438 softc = control_softc; 9439 | 9250/* 9251 * SCSI VPD page 0x00, the Supported VPD Pages page. 9252 */ 9253static int 9254ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9255{ 9256 struct scsi_vpd_supported_pages *pages; 9257 int sup_page_size; --- 259 unchanged lines hidden (view full) --- 9517 struct ctl_softc *softc; 9518 struct ctl_lun *lun; 9519 struct ctl_port *port; 9520 int data_len; 9521 uint8_t proto; 9522 9523 softc = control_softc; 9524 |
9440 port = softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]; | 9525 port = ctl_io_port(&ctsio->io_hdr); |
9441 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9442 9443 data_len = sizeof(struct scsi_vpd_device_id) + 9444 sizeof(struct scsi_vpd_id_descriptor) + 9445 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9446 sizeof(struct scsi_vpd_id_descriptor) + 9447 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9448 if (lun && lun->lun_devid) 9449 data_len += lun->lun_devid->len; | 9526 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9527 9528 data_len = sizeof(struct scsi_vpd_device_id) + 9529 sizeof(struct scsi_vpd_id_descriptor) + 9530 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9531 sizeof(struct scsi_vpd_id_descriptor) + 9532 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9533 if (lun && lun->lun_devid) 9534 data_len += lun->lun_devid->len; |
9450 if (port->port_devid) | 9535 if (port && port->port_devid) |
9451 data_len += port->port_devid->len; | 9536 data_len += port->port_devid->len; |
9452 if (port->target_devid) | 9537 if (port && port->target_devid) |
9453 data_len += port->target_devid->len; 9454 9455 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9456 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9457 ctsio->kern_sg_entries = 0; 9458 9459 if (data_len < alloc_len) { 9460 ctsio->residual = alloc_len - data_len; --- 15 unchanged lines hidden (view full) --- 9476 if (lun != NULL) 9477 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9478 lun->be_lun->lun_type; 9479 else 9480 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9481 devid_ptr->page_code = SVPD_DEVICE_ID; 9482 scsi_ulto2b(data_len - 4, devid_ptr->length); 9483 | 9538 data_len += port->target_devid->len; 9539 9540 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9541 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9542 ctsio->kern_sg_entries = 0; 9543 9544 if (data_len < alloc_len) { 9545 ctsio->residual = alloc_len - data_len; --- 15 unchanged lines hidden (view full) --- 9561 if (lun != NULL) 9562 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9563 lun->be_lun->lun_type; 9564 else 9565 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9566 devid_ptr->page_code = SVPD_DEVICE_ID; 9567 scsi_ulto2b(data_len - 4, devid_ptr->length); 9568 |
9484 if (port->port_type == CTL_PORT_FC) | 9569 if (port && port->port_type == CTL_PORT_FC) |
9485 proto = SCSI_PROTO_FC << 4; | 9570 proto = SCSI_PROTO_FC << 4; |
9486 else if (port->port_type == CTL_PORT_ISCSI) | 9571 else if (port && port->port_type == CTL_PORT_ISCSI) |
9487 proto = SCSI_PROTO_ISCSI << 4; 9488 else 9489 proto = SCSI_PROTO_SPI << 4; 9490 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9491 9492 /* 9493 * We're using a LUN association here. i.e., this device ID is a 9494 * per-LUN identifier. 9495 */ 9496 if (lun && lun->lun_devid) { 9497 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); 9498 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9499 lun->lun_devid->len); 9500 } 9501 9502 /* 9503 * This is for the WWPN which is a port association. 9504 */ | 9572 proto = SCSI_PROTO_ISCSI << 4; 9573 else 9574 proto = SCSI_PROTO_SPI << 4; 9575 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9576 9577 /* 9578 * We're using a LUN association here. i.e., this device ID is a 9579 * per-LUN identifier. 9580 */ 9581 if (lun && lun->lun_devid) { 9582 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); 9583 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9584 lun->lun_devid->len); 9585 } 9586 9587 /* 9588 * This is for the WWPN which is a port association. 9589 */ |
9505 if (port->port_devid) { | 9590 if (port && port->port_devid) { |
9506 memcpy(desc, port->port_devid->data, port->port_devid->len); 9507 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9508 port->port_devid->len); 9509 } 9510 9511 /* 9512 * This is for the Relative Target Port(type 4h) identifier 9513 */ --- 7 unchanged lines hidden (view full) --- 9521 9522 /* 9523 * This is for the Target Port Group(type 5h) identifier 9524 */ 9525 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9526 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9527 SVPD_ID_TYPE_TPORTGRP; 9528 desc->length = 4; | 9591 memcpy(desc, port->port_devid->data, port->port_devid->len); 9592 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9593 port->port_devid->len); 9594 } 9595 9596 /* 9597 * This is for the Relative Target Port(type 4h) identifier 9598 */ --- 7 unchanged lines hidden (view full) --- 9606 9607 /* 9608 * This is for the Target Port Group(type 5h) identifier 9609 */ 9610 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9611 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9612 SVPD_ID_TYPE_TPORTGRP; 9613 desc->length = 4; |
9529 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port / CTL_MAX_PORTS + 1, | 9614 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port / softc->port_cnt + 1, |
9530 &desc->identifier[2]); 9531 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9532 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 9533 9534 /* 9535 * This is for the Target identifier 9536 */ | 9615 &desc->identifier[2]); 9616 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9617 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 9618 9619 /* 9620 * This is for the Target identifier 9621 */ |
9537 if (port->target_devid) { | 9622 if (port && port->target_devid) { |
9538 memcpy(desc, port->target_devid->data, port->target_devid->len); 9539 } 9540 9541 ctl_set_success(ctsio); 9542 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9543 ctsio->be_move_done = ctl_config_move_done; 9544 ctl_datamove((union ctl_io *)ctsio); 9545 return (CTL_RETVAL_COMPLETE); 9546} 9547 9548static int 9549ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 9550{ 9551 struct ctl_softc *softc = control_softc; 9552 struct scsi_vpd_scsi_ports *sp; 9553 struct scsi_vpd_port_designation *pd; 9554 struct scsi_vpd_port_designation_cont *pdc; 9555 struct ctl_lun *lun; 9556 struct ctl_port *port; | 9623 memcpy(desc, port->target_devid->data, port->target_devid->len); 9624 } 9625 9626 ctl_set_success(ctsio); 9627 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9628 ctsio->be_move_done = ctl_config_move_done; 9629 ctl_datamove((union ctl_io *)ctsio); 9630 return (CTL_RETVAL_COMPLETE); 9631} 9632 9633static int 9634ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 9635{ 9636 struct ctl_softc *softc = control_softc; 9637 struct scsi_vpd_scsi_ports *sp; 9638 struct scsi_vpd_port_designation *pd; 9639 struct scsi_vpd_port_designation_cont *pdc; 9640 struct ctl_lun *lun; 9641 struct ctl_port *port; |
9557 int data_len, num_target_ports, iid_len, id_len, g, pg, p; 9558 int num_target_port_groups; | 9642 int data_len, num_target_ports, iid_len, id_len; |
9559 9560 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9561 | 9643 9644 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9645 |
9562 if (softc->is_single) 9563 num_target_port_groups = 1; 9564 else 9565 num_target_port_groups = NUM_TARGET_PORT_GROUPS; | |
9566 num_target_ports = 0; 9567 iid_len = 0; 9568 id_len = 0; 9569 mtx_lock(&softc->ctl_lock); 9570 STAILQ_FOREACH(port, &softc->port_list, links) { 9571 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9572 continue; 9573 if (lun != NULL && 9574 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 9575 continue; 9576 num_target_ports++; 9577 if (port->init_devid) 9578 iid_len += port->init_devid->len; 9579 if (port->port_devid) 9580 id_len += port->port_devid->len; 9581 } 9582 mtx_unlock(&softc->ctl_lock); 9583 | 9646 num_target_ports = 0; 9647 iid_len = 0; 9648 id_len = 0; 9649 mtx_lock(&softc->ctl_lock); 9650 STAILQ_FOREACH(port, &softc->port_list, links) { 9651 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9652 continue; 9653 if (lun != NULL && 9654 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 9655 continue; 9656 num_target_ports++; 9657 if (port->init_devid) 9658 iid_len += port->init_devid->len; 9659 if (port->port_devid) 9660 id_len += port->port_devid->len; 9661 } 9662 mtx_unlock(&softc->ctl_lock); 9663 |
9584 data_len = sizeof(struct scsi_vpd_scsi_ports) + num_target_port_groups * | 9664 data_len = sizeof(struct scsi_vpd_scsi_ports) + |
9585 num_target_ports * (sizeof(struct scsi_vpd_port_designation) + 9586 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; 9587 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9588 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; 9589 ctsio->kern_sg_entries = 0; 9590 9591 if (data_len < alloc_len) { 9592 ctsio->residual = alloc_len - data_len; --- 20 unchanged lines hidden (view full) --- 9613 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9614 9615 sp->page_code = SVPD_SCSI_PORTS; 9616 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 9617 sp->page_length); 9618 pd = &sp->design[0]; 9619 9620 mtx_lock(&softc->ctl_lock); | 9665 num_target_ports * (sizeof(struct scsi_vpd_port_designation) + 9666 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; 9667 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9668 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; 9669 ctsio->kern_sg_entries = 0; 9670 9671 if (data_len < alloc_len) { 9672 ctsio->residual = alloc_len - data_len; --- 20 unchanged lines hidden (view full) --- 9693 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9694 9695 sp->page_code = SVPD_SCSI_PORTS; 9696 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 9697 sp->page_length); 9698 pd = &sp->design[0]; 9699 9700 mtx_lock(&softc->ctl_lock); |
9621 pg = softc->port_offset / CTL_MAX_PORTS; 9622 for (g = 0; g < num_target_port_groups; g++) { 9623 STAILQ_FOREACH(port, &softc->port_list, links) { 9624 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9625 continue; 9626 if (lun != NULL && 9627 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 9628 continue; 9629 p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS; 9630 scsi_ulto2b(p, pd->relative_port_id); 9631 if (port->init_devid && g == pg) { 9632 iid_len = port->init_devid->len; 9633 memcpy(pd->initiator_transportid, 9634 port->init_devid->data, port->init_devid->len); 9635 } else 9636 iid_len = 0; 9637 scsi_ulto2b(iid_len, pd->initiator_transportid_length); 9638 pdc = (struct scsi_vpd_port_designation_cont *) 9639 (&pd->initiator_transportid[iid_len]); 9640 if (port->port_devid && g == pg) { 9641 id_len = port->port_devid->len; 9642 memcpy(pdc->target_port_descriptors, 9643 port->port_devid->data, port->port_devid->len); 9644 } else 9645 id_len = 0; 9646 scsi_ulto2b(id_len, pdc->target_port_descriptors_length); 9647 pd = (struct scsi_vpd_port_designation *) 9648 ((uint8_t *)pdc->target_port_descriptors + id_len); 9649 } | 9701 STAILQ_FOREACH(port, &softc->port_list, links) { 9702 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9703 continue; 9704 if (lun != NULL && 9705 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 9706 continue; 9707 scsi_ulto2b(port->targ_port, pd->relative_port_id); 9708 if (port->init_devid) { 9709 iid_len = port->init_devid->len; 9710 memcpy(pd->initiator_transportid, 9711 port->init_devid->data, port->init_devid->len); 9712 } else 9713 iid_len = 0; 9714 scsi_ulto2b(iid_len, pd->initiator_transportid_length); 9715 pdc = (struct scsi_vpd_port_designation_cont *) 9716 (&pd->initiator_transportid[iid_len]); 9717 if (port->port_devid) { 9718 id_len = port->port_devid->len; 9719 memcpy(pdc->target_port_descriptors, 9720 port->port_devid->data, port->port_devid->len); 9721 } else 9722 id_len = 0; 9723 scsi_ulto2b(id_len, 
pdc->target_port_descriptors_length); 9724 pd = (struct scsi_vpd_port_designation *) 9725 ((uint8_t *)pdc->target_port_descriptors + id_len); |
9650 } 9651 mtx_unlock(&softc->ctl_lock); 9652 9653 ctl_set_success(ctsio); 9654 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9655 ctsio->be_move_done = ctl_config_move_done; 9656 ctl_datamove((union ctl_io *)ctsio); 9657 return (CTL_RETVAL_COMPLETE); --- 250 unchanged lines hidden (view full) --- 9908 * Standard INQUIRY data. 9909 */ 9910static int 9911ctl_inquiry_std(struct ctl_scsiio *ctsio) 9912{ 9913 struct scsi_inquiry_data *inq_ptr; 9914 struct scsi_inquiry *cdb; 9915 struct ctl_softc *softc; | 9726 } 9727 mtx_unlock(&softc->ctl_lock); 9728 9729 ctl_set_success(ctsio); 9730 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9731 ctsio->be_move_done = ctl_config_move_done; 9732 ctl_datamove((union ctl_io *)ctsio); 9733 return (CTL_RETVAL_COMPLETE); --- 250 unchanged lines hidden (view full) --- 9984 * Standard INQUIRY data. 9985 */ 9986static int 9987ctl_inquiry_std(struct ctl_scsiio *ctsio) 9988{ 9989 struct scsi_inquiry_data *inq_ptr; 9990 struct scsi_inquiry *cdb; 9991 struct ctl_softc *softc; |
9992 struct ctl_port *port; |
|
9916 struct ctl_lun *lun; 9917 char *val; 9918 uint32_t alloc_len, data_len; 9919 ctl_port_type port_type; 9920 9921 softc = control_softc; 9922 9923 /* 9924 * Figure out whether we're talking to a Fibre Channel port or not. 9925 * We treat the ioctl front end, and any SCSI adapters, as packetized 9926 * SCSI front ends. 9927 */ | 9993 struct ctl_lun *lun; 9994 char *val; 9995 uint32_t alloc_len, data_len; 9996 ctl_port_type port_type; 9997 9998 softc = control_softc; 9999 10000 /* 10001 * Figure out whether we're talking to a Fibre Channel port or not. 10002 * We treat the ioctl front end, and any SCSI adapters, as packetized 10003 * SCSI front ends. 10004 */ |
9928 port_type = softc->ctl_ports[ 9929 ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type; | 10005 port = ctl_io_port(&ctsio->io_hdr); 10006 if (port != NULL) 10007 port_type = port->port_type; 10008 else 10009 port_type = CTL_PORT_SCSI; |
9930 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) 9931 port_type = CTL_PORT_SCSI; 9932 9933 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9934 cdb = (struct scsi_inquiry *)ctsio->cdb; 9935 alloc_len = scsi_2btoul(cdb->length); 9936 9937 /* --- 13 unchanged lines hidden (view full) --- 9951 ctsio->kern_data_len = data_len; 9952 ctsio->kern_total_len = data_len; 9953 } else { 9954 ctsio->residual = 0; 9955 ctsio->kern_data_len = alloc_len; 9956 ctsio->kern_total_len = alloc_len; 9957 } 9958 | 10010 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) 10011 port_type = CTL_PORT_SCSI; 10012 10013 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10014 cdb = (struct scsi_inquiry *)ctsio->cdb; 10015 alloc_len = scsi_2btoul(cdb->length); 10016 10017 /* --- 13 unchanged lines hidden (view full) --- 10031 ctsio->kern_data_len = data_len; 10032 ctsio->kern_total_len = data_len; 10033 } else { 10034 ctsio->residual = 0; 10035 ctsio->kern_data_len = alloc_len; 10036 ctsio->kern_total_len = alloc_len; 10037 } 10038 |
9959 /* 9960 * If we have a LUN configured, report it as connected. Otherwise, 9961 * report that it is offline or no device is supported, depending 9962 * on the value of inquiry_pq_no_lun. 9963 * 9964 * According to the spec (SPC-4 r34), the peripheral qualifier 9965 * SID_QUAL_LU_OFFLINE (001b) is used in the following scenario: 9966 * 9967 * "A peripheral device having the specified peripheral device type 9968 * is not connected to this logical unit. However, the device 9969 * server is capable of supporting the specified peripheral device 9970 * type on this logical unit." 9971 * 9972 * According to the same spec, the peripheral qualifier 9973 * SID_QUAL_BAD_LU (011b) is used in this scenario: 9974 * 9975 * "The device server is not capable of supporting a peripheral 9976 * device on this logical unit. For this peripheral qualifier the 9977 * peripheral device type shall be set to 1Fh. All other peripheral 9978 * device type values are reserved for this peripheral qualifier." 9979 * 9980 * Given the text, it would seem that we probably want to report that 9981 * the LUN is offline here. There is no LUN connected, but we can 9982 * support a LUN at the given LUN number. 9983 * 9984 * In the real world, though, it sounds like things are a little 9985 * different: 9986 * 9987 * - Linux, when presented with a LUN with the offline peripheral 9988 * qualifier, will create an sg driver instance for it. So when 9989 * you attach it to CTL, you wind up with a ton of sg driver 9990 * instances. (One for every LUN that Linux bothered to probe.) 9991 * Linux does this despite the fact that it issues a REPORT LUNs 9992 * to LUN 0 to get the inventory of supported LUNs. 9993 * 9994 * - There is other anecdotal evidence (from Emulex folks) about 9995 * arrays that use the offline peripheral qualifier for LUNs that 9996 * are on the "passive" path in an active/passive array. 
9997 * 9998 * So the solution is provide a hopefully reasonable default 9999 * (return bad/no LUN) and allow the user to change the behavior 10000 * with a tunable/sysctl variable. 10001 */ 10002 if (lun != NULL) 10003 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10004 lun->be_lun->lun_type; 10005 else if (softc->inquiry_pq_no_lun == 0) 10006 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10007 else | 10039 if (lun != NULL) { 10040 if ((lun->flags & CTL_LUN_PRIMARY_SC) || 10041 softc->ha_link >= CTL_HA_LINK_UNKNOWN) { 10042 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10043 lun->be_lun->lun_type; 10044 } else { 10045 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | 10046 lun->be_lun->lun_type; 10047 } 10048 } else |
10008 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; 10009 10010 /* RMB in byte 2 is 0 */ 10011 inq_ptr->version = SCSI_REV_SPC4; 10012 10013 /* 10014 * According to SAM-3, even if a device only supports a single 10015 * level of LUN addressing, it should still set the HISUP bit: --- 573 unchanged lines hidden (view full) --- 10589/* 10590 * Assumptions: 10591 * - An I/O has just completed, and has been removed from the per-LUN OOA 10592 * queue, so some items on the blocked queue may now be unblocked. 10593 */ 10594static int 10595ctl_check_blocked(struct ctl_lun *lun) 10596{ | 10049 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; 10050 10051 /* RMB in byte 2 is 0 */ 10052 inq_ptr->version = SCSI_REV_SPC4; 10053 10054 /* 10055 * According to SAM-3, even if a device only supports a single 10056 * level of LUN addressing, it should still set the HISUP bit: --- 573 unchanged lines hidden (view full) --- 10630/* 10631 * Assumptions: 10632 * - An I/O has just completed, and has been removed from the per-LUN OOA 10633 * queue, so some items on the blocked queue may now be unblocked. 10634 */ 10635static int 10636ctl_check_blocked(struct ctl_lun *lun) 10637{ |
10638 struct ctl_softc *softc = lun->ctl_softc; |
|
10597 union ctl_io *cur_blocked, *next_blocked; 10598 10599 mtx_assert(&lun->lun_lock, MA_OWNED); 10600 10601 /* 10602 * Run forward from the head of the blocked queue, checking each 10603 * entry against the I/Os prior to it on the OOA queue to see if 10604 * there is still any blockage. --- 29 unchanged lines hidden (view full) --- 10634 /* 10635 * This shouldn't happen! In theory we've already 10636 * checked this command for overlap... 10637 */ 10638 break; 10639 case CTL_ACTION_PASS: 10640 case CTL_ACTION_SKIP: { 10641 const struct ctl_cmd_entry *entry; | 10639 union ctl_io *cur_blocked, *next_blocked; 10640 10641 mtx_assert(&lun->lun_lock, MA_OWNED); 10642 10643 /* 10644 * Run forward from the head of the blocked queue, checking each 10645 * entry against the I/Os prior to it on the OOA queue to see if 10646 * there is still any blockage. --- 29 unchanged lines hidden (view full) --- 10676 /* 10677 * This shouldn't happen! In theory we've already 10678 * checked this command for overlap... 10679 */ 10680 break; 10681 case CTL_ACTION_PASS: 10682 case CTL_ACTION_SKIP: { 10683 const struct ctl_cmd_entry *entry; |
10642 int isc_retval; | |
10643 10644 /* 10645 * The skip case shouldn't happen, this transaction 10646 * should have never made it onto the blocked queue. 10647 */ 10648 /* 10649 * This I/O is no longer blocked, we can remove it 10650 * from the blocked queue. Since this is a TAILQ 10651 * (doubly linked list), we can do O(1) removals 10652 * from any place on the list. 10653 */ 10654 TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr, 10655 blocked_links); 10656 cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 10657 | 10684 10685 /* 10686 * The skip case shouldn't happen, this transaction 10687 * should have never made it onto the blocked queue. 10688 */ 10689 /* 10690 * This I/O is no longer blocked, we can remove it 10691 * from the blocked queue. Since this is a TAILQ 10692 * (doubly linked list), we can do O(1) removals 10693 * from any place on the list. 10694 */ 10695 TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr, 10696 blocked_links); 10697 cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 10698 |
10658 if (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC){ | 10699 if ((softc->ha_mode != CTL_HA_MODE_XFER) && 10700 (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)){ |
10659 /* 10660 * Need to send IO back to original side to 10661 * run 10662 */ 10663 union ctl_ha_msg msg_info; 10664 | 10701 /* 10702 * Need to send IO back to original side to 10703 * run 10704 */ 10705 union ctl_ha_msg msg_info; 10706 |
10707 cur_blocked->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; |
|
10665 msg_info.hdr.original_sc = 10666 cur_blocked->io_hdr.original_sc; 10667 msg_info.hdr.serializing_sc = cur_blocked; 10668 msg_info.hdr.msg_type = CTL_MSG_R2R; | 10708 msg_info.hdr.original_sc = 10709 cur_blocked->io_hdr.original_sc; 10710 msg_info.hdr.serializing_sc = cur_blocked; 10711 msg_info.hdr.msg_type = CTL_MSG_R2R; |
10669 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 10670 &msg_info, sizeof(msg_info), 0)) > 10671 CTL_HA_STATUS_SUCCESS) { 10672 printf("CTL:Check Blocked error from " 10673 "ctl_ha_msg_send %d\n", 10674 isc_retval); 10675 } | 10712 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 10713 sizeof(msg_info.hdr), M_NOWAIT); |
10676 break; 10677 } 10678 entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL); 10679 10680 /* 10681 * Check this I/O for LUN state changes that may 10682 * have happened while this command was blocked. 10683 * The LUN state may have been changed by a command --- 42 unchanged lines hidden (view full) --- 10726 int retval; 10727 uint32_t residx; 10728 10729 retval = 0; 10730 10731 mtx_assert(&lun->lun_lock, MA_OWNED); 10732 10733 /* | 10714 break; 10715 } 10716 entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL); 10717 10718 /* 10719 * Check this I/O for LUN state changes that may 10720 * have happened while this command was blocked. 10721 * The LUN state may have been changed by a command --- 42 unchanged lines hidden (view full) --- 10764 int retval; 10765 uint32_t residx; 10766 10767 retval = 0; 10768 10769 mtx_assert(&lun->lun_lock, MA_OWNED); 10770 10771 /* |
10734 * If this shelf is a secondary shelf controller, we have to reject 10735 * any media access commands. | 10772 * If this shelf is a secondary shelf controller, we may have to 10773 * reject some commands disallowed by HA mode and link state. |
10736 */ | 10774 */ |
10737 if ((softc->flags & CTL_FLAG_ACTIVE_SHELF) == 0 && 10738 (entry->flags & CTL_CMD_FLAG_OK_ON_SECONDARY) == 0) { 10739 ctl_set_lun_standby(ctsio); 10740 retval = 1; 10741 goto bailout; | 10775 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { 10776 if (softc->ha_link == CTL_HA_LINK_OFFLINE && 10777 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 10778 ctl_set_lun_unavail(ctsio); 10779 retval = 1; 10780 goto bailout; 10781 } 10782 if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 && 10783 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 10784 ctl_set_lun_transit(ctsio); 10785 retval = 1; 10786 goto bailout; 10787 } 10788 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY && 10789 (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) { 10790 ctl_set_lun_standby(ctsio); 10791 retval = 1; 10792 goto bailout; 10793 } 10794 10795 /* The rest of checks are only done on executing side */ 10796 if (softc->ha_mode == CTL_HA_MODE_XFER) 10797 goto bailout; |
10742 } 10743 10744 if (entry->pattern & CTL_LUN_PAT_WRITE) { 10745 if (lun->be_lun && 10746 lun->be_lun->flags & CTL_LUN_FLAG_READONLY) { 10747 ctl_set_sense(ctsio, /*current_error*/ 1, 10748 /*sense_key*/ SSD_KEY_DATA_PROTECT, 10749 /*asc*/ 0x27, /*ascq*/ 0x01, SSD_ELEM_NONE); --- 10 unchanged lines hidden (view full) --- 10760 } 10761 } 10762 10763 /* 10764 * Check for a reservation conflict. If this command isn't allowed 10765 * even on reserved LUNs, and if this initiator isn't the one who 10766 * reserved us, reject the command with a reservation conflict. 10767 */ | 10798 } 10799 10800 if (entry->pattern & CTL_LUN_PAT_WRITE) { 10801 if (lun->be_lun && 10802 lun->be_lun->flags & CTL_LUN_FLAG_READONLY) { 10803 ctl_set_sense(ctsio, /*current_error*/ 1, 10804 /*sense_key*/ SSD_KEY_DATA_PROTECT, 10805 /*asc*/ 0x27, /*ascq*/ 0x01, SSD_ELEM_NONE); --- 10 unchanged lines hidden (view full) --- 10816 } 10817 } 10818 10819 /* 10820 * Check for a reservation conflict. If this command isn't allowed 10821 * even on reserved LUNs, and if this initiator isn't the one who 10822 * reserved us, reject the command with a reservation conflict. 10823 */ |
10768 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); | 10824 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); |
10769 if ((lun->flags & CTL_LUN_RESERVED) 10770 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 10771 if (lun->res_idx != residx) { 10772 ctl_set_reservation_conflict(ctsio); 10773 retval = 1; 10774 goto bailout; 10775 } 10776 } --- 13 unchanged lines hidden (view full) --- 10790 * conflict. 10791 */ 10792 if (ctl_get_prkey(lun, residx) == 0 10793 || (residx != lun->pr_res_idx && lun->res_type < 4)) { 10794 ctl_set_reservation_conflict(ctsio); 10795 retval = 1; 10796 goto bailout; 10797 } | 10825 if ((lun->flags & CTL_LUN_RESERVED) 10826 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 10827 if (lun->res_idx != residx) { 10828 ctl_set_reservation_conflict(ctsio); 10829 retval = 1; 10830 goto bailout; 10831 } 10832 } --- 13 unchanged lines hidden (view full) --- 10846 * conflict. 10847 */ 10848 if (ctl_get_prkey(lun, residx) == 0 10849 || (residx != lun->pr_res_idx && lun->res_type < 4)) { 10850 ctl_set_reservation_conflict(ctsio); 10851 retval = 1; 10852 goto bailout; 10853 } |
10798 | |
10799 } 10800 10801 if ((lun->flags & CTL_LUN_OFFLINE) | 10854 } 10855 10856 if ((lun->flags & CTL_LUN_OFFLINE) |
10802 && ((entry->flags & CTL_CMD_FLAG_OK_ON_OFFLINE) == 0)) { | 10857 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0)) { |
10803 ctl_set_lun_not_ready(ctsio); 10804 retval = 1; 10805 goto bailout; 10806 } 10807 | 10858 ctl_set_lun_not_ready(ctsio); 10859 retval = 1; 10860 goto bailout; 10861 } 10862 |
10808 /* 10809 * If the LUN is stopped, see if this particular command is allowed 10810 * for a stopped lun. Otherwise, reject it with 0x04,0x02. 10811 */ | |
10812 if ((lun->flags & CTL_LUN_STOPPED) 10813 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) { 10814 /* "Logical unit not ready, initializing cmd. required" */ 10815 ctl_set_lun_stopped(ctsio); 10816 retval = 1; 10817 goto bailout; 10818 } 10819 10820 if ((lun->flags & CTL_LUN_INOPERABLE) 10821 && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) { 10822 /* "Medium format corrupted" */ 10823 ctl_set_medium_format_corrupted(ctsio); 10824 retval = 1; 10825 goto bailout; 10826 } 10827 10828bailout: 10829 return (retval); | 10863 if ((lun->flags & CTL_LUN_STOPPED) 10864 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) { 10865 /* "Logical unit not ready, initializing cmd. required" */ 10866 ctl_set_lun_stopped(ctsio); 10867 retval = 1; 10868 goto bailout; 10869 } 10870 10871 if ((lun->flags & CTL_LUN_INOPERABLE) 10872 && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) { 10873 /* "Medium format corrupted" */ 10874 ctl_set_medium_format_corrupted(ctsio); 10875 retval = 1; 10876 goto bailout; 10877 } 10878 10879bailout: 10880 return (retval); |
10830 | |
10831} 10832 10833static void 10834ctl_failover_io(union ctl_io *io, int have_lock) 10835{ 10836 ctl_set_busy(&io->scsiio); 10837 ctl_done(io); 10838} 10839 | 10881} 10882 10883static void 10884ctl_failover_io(union ctl_io *io, int have_lock) 10885{ 10886 ctl_set_busy(&io->scsiio); 10887 ctl_done(io); 10888} 10889 |
10840#ifdef notyet | |
10841static void | 10890static void |
10842ctl_failover(void) | 10891ctl_failover_lun(struct ctl_lun *lun) |
10843{ | 10892{ |
10844 struct ctl_lun *lun; 10845 struct ctl_softc *softc; 10846 union ctl_io *next_io, *pending_io; 10847 union ctl_io *io; 10848 int lun_idx; | 10893 struct ctl_softc *softc = lun->ctl_softc; 10894 struct ctl_io_hdr *io, *next_io; |
10849 | 10895 |
10850 softc = control_softc; 10851 10852 mtx_lock(&softc->ctl_lock); 10853 /* 10854 * Remove any cmds from the other SC from the rtr queue. These 10855 * will obviously only be for LUNs for which we're the primary. 10856 * We can't send status or get/send data for these commands. 10857 * Since they haven't been executed yet, we can just remove them. 10858 * We'll either abort them or delete them below, depending on 10859 * which HA mode we're in. 10860 */ 10861#ifdef notyet 10862 mtx_lock(&softc->queue_lock); 10863 for (io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue); 10864 io != NULL; io = next_io) { 10865 next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links); 10866 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 10867 STAILQ_REMOVE(&softc->rtr_queue, &io->io_hdr, 10868 ctl_io_hdr, links); 10869 } 10870 mtx_unlock(&softc->queue_lock); 10871#endif 10872 10873 for (lun_idx=0; lun_idx < softc->num_luns; lun_idx++) { 10874 lun = softc->ctl_luns[lun_idx]; 10875 if (lun==NULL) 10876 continue; 10877 10878 /* 10879 * Processor LUNs are primary on both sides. 10880 * XXX will this always be true? 10881 */ 10882 if (lun->be_lun->lun_type == T_PROCESSOR) 10883 continue; 10884 10885 if ((lun->flags & CTL_LUN_PRIMARY_SC) 10886 && (softc->ha_mode == CTL_HA_MODE_SER_ONLY)) { 10887 printf("FAILOVER: primary lun %d\n", lun_idx); 10888 /* 10889 * Remove all commands from the other SC. First from the 10890 * blocked queue then from the ooa queue. Once we have 10891 * removed them. Call ctl_check_blocked to see if there 10892 * is anything that can run. 
10893 */ 10894 for (io = (union ctl_io *)TAILQ_FIRST( 10895 &lun->blocked_queue); io != NULL; io = next_io) { 10896 10897 next_io = (union ctl_io *)TAILQ_NEXT( 10898 &io->io_hdr, blocked_links); 10899 10900 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) { 10901 TAILQ_REMOVE(&lun->blocked_queue, 10902 &io->io_hdr,blocked_links); 10903 io->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 10904 TAILQ_REMOVE(&lun->ooa_queue, 10905 &io->io_hdr, ooa_links); 10906 10907 ctl_free_io(io); | 10896 CTL_DEBUG_PRINT(("FAILOVER for lun %ju\n", lun->lun)); 10897 if (softc->ha_mode == CTL_HA_MODE_XFER) { 10898 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 10899 /* We are master */ 10900 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 10901 if (io->flags & CTL_FLAG_IO_ACTIVE) { 10902 io->flags |= CTL_FLAG_ABORT; 10903 } else { /* This can be only due to DATAMOVE */ 10904 io->msg_type = CTL_MSG_DATAMOVE_DONE; 10905 io->flags |= CTL_FLAG_IO_ACTIVE; 10906 io->port_status = 31340; 10907 ctl_enqueue_isc((union ctl_io *)io); |
10908 } 10909 } | 10908 } 10909 } |
10910 10911 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 10912 io != NULL; io = next_io) { 10913 10914 next_io = (union ctl_io *)TAILQ_NEXT( 10915 &io->io_hdr, ooa_links); 10916 10917 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) { 10918 10919 TAILQ_REMOVE(&lun->ooa_queue, 10920 &io->io_hdr, 10921 ooa_links); 10922 10923 ctl_free_io(io); 10924 } 10925 } 10926 ctl_check_blocked(lun); 10927 } else if ((lun->flags & CTL_LUN_PRIMARY_SC) 10928 && (softc->ha_mode == CTL_HA_MODE_XFER)) { 10929 10930 printf("FAILOVER: primary lun %d\n", lun_idx); 10931 /* 10932 * Abort all commands from the other SC. We can't 10933 * send status back for them now. These should get 10934 * cleaned up when they are completed or come out 10935 * for a datamove operation. 10936 */ 10937 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 10938 io != NULL; io = next_io) { 10939 next_io = (union ctl_io *)TAILQ_NEXT( 10940 &io->io_hdr, ooa_links); 10941 10942 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 10943 io->io_hdr.flags |= CTL_FLAG_ABORT; 10944 } 10945 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0) 10946 && (softc->ha_mode == CTL_HA_MODE_XFER)) { 10947 10948 printf("FAILOVER: secondary lun %d\n", lun_idx); 10949 10950 lun->flags |= CTL_LUN_PRIMARY_SC; 10951 10952 /* 10953 * We send all I/O that was sent to this controller 10954 * and redirected to the other side back with 10955 * busy status, and have the initiator retry it. 10956 * Figuring out how much data has been transferred, 10957 * etc. and picking up where we left off would be 10958 * very tricky. 10959 * 10960 * XXX KDM need to remove I/O from the blocked 10961 * queue as well! 
10962 */ 10963 for (pending_io = (union ctl_io *)TAILQ_FIRST( 10964 &lun->ooa_queue); pending_io != NULL; 10965 pending_io = next_io) { 10966 10967 next_io = (union ctl_io *)TAILQ_NEXT( 10968 &pending_io->io_hdr, ooa_links); 10969 10970 pending_io->io_hdr.flags &= 10971 ~CTL_FLAG_SENT_2OTHER_SC; 10972 10973 if (pending_io->io_hdr.flags & 10974 CTL_FLAG_IO_ACTIVE) { 10975 pending_io->io_hdr.flags |= 10976 CTL_FLAG_FAILOVER; | 10910 /* We are slave */ 10911 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 10912 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 10913 if (io->flags & CTL_FLAG_IO_ACTIVE) { 10914 io->flags |= CTL_FLAG_FAILOVER; |
10977 } else { | 10915 } else { |
10978 ctl_set_busy(&pending_io->scsiio); 10979 ctl_done(pending_io); | 10916 ctl_set_busy(&((union ctl_io *)io)-> 10917 scsiio); 10918 ctl_done((union ctl_io *)io); |
10980 } 10981 } | 10919 } 10920 } |
10982 10983 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 10984 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0) 10985 && (softc->ha_mode == CTL_HA_MODE_SER_ONLY)) { 10986 printf("FAILOVER: secondary lun %d\n", lun_idx); 10987 /* 10988 * if the first io on the OOA is not on the RtR queue 10989 * add it. 10990 */ 10991 lun->flags |= CTL_LUN_PRIMARY_SC; 10992 10993 pending_io = (union ctl_io *)TAILQ_FIRST( 10994 &lun->ooa_queue); 10995 if (pending_io==NULL) { 10996 printf("Nothing on OOA queue\n"); 10997 continue; | 10921 } 10922 } else { /* SERIALIZE modes */ 10923 TAILQ_FOREACH_SAFE(io, &lun->blocked_queue, blocked_links, 10924 next_io) { 10925 /* We are master */ 10926 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 10927 TAILQ_REMOVE(&lun->blocked_queue, io, 10928 blocked_links); 10929 io->flags &= ~CTL_FLAG_BLOCKED; 10930 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); 10931 ctl_free_io((union ctl_io *)io); |
10998 } | 10932 } |
10999 11000 pending_io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11001 if ((pending_io->io_hdr.flags & 11002 CTL_FLAG_IS_WAS_ON_RTR) == 0) { 11003 pending_io->io_hdr.flags |= 11004 CTL_FLAG_IS_WAS_ON_RTR; 11005 ctl_enqueue_rtr(pending_io); | 10933 } 10934 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 10935 /* We are master */ 10936 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 10937 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); 10938 ctl_free_io((union ctl_io *)io); |
11006 } | 10939 } |
11007#if 0 11008 else 11009 { 11010 printf("Tag 0x%04x is running\n", 11011 pending_io->scsiio.tag_num); 11012 } 11013#endif 11014 11015 next_io = (union ctl_io *)TAILQ_NEXT( 11016 &pending_io->io_hdr, ooa_links); 11017 for (pending_io=next_io; pending_io != NULL; 11018 pending_io = next_io) { 11019 pending_io->io_hdr.flags &= 11020 ~CTL_FLAG_SENT_2OTHER_SC; 11021 next_io = (union ctl_io *)TAILQ_NEXT( 11022 &pending_io->io_hdr, ooa_links); 11023 if (pending_io->io_hdr.flags & 11024 CTL_FLAG_IS_WAS_ON_RTR) { 11025#if 0 11026 printf("Tag 0x%04x is running\n", 11027 pending_io->scsiio.tag_num); 11028#endif 11029 continue; | 10940 /* We are slave */ 10941 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 10942 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 10943 if (!(io->flags & CTL_FLAG_IO_ACTIVE)) { 10944 ctl_set_busy(&((union ctl_io *)io)-> 10945 scsiio); 10946 ctl_done((union ctl_io *)io); |
11030 } | 10947 } |
11031 11032 switch (ctl_check_ooa(lun, pending_io, 11033 (union ctl_io *)TAILQ_PREV( 11034 &pending_io->io_hdr, ctl_ooaq, 11035 ooa_links))) { 11036 11037 case CTL_ACTION_BLOCK: 11038 TAILQ_INSERT_TAIL(&lun->blocked_queue, 11039 &pending_io->io_hdr, 11040 blocked_links); 11041 pending_io->io_hdr.flags |= 11042 CTL_FLAG_BLOCKED; 11043 break; 11044 case CTL_ACTION_PASS: 11045 case CTL_ACTION_SKIP: 11046 pending_io->io_hdr.flags |= 11047 CTL_FLAG_IS_WAS_ON_RTR; 11048 ctl_enqueue_rtr(pending_io); 11049 break; 11050 case CTL_ACTION_OVERLAP: 11051 ctl_set_overlapped_cmd( 11052 (struct ctl_scsiio *)pending_io); 11053 ctl_done(pending_io); 11054 break; 11055 case CTL_ACTION_OVERLAP_TAG: 11056 ctl_set_overlapped_tag( 11057 (struct ctl_scsiio *)pending_io, 11058 pending_io->scsiio.tag_num & 0xff); 11059 ctl_done(pending_io); 11060 break; 11061 case CTL_ACTION_ERROR: 11062 default: 11063 ctl_set_internal_failure( 11064 (struct ctl_scsiio *)pending_io, 11065 0, // sks_valid 11066 0); //retry count 11067 ctl_done(pending_io); 11068 break; 11069 } | |
11070 } | 10948 } |
11071 11072 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 11073 } else { 11074 panic("Unhandled HA mode failover, LUN flags = %#x, " 11075 "ha_mode = #%x", lun->flags, softc->ha_mode); | |
11076 } | 10949 } |
10950 ctl_check_blocked(lun); |
|
11077 } | 10951 } |
11078 ctl_pause_rtr = 0; 11079 mtx_unlock(&softc->ctl_lock); | |
11080} | 10952} |
11081#endif | |
11082 11083static int 11084ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio) 11085{ 11086 struct ctl_lun *lun; 11087 const struct ctl_cmd_entry *entry; 11088 uint32_t initidx, targ_lun; 11089 int retval; --- 15 unchanged lines hidden (view full) --- 11105 mtx_unlock(&lun->lun_lock); 11106 lun = NULL; 11107 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL; 11108 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL; 11109 } else { 11110 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun; 11111 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = 11112 lun->be_lun; | 10953 10954static int 10955ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio) 10956{ 10957 struct ctl_lun *lun; 10958 const struct ctl_cmd_entry *entry; 10959 uint32_t initidx, targ_lun; 10960 int retval; --- 15 unchanged lines hidden (view full) --- 10976 mtx_unlock(&lun->lun_lock); 10977 lun = NULL; 10978 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL; 10979 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL; 10980 } else { 10981 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun; 10982 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = 10983 lun->be_lun; |
11113 if (lun->be_lun->lun_type == T_PROCESSOR) { 11114 ctsio->io_hdr.flags |= CTL_FLAG_CONTROL_DEV; 11115 } | |
11116 11117 /* 11118 * Every I/O goes into the OOA queue for a 11119 * particular LUN, and stays there until completion. 11120 */ 11121#ifdef CTL_TIME_IO 11122 if (TAILQ_EMPTY(&lun->ooa_queue)) { 11123 lun->idle_time += getsbinuptime() - --- 118 unchanged lines hidden (view full) --- 11242 * XXX CHD this is where we want to send IO to other side if 11243 * this LUN is secondary on this SC. We will need to make a copy 11244 * of the IO and flag the IO on this side as SENT_2OTHER and the flag 11245 * the copy we send as FROM_OTHER. 11246 * We also need to stuff the address of the original IO so we can 11247 * find it easily. Something similar will need be done on the other 11248 * side so when we are done we can find the copy. 11249 */ | 10984 10985 /* 10986 * Every I/O goes into the OOA queue for a 10987 * particular LUN, and stays there until completion. 10988 */ 10989#ifdef CTL_TIME_IO 10990 if (TAILQ_EMPTY(&lun->ooa_queue)) { 10991 lun->idle_time += getsbinuptime() - --- 118 unchanged lines hidden (view full) --- 11110 * XXX CHD this is where we want to send IO to other side if 11111 * this LUN is secondary on this SC. We will need to make a copy 11112 * of the IO and flag the IO on this side as SENT_2OTHER and the flag 11113 * the copy we send as FROM_OTHER. 11114 * We also need to stuff the address of the original IO so we can 11115 * find it easily. Something similar will need be done on the other 11116 * side so when we are done we can find the copy. 11117 */ |
11250 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { | 11118 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 11119 (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0) { |
11251 union ctl_ha_msg msg_info; 11252 int isc_retval; 11253 11254 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; | 11120 union ctl_ha_msg msg_info; 11121 int isc_retval; 11122 11123 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; |
11124 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11125 mtx_unlock(&lun->lun_lock); |
|
11255 11256 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 11257 msg_info.hdr.original_sc = (union ctl_io *)ctsio; | 11126 11127 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 11128 msg_info.hdr.original_sc = (union ctl_io *)ctsio; |
11258#if 0 11259 printf("1. ctsio %p\n", ctsio); 11260#endif | |
11261 msg_info.hdr.serializing_sc = NULL; 11262 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 11263 msg_info.scsi.tag_num = ctsio->tag_num; 11264 msg_info.scsi.tag_type = ctsio->tag_type; | 11129 msg_info.hdr.serializing_sc = NULL; 11130 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 11131 msg_info.scsi.tag_num = ctsio->tag_num; 11132 msg_info.scsi.tag_type = ctsio->tag_type; |
11133 msg_info.scsi.cdb_len = ctsio->cdb_len; |
|
11265 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 11266 | 11134 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 11135 |
11267 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11268 11269 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11270 (void *)&msg_info, sizeof(msg_info), 0)) > 11271 CTL_HA_STATUS_SUCCESS) { 11272 printf("CTL:precheck, ctl_ha_msg_send returned %d\n", 11273 isc_retval); 11274 printf("CTL:opcode is %x\n", ctsio->cdb[0]); 11275 } else { 11276#if 0 11277 printf("CTL:Precheck sent msg, opcode is %x\n",opcode); 11278#endif | 11136 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11137 sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data), 11138 M_WAITOK)) > CTL_HA_STATUS_SUCCESS) { 11139 ctl_set_busy(ctsio); 11140 ctl_done((union ctl_io *)ctsio); 11141 return (retval); |
11279 } | 11142 } |
11280 11281 /* 11282 * XXX KDM this I/O is off the incoming queue, but hasn't 11283 * been inserted on any other queue. We may need to come 11284 * up with a holding queue while we wait for serialization 11285 * so that we have an idea of what we're waiting for from 11286 * the other side. 11287 */ 11288 mtx_unlock(&lun->lun_lock); | |
11289 return (retval); 11290 } 11291 11292 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 11293 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, 11294 ctl_ooaq, ooa_links))) { 11295 case CTL_ACTION_BLOCK: 11296 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; --- 153 unchanged lines hidden (view full) --- 11450 ctl_ua_type ua_type) 11451{ 11452 struct ctl_lun *lun; 11453 int retval; 11454 11455 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11456 union ctl_ha_msg msg_info; 11457 | 11143 return (retval); 11144 } 11145 11146 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 11147 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, 11148 ctl_ooaq, ooa_links))) { 11149 case CTL_ACTION_BLOCK: 11150 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; --- 153 unchanged lines hidden (view full) --- 11304 ctl_ua_type ua_type) 11305{ 11306 struct ctl_lun *lun; 11307 int retval; 11308 11309 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11310 union ctl_ha_msg msg_info; 11311 |
11458 io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; | |
11459 msg_info.hdr.nexus = io->io_hdr.nexus; 11460 if (ua_type==CTL_UA_TARG_RESET) 11461 msg_info.task.task_action = CTL_TASK_TARGET_RESET; 11462 else 11463 msg_info.task.task_action = CTL_TASK_BUS_RESET; 11464 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11465 msg_info.hdr.original_sc = NULL; 11466 msg_info.hdr.serializing_sc = NULL; | 11312 msg_info.hdr.nexus = io->io_hdr.nexus; 11313 if (ua_type==CTL_UA_TARG_RESET) 11314 msg_info.task.task_action = CTL_TASK_TARGET_RESET; 11315 else 11316 msg_info.task.task_action = CTL_TASK_BUS_RESET; 11317 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11318 msg_info.hdr.original_sc = NULL; 11319 msg_info.hdr.serializing_sc = NULL; |
11467 if (CTL_HA_STATUS_SUCCESS != ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11468 (void *)&msg_info, sizeof(msg_info), 0)) { 11469 } | 11320 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11321 sizeof(msg_info.task), M_WAITOK); |
11470 } 11471 retval = 0; 11472 11473 mtx_lock(&softc->ctl_lock); 11474 STAILQ_FOREACH(lun, &softc->lun_list, links) 11475 retval += ctl_lun_reset(lun, io, ua_type); 11476 mtx_unlock(&softc->ctl_lock); 11477 --- 101 unchanged lines hidden (view full) --- 11579 11580 msg_info.hdr.nexus = xio->io_hdr.nexus; 11581 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11582 msg_info.task.tag_num = xio->scsiio.tag_num; 11583 msg_info.task.tag_type = xio->scsiio.tag_type; 11584 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11585 msg_info.hdr.original_sc = NULL; 11586 msg_info.hdr.serializing_sc = NULL; | 11322 } 11323 retval = 0; 11324 11325 mtx_lock(&softc->ctl_lock); 11326 STAILQ_FOREACH(lun, &softc->lun_list, links) 11327 retval += ctl_lun_reset(lun, io, ua_type); 11328 mtx_unlock(&softc->ctl_lock); 11329 --- 101 unchanged lines hidden (view full) --- 11431 11432 msg_info.hdr.nexus = xio->io_hdr.nexus; 11433 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11434 msg_info.task.tag_num = xio->scsiio.tag_num; 11435 msg_info.task.tag_type = xio->scsiio.tag_type; 11436 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11437 msg_info.hdr.original_sc = NULL; 11438 msg_info.hdr.serializing_sc = NULL; |
11587 ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11588 (void *)&msg_info, sizeof(msg_info), 0); | 11439 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11440 sizeof(msg_info.task), M_NOWAIT); |
11589 } 11590 } 11591 } 11592} 11593 11594static int 11595ctl_abort_task_set(union ctl_io *io) 11596{ --- 27 unchanged lines hidden (view full) --- 11624 return (0); 11625} 11626 11627static int 11628ctl_i_t_nexus_reset(union ctl_io *io) 11629{ 11630 struct ctl_softc *softc = control_softc; 11631 struct ctl_lun *lun; | 11441 } 11442 } 11443 } 11444} 11445 11446static int 11447ctl_abort_task_set(union ctl_io *io) 11448{ --- 27 unchanged lines hidden (view full) --- 11476 return (0); 11477} 11478 11479static int 11480ctl_i_t_nexus_reset(union ctl_io *io) 11481{ 11482 struct ctl_softc *softc = control_softc; 11483 struct ctl_lun *lun; |
11632 uint32_t initidx, residx; | 11484 uint32_t initidx; |
11633 11634 initidx = ctl_get_initindex(&io->io_hdr.nexus); | 11485 11486 initidx = ctl_get_initindex(&io->io_hdr.nexus); |
11635 residx = ctl_get_resindex(&io->io_hdr.nexus); | |
11636 mtx_lock(&softc->ctl_lock); 11637 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11638 mtx_lock(&lun->lun_lock); 11639 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11640 io->io_hdr.nexus.initid, 11641 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11642#ifdef CTL_WITH_CA 11643 ctl_clear_mask(lun->have_ca, initidx); 11644#endif | 11487 mtx_lock(&softc->ctl_lock); 11488 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11489 mtx_lock(&lun->lun_lock); 11490 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11491 io->io_hdr.nexus.initid, 11492 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11493#ifdef CTL_WITH_CA 11494 ctl_clear_mask(lun->have_ca, initidx); 11495#endif |
11645 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) | 11496 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx)) |
11646 lun->flags &= ~CTL_LUN_RESERVED; 11647 ctl_est_ua(lun, initidx, CTL_UA_I_T_NEXUS_LOSS); 11648 mtx_unlock(&lun->lun_lock); 11649 } 11650 mtx_unlock(&softc->ctl_lock); 11651 return (0); 11652} 11653 --- 88 unchanged lines hidden (view full) --- 11742 */ 11743 if (xio->scsiio.tag_num == io->taskio.tag_num) { 11744 xio->io_hdr.flags |= CTL_FLAG_ABORT; 11745 found = 1; 11746 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 && 11747 !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11748 union ctl_ha_msg msg_info; 11749 | 11497 lun->flags &= ~CTL_LUN_RESERVED; 11498 ctl_est_ua(lun, initidx, CTL_UA_I_T_NEXUS_LOSS); 11499 mtx_unlock(&lun->lun_lock); 11500 } 11501 mtx_unlock(&softc->ctl_lock); 11502 return (0); 11503} 11504 --- 88 unchanged lines hidden (view full) --- 11593 */ 11594 if (xio->scsiio.tag_num == io->taskio.tag_num) { 11595 xio->io_hdr.flags |= CTL_FLAG_ABORT; 11596 found = 1; 11597 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 && 11598 !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11599 union ctl_ha_msg msg_info; 11600 |
11750 io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; | |
11751 msg_info.hdr.nexus = io->io_hdr.nexus; 11752 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11753 msg_info.task.tag_num = io->taskio.tag_num; 11754 msg_info.task.tag_type = io->taskio.tag_type; 11755 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11756 msg_info.hdr.original_sc = NULL; 11757 msg_info.hdr.serializing_sc = NULL; 11758#if 0 11759 printf("Sent Abort to other side\n"); 11760#endif | 11601 msg_info.hdr.nexus = io->io_hdr.nexus; 11602 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11603 msg_info.task.tag_num = io->taskio.tag_num; 11604 msg_info.task.tag_type = io->taskio.tag_type; 11605 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11606 msg_info.hdr.original_sc = NULL; 11607 msg_info.hdr.serializing_sc = NULL; 11608#if 0 11609 printf("Sent Abort to other side\n"); 11610#endif |
11761 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11762 (void *)&msg_info, sizeof(msg_info), 0) != 11763 CTL_HA_STATUS_SUCCESS) { 11764 } | 11611 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11612 sizeof(msg_info.task), M_NOWAIT); |
11765 } 11766#if 0 11767 printf("ctl_abort_task: found I/O to abort\n"); 11768#endif 11769 } 11770 } 11771 mtx_unlock(&lun->lun_lock); 11772 --- 77 unchanged lines hidden (view full) --- 11850 if ((targ_lun < CTL_MAX_LUNS) 11851 && (softc->ctl_luns[targ_lun] != NULL)) 11852 lun = softc->ctl_luns[targ_lun]; 11853 else { 11854 mtx_unlock(&softc->ctl_lock); 11855 retval = 1; 11856 break; 11857 } | 11613 } 11614#if 0 11615 printf("ctl_abort_task: found I/O to abort\n"); 11616#endif 11617 } 11618 } 11619 mtx_unlock(&lun->lun_lock); 11620 --- 77 unchanged lines hidden (view full) --- 11698 if ((targ_lun < CTL_MAX_LUNS) 11699 && (softc->ctl_luns[targ_lun] != NULL)) 11700 lun = softc->ctl_luns[targ_lun]; 11701 else { 11702 mtx_unlock(&softc->ctl_lock); 11703 retval = 1; 11704 break; 11705 } |
11706 retval = ctl_lun_reset(lun, io, CTL_UA_LUN_RESET); 11707 mtx_unlock(&softc->ctl_lock); |
|
11858 | 11708 |
11859 if (!(io->io_hdr.flags & 11860 CTL_FLAG_FROM_OTHER_SC)) { | 11709 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) { |
11861 union ctl_ha_msg msg_info; 11862 | 11710 union ctl_ha_msg msg_info; 11711 |
11863 io->io_hdr.flags |= 11864 CTL_FLAG_SENT_2OTHER_SC; 11865 msg_info.hdr.msg_type = 11866 CTL_MSG_MANAGE_TASKS; | 11712 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; |
11867 msg_info.hdr.nexus = io->io_hdr.nexus; | 11713 msg_info.hdr.nexus = io->io_hdr.nexus; |
11868 msg_info.task.task_action = 11869 CTL_TASK_LUN_RESET; | 11714 msg_info.task.task_action = CTL_TASK_LUN_RESET; |
11870 msg_info.hdr.original_sc = NULL; 11871 msg_info.hdr.serializing_sc = NULL; | 11715 msg_info.hdr.original_sc = NULL; 11716 msg_info.hdr.serializing_sc = NULL; |
11872 if (CTL_HA_STATUS_SUCCESS != 11873 ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11874 (void *)&msg_info, 11875 sizeof(msg_info), 0)) { 11876 } | 11717 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11718 sizeof(msg_info.task), M_WAITOK); |
11877 } | 11719 } |
11878 11879 retval = ctl_lun_reset(lun, io, 11880 CTL_UA_LUN_RESET); 11881 mtx_unlock(&softc->ctl_lock); | |
11882 break; 11883 } 11884 case CTL_TASK_TARGET_RESET: 11885 retval = ctl_target_reset(softc, io, CTL_UA_TARG_RESET); 11886 break; 11887 case CTL_TASK_BUS_RESET: 11888 retval = ctl_bus_reset(softc, io); 11889 break; --- 81 unchanged lines hidden (view full) --- 11971 free_io = 0; 11972 ctl_datamove_remote(io); 11973 break; 11974 case CTL_MSG_DATAMOVE_DONE: 11975 /* Only used in XFER mode */ 11976 free_io = 0; 11977 io->scsiio.be_move_done(io); 11978 break; | 11720 break; 11721 } 11722 case CTL_TASK_TARGET_RESET: 11723 retval = ctl_target_reset(softc, io, CTL_UA_TARG_RESET); 11724 break; 11725 case CTL_TASK_BUS_RESET: 11726 retval = ctl_bus_reset(softc, io); 11727 break; --- 81 unchanged lines hidden (view full) --- 11809 free_io = 0; 11810 ctl_datamove_remote(io); 11811 break; 11812 case CTL_MSG_DATAMOVE_DONE: 11813 /* Only used in XFER mode */ 11814 free_io = 0; 11815 io->scsiio.be_move_done(io); 11816 break; |
11817 case CTL_MSG_FAILOVER: 11818 mtx_lock(&lun->lun_lock); 11819 ctl_failover_lun(lun); 11820 mtx_unlock(&lun->lun_lock); 11821 free_io = 1; 11822 break; |
|
11979 default: 11980 free_io = 1; 11981 printf("%s: Invalid message type %d\n", 11982 __func__, io->io_hdr.msg_type); 11983 break; 11984 } 11985 if (free_io) 11986 ctl_free_io(io); --- 185 unchanged lines hidden (view full) --- 12172 (intmax_t)time_uptime - io->io_hdr.start_time); 12173 sbuf_finish(&sb); 12174 printf("%s", sbuf_data(&sb)); 12175 } 12176#endif /* CTL_TIME_IO */ 12177 12178#ifdef CTL_IO_DELAY 12179 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { | 11823 default: 11824 free_io = 1; 11825 printf("%s: Invalid message type %d\n", 11826 __func__, io->io_hdr.msg_type); 11827 break; 11828 } 11829 if (free_io) 11830 ctl_free_io(io); --- 185 unchanged lines hidden (view full) --- 12016 (intmax_t)time_uptime - io->io_hdr.start_time); 12017 sbuf_finish(&sb); 12018 printf("%s", sbuf_data(&sb)); 12019 } 12020#endif /* CTL_TIME_IO */ 12021 12022#ifdef CTL_IO_DELAY 12023 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { |
12180 struct ctl_lun *lun; 12181 12182 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12183 | |
12184 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12185 } else { 12186 struct ctl_lun *lun; 12187 12188 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12189 if ((lun != NULL) 12190 && (lun->delay_info.datamove_delay > 0)) { 12191 --- 60 unchanged lines hidden (view full) --- 12252 * pass by reference, only by value between controllers. 12253 * So we can't pass a pointer to the S/G list, only as many 12254 * S/G entries as we can fit in here. If it's possible for 12255 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, 12256 * then we need to break this up into multiple transfers. 12257 */ 12258 if (io->scsiio.kern_sg_entries == 0) { 12259 msg.dt.kern_sg_entries = 1; | 12024 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12025 } else { 12026 struct ctl_lun *lun; 12027 12028 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12029 if ((lun != NULL) 12030 && (lun->delay_info.datamove_delay > 0)) { 12031 --- 60 unchanged lines hidden (view full) --- 12092 * pass by reference, only by value between controllers. 12093 * So we can't pass a pointer to the S/G list, only as many 12094 * S/G entries as we can fit in here. If it's possible for 12095 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, 12096 * then we need to break this up into multiple transfers. 12097 */ 12098 if (io->scsiio.kern_sg_entries == 0) { 12099 msg.dt.kern_sg_entries = 1; |
12100#if 0 |
|
12260 /* | 12101 /* |
12261 * If this is in cached memory, flush the cache 12262 * before we send the DMA request to the other 12263 * controller. We want to do this in either the 12264 * read or the write case. The read case is 12265 * straightforward. In the write case, we want to 12266 * make sure nothing is in the local cache that 12267 * could overwrite the DMAed data. 12268 */ 12269 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 12270 /* 12271 * XXX KDM use bus_dmamap_sync() here. 12272 */ 12273 } 12274 12275 /* | |
12276 * Convert to a physical address if this is a 12277 * virtual address. 12278 */ 12279 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 12280 msg.dt.sg_list[0].addr = 12281 io->scsiio.kern_data_ptr; 12282 } else { 12283 /* 12284 * XXX KDM use busdma here! 12285 */ | 12102 * Convert to a physical address if this is a 12103 * virtual address. 12104 */ 12105 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 12106 msg.dt.sg_list[0].addr = 12107 io->scsiio.kern_data_ptr; 12108 } else { 12109 /* 12110 * XXX KDM use busdma here! 12111 */ |
12286#if 0 | |
12287 msg.dt.sg_list[0].addr = (void *) 12288 vtophys(io->scsiio.kern_data_ptr); | 12112 msg.dt.sg_list[0].addr = (void *) 12113 vtophys(io->scsiio.kern_data_ptr); |
12289#endif | |
12290 } | 12114 } |
12115#else 12116 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 12117 ("HA does not support BUS_ADDR")); 12118 msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; 12119#endif |
|
12291 12292 msg.dt.sg_list[0].len = io->scsiio.kern_data_len; 12293 do_sg_copy = 0; 12294 } else { | 12120 12121 msg.dt.sg_list[0].len = io->scsiio.kern_data_len; 12122 do_sg_copy = 0; 12123 } else { |
12295 struct ctl_sg_entry *sgl; 12296 12297 do_sg_copy = 1; | |
12298 msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; | 12124 msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; |
12299 sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; 12300 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 12301 /* 12302 * XXX KDM use bus_dmamap_sync() here. 12303 */ 12304 } | 12125 do_sg_copy = 1; |
12305 } 12306 12307 msg.dt.kern_data_len = io->scsiio.kern_data_len; 12308 msg.dt.kern_total_len = io->scsiio.kern_total_len; 12309 msg.dt.kern_data_resid = io->scsiio.kern_data_resid; 12310 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; 12311 msg.dt.sg_sequence = 0; 12312 --- 22 unchanged lines hidden (view full) --- 12335 * case is straightforward. In the write 12336 * case, we want to make sure nothing is 12337 * in the local cache that could overwrite 12338 * the DMAed data. 12339 */ 12340 12341 for (i = sg_entries_sent, j = 0; 12342 i < msg.dt.cur_sg_entries; i++, j++) { | 12126 } 12127 12128 msg.dt.kern_data_len = io->scsiio.kern_data_len; 12129 msg.dt.kern_total_len = io->scsiio.kern_total_len; 12130 msg.dt.kern_data_resid = io->scsiio.kern_data_resid; 12131 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; 12132 msg.dt.sg_sequence = 0; 12133 --- 22 unchanged lines hidden (view full) --- 12156 * case is straightforward. In the write 12157 * case, we want to make sure nothing is 12158 * in the local cache that could overwrite 12159 * the DMAed data. 12160 */ 12161 12162 for (i = sg_entries_sent, j = 0; 12163 i < msg.dt.cur_sg_entries; i++, j++) { |
12164#if 0 |
|
12343 if ((io->io_hdr.flags & | 12165 if ((io->io_hdr.flags & |
12344 CTL_FLAG_NO_DATASYNC) == 0) { 12345 /* 12346 * XXX KDM use bus_dmamap_sync() 12347 */ 12348 } 12349 if ((io->io_hdr.flags & | |
12350 CTL_FLAG_BUS_ADDR) == 0) { 12351 /* 12352 * XXX KDM use busdma. 12353 */ | 12166 CTL_FLAG_BUS_ADDR) == 0) { 12167 /* 12168 * XXX KDM use busdma. 12169 */ |
12354#if 0 | |
12355 msg.dt.sg_list[j].addr =(void *) 12356 vtophys(sgl[i].addr); | 12170 msg.dt.sg_list[j].addr =(void *) 12171 vtophys(sgl[i].addr); |
12357#endif | |
12358 } else { 12359 msg.dt.sg_list[j].addr = 12360 sgl[i].addr; 12361 } | 12172 } else { 12173 msg.dt.sg_list[j].addr = 12174 sgl[i].addr; 12175 } |
12176#else 12177 KASSERT((io->io_hdr.flags & 12178 CTL_FLAG_BUS_ADDR) == 0, 12179 ("HA does not support BUS_ADDR")); 12180 msg.dt.sg_list[j].addr = sgl[i].addr; 12181#endif |
|
12362 msg.dt.sg_list[j].len = sgl[i].len; 12363 } 12364 } 12365 12366 sg_entries_sent += msg.dt.cur_sg_entries; 12367 if (sg_entries_sent >= msg.dt.kern_sg_entries) 12368 msg.dt.sg_last = 1; 12369 else 12370 msg.dt.sg_last = 0; 12371 | 12182 msg.dt.sg_list[j].len = sgl[i].len; 12183 } 12184 } 12185 12186 sg_entries_sent += msg.dt.cur_sg_entries; 12187 if (sg_entries_sent >= msg.dt.kern_sg_entries) 12188 msg.dt.sg_last = 1; 12189 else 12190 msg.dt.sg_last = 0; 12191 |
12372 /* 12373 * XXX KDM drop and reacquire the lock here? 12374 */ | |
12375 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, | 12192 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, |
12376 sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) { 12377 /* 12378 * XXX do something here. 12379 */ | 12193 sizeof(msg.dt) - sizeof(msg.dt.sg_list) + 12194 sizeof(struct ctl_sg_entry)*msg.dt.cur_sg_entries, 12195 M_WAITOK) > CTL_HA_STATUS_SUCCESS) { 12196 io->io_hdr.port_status = 31341; 12197 io->scsiio.be_move_done(io); 12198 return; |
12380 } 12381 12382 msg.dt.sent_sg_entries = sg_entries_sent; 12383 } 12384 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; | 12199 } 12200 12201 msg.dt.sent_sg_entries = sg_entries_sent; 12202 } 12203 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; |
12385 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) 12386 ctl_failover_io(io, /*have_lock*/ 0); 12387 | |
12388 } else { 12389 12390 /* 12391 * Lookup the fe_datamove() function for this particular 12392 * front end. 12393 */ | 12204 } else { 12205 12206 /* 12207 * Lookup the fe_datamove() function for this particular 12208 * front end. 12209 */ |
12394 fe_datamove = 12395 control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; | 12210 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove; |
12396 12397 fe_datamove(io); 12398 } 12399} 12400 12401static void 12402ctl_send_datamove_done(union ctl_io *io, int have_lock) 12403{ 12404 union ctl_ha_msg msg; | 12211 12212 fe_datamove(io); 12213 } 12214} 12215 12216static void 12217ctl_send_datamove_done(union ctl_io *io, int have_lock) 12218{ 12219 union ctl_ha_msg msg; |
12405 int isc_status; | |
12406 12407 memset(&msg, 0, sizeof(msg)); 12408 12409 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 12410 msg.hdr.original_sc = io; 12411 msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 12412 msg.hdr.nexus = io->io_hdr.nexus; 12413 msg.hdr.status = io->io_hdr.status; 12414 msg.scsi.tag_num = io->scsiio.tag_num; 12415 msg.scsi.tag_type = io->scsiio.tag_type; 12416 msg.scsi.scsi_status = io->scsiio.scsi_status; 12417 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, | 12220 12221 memset(&msg, 0, sizeof(msg)); 12222 12223 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 12224 msg.hdr.original_sc = io; 12225 msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 12226 msg.hdr.nexus = io->io_hdr.nexus; 12227 msg.hdr.status = io->io_hdr.status; 12228 msg.scsi.tag_num = io->scsiio.tag_num; 12229 msg.scsi.tag_type = io->scsiio.tag_type; 12230 msg.scsi.scsi_status = io->scsiio.scsi_status; 12231 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, |
12418 sizeof(io->scsiio.sense_data)); | 12232 io->scsiio.sense_len); |
12419 msg.scsi.sense_len = io->scsiio.sense_len; 12420 msg.scsi.sense_residual = io->scsiio.sense_residual; 12421 msg.scsi.fetd_status = io->io_hdr.port_status; 12422 msg.scsi.residual = io->scsiio.residual; 12423 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12424 12425 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12426 ctl_failover_io(io, /*have_lock*/ have_lock); 12427 return; 12428 } 12429 | 12233 msg.scsi.sense_len = io->scsiio.sense_len; 12234 msg.scsi.sense_residual = io->scsiio.sense_residual; 12235 msg.scsi.fetd_status = io->io_hdr.port_status; 12236 msg.scsi.residual = io->scsiio.residual; 12237 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12238 12239 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12240 ctl_failover_io(io, /*have_lock*/ have_lock); 12241 return; 12242 } 12243 |
12430 isc_status = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0); 12431 if (isc_status > CTL_HA_STATUS_SUCCESS) { 12432 /* XXX do something if this fails */ 12433 } 12434 | 12244 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12245 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 12246 msg.scsi.sense_len, M_WAITOK); |
12435} 12436 12437/* 12438 * The DMA to the remote side is done, now we need to tell the other side 12439 * we're done so it can continue with its data movement. 12440 */ 12441static void 12442ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) 12443{ 12444 union ctl_io *io; | 12247} 12248 12249/* 12250 * The DMA to the remote side is done, now we need to tell the other side 12251 * we're done so it can continue with its data movement. 12252 */ 12253static void 12254ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) 12255{ 12256 union ctl_io *io; |
12257 int i; |
|
12445 12446 io = rq->context; 12447 12448 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12449 printf("%s: ISC DMA write failed with error %d", __func__, 12450 rq->ret); 12451 ctl_set_internal_failure(&io->scsiio, 12452 /*sks_valid*/ 1, 12453 /*retry_count*/ rq->ret); 12454 } 12455 12456 ctl_dt_req_free(rq); 12457 | 12258 12259 io = rq->context; 12260 12261 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12262 printf("%s: ISC DMA write failed with error %d", __func__, 12263 rq->ret); 12264 ctl_set_internal_failure(&io->scsiio, 12265 /*sks_valid*/ 1, 12266 /*retry_count*/ rq->ret); 12267 } 12268 12269 ctl_dt_req_free(rq); 12270 |
12271 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12272 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12273 free(io->io_hdr.remote_sglist, M_CTL); 12274 io->io_hdr.remote_sglist = NULL; 12275 io->io_hdr.local_sglist = NULL; 12276 |
|
12458 /* | 12277 /* |
12459 * In this case, we had to malloc the memory locally. Free it. 12460 */ 12461 if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) { 12462 int i; 12463 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12464 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12465 } 12466 /* | |
12467 * The data is in local and remote memory, so now we need to send 12468 * status (good or back) back to the other side. 12469 */ 12470 ctl_send_datamove_done(io, /*have_lock*/ 0); 12471} 12472 12473/* 12474 * We've moved the data from the host/controller into local memory. Now we --- 32 unchanged lines hidden (view full) --- 12507 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12508 12509 /* 12510 * Use a custom move done callback, since we need to send completion 12511 * back to the other controller, not to the backend on this side. 12512 */ 12513 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 12514 | 12278 * The data is in local and remote memory, so now we need to send 12279 * status (good or back) back to the other side. 12280 */ 12281 ctl_send_datamove_done(io, /*have_lock*/ 0); 12282} 12283 12284/* 12285 * We've moved the data from the host/controller into local memory. Now we --- 32 unchanged lines hidden (view full) --- 12318 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12319 12320 /* 12321 * Use a custom move done callback, since we need to send completion 12322 * back to the other controller, not to the backend on this side. 12323 */ 12324 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 12325 |
12515 fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; | 12326 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove; |
12516 12517 fe_datamove(io); 12518 12519 return; 12520 12521} 12522 12523static int 12524ctl_datamove_remote_dm_read_cb(union ctl_io *io) 12525{ 12526#if 0 12527 char str[256]; 12528 char path_str[64]; 12529 struct sbuf sb; 12530#endif | 12327 12328 fe_datamove(io); 12329 12330 return; 12331 12332} 12333 12334static int 12335ctl_datamove_remote_dm_read_cb(union ctl_io *io) 12336{ 12337#if 0 12338 char str[256]; 12339 char path_str[64]; 12340 struct sbuf sb; 12341#endif |
12342 int i; |
|
12531 | 12343 |
12532 /* 12533 * In this case, we had to malloc the memory locally. Free it. 12534 */ 12535 if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) { 12536 int i; 12537 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12538 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12539 } | 12344 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12345 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12346 free(io->io_hdr.remote_sglist, M_CTL); 12347 io->io_hdr.remote_sglist = NULL; 12348 io->io_hdr.local_sglist = NULL; |
12540 12541#if 0 12542 scsi_path_string(io, path_str, sizeof(path_str)); 12543 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12544 sbuf_cat(&sb, path_str); 12545 scsi_command_string(&io->scsiio, NULL, &sb); 12546 sbuf_printf(&sb, "\n"); 12547 sbuf_cat(&sb, path_str); --- 20 unchanged lines hidden (view full) --- 12568ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 12569{ 12570 union ctl_io *io; 12571 void (*fe_datamove)(union ctl_io *io); 12572 12573 io = rq->context; 12574 12575 if (rq->ret != CTL_HA_STATUS_SUCCESS) { | 12349 12350#if 0 12351 scsi_path_string(io, path_str, sizeof(path_str)); 12352 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12353 sbuf_cat(&sb, path_str); 12354 scsi_command_string(&io->scsiio, NULL, &sb); 12355 sbuf_printf(&sb, "\n"); 12356 sbuf_cat(&sb, path_str); --- 20 unchanged lines hidden (view full) --- 12377ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 12378{ 12379 union ctl_io *io; 12380 void (*fe_datamove)(union ctl_io *io); 12381 12382 io = rq->context; 12383 12384 if (rq->ret != CTL_HA_STATUS_SUCCESS) { |
12576 printf("%s: ISC DMA read failed with error %d", __func__, | 12385 printf("%s: ISC DMA read failed with error %d\n", __func__, |
12577 rq->ret); 12578 ctl_set_internal_failure(&io->scsiio, 12579 /*sks_valid*/ 1, 12580 /*retry_count*/ rq->ret); 12581 } 12582 12583 ctl_dt_req_free(rq); 12584 12585 /* Switch the pointer over so the FETD knows what to do */ 12586 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12587 12588 /* 12589 * Use a custom move done callback, since we need to send completion 12590 * back to the other controller, not to the backend on this side. 12591 */ 12592 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 12593 12594 /* XXX KDM add checks like the ones in ctl_datamove? */ 12595 | 12386 rq->ret); 12387 ctl_set_internal_failure(&io->scsiio, 12388 /*sks_valid*/ 1, 12389 /*retry_count*/ rq->ret); 12390 } 12391 12392 ctl_dt_req_free(rq); 12393 12394 /* Switch the pointer over so the FETD knows what to do */ 12395 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12396 12397 /* 12398 * Use a custom move done callback, since we need to send completion 12399 * back to the other controller, not to the backend on this side. 12400 */ 12401 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 12402 12403 /* XXX KDM add checks like the ones in ctl_datamove? */ 12404 |
12596 fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; | 12405 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove; |
12597 12598 fe_datamove(io); 12599} 12600 12601static int 12602ctl_datamove_remote_sgl_setup(union ctl_io *io) 12603{ 12604 struct ctl_sg_entry *local_sglist, *remote_sglist; | 12406 12407 fe_datamove(io); 12408} 12409 12410static int 12411ctl_datamove_remote_sgl_setup(union ctl_io *io) 12412{ 12413 struct ctl_sg_entry *local_sglist, *remote_sglist; |
12605 struct ctl_sg_entry *local_dma_sglist, *remote_dma_sglist; | |
12606 struct ctl_softc *softc; | 12414 struct ctl_softc *softc; |
12415 uint32_t len_to_go; |
|
12607 int retval; 12608 int i; 12609 12610 retval = 0; 12611 softc = control_softc; | 12416 int retval; 12417 int i; 12418 12419 retval = 0; 12420 softc = control_softc; |
12612 | |
12613 local_sglist = io->io_hdr.local_sglist; | 12421 local_sglist = io->io_hdr.local_sglist; |
12614 local_dma_sglist = io->io_hdr.local_dma_sglist; | |
12615 remote_sglist = io->io_hdr.remote_sglist; | 12422 remote_sglist = io->io_hdr.remote_sglist; |
12616 remote_dma_sglist = io->io_hdr.remote_dma_sglist; | 12423 len_to_go = io->scsiio.kern_data_len; |
12617 | 12424 |
12618 if (io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) { 12619 for (i = 0; i < io->scsiio.kern_sg_entries; i++) { 12620 local_sglist[i].len = remote_sglist[i].len; | 12425 /* 12426 * The difficult thing here is that the size of the various 12427 * S/G segments may be different than the size from the 12428 * remote controller. That'll make it harder when DMAing 12429 * the data back to the other side. 12430 */ 12431 for (i = 0; len_to_go > 0; i++) { 12432 local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT); 12433 local_sglist[i].addr = 12434 malloc(local_sglist[i].len, M_CTL, M_WAITOK); |
12621 | 12435 |
12622 /* 12623 * XXX Detect the situation where the RS-level I/O 12624 * redirector on the other side has already read the 12625 * data off of the AOR RS on this side, and 12626 * transferred it to remote (mirror) memory on the 12627 * other side. Since we already have the data in 12628 * memory here, we just need to use it. 12629 * 12630 * XXX KDM this can probably be removed once we 12631 * get the cache device code in and take the 12632 * current AOR implementation out. 12633 */ 12634#ifdef NEEDTOPORT 12635 if ((remote_sglist[i].addr >= 12636 (void *)vtophys(softc->mirr->addr)) 12637 && (remote_sglist[i].addr < 12638 ((void *)vtophys(softc->mirr->addr) + 12639 CacheMirrorOffset))) { 12640 local_sglist[i].addr = remote_sglist[i].addr - 12641 CacheMirrorOffset; 12642 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 12643 CTL_FLAG_DATA_IN) 12644 io->io_hdr.flags |= CTL_FLAG_REDIR_DONE; 12645 } else { 12646 local_sglist[i].addr = remote_sglist[i].addr + 12647 CacheMirrorOffset; 12648 } 12649#endif 12650#if 0 12651 printf("%s: local %p, remote %p, len %d\n", 12652 __func__, local_sglist[i].addr, 12653 remote_sglist[i].addr, local_sglist[i].len); 12654#endif 12655 } 12656 } else { 12657 uint32_t len_to_go; | 12436 len_to_go -= local_sglist[i].len; 12437 } 12438 /* 12439 * Reset the number of S/G entries accordingly. The original 12440 * number of S/G entries is available in rem_sg_entries. 12441 */ 12442 io->scsiio.kern_sg_entries = i; |
12658 | 12443 |
12659 /* 12660 * In this case, we don't have automatically allocated 12661 * memory for this I/O on this controller. This typically 12662 * happens with internal CTL I/O -- e.g. inquiry, mode 12663 * sense, etc. Anything coming from RAIDCore will have 12664 * a mirror area available. 12665 */ 12666 len_to_go = io->scsiio.kern_data_len; 12667 12668 /* 12669 * Clear the no datasync flag, we have to use malloced 12670 * buffers. 12671 */ 12672 io->io_hdr.flags &= ~CTL_FLAG_NO_DATASYNC; 12673 12674 /* 12675 * The difficult thing here is that the size of the various 12676 * S/G segments may be different than the size from the 12677 * remote controller. That'll make it harder when DMAing 12678 * the data back to the other side. 12679 */ 12680 for (i = 0; (i < sizeof(io->io_hdr.remote_sglist) / 12681 sizeof(io->io_hdr.remote_sglist[0])) && 12682 (len_to_go > 0); i++) { 12683 local_sglist[i].len = MIN(len_to_go, 131072); 12684 CTL_SIZE_8B(local_dma_sglist[i].len, 12685 local_sglist[i].len); 12686 local_sglist[i].addr = 12687 malloc(local_dma_sglist[i].len, M_CTL,M_WAITOK); 12688 12689 local_dma_sglist[i].addr = local_sglist[i].addr; 12690 12691 if (local_sglist[i].addr == NULL) { 12692 int j; 12693 12694 printf("malloc failed for %zd bytes!", 12695 local_dma_sglist[i].len); 12696 for (j = 0; j < i; j++) { 12697 free(local_sglist[j].addr, M_CTL); 12698 } 12699 ctl_set_internal_failure(&io->scsiio, 12700 /*sks_valid*/ 1, 12701 /*retry_count*/ 4857); 12702 retval = 1; 12703 goto bailout_error; 12704 12705 } 12706 /* XXX KDM do we need a sync here? */ 12707 12708 len_to_go -= local_sglist[i].len; 12709 } 12710 /* 12711 * Reset the number of S/G entries accordingly. The 12712 * original number of S/G entries is available in 12713 * rem_sg_entries. 12714 */ 12715 io->scsiio.kern_sg_entries = i; 12716 | |
12717#if 0 | 12444#if 0 |
12718 printf("%s: kern_sg_entries = %d\n", __func__, 12719 io->scsiio.kern_sg_entries); 12720 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12721 printf("%s: sg[%d] = %p, %d (DMA: %d)\n", __func__, i, 12722 local_sglist[i].addr, local_sglist[i].len, 12723 local_dma_sglist[i].len); | 12445 printf("%s: kern_sg_entries = %d\n", __func__, 12446 io->scsiio.kern_sg_entries); 12447 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12448 printf("%s: sg[%d] = %p, %d\n", __func__, i, 12449 local_sglist[i].addr, local_sglist[i].len); |
12724#endif | 12450#endif |
12725 } | |
12726 | 12451 |
12727 | |
12728 return (retval); | 12452 return (retval); |
12729 12730bailout_error: 12731 12732 ctl_send_datamove_done(io, /*have_lock*/ 0); 12733 12734 return (retval); | |
12735} 12736 12737static int 12738ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 12739 ctl_ha_dt_cb callback) 12740{ 12741 struct ctl_ha_dt_req *rq; 12742 struct ctl_sg_entry *remote_sglist, *local_sglist; | 12453} 12454 12455static int 12456ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 12457 ctl_ha_dt_cb callback) 12458{ 12459 struct ctl_ha_dt_req *rq; 12460 struct ctl_sg_entry *remote_sglist, *local_sglist; |
12743 struct ctl_sg_entry *remote_dma_sglist, *local_dma_sglist; | |
12744 uint32_t local_used, remote_used, total_used; | 12461 uint32_t local_used, remote_used, total_used; |
12745 int retval; 12746 int i, j; | 12462 int i, j, isc_ret; |
12747 | 12463 |
12748 retval = 0; 12749 | |
12750 rq = ctl_dt_req_alloc(); 12751 12752 /* 12753 * If we failed to allocate the request, and if the DMA didn't fail 12754 * anyway, set busy status. This is just a resource allocation 12755 * failure. 12756 */ 12757 if ((rq == NULL) --- 8 unchanged lines hidden (view full) --- 12766 /* 12767 * The data move failed. We need to return status back 12768 * to the other controller. No point in trying to DMA 12769 * data to the remote controller. 12770 */ 12771 12772 ctl_send_datamove_done(io, /*have_lock*/ 0); 12773 | 12464 rq = ctl_dt_req_alloc(); 12465 12466 /* 12467 * If we failed to allocate the request, and if the DMA didn't fail 12468 * anyway, set busy status. This is just a resource allocation 12469 * failure. 12470 */ 12471 if ((rq == NULL) --- 8 unchanged lines hidden (view full) --- 12480 /* 12481 * The data move failed. We need to return status back 12482 * to the other controller. No point in trying to DMA 12483 * data to the remote controller. 12484 */ 12485 12486 ctl_send_datamove_done(io, /*have_lock*/ 0); 12487 |
12774 retval = 1; 12775 12776 goto bailout; | 12488 return (1); |
12777 } 12778 12779 local_sglist = io->io_hdr.local_sglist; | 12489 } 12490 12491 local_sglist = io->io_hdr.local_sglist; |
12780 local_dma_sglist = io->io_hdr.local_dma_sglist; | |
12781 remote_sglist = io->io_hdr.remote_sglist; | 12492 remote_sglist = io->io_hdr.remote_sglist; |
12782 remote_dma_sglist = io->io_hdr.remote_dma_sglist; | |
12783 local_used = 0; 12784 remote_used = 0; 12785 total_used = 0; 12786 | 12493 local_used = 0; 12494 remote_used = 0; 12495 total_used = 0; 12496 |
12787 if (io->io_hdr.flags & CTL_FLAG_REDIR_DONE) { 12788 rq->ret = CTL_HA_STATUS_SUCCESS; 12789 rq->context = io; 12790 callback(rq); 12791 goto bailout; 12792 } 12793 | |
12794 /* 12795 * Pull/push the data over the wire from/to the other controller. 12796 * This takes into account the possibility that the local and 12797 * remote sglists may not be identical in terms of the size of 12798 * the elements and the number of elements. 12799 * 12800 * One fundamental assumption here is that the length allocated for 12801 * both the local and remote sglists is identical. Otherwise, we've 12802 * essentially got a coding error of some sort. 12803 */ | 12497 /* 12498 * Pull/push the data over the wire from/to the other controller. 12499 * This takes into account the possibility that the local and 12500 * remote sglists may not be identical in terms of the size of 12501 * the elements and the number of elements. 12502 * 12503 * One fundamental assumption here is that the length allocated for 12504 * both the local and remote sglists is identical. Otherwise, we've 12505 * essentially got a coding error of some sort. 12506 */ |
12507 isc_ret = CTL_HA_STATUS_SUCCESS; |
|
12804 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { | 12508 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { |
12805 int isc_ret; 12806 uint32_t cur_len, dma_length; | 12509 uint32_t cur_len; |
12807 uint8_t *tmp_ptr; 12808 | 12510 uint8_t *tmp_ptr; 12511 |
12809 rq->id = CTL_HA_DATA_CTL; | |
12810 rq->command = command; 12811 rq->context = io; 12812 12813 /* 12814 * Both pointers should be aligned. But it is possible 12815 * that the allocation length is not. They should both 12816 * also have enough slack left over at the end, though, 12817 * to round up to the next 8 byte boundary. 12818 */ 12819 cur_len = MIN(local_sglist[i].len - local_used, 12820 remote_sglist[j].len - remote_used); | 12512 rq->command = command; 12513 rq->context = io; 12514 12515 /* 12516 * Both pointers should be aligned. But it is possible 12517 * that the allocation length is not. They should both 12518 * also have enough slack left over at the end, though, 12519 * to round up to the next 8 byte boundary. 12520 */ 12521 cur_len = MIN(local_sglist[i].len - local_used, 12522 remote_sglist[j].len - remote_used); |
12523 rq->size = cur_len; |
|
12821 | 12524 |
12822 /* 12823 * In this case, we have a size issue and need to decrease 12824 * the size, except in the case where we actually have less 12825 * than 8 bytes left. In that case, we need to increase 12826 * the DMA length to get the last bit. 12827 */ 12828 if ((cur_len & 0x7) != 0) { 12829 if (cur_len > 0x7) { 12830 cur_len = cur_len - (cur_len & 0x7); 12831 dma_length = cur_len; 12832 } else { 12833 CTL_SIZE_8B(dma_length, cur_len); 12834 } 12835 12836 } else 12837 dma_length = cur_len; 12838 12839 /* 12840 * If we had to allocate memory for this I/O, instead of using 12841 * the non-cached mirror memory, we'll need to flush the cache 12842 * before trying to DMA to the other controller. 12843 * 12844 * We could end up doing this multiple times for the same 12845 * segment if we have a larger local segment than remote 12846 * segment. That shouldn't be an issue. 12847 */ 12848 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 12849 /* 12850 * XXX KDM use bus_dmamap_sync() here. 12851 */ 12852 } 12853 12854 rq->size = dma_length; 12855 | |
12856 tmp_ptr = (uint8_t *)local_sglist[i].addr; 12857 tmp_ptr += local_used; 12858 | 12525 tmp_ptr = (uint8_t *)local_sglist[i].addr; 12526 tmp_ptr += local_used; 12527 |
12528#if 0 |
|
12859 /* Use physical addresses when talking to ISC hardware */ 12860 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 12861 /* XXX KDM use busdma */ | 12529 /* Use physical addresses when talking to ISC hardware */ 12530 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 12531 /* XXX KDM use busdma */ |
12862#if 0 | |
12863 rq->local = vtophys(tmp_ptr); | 12532 rq->local = vtophys(tmp_ptr); |
12864#endif | |
12865 } else 12866 rq->local = tmp_ptr; | 12533 } else 12534 rq->local = tmp_ptr; |
12535#else 12536 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 12537 ("HA does not support BUS_ADDR")); 12538 rq->local = tmp_ptr; 12539#endif |
|
12867 12868 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 12869 tmp_ptr += remote_used; 12870 rq->remote = tmp_ptr; 12871 12872 rq->callback = NULL; 12873 12874 local_used += cur_len; --- 7 unchanged lines hidden (view full) --- 12882 j++; 12883 remote_used = 0; 12884 } 12885 total_used += cur_len; 12886 12887 if (total_used >= io->scsiio.kern_data_len) 12888 rq->callback = callback; 12889 | 12540 12541 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 12542 tmp_ptr += remote_used; 12543 rq->remote = tmp_ptr; 12544 12545 rq->callback = NULL; 12546 12547 local_used += cur_len; --- 7 unchanged lines hidden (view full) --- 12555 j++; 12556 remote_used = 0; 12557 } 12558 total_used += cur_len; 12559 12560 if (total_used >= io->scsiio.kern_data_len) 12561 rq->callback = callback; 12562 |
12890 if ((rq->size & 0x7) != 0) { 12891 printf("%s: warning: size %d is not on 8b boundary\n", 12892 __func__, rq->size); 12893 } 12894 if (((uintptr_t)rq->local & 0x7) != 0) { 12895 printf("%s: warning: local %p not on 8b boundary\n", 12896 __func__, rq->local); 12897 } 12898 if (((uintptr_t)rq->remote & 0x7) != 0) { 12899 printf("%s: warning: remote %p not on 8b boundary\n", 12900 __func__, rq->local); 12901 } | |
12902#if 0 12903 printf("%s: %s: local %#x remote %#x size %d\n", __func__, 12904 (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ", 12905 rq->local, rq->remote, rq->size); 12906#endif 12907 12908 isc_ret = ctl_dt_single(rq); | 12563#if 0 12564 printf("%s: %s: local %#x remote %#x size %d\n", __func__, 12565 (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ", 12566 rq->local, rq->remote, rq->size); 12567#endif 12568 12569 isc_ret = ctl_dt_single(rq); |
12909 if (isc_ret == CTL_HA_STATUS_WAIT) 12910 continue; 12911 12912 if (isc_ret == CTL_HA_STATUS_DISCONNECT) { 12913 rq->ret = CTL_HA_STATUS_SUCCESS; 12914 } else { 12915 rq->ret = isc_ret; 12916 } | 12570 if (isc_ret > CTL_HA_STATUS_SUCCESS) 12571 break; 12572 } 12573 if (isc_ret != CTL_HA_STATUS_WAIT) { 12574 rq->ret = isc_ret; |
12917 callback(rq); | 12575 callback(rq); |
12918 goto bailout; | |
12919 } 12920 | 12576 } 12577 |
12921bailout: 12922 return (retval); 12923 | 12578 return (0); |
12924} 12925 12926static void 12927ctl_datamove_remote_read(union ctl_io *io) 12928{ 12929 int retval; 12930 int i; 12931 12932 /* 12933 * This will send an error to the other controller in the case of a 12934 * failure. 12935 */ 12936 retval = ctl_datamove_remote_sgl_setup(io); 12937 if (retval != 0) 12938 return; 12939 12940 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 12941 ctl_datamove_remote_read_cb); | 12579} 12580 12581static void 12582ctl_datamove_remote_read(union ctl_io *io) 12583{ 12584 int retval; 12585 int i; 12586 12587 /* 12588 * This will send an error to the other controller in the case of a 12589 * failure. 12590 */ 12591 retval = ctl_datamove_remote_sgl_setup(io); 12592 if (retval != 0) 12593 return; 12594 12595 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 12596 ctl_datamove_remote_read_cb); |
12942 if ((retval != 0) 12943 && ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0)) { | 12597 if (retval != 0) { |
12944 /* 12945 * Make sure we free memory if there was an error.. The 12946 * ctl_datamove_remote_xfer() function will send the 12947 * datamove done message, or call the callback with an 12948 * error if there is a problem. 12949 */ 12950 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12951 free(io->io_hdr.local_sglist[i].addr, M_CTL); | 12598 /* 12599 * Make sure we free memory if there was an error.. The 12600 * ctl_datamove_remote_xfer() function will send the 12601 * datamove done message, or call the callback with an 12602 * error if there is a problem. 12603 */ 12604 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12605 free(io->io_hdr.local_sglist[i].addr, M_CTL); |
12606 free(io->io_hdr.remote_sglist, M_CTL); 12607 io->io_hdr.remote_sglist = NULL; 12608 io->io_hdr.local_sglist = NULL; |
|
12952 } 12953 12954 return; 12955} 12956 12957/* 12958 * Process a datamove request from the other controller. This is used for 12959 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 12960 * first. Once that is complete, the data gets DMAed into the remote 12961 * controller's memory. For reads, we DMA from the remote controller's 12962 * memory into our memory first, and then move it out to the FETD. 12963 */ 12964static void 12965ctl_datamove_remote(union ctl_io *io) 12966{ | 12609 } 12610 12611 return; 12612} 12613 12614/* 12615 * Process a datamove request from the other controller. This is used for 12616 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 12617 * first. Once that is complete, the data gets DMAed into the remote 12618 * controller's memory. For reads, we DMA from the remote controller's 12619 * memory into our memory first, and then move it out to the FETD. 12620 */ 12621static void 12622ctl_datamove_remote(union ctl_io *io) 12623{ |
12967 struct ctl_softc *softc; | |
12968 | 12624 |
12969 softc = control_softc; | 12625 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED); |
12970 | 12626 |
12971 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); | 12627 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12628 ctl_failover_io(io, /*have_lock*/ 0); 12629 return; 12630 } |
12972 12973 /* 12974 * Note that we look for an aborted I/O here, but don't do some of 12975 * the other checks that ctl_datamove() normally does. 12976 * We don't need to run the datamove delay code, since that should 12977 * have been done if need be on the other controller. 12978 */ 12979 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12980 printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__, 12981 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12982 io->io_hdr.nexus.targ_port, 12983 io->io_hdr.nexus.targ_lun); 12984 io->io_hdr.port_status = 31338; 12985 ctl_send_datamove_done(io, /*have_lock*/ 0); 12986 return; 12987 } 12988 | 12631 12632 /* 12633 * Note that we look for an aborted I/O here, but don't do some of 12634 * the other checks that ctl_datamove() normally does. 12635 * We don't need to run the datamove delay code, since that should 12636 * have been done if need be on the other controller. 12637 */ 12638 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12639 printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__, 12640 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12641 io->io_hdr.nexus.targ_port, 12642 io->io_hdr.nexus.targ_lun); 12643 io->io_hdr.port_status = 31338; 12644 ctl_send_datamove_done(io, /*have_lock*/ 0); 12645 return; 12646 } 12647 |
12989 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) { | 12648 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) |
12990 ctl_datamove_remote_write(io); | 12649 ctl_datamove_remote_write(io); |
12991 } else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN){ | 12650 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) |
12992 ctl_datamove_remote_read(io); | 12651 ctl_datamove_remote_read(io); |
12993 } else { 12994 union ctl_ha_msg msg; 12995 struct scsi_sense_data *sense; 12996 uint8_t sks[3]; 12997 int retry_count; 12998 12999 memset(&msg, 0, sizeof(msg)); 13000 13001 msg.hdr.msg_type = CTL_MSG_BAD_JUJU; 13002 msg.hdr.status = CTL_SCSI_ERROR; 13003 msg.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 13004 13005 retry_count = 4243; 13006 13007 sense = &msg.scsi.sense_data; 13008 sks[0] = SSD_SCS_VALID; 13009 sks[1] = (retry_count >> 8) & 0xff; 13010 sks[2] = retry_count & 0xff; 13011 13012 /* "Internal target failure" */ 13013 scsi_set_sense_data(sense, 13014 /*sense_format*/ SSD_TYPE_NONE, 13015 /*current_error*/ 1, 13016 /*sense_key*/ SSD_KEY_HARDWARE_ERROR, 13017 /*asc*/ 0x44, 13018 /*ascq*/ 0x00, 13019 /*type*/ SSD_ELEM_SKS, 13020 /*size*/ sizeof(sks), 13021 /*data*/ sks, 13022 SSD_ELEM_NONE); 13023 13024 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 13025 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 13026 ctl_failover_io(io, /*have_lock*/ 1); 13027 return; 13028 } 13029 13030 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0) > 13031 CTL_HA_STATUS_SUCCESS) { 13032 /* XXX KDM what to do if this fails? */ 13033 } 13034 return; | 12652 else { 12653 io->io_hdr.port_status = 31339; 12654 ctl_send_datamove_done(io, /*have_lock*/ 0); |
13035 } | 12655 } |
13036 | |
13037} 13038 13039static int 13040ctl_process_done(union ctl_io *io) 13041{ 13042 struct ctl_lun *lun; 13043 struct ctl_softc *softc = control_softc; 13044 void (*fe_done)(union ctl_io *io); | 12656} 12657 12658static int 12659ctl_process_done(union ctl_io *io) 12660{ 12661 struct ctl_lun *lun; 12662 struct ctl_softc *softc = control_softc; 12663 void (*fe_done)(union ctl_io *io); |
13045 uint32_t targ_port = ctl_port_idx(io->io_hdr.nexus.targ_port); | 12664 union ctl_ha_msg msg; 12665 uint32_t targ_port = io->io_hdr.nexus.targ_port; |
13046 13047 CTL_DEBUG_PRINT(("ctl_process_done\n")); 13048 | 12666 12667 CTL_DEBUG_PRINT(("ctl_process_done\n")); 12668 |
13049 fe_done = softc->ctl_ports[targ_port]->fe_done; | 12669 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) 12670 fe_done = softc->ctl_ports[targ_port]->fe_done; 12671 else 12672 fe_done = NULL; |
13050 13051#ifdef CTL_TIME_IO 13052 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 13053 char str[256]; 13054 char path_str[64]; 13055 struct sbuf sb; 13056 13057 ctl_scsi_path_string(io, path_str, sizeof(path_str)); --- 141 unchanged lines hidden (view full) --- 13199 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS && 13200 (ctl_debug & CTL_DEBUG_INFO) != 0) 13201 ctl_io_error_print(io, NULL); 13202 13203 /* 13204 * Tell the FETD or the other shelf controller we're done with this 13205 * command. Note that only SCSI commands get to this point. Task 13206 * management commands are completed above. | 12673 12674#ifdef CTL_TIME_IO 12675 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12676 char str[256]; 12677 char path_str[64]; 12678 struct sbuf sb; 12679 12680 ctl_scsi_path_string(io, path_str, sizeof(path_str)); --- 141 unchanged lines hidden (view full) --- 12822 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS && 12823 (ctl_debug & CTL_DEBUG_INFO) != 0) 12824 ctl_io_error_print(io, NULL); 12825 12826 /* 12827 * Tell the FETD or the other shelf controller we're done with this 12828 * command. Note that only SCSI commands get to this point. Task 12829 * management commands are completed above. |
13207 * 13208 * We only send status to the other controller if we're in XFER 13209 * mode. In SER_ONLY mode, the I/O is done on the controller that 13210 * received the I/O (from CTL's perspective), and so the status is 13211 * generated there. 13212 * 13213 * XXX KDM if we hold the lock here, we could cause a deadlock 13214 * if the frontend comes back in in this context to queue 13215 * something. | |
13216 */ | 12830 */ |
12831 if ((softc->ha_mode != CTL_HA_MODE_XFER) && 12832 (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) { 12833 memset(&msg, 0, sizeof(msg)); 12834 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 12835 msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 12836 msg.hdr.nexus = io->io_hdr.nexus; 12837 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12838 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data), 12839 M_WAITOK); 12840 } |
|
13217 if ((softc->ha_mode == CTL_HA_MODE_XFER) 13218 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { | 12841 if ((softc->ha_mode == CTL_HA_MODE_XFER) 12842 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { |
13219 union ctl_ha_msg msg; 13220 | |
13221 memset(&msg, 0, sizeof(msg)); 13222 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 13223 msg.hdr.original_sc = io->io_hdr.original_sc; 13224 msg.hdr.nexus = io->io_hdr.nexus; 13225 msg.hdr.status = io->io_hdr.status; 13226 msg.scsi.scsi_status = io->scsiio.scsi_status; 13227 msg.scsi.tag_num = io->scsiio.tag_num; 13228 msg.scsi.tag_type = io->scsiio.tag_type; 13229 msg.scsi.sense_len = io->scsiio.sense_len; 13230 msg.scsi.sense_residual = io->scsiio.sense_residual; 13231 msg.scsi.residual = io->scsiio.residual; 13232 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, | 12843 memset(&msg, 0, sizeof(msg)); 12844 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 12845 msg.hdr.original_sc = io->io_hdr.original_sc; 12846 msg.hdr.nexus = io->io_hdr.nexus; 12847 msg.hdr.status = io->io_hdr.status; 12848 msg.scsi.scsi_status = io->scsiio.scsi_status; 12849 msg.scsi.tag_num = io->scsiio.tag_num; 12850 msg.scsi.tag_type = io->scsiio.tag_type; 12851 msg.scsi.sense_len = io->scsiio.sense_len; 12852 msg.scsi.sense_residual = io->scsiio.sense_residual; 12853 msg.scsi.residual = io->scsiio.residual; 12854 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, |
13233 sizeof(io->scsiio.sense_data)); | 12855 io->scsiio.sense_len); |
13234 /* 13235 * We copy this whether or not this is an I/O-related 13236 * command. Otherwise, we'd have to go and check to see 13237 * whether it's a read/write command, and it really isn't 13238 * worth it. 13239 */ 13240 memcpy(&msg.scsi.lbalen, 13241 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 13242 sizeof(msg.scsi.lbalen)); 13243 | 12856 /* 12857 * We copy this whether or not this is an I/O-related 12858 * command. Otherwise, we'd have to go and check to see 12859 * whether it's a read/write command, and it really isn't 12860 * worth it. 12861 */ 12862 memcpy(&msg.scsi.lbalen, 12863 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 12864 sizeof(msg.scsi.lbalen)); 12865 |
13244 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 13245 sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) { 13246 /* XXX do something here */ 13247 } 13248 | 12866 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12867 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 12868 msg.scsi.sense_len, M_WAITOK); |
13249 ctl_free_io(io); 13250 } else 13251 fe_done(io); 13252 13253 return (CTL_RETVAL_COMPLETE); 13254} 13255 13256#ifdef CTL_WITH_CA --- 138 unchanged lines hidden (view full) --- 13395 13396 /* 13397 * This is an internal copy of an I/O, and should not go through 13398 * the normal done processing logic. 13399 */ 13400 if (io->io_hdr.flags & CTL_FLAG_INT_COPY) 13401 return; 13402 | 12869 ctl_free_io(io); 12870 } else 12871 fe_done(io); 12872 12873 return (CTL_RETVAL_COMPLETE); 12874} 12875 12876#ifdef CTL_WITH_CA --- 138 unchanged lines hidden (view full) --- 13015 13016 /* 13017 * This is an internal copy of an I/O, and should not go through 13018 * the normal done processing logic. 13019 */ 13020 if (io->io_hdr.flags & CTL_FLAG_INT_COPY) 13021 return; 13022 |
13403 /* 13404 * We need to send a msg to the serializing shelf to finish the IO 13405 * as well. We don't send a finish message to the other shelf if 13406 * this is a task management command. Task management commands 13407 * aren't serialized in the OOA queue, but rather just executed on 13408 * both shelf controllers for commands that originated on that 13409 * controller. 13410 */ 13411 if ((io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC) 13412 && (io->io_hdr.io_type != CTL_IO_TASK)) { 13413 union ctl_ha_msg msg_io; 13414 13415 msg_io.hdr.msg_type = CTL_MSG_FINISH_IO; 13416 msg_io.hdr.serializing_sc = io->io_hdr.serializing_sc; 13417 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_io, 13418 sizeof(msg_io), 0 ) != CTL_HA_STATUS_SUCCESS) { 13419 } 13420 /* continue on to finish IO */ 13421 } | |
13422#ifdef CTL_IO_DELAY 13423 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 13424 struct ctl_lun *lun; 13425 13426 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13427 13428 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 13429 } else { --- 14 unchanged lines hidden (view full) --- 13444 return; 13445 } 13446 } 13447#endif /* CTL_IO_DELAY */ 13448 13449 ctl_enqueue_done(io); 13450} 13451 | 13023#ifdef CTL_IO_DELAY 13024 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 13025 struct ctl_lun *lun; 13026 13027 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13028 13029 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 13030 } else { --- 14 unchanged lines hidden (view full) --- 13045 return; 13046 } 13047 } 13048#endif /* CTL_IO_DELAY */ 13049 13050 ctl_enqueue_done(io); 13051} 13052 |
13452int 13453ctl_isc(struct ctl_scsiio *ctsio) 13454{ 13455 struct ctl_lun *lun; 13456 int retval; 13457 13458 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13459 13460 CTL_DEBUG_PRINT(("ctl_isc: command: %02x\n", ctsio->cdb[0])); 13461 13462 CTL_DEBUG_PRINT(("ctl_isc: calling data_submit()\n")); 13463 13464 retval = lun->backend->data_submit((union ctl_io *)ctsio); 13465 13466 return (retval); 13467} 13468 13469 | |
13470static void 13471ctl_work_thread(void *arg) 13472{ 13473 struct ctl_thread *thr = (struct ctl_thread *)arg; 13474 struct ctl_softc *softc = thr->ctl_softc; 13475 union ctl_io *io; 13476 int retval; 13477 --- 33 unchanged lines hidden (view full) --- 13511 STAILQ_REMOVE_HEAD(&thr->incoming_queue, links); 13512 mtx_unlock(&thr->queue_lock); 13513 if (io->io_hdr.io_type == CTL_IO_TASK) 13514 ctl_run_task(io); 13515 else 13516 ctl_scsiio_precheck(softc, &io->scsiio); 13517 continue; 13518 } | 13053static void 13054ctl_work_thread(void *arg) 13055{ 13056 struct ctl_thread *thr = (struct ctl_thread *)arg; 13057 struct ctl_softc *softc = thr->ctl_softc; 13058 union ctl_io *io; 13059 int retval; 13060 --- 33 unchanged lines hidden (view full) --- 13094 STAILQ_REMOVE_HEAD(&thr->incoming_queue, links); 13095 mtx_unlock(&thr->queue_lock); 13096 if (io->io_hdr.io_type == CTL_IO_TASK) 13097 ctl_run_task(io); 13098 else 13099 ctl_scsiio_precheck(softc, &io->scsiio); 13100 continue; 13101 } |
13519 if (!ctl_pause_rtr) { 13520 io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue); 13521 if (io != NULL) { 13522 STAILQ_REMOVE_HEAD(&thr->rtr_queue, links); 13523 mtx_unlock(&thr->queue_lock); 13524 retval = ctl_scsiio(&io->scsiio); 13525 if (retval != CTL_RETVAL_COMPLETE) 13526 CTL_DEBUG_PRINT(("ctl_scsiio failed\n")); 13527 continue; 13528 } | 13102 io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue); 13103 if (io != NULL) { 13104 STAILQ_REMOVE_HEAD(&thr->rtr_queue, links); 13105 mtx_unlock(&thr->queue_lock); 13106 retval = ctl_scsiio(&io->scsiio); 13107 if (retval != CTL_RETVAL_COMPLETE) 13108 CTL_DEBUG_PRINT(("ctl_scsiio failed\n")); 13109 continue; |
13529 } 13530 13531 /* Sleep until we have something to do. */ 13532 mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0); 13533 } 13534} 13535 13536static void --- 26 unchanged lines hidden (view full) --- 13563ctl_thresh_thread(void *arg) 13564{ 13565 struct ctl_softc *softc = (struct ctl_softc *)arg; 13566 struct ctl_lun *lun; 13567 struct ctl_be_lun *be_lun; 13568 struct scsi_da_rw_recovery_page *rwpage; 13569 struct ctl_logical_block_provisioning_page *page; 13570 const char *attr; | 13110 } 13111 13112 /* Sleep until we have something to do. */ 13113 mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0); 13114 } 13115} 13116 13117static void --- 26 unchanged lines hidden (view full) --- 13144ctl_thresh_thread(void *arg) 13145{ 13146 struct ctl_softc *softc = (struct ctl_softc *)arg; 13147 struct ctl_lun *lun; 13148 struct ctl_be_lun *be_lun; 13149 struct scsi_da_rw_recovery_page *rwpage; 13150 struct ctl_logical_block_provisioning_page *page; 13151 const char *attr; |
13152 union ctl_ha_msg msg; |
|
13571 uint64_t thres, val; | 13153 uint64_t thres, val; |
13572 int i, e; | 13154 int i, e, set; |
13573 13574 CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n")); 13575 13576 for (;;) { 13577 mtx_lock(&softc->ctl_lock); 13578 STAILQ_FOREACH(lun, &softc->lun_list, links) { 13579 be_lun = lun->be_lun; 13580 if ((lun->flags & CTL_LUN_DISABLED) || 13581 (lun->flags & CTL_LUN_OFFLINE) || 13582 lun->backend->lun_attr == NULL) 13583 continue; | 13155 13156 CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n")); 13157 13158 for (;;) { 13159 mtx_lock(&softc->ctl_lock); 13160 STAILQ_FOREACH(lun, &softc->lun_list, links) { 13161 be_lun = lun->be_lun; 13162 if ((lun->flags & CTL_LUN_DISABLED) || 13163 (lun->flags & CTL_LUN_OFFLINE) || 13164 lun->backend->lun_attr == NULL) 13165 continue; |
13166 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 13167 softc->ha_mode == CTL_HA_MODE_XFER) 13168 continue; |
|
13584 rwpage = &lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT]; 13585 if ((rwpage->byte8 & SMS_RWER_LBPERE) == 0) 13586 continue; 13587 e = 0; 13588 page = &lun->mode_pages.lbp_page[CTL_PAGE_CURRENT]; 13589 for (i = 0; i < CTL_NUM_LBP_THRESH; i++) { 13590 if ((page->descr[i].flags & SLBPPD_ENABLED) == 0) 13591 continue; --- 28 unchanged lines hidden (view full) --- 13620 e |= (val <= thres); 13621 } 13622 mtx_lock(&lun->lun_lock); 13623 if (e) { 13624 if (lun->lasttpt == 0 || 13625 time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) { 13626 lun->lasttpt = time_uptime; 13627 ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); | 13169 rwpage = &lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT]; 13170 if ((rwpage->byte8 & SMS_RWER_LBPERE) == 0) 13171 continue; 13172 e = 0; 13173 page = &lun->mode_pages.lbp_page[CTL_PAGE_CURRENT]; 13174 for (i = 0; i < CTL_NUM_LBP_THRESH; i++) { 13175 if ((page->descr[i].flags & SLBPPD_ENABLED) == 0) 13176 continue; --- 28 unchanged lines hidden (view full) --- 13205 e |= (val <= thres); 13206 } 13207 mtx_lock(&lun->lun_lock); 13208 if (e) { 13209 if (lun->lasttpt == 0 || 13210 time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) { 13211 lun->lasttpt = time_uptime; 13212 ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); |
13628 } | 13213 set = 1; 13214 } else 13215 set = 0; |
13629 } else { 13630 lun->lasttpt = 0; 13631 ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); | 13216 } else { 13217 lun->lasttpt = 0; 13218 ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); |
13219 set = -1; |
|
13632 } 13633 mtx_unlock(&lun->lun_lock); | 13220 } 13221 mtx_unlock(&lun->lun_lock); |
13222 if (set != 0 && 13223 lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 13224 /* Send msg to other side. */ 13225 bzero(&msg.ua, sizeof(msg.ua)); 13226 msg.hdr.msg_type = CTL_MSG_UA; 13227 msg.hdr.nexus.initid = -1; 13228 msg.hdr.nexus.targ_port = -1; 13229 msg.hdr.nexus.targ_lun = lun->lun; 13230 msg.hdr.nexus.targ_mapped_lun = lun->lun; 13231 msg.ua.ua_all = 1; 13232 msg.ua.ua_set = (set > 0); 13233 msg.ua.ua_type = CTL_UA_THIN_PROV_THRES; 13234 mtx_unlock(&softc->ctl_lock); // XXX 13235 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 13236 sizeof(msg.ua), M_WAITOK); 13237 mtx_lock(&softc->ctl_lock); 13238 } |
|
13634 } 13635 mtx_unlock(&softc->ctl_lock); 13636 pause("-", CTL_LBP_PERIOD * hz); 13637 } 13638} 13639 13640static void 13641ctl_enqueue_incoming(union ctl_io *io) --- 32 unchanged lines hidden (view full) --- 13674 13675 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 13676 mtx_lock(&thr->queue_lock); 13677 STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links); 13678 mtx_unlock(&thr->queue_lock); 13679 wakeup(thr); 13680} 13681 | 13239 } 13240 mtx_unlock(&softc->ctl_lock); 13241 pause("-", CTL_LBP_PERIOD * hz); 13242 } 13243} 13244 13245static void 13246ctl_enqueue_incoming(union ctl_io *io) --- 32 unchanged lines hidden (view full) --- 13279 13280 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 13281 mtx_lock(&thr->queue_lock); 13282 STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links); 13283 mtx_unlock(&thr->queue_lock); 13284 wakeup(thr); 13285} 13286 |
13682#ifdef notyet | |
13683static void 13684ctl_enqueue_isc(union ctl_io *io) 13685{ 13686 struct ctl_softc *softc = control_softc; 13687 struct ctl_thread *thr; 13688 13689 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 13690 mtx_lock(&thr->queue_lock); 13691 STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links); 13692 mtx_unlock(&thr->queue_lock); 13693 wakeup(thr); 13694} 13695 | 13287static void 13288ctl_enqueue_isc(union ctl_io *io) 13289{ 13290 struct ctl_softc *softc = control_softc; 13291 struct ctl_thread *thr; 13292 13293 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 13294 mtx_lock(&thr->queue_lock); 13295 STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links); 13296 mtx_unlock(&thr->queue_lock); 13297 wakeup(thr); 13298} 13299 |
13696/* Initialization and failover */ 13697 13698void 13699ctl_init_isc_msg(void) 13700{ 13701 printf("CTL: Still calling this thing\n"); 13702} 13703 | |
13704/* | 13300/* |
13705 * Init component 13706 * Initializes component into configuration defined by bootMode 13707 * (see hasc-sv.c) 13708 * returns hasc_Status: 13709 * OK 13710 * ERROR - fatal error 13711 */ 13712static ctl_ha_comp_status 13713ctl_isc_init(struct ctl_ha_component *c) 13714{ 13715 ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK; 13716 13717 c->status = ret; 13718 return ret; 13719} 13720 13721/* Start component 13722 * Starts component in state requested. If component starts successfully, 13723 * it must set its own state to the requestrd state 13724 * When requested state is HASC_STATE_HA, the component may refine it 13725 * by adding _SLAVE or _MASTER flags. 13726 * Currently allowed state transitions are: 13727 * UNKNOWN->HA - initial startup 13728 * UNKNOWN->SINGLE - initial startup when no parter detected 13729 * HA->SINGLE - failover 13730 * returns ctl_ha_comp_status: 13731 * OK - component successfully started in requested state 13732 * FAILED - could not start the requested state, failover may 13733 * be possible 13734 * ERROR - fatal error detected, no future startup possible 13735 */ 13736static ctl_ha_comp_status 13737ctl_isc_start(struct ctl_ha_component *c, ctl_ha_state state) 13738{ 13739 ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK; 13740 13741 printf("%s: go\n", __func__); 13742 13743 // UNKNOWN->HA or UNKNOWN->SINGLE (bootstrap) 13744 if (c->state == CTL_HA_STATE_UNKNOWN ) { 13745 control_softc->is_single = 0; 13746 if (ctl_ha_msg_create(CTL_HA_CHAN_CTL, ctl_isc_event_handler) 13747 != CTL_HA_STATUS_SUCCESS) { 13748 printf("ctl_isc_start: ctl_ha_msg_create failed.\n"); 13749 ret = CTL_HA_COMP_STATUS_ERROR; 13750 } 13751 } else if (CTL_HA_STATE_IS_HA(c->state) 13752 && CTL_HA_STATE_IS_SINGLE(state)){ 13753 // HA->SINGLE transition 13754 ctl_failover(); 13755 control_softc->is_single = 1; 13756 } else { 13757 printf("ctl_isc_start:Invalid state transition %X->%X\n", 13758 c->state, state); 13759 ret = CTL_HA_COMP_STATUS_ERROR; 13760 } 13761 if 
(CTL_HA_STATE_IS_SINGLE(state)) 13762 control_softc->is_single = 1; 13763 13764 c->state = state; 13765 c->status = ret; 13766 return ret; 13767} 13768 13769/* 13770 * Quiesce component 13771 * The component must clear any error conditions (set status to OK) and 13772 * prepare itself to another Start call 13773 * returns ctl_ha_comp_status: 13774 * OK 13775 * ERROR 13776 */ 13777static ctl_ha_comp_status 13778ctl_isc_quiesce(struct ctl_ha_component *c) 13779{ 13780 int ret = CTL_HA_COMP_STATUS_OK; 13781 13782 ctl_pause_rtr = 1; 13783 c->status = ret; 13784 return ret; 13785} 13786 13787struct ctl_ha_component ctl_ha_component_ctlisc = 13788{ 13789 .name = "CTL ISC", 13790 .state = CTL_HA_STATE_UNKNOWN, 13791 .init = ctl_isc_init, 13792 .start = ctl_isc_start, 13793 .quiesce = ctl_isc_quiesce 13794}; 13795#endif 13796 13797/* | |
13798 * vim: ts=8 13799 */ | 13301 * vim: ts=8 13302 */ |