1/*- 2 * Copyright (c) 2003-2009 Silicon Graphics International Corp. 3 * Copyright (c) 2012 The FreeBSD Foundation |
4 * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org> |
5 * All rights reserved. 6 * 7 * Portions of this software were developed by Edward Tomasz Napierala 8 * under sponsorship from the FreeBSD Foundation. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: --- 25 unchanged lines hidden (view full) --- 38 * CAM Target Layer, a SCSI device emulation subsystem. 39 * 40 * Author: Ken Merry <ken@FreeBSD.org> 41 */ 42 43#define _CTL_C 44 45#include <sys/cdefs.h> |
46__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl.c 288732 2015-10-05 08:57:16Z mav $"); |
47 48#include <sys/param.h> 49#include <sys/systm.h> 50#include <sys/ctype.h> 51#include <sys/kernel.h> 52#include <sys/types.h> 53#include <sys/kthread.h> 54#include <sys/bio.h> --- 25 unchanged lines hidden (view full) --- 80#include <cam/ctl/ctl_private.h> 81#include <cam/ctl/ctl_debug.h> 82#include <cam/ctl/ctl_scsi_all.h> 83#include <cam/ctl/ctl_error.h> 84 85struct ctl_softc *control_softc = NULL; 86 87/* |
88 * Template mode pages. 89 */ 90 91/* 92 * Note that these are default values only. The actual values will be 93 * filled in when the user does a mode sense. 94 */ 95const static struct copan_debugconf_subpage debugconf_page_default = { --- 232 unchanged lines hidden (view full) --- 328 /*count*/{0, 0, 0, 0}}, 329 {/*flags*/0, 330 /*resource*/0, 331 /*reserved*/{0, 0}, 332 /*count*/{0, 0, 0, 0}} 333 } 334}; 335 |
336SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer"); 337static int worker_threads = -1; 338TUNABLE_INT("kern.cam.ctl.worker_threads", &worker_threads); 339SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN, 340 &worker_threads, 1, "Number of worker threads"); 341static int ctl_debug = CTL_DEBUG_NONE; 342TUNABLE_INT("kern.cam.ctl.debug", &ctl_debug); 343SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN, 344 &ctl_debug, 0, "Enabled debug flags"); 345 346/* 347 * Supported pages (0x00), Serial number (0x80), Device ID (0x83), 348 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87), 349 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0), 350 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2) 351 */ 352#define SCSI_EVPD_NUM_SUPPORTED_PAGES 10 353 |
354static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event, 355 int param); 356static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest); |
357static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest); |
358static int ctl_init(void); 359void ctl_shutdown(void); 360static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td); 361static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td); 362static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio); 363static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 364 struct ctl_ooa *ooa_hdr, 365 struct ctl_ooa_entry *kern_entries); 366static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 367 struct thread *td); 368static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun, 369 struct ctl_be_lun *be_lun); 370static int ctl_free_lun(struct ctl_lun *lun); 371static void ctl_create_lun(struct ctl_be_lun *be_lun); 372static struct ctl_port * ctl_io_port(struct ctl_io_hdr *io_hdr); |
373 374static int ctl_do_mode_select(union ctl_io *io); 375static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, 376 uint64_t res_key, uint64_t sa_res_key, 377 uint8_t type, uint32_t residx, 378 struct ctl_scsiio *ctsio, 379 struct scsi_per_res_out *cdb, 380 struct scsi_per_res_out_parms* param); --- 20 unchanged lines hidden (view full) --- 401static ctl_action ctl_check_for_blockage(struct ctl_lun *lun, 402 union ctl_io *pending_io, union ctl_io *ooa_io); 403static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 404 union ctl_io *starting_io); 405static int ctl_check_blocked(struct ctl_lun *lun); 406static int ctl_scsiio_lun_check(struct ctl_lun *lun, 407 const struct ctl_cmd_entry *entry, 408 struct ctl_scsiio *ctsio); |
409static void ctl_failover_lun(struct ctl_lun *lun); 410static void ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua); 411static void ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua); 412static void ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua); 413static void ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua); |
414static void ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx, 415 ctl_ua_type ua_type); 416static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc, 417 struct ctl_scsiio *ctsio); 418static int ctl_scsiio(struct ctl_scsiio *ctsio); 419 420static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io); 421static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io, --- 22 unchanged lines hidden (view full) --- 444static void ctl_datamove_remote(union ctl_io *io); 445static int ctl_process_done(union ctl_io *io); 446static void ctl_lun_thread(void *arg); 447static void ctl_thresh_thread(void *arg); 448static void ctl_work_thread(void *arg); 449static void ctl_enqueue_incoming(union ctl_io *io); 450static void ctl_enqueue_rtr(union ctl_io *io); 451static void ctl_enqueue_done(union ctl_io *io); |
452static void ctl_enqueue_isc(union ctl_io *io); |
453static const struct ctl_cmd_entry * 454 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa); 455static const struct ctl_cmd_entry * 456 ctl_validate_command(struct ctl_scsiio *ctsio); 457static int ctl_cmd_applicable(uint8_t lun_type, 458 const struct ctl_cmd_entry *entry); 459 |
460static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx); 461static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx); 462static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx); 463static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key); 464 |
465/* 466 * Load the serialization table. This isn't very pretty, but is probably 467 * the easiest way to do it. 468 */ 469#include "ctl_ser_table.c" 470 471/* 472 * We only need to define open, close and ioctl routines for this driver. --- 16 unchanged lines hidden (view full) --- 489 "ctl", 490 ctl_module_event_handler, 491 NULL 492}; 493 494DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD); 495MODULE_VERSION(ctl, 1); 496 |
/*
 * Pseudo-frontend under which ports learned from the HA peer (via
 * CTL_MSG_PORT_SYNC) are registered locally.
 */
static struct ctl_frontend ha_frontend =
{
	.name = "ha",
};

502static void 503ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc, 504 union ctl_ha_msg *msg_info) 505{ 506 struct ctl_scsiio *ctsio; 507 508 if (msg_info->hdr.original_sc == NULL) { 509 printf("%s: original_sc == NULL!\n", __func__); --- 5 unchanged lines hidden (view full) --- 515 ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 516 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 517 ctsio->io_hdr.status = msg_info->hdr.status; 518 ctsio->scsi_status = msg_info->scsi.scsi_status; 519 ctsio->sense_len = msg_info->scsi.sense_len; 520 ctsio->sense_residual = msg_info->scsi.sense_residual; 521 ctsio->residual = msg_info->scsi.residual; 522 memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data, |
523 msg_info->scsi.sense_len); |
	/* Mirror the decoded LBA/len private data from the peer's message. */
	memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
	    &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen));
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

/*
 * Complete an I/O in SER_ONLY HA mode: the peer only serialized the
 * command, so all we do here is wake up the local (serializing) ctl_io
 * and queue it for FINISH_IO processing on the ISC thread.
 *
 * NOTE(review): serializing_sc arrives over the HA wire and is
 * dereferenced directly after a NULL check; presumably the HA channel is
 * trusted — confirm.
 */
static void
ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
				union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.serializing_sc == NULL) {
		printf("%s: serializing_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.serializing_sc->scsiio;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

546void 547ctl_isc_announce_lun(struct ctl_lun *lun) 548{ 549 struct ctl_softc *softc = lun->ctl_softc; 550 union ctl_ha_msg *msg; 551 struct ctl_ha_msg_lun_pr_key pr_key; 552 int i, k; 553 554 if (softc->ha_link != CTL_HA_LINK_ONLINE) 555 return; 556 mtx_lock(&lun->lun_lock); 557 i = sizeof(msg->lun); 558 if (lun->lun_devid) 559 i += lun->lun_devid->len; 560 i += sizeof(pr_key) * lun->pr_key_count; 561alloc: 562 mtx_unlock(&lun->lun_lock); 563 msg = malloc(i, M_CTL, M_WAITOK); 564 mtx_lock(&lun->lun_lock); 565 k = sizeof(msg->lun); 566 if (lun->lun_devid) 567 k += lun->lun_devid->len; 568 k += sizeof(pr_key) * lun->pr_key_count; 569 if (i < k) { 570 free(msg, M_CTL); 571 i = k; 572 goto alloc; 573 } 574 bzero(&msg->lun, sizeof(msg->lun)); 575 msg->hdr.msg_type = CTL_MSG_LUN_SYNC; 576 msg->hdr.nexus.targ_lun = lun->lun; 577 msg->hdr.nexus.targ_mapped_lun = lun->lun; 578 msg->lun.flags = lun->flags; 579 msg->lun.pr_generation = lun->PRGeneration; 580 msg->lun.pr_res_idx = lun->pr_res_idx; 581 msg->lun.pr_res_type = lun->res_type; 582 msg->lun.pr_key_count = lun->pr_key_count; 583 i = 0; 584 if (lun->lun_devid) { 585 msg->lun.lun_devid_len = lun->lun_devid->len; 586 memcpy(&msg->lun.data[i], lun->lun_devid->data, 587 msg->lun.lun_devid_len); 588 i += msg->lun.lun_devid_len; 589 } 590 for (k = 0; k < CTL_MAX_INITIATORS; k++) { 591 if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0) 592 continue; 593 pr_key.pr_iid = k; 594 memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key)); 595 i += sizeof(pr_key); 596 } 597 mtx_unlock(&lun->lun_lock); 598 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, 599 M_WAITOK); 600 free(msg, M_CTL); 601} 602 603void 604ctl_isc_announce_port(struct ctl_port *port) 605{ 606 struct ctl_softc *softc = control_softc; 607 union ctl_ha_msg *msg; 608 int i; 609 610 if (port->targ_port < softc->port_min || 611 port->targ_port >= softc->port_max || 612 softc->ha_link != CTL_HA_LINK_ONLINE) 613 return; 614 i = sizeof(msg->port) + 
strlen(port->port_name) + 1; 615 if (port->lun_map) 616 i += sizeof(uint32_t) * CTL_MAX_LUNS; 617 if (port->port_devid) 618 i += port->port_devid->len; 619 if (port->target_devid) 620 i += port->target_devid->len; 621 msg = malloc(i, M_CTL, M_WAITOK); 622 bzero(&msg->port, sizeof(msg->port)); 623 msg->hdr.msg_type = CTL_MSG_PORT_SYNC; 624 msg->hdr.nexus.targ_port = port->targ_port; 625 msg->port.port_type = port->port_type; 626 msg->port.physical_port = port->physical_port; 627 msg->port.virtual_port = port->virtual_port; 628 msg->port.status = port->status; 629 i = 0; 630 msg->port.name_len = sprintf(&msg->port.data[i], 631 "%d:%s", softc->ha_id, port->port_name) + 1; 632 i += msg->port.name_len; 633 if (port->lun_map) { 634 msg->port.lun_map_len = sizeof(uint32_t) * CTL_MAX_LUNS; 635 memcpy(&msg->port.data[i], port->lun_map, 636 msg->port.lun_map_len); 637 i += msg->port.lun_map_len; 638 } 639 if (port->port_devid) { 640 msg->port.port_devid_len = port->port_devid->len; 641 memcpy(&msg->port.data[i], port->port_devid->data, 642 msg->port.port_devid_len); 643 i += msg->port.port_devid_len; 644 } 645 if (port->target_devid) { 646 msg->port.target_devid_len = port->target_devid->len; 647 memcpy(&msg->port.data[i], port->target_devid->data, 648 msg->port.target_devid_len); 649 i += msg->port.target_devid_len; 650 } 651 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, 652 M_WAITOK); 653 free(msg, M_CTL); 654} 655 656static void 657ctl_isc_ha_link_up(struct ctl_softc *softc) 658{ 659 struct ctl_port *port; 660 struct ctl_lun *lun; 661 662 STAILQ_FOREACH(port, &softc->port_list, links) 663 ctl_isc_announce_port(port); 664 STAILQ_FOREACH(lun, &softc->lun_list, links) 665 ctl_isc_announce_lun(lun); 666} 667 668static void 669ctl_isc_ha_link_down(struct ctl_softc *softc) 670{ 671 struct ctl_port *port; 672 struct ctl_lun *lun; 673 union ctl_io *io; 674 675 mtx_lock(&softc->ctl_lock); 676 STAILQ_FOREACH(lun, &softc->lun_list, links) { 677 
mtx_lock(&lun->lun_lock); 678 lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; 679 mtx_unlock(&lun->lun_lock); 680 681 mtx_unlock(&softc->ctl_lock); 682 io = ctl_alloc_io(softc->othersc_pool); 683 mtx_lock(&softc->ctl_lock); 684 ctl_zero_io(io); 685 io->io_hdr.msg_type = CTL_MSG_FAILOVER; 686 io->io_hdr.nexus.targ_mapped_lun = lun->lun; 687 ctl_enqueue_isc(io); 688 } 689 690 STAILQ_FOREACH(port, &softc->port_list, links) { 691 if (port->targ_port >= softc->port_min && 692 port->targ_port < softc->port_max) 693 continue; 694 port->status &= ~CTL_PORT_STATUS_ONLINE; 695 } 696 mtx_unlock(&softc->ctl_lock); 697} 698 699static void 700ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 701{ 702 struct ctl_lun *lun; 703 uint32_t iid = ctl_get_initindex(&msg->hdr.nexus); 704 705 if (msg->hdr.nexus.targ_lun < CTL_MAX_LUNS && 706 (lun = softc->ctl_luns[msg->hdr.nexus.targ_lun]) != NULL) { 707 if (msg->ua.ua_all) { 708 if (msg->ua.ua_set) 709 ctl_est_ua_all(lun, iid, msg->ua.ua_type); 710 else 711 ctl_clr_ua_all(lun, iid, msg->ua.ua_type); 712 } else { 713 if (msg->ua.ua_set) 714 ctl_est_ua(lun, iid, msg->ua.ua_type); 715 else 716 ctl_clr_ua(lun, iid, msg->ua.ua_type); 717 } 718 } 719} 720 721static void 722ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 723{ 724 struct ctl_lun *lun; 725 struct ctl_ha_msg_lun_pr_key pr_key; 726 int i, k; 727 728 lun = softc->ctl_luns[msg->hdr.nexus.targ_lun]; 729 if (lun == NULL) { 730 CTL_DEBUG_PRINT(("%s: Unknown LUN %d\n", __func__, 731 msg->hdr.nexus.targ_lun)); 732 } else { 733 mtx_lock(&lun->lun_lock); 734 i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0; 735 if (msg->lun.lun_devid_len != i || (i > 0 && 736 memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) { 737 mtx_unlock(&lun->lun_lock); 738 printf("%s: Received conflicting HA LUN %d\n", 739 __func__, msg->hdr.nexus.targ_lun); 740 return; 741 } else { 742 /* Record whether peer is primary. 
*/ 743 if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) && 744 (msg->lun.flags & CTL_LUN_DISABLED) == 0) 745 lun->flags |= CTL_LUN_PEER_SC_PRIMARY; 746 else 747 lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; 748 749 /* If peer is primary and we are not -- use data */ 750 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 751 (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) { 752 lun->PRGeneration = msg->lun.pr_generation; 753 lun->pr_res_idx = msg->lun.pr_res_idx; 754 lun->res_type = msg->lun.pr_res_type; 755 lun->pr_key_count = msg->lun.pr_key_count; 756 for (k = 0; k < CTL_MAX_INITIATORS; k++) 757 ctl_clr_prkey(lun, k); 758 for (k = 0; k < msg->lun.pr_key_count; k++) { 759 memcpy(&pr_key, &msg->lun.data[i], 760 sizeof(pr_key)); 761 ctl_alloc_prkey(lun, pr_key.pr_iid); 762 ctl_set_prkey(lun, pr_key.pr_iid, 763 pr_key.pr_key); 764 i += sizeof(pr_key); 765 } 766 } 767 768 mtx_unlock(&lun->lun_lock); 769 CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n", 770 __func__, msg->hdr.nexus.targ_lun, 771 (msg->lun.flags & CTL_LUN_PRIMARY_SC) ? 
772 "primary" : "secondary")); 773 774 /* If we are primary but peer doesn't know -- notify */ 775 if ((lun->flags & CTL_LUN_PRIMARY_SC) && 776 (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0) 777 ctl_isc_announce_lun(lun); 778 } 779 } 780} 781 782static void 783ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 784{ 785 struct ctl_port *port; 786 int i, new; 787 788 port = softc->ctl_ports[msg->hdr.nexus.targ_port]; 789 if (port == NULL) { 790 CTL_DEBUG_PRINT(("%s: New port %d\n", __func__, 791 msg->hdr.nexus.targ_port)); 792 new = 1; 793 port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO); 794 port->frontend = &ha_frontend; 795 port->targ_port = msg->hdr.nexus.targ_port; 796 } else if (port->frontend == &ha_frontend) { 797 CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__, 798 msg->hdr.nexus.targ_port)); 799 new = 0; 800 } else { 801 printf("%s: Received conflicting HA port %d\n", 802 __func__, msg->hdr.nexus.targ_port); 803 return; 804 } 805 port->port_type = msg->port.port_type; 806 port->physical_port = msg->port.physical_port; 807 port->virtual_port = msg->port.virtual_port; 808 port->status = msg->port.status; 809 i = 0; 810 free(port->port_name, M_CTL); 811 port->port_name = strndup(&msg->port.data[i], msg->port.name_len, 812 M_CTL); 813 i += msg->port.name_len; 814 if (msg->port.lun_map_len != 0) { 815 if (port->lun_map == NULL) 816 port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS, 817 M_CTL, M_WAITOK); 818 memcpy(port->lun_map, &msg->port.data[i], 819 sizeof(uint32_t) * CTL_MAX_LUNS); 820 i += msg->port.lun_map_len; 821 } else { 822 free(port->lun_map, M_CTL); 823 port->lun_map = NULL; 824 } 825 if (msg->port.port_devid_len != 0) { 826 if (port->port_devid == NULL || 827 port->port_devid->len != msg->port.port_devid_len) { 828 free(port->port_devid, M_CTL); 829 port->port_devid = malloc(sizeof(struct ctl_devid) + 830 msg->port.port_devid_len, M_CTL, M_WAITOK); 831 } 832 memcpy(port->port_devid->data, &msg->port.data[i], 
833 msg->port.port_devid_len); 834 port->port_devid->len = msg->port.port_devid_len; 835 i += msg->port.port_devid_len; 836 } else { 837 free(port->port_devid, M_CTL); 838 port->port_devid = NULL; 839 } 840 if (msg->port.target_devid_len != 0) { 841 if (port->target_devid == NULL || 842 port->target_devid->len != msg->port.target_devid_len) { 843 free(port->target_devid, M_CTL); 844 port->target_devid = malloc(sizeof(struct ctl_devid) + 845 msg->port.target_devid_len, M_CTL, M_WAITOK); 846 } 847 memcpy(port->target_devid->data, &msg->port.data[i], 848 msg->port.target_devid_len); 849 port->target_devid->len = msg->port.target_devid_len; 850 i += msg->port.target_devid_len; 851 } else { 852 free(port->port_devid, M_CTL); 853 port->port_devid = NULL; 854 } 855 if (new) { 856 if (ctl_port_register(port) != 0) { 857 printf("%s: ctl_port_register() failed with error\n", 858 __func__); 859 } 860 } 861} 862 |
863/* 864 * ISC (Inter Shelf Communication) event handler. Events from the HA 865 * subsystem come in here. 866 */ 867static void 868ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param) 869{ 870 struct ctl_softc *softc; 871 union ctl_io *io; 872 struct ctl_prio *presio; 873 ctl_ha_status isc_status; 874 875 softc = control_softc; |
876 CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event)); |
877 if (event == CTL_HA_EVT_MSG_RECV) { |
878 union ctl_ha_msg *msg, msgbuf; |
879 |
880 if (param > sizeof(msgbuf)) 881 msg = malloc(param, M_CTL, M_WAITOK); 882 else 883 msg = &msgbuf; 884 isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param, 885 M_WAITOK); 886 if (isc_status != CTL_HA_STATUS_SUCCESS) { 887 printf("%s: Error receiving message: %d\n", 888 __func__, isc_status); 889 if (msg != &msgbuf) 890 free(msg, M_CTL); |
891 return; 892 } 893 |
894 CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->msg_type)); 895 switch (msg->hdr.msg_type) { |
896 case CTL_MSG_SERIALIZE: |
897 io = ctl_alloc_io(softc->othersc_pool); |
898 ctl_zero_io(io); |
899 // populate ctsio from msg |
900 io->io_hdr.io_type = CTL_IO_SCSI; 901 io->io_hdr.msg_type = CTL_MSG_SERIALIZE; |
902 io->io_hdr.original_sc = msg->hdr.original_sc; |
903 io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC | 904 CTL_FLAG_IO_ACTIVE; 905 /* 906 * If we're in serialization-only mode, we don't 907 * want to go through full done processing. Thus 908 * the COPY flag. 909 * 910 * XXX KDM add another flag that is more specific. 911 */ |
912 if (softc->ha_mode != CTL_HA_MODE_XFER) |
913 io->io_hdr.flags |= CTL_FLAG_INT_COPY; |
914 io->io_hdr.nexus = msg->hdr.nexus; |
915#if 0 916 printf("port %u, iid %u, lun %u\n", 917 io->io_hdr.nexus.targ_port, 918 io->io_hdr.nexus.initid, 919 io->io_hdr.nexus.targ_lun); 920#endif |
921 io->scsiio.tag_num = msg->scsi.tag_num; 922 io->scsiio.tag_type = msg->scsi.tag_type; 923#ifdef CTL_TIME_IO 924 io->io_hdr.start_time = time_uptime; 925 getbintime(&io->io_hdr.start_bt); 926#endif /* CTL_TIME_IO */ 927 io->scsiio.cdb_len = msg->scsi.cdb_len; 928 memcpy(io->scsiio.cdb, msg->scsi.cdb, |
929 CTL_MAX_CDBLEN); 930 if (softc->ha_mode == CTL_HA_MODE_XFER) { 931 const struct ctl_cmd_entry *entry; 932 933 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 934 io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 935 io->io_hdr.flags |= 936 entry->flags & CTL_FLAG_DATA_MASK; 937 } 938 ctl_enqueue_isc(io); 939 break; 940 941 /* Performed on the Originating SC, XFER mode only */ 942 case CTL_MSG_DATAMOVE: { 943 struct ctl_sg_entry *sgl; 944 int i, j; 945 |
946 io = msg->hdr.original_sc; |
947 if (io == NULL) { 948 printf("%s: original_sc == NULL!\n", __func__); 949 /* XXX KDM do something here */ 950 break; 951 } 952 io->io_hdr.msg_type = CTL_MSG_DATAMOVE; 953 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 954 /* 955 * Keep track of this, we need to send it back over 956 * when the datamove is complete. 957 */ |
958 io->io_hdr.serializing_sc = msg->hdr.serializing_sc; |
959 |
960 if (msg->dt.sg_sequence == 0) { 961 i = msg->dt.kern_sg_entries + 962 io->scsiio.kern_data_len / 963 CTL_HA_DATAMOVE_SEGMENT + 1; 964 sgl = malloc(sizeof(*sgl) * i, M_CTL, 965 M_WAITOK | M_ZERO); 966 io->io_hdr.remote_sglist = sgl; 967 io->io_hdr.local_sglist = 968 &sgl[msg->dt.kern_sg_entries]; |
969 970 io->scsiio.kern_data_ptr = (uint8_t *)sgl; 971 972 io->scsiio.kern_sg_entries = |
973 msg->dt.kern_sg_entries; |
974 io->scsiio.rem_sg_entries = |
975 msg->dt.kern_sg_entries; |
976 io->scsiio.kern_data_len = |
977 msg->dt.kern_data_len; |
978 io->scsiio.kern_total_len = |
979 msg->dt.kern_total_len; |
980 io->scsiio.kern_data_resid = |
981 msg->dt.kern_data_resid; |
982 io->scsiio.kern_rel_offset = |
983 msg->dt.kern_rel_offset; 984 io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR; 985 io->io_hdr.flags |= msg->dt.flags & 986 CTL_FLAG_BUS_ADDR; |
987 } else 988 sgl = (struct ctl_sg_entry *) 989 io->scsiio.kern_data_ptr; 990 |
991 for (i = msg->dt.sent_sg_entries, j = 0; 992 i < (msg->dt.sent_sg_entries + 993 msg->dt.cur_sg_entries); i++, j++) { 994 sgl[i].addr = msg->dt.sg_list[j].addr; 995 sgl[i].len = msg->dt.sg_list[j].len; |
996 997#if 0 998 printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n", 999 __func__, |
1000 msg->dt.sg_list[j].addr, 1001 msg->dt.sg_list[j].len, |
1002 sgl[i].addr, sgl[i].len, j, i); 1003#endif 1004 } |
1005 1006 /* 1007 * If this is the last piece of the I/O, we've got 1008 * the full S/G list. Queue processing in the thread. 1009 * Otherwise wait for the next piece. 1010 */ |
1011 if (msg->dt.sg_last != 0) |
1012 ctl_enqueue_isc(io); 1013 break; 1014 } 1015 /* Performed on the Serializing (primary) SC, XFER mode only */ 1016 case CTL_MSG_DATAMOVE_DONE: { |
1017 if (msg->hdr.serializing_sc == NULL) { |
1018 printf("%s: serializing_sc == NULL!\n", 1019 __func__); 1020 /* XXX KDM now what? */ 1021 break; 1022 } 1023 /* 1024 * We grab the sense information here in case 1025 * there was a failure, so we can return status 1026 * back to the initiator. 1027 */ |
1028 io = msg->hdr.serializing_sc; |
1029 io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE; |
1030 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1031 io->io_hdr.port_status = msg->scsi.fetd_status; 1032 io->scsiio.residual = msg->scsi.residual; 1033 if (msg->hdr.status != CTL_STATUS_NONE) { 1034 io->io_hdr.status = msg->hdr.status; 1035 io->scsiio.scsi_status = msg->scsi.scsi_status; 1036 io->scsiio.sense_len = msg->scsi.sense_len; 1037 io->scsiio.sense_residual =msg->scsi.sense_residual; 1038 memcpy(&io->scsiio.sense_data, 1039 &msg->scsi.sense_data, 1040 msg->scsi.sense_len); 1041 } |
1042 ctl_enqueue_isc(io); 1043 break; 1044 } 1045 1046 /* Preformed on Originating SC, SER_ONLY mode */ 1047 case CTL_MSG_R2R: |
1048 io = msg->hdr.original_sc; |
1049 if (io == NULL) { |
1050 printf("%s: original_sc == NULL!\n", 1051 __func__); 1052 break; |
1053 } |
1054 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; |
1055 io->io_hdr.msg_type = CTL_MSG_R2R; |
1056 io->io_hdr.serializing_sc = msg->hdr.serializing_sc; |
1057 ctl_enqueue_isc(io); 1058 break; 1059 1060 /* 1061 * Performed on Serializing(i.e. primary SC) SC in SER_ONLY 1062 * mode. 1063 * Performed on the Originating (i.e. secondary) SC in XFER 1064 * mode 1065 */ 1066 case CTL_MSG_FINISH_IO: 1067 if (softc->ha_mode == CTL_HA_MODE_XFER) |
1068 ctl_isc_handler_finish_xfer(softc, msg); |
1069 else |
1070 ctl_isc_handler_finish_ser_only(softc, msg); |
1071 break; 1072 1073 /* Preformed on Originating SC */ 1074 case CTL_MSG_BAD_JUJU: |
1075 io = msg->hdr.original_sc; |
1076 if (io == NULL) { 1077 printf("%s: Bad JUJU!, original_sc is NULL!\n", 1078 __func__); 1079 break; 1080 } |
1081 ctl_copy_sense_data(msg, io); |
1082 /* 1083 * IO should have already been cleaned up on other 1084 * SC so clear this flag so we won't send a message 1085 * back to finish the IO there. 1086 */ 1087 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 1088 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1089 |
1090 /* io = msg->hdr.serializing_sc; */ |
1091 io->io_hdr.msg_type = CTL_MSG_BAD_JUJU; 1092 ctl_enqueue_isc(io); 1093 break; 1094 1095 /* Handle resets sent from the other side */ 1096 case CTL_MSG_MANAGE_TASKS: { 1097 struct ctl_taskio *taskio; |
1098 taskio = (struct ctl_taskio *)ctl_alloc_io( |
1099 softc->othersc_pool); |
1100 ctl_zero_io((union ctl_io *)taskio); 1101 taskio->io_hdr.io_type = CTL_IO_TASK; 1102 taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; |
1103 taskio->io_hdr.nexus = msg->hdr.nexus; 1104 taskio->task_action = msg->task.task_action; 1105 taskio->tag_num = msg->task.tag_num; 1106 taskio->tag_type = msg->task.tag_type; |
1107#ifdef CTL_TIME_IO 1108 taskio->io_hdr.start_time = time_uptime; 1109 getbintime(&taskio->io_hdr.start_bt); |
1110#endif /* CTL_TIME_IO */ 1111 ctl_run_task((union ctl_io *)taskio); 1112 break; 1113 } 1114 /* Persistent Reserve action which needs attention */ 1115 case CTL_MSG_PERS_ACTION: |
1116 presio = (struct ctl_prio *)ctl_alloc_io( |
1117 softc->othersc_pool); |
1118 ctl_zero_io((union ctl_io *)presio); 1119 presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION; |
1120 presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 1121 presio->io_hdr.nexus = msg->hdr.nexus; 1122 presio->pr_msg = msg->pr; |
1123 ctl_enqueue_isc((union ctl_io *)presio); 1124 break; |
1125 case CTL_MSG_UA: 1126 ctl_isc_ua(softc, msg, param); |
1127 break; |
1128 case CTL_MSG_PORT_SYNC: 1129 ctl_isc_port_sync(softc, msg, param); 1130 break; 1131 case CTL_MSG_LUN_SYNC: 1132 ctl_isc_lun_sync(softc, msg, param); 1133 break; |
1134 default: |
1135 printf("Received HA message of unknown type %d\n", 1136 msg->hdr.msg_type); 1137 break; |
1138 } |
1139 if (msg != &msgbuf) 1140 free(msg, M_CTL); 1141 } else if (event == CTL_HA_EVT_LINK_CHANGE) { 1142 printf("CTL: HA link status changed from %d to %d\n", 1143 softc->ha_link, param); 1144 if (param == softc->ha_link) 1145 return; 1146 if (softc->ha_link == CTL_HA_LINK_ONLINE) { 1147 softc->ha_link = param; 1148 ctl_isc_ha_link_down(softc); 1149 } else { 1150 softc->ha_link = param; 1151 if (softc->ha_link == CTL_HA_LINK_ONLINE) 1152 ctl_isc_ha_link_up(softc); |
1153 } 1154 return; |
1155 } else { 1156 printf("ctl_isc_event_handler: Unknown event %d\n", event); 1157 return; 1158 } |
1159} 1160 1161static void 1162ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest) 1163{ |
1164 |
1165 memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data, 1166 src->scsi.sense_len); |
1167 dest->scsiio.scsi_status = src->scsi.scsi_status; 1168 dest->scsiio.sense_len = src->scsi.sense_len; 1169 dest->io_hdr.status = src->hdr.status; 1170} |

/*
 * Inverse of ctl_copy_sense_data(): marshal a ctl_io's SCSI status and
 * sense bytes into an outgoing HA message.
 */
static void
ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest)
{

	memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data,
	    src->scsiio.sense_len);
	dest->scsi.scsi_status = src->scsiio.scsi_status;
	dest->scsi.sense_len = src->scsiio.sense_len;
	dest->hdr.status = src->io_hdr.status;
}

/*
 * Establish (set) a pending Unit Attention `ua` for one initiator index
 * on this LUN.  Silently ignores initiators outside this head's range.
 * Caller must hold lun->lun_lock.
 */
static void
ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
	struct ctl_softc *softc = lun->ctl_softc;
	ctl_ua_type *pu;

	if (initidx < softc->init_min || initidx >= softc->init_max)
		return;
	mtx_assert(&lun->lun_lock, MA_OWNED);
	/* pending_ua is a per-port array of per-initiator UA bitmaps. */
	pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
	if (pu == NULL)
		return;
	pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua;
}

/*
 * Establish `ua` for every initiator on this head's ports except the
 * initiator index `except`.  Caller must hold lun->lun_lock.
 */
static void
ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
	struct ctl_softc *softc = lun->ctl_softc;
	int i, j;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	for (i = softc->port_min; i < softc->port_max; i++) {
		if (lun->pending_ua[i] == NULL)
			continue;
		for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
			if (i * CTL_MAX_INIT_PER_PORT + j == except)
				continue;
			lun->pending_ua[i][j] |= ua;
		}
	}
}

/*
 * Clear a pending Unit Attention `ua` for one initiator index on this
 * LUN.  Caller must hold lun->lun_lock.
 */
static void
ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
	struct ctl_softc *softc = lun->ctl_softc;
	ctl_ua_type *pu;

	if (initidx < softc->init_min || initidx >= softc->init_max)
		return;
	mtx_assert(&lun->lun_lock, MA_OWNED);
	pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
	if (pu == NULL)
		return;
	pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua;
}

/*
 * Clear `ua` for every initiator on this head's ports except `except`.
 * Caller must hold lun->lun_lock.
 */
static void
ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
	struct ctl_softc *softc = lun->ctl_softc;
	int i, j;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	for (i = softc->port_min; i < softc->port_max; i++) {
1239 if (lun->pending_ua[i] == NULL) 1240 continue; 1241 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 1242 if (i * CTL_MAX_INIT_PER_PORT + j == except) 1243 continue; 1244 lun->pending_ua[i][j] &= ~ua; 1245 } 1246 } --- 9 unchanged lines hidden (view full) --- 1256 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) { 1257 mtx_lock(&lun->lun_lock); 1258 ctl_clr_ua(lun, initidx, ua_type); 1259 mtx_unlock(&lun->lun_lock); 1260 } 1261} 1262 1263static int |
/*
 * sysctl handler for kern.cam.ctl.ha_role: 0 selects the primary
 * (active-shelf) role, 1 the secondary.  On a role change, every LUN's
 * backend is asked to re-apply its configuration via CTL_LUNREQ_MODIFY.
 */
ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg1;
	struct ctl_lun *lun;
	struct ctl_lun_req ireq;
	int error, value;

	/* Exported value is inverted: 0 == ACTIVE_SHELF (primary). */
	value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 0 : 1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if ((error != 0) || (req->newptr == NULL))
		return (error);

	mtx_lock(&softc->ctl_lock);
	if (value == 0)
		softc->flags |= CTL_FLAG_ACTIVE_SHELF;
	else
		softc->flags &= ~CTL_FLAG_ACTIVE_SHELF;
	/*
	 * NOTE(review): ctl_lock is dropped around the backend ioctl (it
	 * may sleep) while the STAILQ iteration continues on `lun`; this
	 * assumes no LUN is removed concurrently — confirm against the
	 * LUN teardown path.
	 */
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		mtx_unlock(&softc->ctl_lock);
		bzero(&ireq, sizeof(ireq));
		ireq.reqtype = CTL_LUNREQ_MODIFY;
		ireq.reqdata.modify.lun_id = lun->lun;
		lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0,
		    curthread);
		if (ireq.status != CTL_LUN_OK) {
			printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n",
			    __func__, ireq.status, ireq.error_str);
		}
		mtx_lock(&softc->ctl_lock);
	}
	mtx_unlock(&softc->ctl_lock);
	return (0);
}

/*
 * Module load-time initialization: create /dev/cam/ctl, sysctl tree,
 * locks, pools and worker threads.
 */
static int
ctl_init(void)
{
	struct ctl_softc *softc;
	void *other_pool;
	int i, error, retval;
1304 1305 retval = 0; |
1306 control_softc = malloc(sizeof(*control_softc), M_DEVBUF, 1307 M_WAITOK | M_ZERO); 1308 softc = control_softc; 1309 1310 softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, 1311 "cam/ctl"); 1312 1313 softc->dev->si_drv1 = softc; 1314 |
1315 sysctl_ctx_init(&softc->sysctl_ctx); 1316 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, 1317 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", 1318 CTLFLAG_RD, 0, "CAM Target Layer"); 1319 1320 if (softc->sysctl_tree == NULL) { 1321 printf("%s: unable to allocate sysctl tree\n", __func__); 1322 destroy_dev(softc->dev); 1323 free(control_softc, M_DEVBUF); 1324 control_softc = NULL; 1325 return (ENOMEM); 1326 } 1327 |
1328 mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); 1329 softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), 1330 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 1331 softc->open_count = 0; 1332 1333 /* 1334 * Default to actually sending a SYNCHRONIZE CACHE command down to 1335 * the drive. 1336 */ 1337 softc->flags = CTL_FLAG_REAL_SYNC; 1338 |
1339 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1340 OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0, 1341 "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)"); 1342 |
1343 /* 1344 * In Copan's HA scheme, the "master" and "slave" roles are 1345 * figured out through the slot the controller is in. Although it 1346 * is an active/active system, someone has to be in charge. 1347 */ 1348 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1349 OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, 1350 "HA head ID (0 - no HA)"); |
1351 if (softc->ha_id == 0 || softc->ha_id > NUM_TARGET_PORT_GROUPS) { |
1352 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1353 softc->is_single = 1; |
1354 softc->port_cnt = CTL_MAX_PORTS; 1355 softc->port_min = 0; 1356 } else { 1357 softc->port_cnt = CTL_MAX_PORTS / NUM_TARGET_PORT_GROUPS; 1358 softc->port_min = (softc->ha_id - 1) * softc->port_cnt; 1359 } 1360 softc->port_max = softc->port_min + softc->port_cnt; 1361 softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT; 1362 softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT; |
1363 |
1364 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1365 OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0, 1366 "HA link state (0 - offline, 1 - unknown, 2 - online)"); 1367 |
1368 STAILQ_INIT(&softc->lun_list); 1369 STAILQ_INIT(&softc->pending_lun_queue); 1370 STAILQ_INIT(&softc->fe_list); 1371 STAILQ_INIT(&softc->port_list); 1372 STAILQ_INIT(&softc->be_list); 1373 ctl_tpc_init(softc); 1374 1375 if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC, --- 39 unchanged lines hidden (view full) --- 1415 &softc->ctl_proc, NULL, 0, 0, "ctl", "thresh"); 1416 if (error != 0) { 1417 printf("error creating CTL threshold thread!\n"); 1418 ctl_pool_free(other_pool); 1419 return (error); 1420 } 1421 1422 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), |
1423 OID_AUTO, "ha_role", CTLTYPE_INT | CTLFLAG_RWTUN, 1424 softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head"); 1425 1426 if (softc->is_single == 0) { 1427 ctl_frontend_register(&ha_frontend); 1428 if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) { 1429 printf("ctl_init: ctl_ha_msg_init failed.\n"); 1430 softc->is_single = 1; 1431 } else 1432 if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler) 1433 != CTL_HA_STATUS_SUCCESS) { 1434 printf("ctl_init: ctl_ha_msg_register failed.\n"); 1435 softc->is_single = 1; 1436 } 1437 } |
1438 return (0); 1439} 1440 1441void 1442ctl_shutdown(void) 1443{ 1444 struct ctl_softc *softc; 1445 struct ctl_lun *lun, *next_lun; 1446 1447 softc = (struct ctl_softc *)control_softc; 1448 |
1449 if (softc->is_single == 0) { 1450 if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) 1451 != CTL_HA_STATUS_SUCCESS) { 1452 printf("ctl_shutdown: ctl_ha_msg_deregister failed.\n"); 1453 } 1454 if (ctl_ha_msg_shutdown(softc) != CTL_HA_STATUS_SUCCESS) { 1455 printf("ctl_shutdown: ctl_ha_msg_shutdown failed.\n"); 1456 } 1457 ctl_frontend_deregister(&ha_frontend); 1458 } 1459 |
1460 mtx_lock(&softc->ctl_lock); 1461 1462 /* 1463 * Free up each LUN. 1464 */ 1465 for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){ 1466 next_lun = STAILQ_NEXT(lun, links); 1467 ctl_free_lun(lun); --- 43 unchanged lines hidden (view full) --- 1511} 1512 1513static int 1514ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) 1515{ 1516 return (0); 1517} 1518 |
1519/* |
1520 * Remove an initiator by port number and initiator ID. 1521 * Returns 0 for success, -1 for failure. 1522 */ 1523int 1524ctl_remove_initiator(struct ctl_port *port, int iid) 1525{ 1526 struct ctl_softc *softc = control_softc; 1527 --- 192 unchanged lines hidden (view full) --- 1720 * (SER_ONLY mode). 1721 */ 1722static int 1723ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio) 1724{ 1725 struct ctl_softc *softc; 1726 union ctl_ha_msg msg_info; 1727 struct ctl_lun *lun; |
	/* Command table entry for the CDB, used for LUN-level checks below. */
	const struct ctl_cmd_entry *entry;
	int retval = 0;
	uint32_t targ_lun;

	softc = control_softc;

	/* Look up the (already peer-mapped) LUN this command is addressed to. */
	targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
	if ((targ_lun < CTL_MAX_LUNS) &&
	    ((lun = softc->ctl_luns[targ_lun]) != NULL)) {
		/*
		 * If the LUN is invalid, pretend that it doesn't exist.
		 * It will go away as soon as all pending I/O has been
		 * completed.
		 */
		mtx_lock(&lun->lun_lock);
		if (lun->flags & CTL_LUN_DISABLED) {
			mtx_unlock(&lun->lun_lock);
			lun = NULL;
		}
	} else
		lun = NULL;
	if (lun == NULL) {
		/*
		 * Why isn't LUN defined? The other side wouldn't
		 * send a cmd if the LUN is undefined.
		 */
		printf("%s: Bad JUJU!, LUN is NULL!\n", __func__);

		/*
		 * Complete the command locally with LOGICAL UNIT NOT
		 * SUPPORTED and bounce the sense data back to the peer
		 * in a CTL_MSG_BAD_JUJU message.
		 */
		ctl_set_unsupported_lun(ctsio);
		ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.scsi), M_WAITOK);
		return(1);
	}

	/*
	 * Run the LUN-level checks (reservations, unit attentions, etc.);
	 * on failure, report the resulting status/sense to the peer the
	 * same way as above.  Note: lun_lock is held from the lookup above
	 * and released on this path.
	 */
	entry = ctl_get_cmd_entry(ctsio, NULL);
	if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
		mtx_unlock(&lun->lun_lock);
		ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.scsi), M_WAITOK);
		return(1);
	}

	ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
	ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = lun->be_lun;

	/*
	 * Every I/O goes into the OOA queue for a
	 * particular LUN, and stays there until completion.
	 */
#ifdef CTL_TIME_IO
	if (TAILQ_EMPTY(&lun->ooa_queue))
		lun->idle_time += getsbinuptime() - lun->last_busy;
#endif
	TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);

	/*
	 * Check this command against the rest of the OOA queue (everything
	 * queued ahead of it) and act on the serialization verdict.
	 */
	switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
		(union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq,
		 ooa_links))) {
	case CTL_ACTION_BLOCK:
		/*
		 * Conflicts with an earlier command: leave it on the OOA
		 * queue and park it on the blocked queue; retval stays 0.
		 */
		ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
		TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
				  blocked_links);
		mtx_unlock(&lun->lun_lock);
		break;
	case CTL_ACTION_PASS:
	case CTL_ACTION_SKIP:
		if (softc->ha_mode == CTL_HA_MODE_XFER) {
			/* XFER mode: run the command on this side. */
			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
			ctl_enqueue_rtr((union ctl_io *)ctsio);
			mtx_unlock(&lun->lun_lock);
		} else {
			ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
			mtx_unlock(&lun->lun_lock);

			/* send msg back to other side */
			msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
			msg_info.hdr.serializing_sc = (union ctl_io *)ctsio;
			msg_info.hdr.msg_type = CTL_MSG_R2R;
			ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
			    sizeof(msg_info.hdr), M_WAITOK);
		}
		break;
	case CTL_ACTION_OVERLAP:
		/* Overlapped command: fail it back to the peer. */
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		mtx_unlock(&lun->lun_lock);
		retval = 1;

		ctl_set_overlapped_cmd(ctsio);
		ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.scsi), M_WAITOK);
		break;
	case CTL_ACTION_OVERLAP_TAG:
		/* Overlapped tagged command: fail it back to the peer. */
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		mtx_unlock(&lun->lun_lock);
		retval = 1;
		ctl_set_overlapped_tag(ctsio, ctsio->tag_num);
		ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.scsi), M_WAITOK);
		break;
	case CTL_ACTION_ERROR:
	default:
		/* Serialization error: report internal failure to the peer. */
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		mtx_unlock(&lun->lun_lock);
		retval = 1;

		ctl_set_internal_failure(ctsio, /*sks_valid*/ 0,
					 /*retry_count*/ 0);
		ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.scsi), M_WAITOK);
		break;
	}
1859 return (retval); 1860} 1861 1862/* 1863 * Returns 0 for success, errno for failure. 1864 */ 1865static int 1866ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, --- 244 unchanged lines hidden (view full) --- 2111 struct ctl_port_entry *entry; 2112 2113 entry = (struct ctl_port_entry *)addr; 2114 2115 mtx_lock(&softc->ctl_lock); 2116 STAILQ_FOREACH(port, &softc->port_list, links) { 2117 int action, done; 2118 |
2119 if (port->targ_port < softc->port_min || 2120 port->targ_port >= softc->port_max) 2121 continue; 2122 |
2123 action = 0; 2124 done = 0; |
2125 if ((entry->port_type == CTL_PORT_NONE) 2126 && (entry->targ_port == port->targ_port)) { 2127 /* 2128 * If the user only wants to enable or 2129 * disable or set WWNs on a specific port, 2130 * do the operation and we're done. 2131 */ 2132 action = 1; --- 13 unchanged lines hidden (view full) --- 2146 */ 2147 if (cmd == CTL_SET_PORT_WWNS) { 2148 printf("%s: Can't set WWNs on " 2149 "multiple ports\n", __func__); 2150 retval = EINVAL; 2151 break; 2152 } 2153 } |
2154 if (action == 0) 2155 continue; |
2156 |
2157 /* 2158 * XXX KDM we have to drop the lock here, because 2159 * the online/offline operations can potentially 2160 * block. We need to reference count the frontends 2161 * so they can't go away, 2162 */ 2163 if (cmd == CTL_ENABLE_PORT) { 2164 mtx_unlock(&softc->ctl_lock); 2165 ctl_port_online(port); |
2166 mtx_lock(&softc->ctl_lock); |
2167 } else if (cmd == CTL_DISABLE_PORT) { 2168 mtx_unlock(&softc->ctl_lock); 2169 ctl_port_offline(port); 2170 mtx_lock(&softc->ctl_lock); 2171 } else if (cmd == CTL_SET_PORT_WWNS) { 2172 ctl_port_set_wwns(port, 2173 (entry->flags & CTL_PORT_WWNN_VALID) ? 2174 1 : 0, entry->wwnn, 2175 (entry->flags & CTL_PORT_WWPN_VALID) ? 2176 1 : 0, entry->wwpn); |
2177 } 2178 if (done != 0) 2179 break; 2180 } 2181 mtx_unlock(&softc->ctl_lock); 2182 break; 2183 } 2184 case CTL_GET_PORT_LIST: { --- 481 unchanged lines hidden (view full) --- 2666 struct ctl_lun *lun; 2667 2668 lun = softc->ctl_luns[i]; 2669 2670 if ((lun == NULL) 2671 || ((lun->flags & CTL_LUN_DISABLED) != 0)) 2672 continue; 2673 |
2674 for (j = 0; j < CTL_MAX_PORTS; j++) { |
2675 if (lun->pr_keys[j] == NULL) 2676 continue; 2677 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2678 if (lun->pr_keys[j][k] == 0) 2679 continue; 2680 printf(" LUN %d port %d iid %d key " 2681 "%#jx\n", i, j, k, 2682 (uintmax_t)lun->pr_keys[j][k]); --- 275 unchanged lines hidden (view full) --- 2958 req->kern_args = ctl_copyin_args(req->num_args, 2959 req->args, req->error_str, sizeof(req->error_str)); 2960 if (req->kern_args == NULL) { 2961 req->status = CTL_LUN_ERROR; 2962 break; 2963 } 2964 } 2965 |
2966 if (fe->ioctl) 2967 retval = fe->ioctl(dev, cmd, addr, flag, td); 2968 else 2969 retval = ENODEV; |
2970 2971 if (req->num_args > 0) { 2972 ctl_copyout_args(req->num_args, req->kern_args); 2973 ctl_free_args(req->num_args, req->kern_args); 2974 } 2975 break; 2976 } 2977 case CTL_PORT_LIST: { --- 142 unchanged lines hidden (view full) --- 3120 sbuf_delete(sb); 3121 break; 3122 } 3123 case CTL_LUN_MAP: { 3124 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; 3125 struct ctl_port *port; 3126 3127 mtx_lock(&softc->ctl_lock); |
3128 if (lm->port < softc->port_min || 3129 lm->port >= softc->port_max || |
3130 (port = softc->ctl_ports[lm->port]) == NULL) { 3131 mtx_unlock(&softc->ctl_lock); 3132 return (ENXIO); 3133 } 3134 mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps 3135 if (lm->plun < CTL_MAX_LUNS) { 3136 if (lm->lun == UINT32_MAX) 3137 retval = ctl_lun_map_unset(port, lm->plun); --- 46 unchanged lines hidden (view full) --- 3184 } 3185 } 3186 return (retval); 3187} 3188 3189uint32_t 3190ctl_get_initindex(struct ctl_nexus *nexus) 3191{ |
	/* Flatten (port, initiator) into a single global initiator index. */
	return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT));
}

/*
 * Allocate the per-port LUN map and reset every entry to UINT32_MAX
 * (unmapped), i.e. hide all LUNs from this port.  If the port is already
 * online, invoke its lun_disable callback for every existing LUN and
 * re-announce the port.  Returns 0 on success or ENOMEM if the map
 * could not be allocated (M_NOWAIT).
 */
int
ctl_lun_map_init(struct ctl_port *port)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_lun *lun;
	uint32_t i;

	if (port->lun_map == NULL)
		port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS,
		    M_CTL, M_NOWAIT);
	if (port->lun_map == NULL)
		return (ENOMEM);
	for (i = 0; i < CTL_MAX_LUNS; i++)
		port->lun_map[i] = UINT32_MAX;
	if (port->status & CTL_PORT_STATUS_ONLINE) {
		if (port->lun_disable != NULL) {
			STAILQ_FOREACH(lun, &softc->lun_list, links)
				port->lun_disable(port->targ_lun_arg, lun->lun);
		}
		ctl_isc_announce_port(port);
	}
	return (0);
}

/*
 * Free the per-port LUN map, making all LUNs visible on this port again.
 * If the port is online, invoke its lun_enable callback for every LUN
 * and re-announce the port.  Always returns 0.
 */
int
ctl_lun_map_deinit(struct ctl_port *port)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_lun *lun;

	if (port->lun_map == NULL)
		return (0);
	free(port->lun_map, M_CTL);
	port->lun_map = NULL;
	if (port->status & CTL_PORT_STATUS_ONLINE) {
		if (port->lun_enable != NULL) {
			STAILQ_FOREACH(lun, &softc->lun_list, links)
				port->lun_enable(port->targ_lun_arg, lun->lun);
		}
		ctl_isc_announce_port(port);
	}
	return (0);
}

/*
 * Map port-relative LUN number plun to global LUN number glun, creating
 * the map on first use.  If the mapping goes from unmapped (>= CTL_MAX_LUNS)
 * to mapped on an online port, enable the LUN on the port and re-announce
 * it.  Returns 0 on success or the error from ctl_lun_map_init().
 */
int
ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun)
{
	int status;
	uint32_t old;

	if (port->lun_map == NULL) {
		status = ctl_lun_map_init(port);
		if (status != 0)
			return (status);
	}
	old = port->lun_map[plun];
	port->lun_map[plun] = glun;
	if ((port->status & CTL_PORT_STATUS_ONLINE) && old >= CTL_MAX_LUNS) {
		if (port->lun_enable != NULL)
			port->lun_enable(port->targ_lun_arg, plun);
		ctl_isc_announce_port(port);
	}
	return (0);
}

/*
 * Remove the mapping for port-relative LUN number plun (set it back to
 * UINT32_MAX).  If it was previously mapped on an online port, disable
 * the LUN on the port and re-announce it.
 */
int
ctl_lun_map_unset(struct ctl_port *port, uint32_t plun)
{
	uint32_t old;

	if (port->lun_map == NULL)
		return (0);
	old = port->lun_map[plun];
	port->lun_map[plun] = UINT32_MAX;
	if ((port->status & CTL_PORT_STATUS_ONLINE) && old < CTL_MAX_LUNS) {
		if (port->lun_disable != NULL)
			port->lun_disable(port->targ_lun_arg, plun);
		ctl_isc_announce_port(port);
	}
3274 return (0); 3275} 3276 3277uint32_t 3278ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id) 3279{ 3280 3281 if (port == NULL) --- 17 unchanged lines hidden (view full) --- 3299 return (i); 3300 } 3301 return (UINT32_MAX); 3302} 3303 3304static struct ctl_port * 3305ctl_io_port(struct ctl_io_hdr *io_hdr) 3306{ |

	/* Resolve the I/O's nexus target port to its ctl_port structure. */
	return (control_softc->ctl_ports[io_hdr->nexus.targ_port]);
}

/*
 * Find the first zero bit in the bitmap 'mask' within the half-open
 * index range [first, last).  Bit i lives in word i / 32, position
 * i % 32.  Falls through to the return (-1) below when every bit in
 * the range is set.
 */
int
ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last)
{
	int i;

	for (i = first; i < last; i++) {
		if ((mask[i / 32] & (1 << (i % 32))) == 0)
			return (i);
	}
3320 return (-1); 3321} 3322 3323int 3324ctl_set_mask(uint32_t *mask, uint32_t bit) 3325{ 3326 uint32_t chunk, piece; 3327 --- 882 unchanged lines hidden (view full) --- 4210 if (lun->flags & CTL_LUN_MALLOCED) 4211 free(lun, M_CTL); 4212 be_lun->lun_config_status(be_lun->be_lun, 4213 CTL_LUN_CONFIG_FAILURE); 4214 return (ENOSPC); 4215 } 4216 lun_number = be_lun->req_lun_id; 4217 } else { |
4218 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, CTL_MAX_LUNS); |
4219 if (lun_number == -1) { 4220 mtx_unlock(&ctl_softc->ctl_lock); 4221 printf("ctl: can't allocate LUN, out of LUNs\n"); 4222 if (lun->flags & CTL_LUN_MALLOCED) 4223 free(lun, M_CTL); 4224 be_lun->lun_config_status(be_lun->be_lun, 4225 CTL_LUN_CONFIG_FAILURE); 4226 return (ENOSPC); --- 106 unchanged lines hidden (view full) --- 4333 atomic_subtract_int(&lun->be_lun->be->num_luns, 1); 4334 lun->be_lun->lun_shutdown(lun->be_lun->be_lun); 4335 4336 ctl_tpc_lun_shutdown(lun); 4337 mtx_destroy(&lun->lun_lock); 4338 free(lun->lun_devid, M_CTL); 4339 for (i = 0; i < CTL_MAX_PORTS; i++) 4340 free(lun->pending_ua[i], M_CTL); |
4341 for (i = 0; i < CTL_MAX_PORTS; i++) |
4342 free(lun->pr_keys[i], M_CTL); 4343 free(lun->write_buffer, M_CTL); 4344 if (lun->flags & CTL_LUN_MALLOCED) 4345 free(lun, M_CTL); 4346 4347 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4348 mtx_lock(&nlun->lun_lock); 4349 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); --- 72 unchanged lines hidden (view full) --- 4422 printf("%s: FETD %s port %d returned error " 4423 "%d for lun_enable on lun %jd\n", 4424 __func__, port->port_name, port->targ_port, 4425 retval, (intmax_t)lun->lun); 4426 } 4427 } 4428 4429 mtx_unlock(&softc->ctl_lock); |
4430 ctl_isc_announce_lun(lun); |
4431 4432 return (0); 4433} 4434 4435int 4436ctl_disable_lun(struct ctl_be_lun *be_lun) 4437{ 4438 struct ctl_softc *softc; --- 33 unchanged lines hidden (view full) --- 4472 printf("%s: FETD %s port %d returned error " 4473 "%d for lun_disable on lun %jd\n", 4474 __func__, port->port_name, port->targ_port, 4475 retval, (intmax_t)lun->lun); 4476 } 4477 } 4478 4479 mtx_unlock(&softc->ctl_lock); |
4480 ctl_isc_announce_lun(lun); |
4481 4482 return (0); 4483} 4484 4485int 4486ctl_start_lun(struct ctl_be_lun *be_lun) 4487{ 4488 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; --- 33 unchanged lines hidden (view full) --- 4522 4523 mtx_lock(&lun->lun_lock); 4524 lun->flags &= ~CTL_LUN_OFFLINE; 4525 mtx_unlock(&lun->lun_lock); 4526 return (0); 4527} 4528 4529int |
/*
 * Backend hook: mark this LUN as primary on this HA head.  Sets
 * CTL_LUN_PRIMARY_SC under the LUN lock, raises an ASYMMETRIC ACCESS
 * STATE CHANGED unit attention for all initiators, and announces the
 * LUN (ctl_isc_announce_lun — presumably to the peer HA node; confirm
 * against its definition).  Always returns 0.
 */
ctl_lun_primary(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags |= CTL_LUN_PRIMARY_SC;
	mtx_unlock(&lun->lun_lock);
	ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
	ctl_isc_announce_lun(lun);
	return (0);
}

/*
 * Backend hook: mark this LUN as secondary on this HA head.  Inverse of
 * ctl_lun_primary(): clears CTL_LUN_PRIMARY_SC, raises the same unit
 * attention, and announces the LUN.  Always returns 0.
 */
int
ctl_lun_secondary(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags &= ~CTL_LUN_PRIMARY_SC;
	mtx_unlock(&lun->lun_lock);
	ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
	ctl_isc_announce_lun(lun);
	return (0);
}

int
4556ctl_invalidate_lun(struct ctl_be_lun *be_lun) 4557{ 4558 struct ctl_softc *softc; 4559 struct ctl_lun *lun; 4560 4561 lun = (struct ctl_lun *)be_lun->ctl_lun; 4562 softc = lun->ctl_softc; 4563 --- 48 unchanged lines hidden (view full) --- 4612 mtx_unlock(&lun->lun_lock); 4613 return (0); 4614} 4615 4616void 4617ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 4618{ 4619 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; |
	union ctl_ha_msg msg;

	/* Raise CAPACITY DATA HAS CHANGED for every initiator on this LUN. */
	mtx_lock(&lun->lun_lock);
	ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGED);
	mtx_unlock(&lun->lun_lock);
	if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
		/*
		 * Send msg to other side: a CTL_MSG_UA with wildcard
		 * initiator/port (-1) and ua_all set, so the peer raises
		 * the same unit attention for all of its initiators.
		 */
		bzero(&msg.ua, sizeof(msg.ua));
		msg.hdr.msg_type = CTL_MSG_UA;
		msg.hdr.nexus.initid = -1;
		msg.hdr.nexus.targ_port = -1;
		msg.hdr.nexus.targ_lun = lun->lun;
		msg.hdr.nexus.targ_mapped_lun = lun->lun;
		msg.ua.ua_all = 1;
		msg.ua.ua_set = 1;
		msg.ua.ua_type = CTL_UA_CAPACITY_CHANGED;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua),
		    M_WAITOK);
	}
4639} 4640 4641/* 4642 * Backend "memory move is complete" callback for requests that never 4643 * make it down to say RAIDCore's configuration code. 4644 */ 4645int 4646ctl_config_move_done(union ctl_io *io) --- 16 unchanged lines hidden (view full) --- 4663 * all the space we have in the sks field. 4664 */ 4665 ctl_set_internal_failure(&io->scsiio, 4666 /*sks_valid*/ 1, 4667 /*retry_count*/ 4668 io->io_hdr.port_status); 4669 } 4670 |
4671 if (ctl_debug & CTL_DEBUG_CDB_DATA) 4672 ctl_data_print(io); |
4673 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || 4674 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 4675 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || 4676 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 4677 /* 4678 * XXX KDM just assuming a single pointer here, and not a 4679 * S/G list. If we start using S/G lists for config data, 4680 * we'll need to know how to clean them up here as well. --- 10 unchanged lines hidden (view full) --- 4691 * writes, because for those at least we know ahead of 4692 * time where the write will go and how long it is. For 4693 * config writes, though, that information is largely 4694 * contained within the write itself, thus we need to 4695 * parse out the data again. 4696 * 4697 * - Call some other function once the data is in? 4698 */ |
4699 4700 /* 4701 * XXX KDM call ctl_scsiio() again for now, and check flag 4702 * bits to see whether we're allocated or not. 4703 */ 4704 retval = ctl_scsiio(&io->scsiio); 4705 } 4706 return (retval); --- 106 unchanged lines hidden (view full) --- 4813 struct ctl_lun *lun; 4814 uint32_t residx; 4815 4816 length = 0; 4817 resv_id = 0; 4818 4819 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 4820 |
4821 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); |
4822 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 4823 4824 switch (ctsio->cdb[0]) { 4825 case RELEASE_10: { 4826 struct scsi_release_10 *cdb; 4827 4828 cdb = (struct scsi_release_10 *)ctsio->cdb; 4829 --- 73 unchanged lines hidden (view full) --- 4903 thirdparty = 0; 4904 longid = 0; 4905 resv_id = 0; 4906 length = 0; 4907 thirdparty_id = 0; 4908 4909 CTL_DEBUG_PRINT(("ctl_reserve\n")); 4910 |
4911 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); |
4912 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 4913 4914 switch (ctsio->cdb[0]) { 4915 case RESERVE_10: { 4916 struct scsi_reserve_10 *cdb; 4917 4918 cdb = (struct scsi_reserve_10 *)ctsio->cdb; 4919 --- 99 unchanged lines hidden (view full) --- 5019 ctl_done((union ctl_io *)ctsio); 5020 return (CTL_RETVAL_COMPLETE); 5021 } 5022 5023 if ((lun->flags & CTL_LUN_PR_RESERVED) 5024 && ((cdb->how & SSS_START)==0)) { 5025 uint32_t residx; 5026 |
5027 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); |
5028 if (ctl_get_prkey(lun, residx) == 0 5029 || (lun->pr_res_idx!=residx && lun->res_type < 4)) { 5030 5031 ctl_set_reservation_conflict(ctsio); 5032 ctl_done((union ctl_io *)ctsio); 5033 return (CTL_RETVAL_COMPLETE); 5034 } 5035 } --- 1949 unchanged lines hidden (view full) --- 6985 return (CTL_RETVAL_COMPLETE); 6986} 6987 6988int 6989ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) 6990{ 6991 struct scsi_maintenance_in *cdb; 6992 int retval; |
6993 int alloc_len, ext, total_len = 0, g, pc, pg, gs, os; |
6994 int num_target_port_groups, num_target_ports; 6995 struct ctl_lun *lun; 6996 struct ctl_softc *softc; 6997 struct ctl_port *port; 6998 struct scsi_target_group_data *rtg_ptr; 6999 struct scsi_target_group_data_extended *rtg_ext_ptr; 7000 struct scsi_target_port_group_descriptor *tpg_desc; 7001 --- 39 unchanged lines hidden (view full) --- 7041 mtx_unlock(&softc->ctl_lock); 7042 7043 if (ext) 7044 total_len = sizeof(struct scsi_target_group_data_extended); 7045 else 7046 total_len = sizeof(struct scsi_target_group_data); 7047 total_len += sizeof(struct scsi_target_port_group_descriptor) * 7048 num_target_port_groups + |
7049 sizeof(struct scsi_target_port_descriptor) * num_target_ports; |
7050 7051 alloc_len = scsi_4btoul(cdb->length); 7052 7053 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7054 7055 ctsio->kern_sg_entries = 0; 7056 7057 if (total_len < alloc_len) { --- 18 unchanged lines hidden (view full) --- 7076 } else { 7077 rtg_ptr = (struct scsi_target_group_data *) 7078 ctsio->kern_data_ptr; 7079 scsi_ulto4b(total_len - 4, rtg_ptr->length); 7080 tpg_desc = &rtg_ptr->groups[0]; 7081 } 7082 7083 mtx_lock(&softc->ctl_lock); |
	/* Index of the target port group served by this HA head. */
	pg = softc->port_min / softc->port_cnt;
	/*
	 * gs: asymmetric access state to report for this head's group,
	 * derived from the HA link state and HA mode.
	 */
	if (softc->ha_link == CTL_HA_LINK_OFFLINE)
		gs = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE;
	else if (softc->ha_link == CTL_HA_LINK_UNKNOWN)
		gs = TPG_ASYMMETRIC_ACCESS_TRANSITIONING;
	else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY)
		gs = TPG_ASYMMETRIC_ACCESS_STANDBY;
	else
		gs = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
	/*
	 * If this head is primary for the LUN, its own group is reported
	 * OPTIMIZED and the other group gets the state computed above
	 * (os); otherwise the roles are swapped.
	 */
	if (lun->flags & CTL_LUN_PRIMARY_SC) {
		os = gs;
		gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
	} else
		os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
	/* Emit one descriptor per target port group. */
	for (g = 0; g < num_target_port_groups; g++) {
		tpg_desc->pref_state = (g == pg) ? gs : os;
		tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP |
		    TPG_U_SUP | TPG_T_SUP;
		scsi_ulto2b(g + 1, tpg_desc->target_port_group);
		tpg_desc->status = TPG_IMPLICIT;
		pc = 0;
		/*
		 * List only the ports that belong to group g, are online,
		 * and actually map this LUN.
		 */
		STAILQ_FOREACH(port, &softc->port_list, links) {
			if (port->targ_port < g * softc->port_cnt ||
			    port->targ_port >= (g + 1) * softc->port_cnt)
				continue;
			if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
				continue;
			if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
				continue;
7113 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. |
7114 relative_target_port_identifier); 7115 pc++; 7116 } 7117 tpg_desc->target_port_count = pc; 7118 tpg_desc = (struct scsi_target_port_group_descriptor *) 7119 &tpg_desc->descriptors[pc]; 7120 } 7121 mtx_unlock(&softc->ctl_lock); --- 352 unchanged lines hidden (view full) --- 7474 goto retry; 7475 } 7476 7477 scsi_ulto4b(lun->PRGeneration, res_keys->header.generation); 7478 7479 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7480 lun->pr_key_count, res_keys->header.length); 7481 |
7482 for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) { |
7483 if ((key = ctl_get_prkey(lun, i)) == 0) 7484 continue; 7485 7486 /* 7487 * We used lun->pr_key_count to calculate the 7488 * size to allocate. If it turns out the number of 7489 * initiators with the registered flag set is 7490 * larger than that (i.e. they haven't been kept in --- 112 unchanged lines hidden (view full) --- 7603 printf("%s: reservation length changed, retrying\n", 7604 __func__); 7605 goto retry; 7606 } 7607 7608 scsi_ulto4b(lun->PRGeneration, res_status->header.generation); 7609 7610 res_desc = &res_status->desc[0]; |
7611 for (i = 0; i < CTL_MAX_INITIATORS; i++) { |
7612 if ((key = ctl_get_prkey(lun, i)) == 0) 7613 continue; 7614 7615 scsi_u64to8b(key, res_desc->res_key.key); 7616 if ((lun->flags & CTL_LUN_PR_RESERVED) && 7617 (lun->pr_res_idx == i || 7618 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { 7619 res_desc->flags = SPRI_FULL_R_HOLDER; 7620 res_desc->scopetype = lun->res_type; 7621 } 7622 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, 7623 res_desc->rel_trgt_port_id); 7624 len = 0; |
7625 port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT]; |
7626 if (port != NULL) 7627 len = ctl_create_iid(port, 7628 i % CTL_MAX_INIT_PER_PORT, 7629 res_desc->transport_id); 7630 scsi_ulto4b(len, res_desc->additional_length); 7631 res_desc = (struct scsi_per_res_in_full_desc *) 7632 &res_desc->transport_id[len]; 7633 } --- 13 unchanged lines hidden (view full) --- 7647 7648 ctl_set_success(ctsio); 7649 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7650 ctsio->be_move_done = ctl_config_move_done; 7651 ctl_datamove((union ctl_io *)ctsio); 7652 return (CTL_RETVAL_COMPLETE); 7653} 7654 |
7655/* 7656 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7657 * it should return. 7658 */ 7659static int 7660ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7661 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7662 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7663 struct scsi_per_res_out_parms* param) 7664{ 7665 union ctl_ha_msg persis_io; |
7666 int i; |
7667 |
7668 mtx_lock(&lun->lun_lock); 7669 if (sa_res_key == 0) { 7670 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7671 /* validate scope and type */ 7672 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7673 SPR_LU_SCOPE) { 7674 mtx_unlock(&lun->lun_lock); 7675 ctl_set_invalid_field(/*ctsio*/ ctsio, --- 17 unchanged lines hidden (view full) --- 7693 ctl_done((union ctl_io *)ctsio); 7694 return (1); 7695 } 7696 7697 /* 7698 * Unregister everybody else and build UA for 7699 * them 7700 */ |
			/*
			 * All-registrants reservation, sa_res_key == 0:
			 * drop every registration except the requester's
			 * and tell those initiators they were preempted.
			 */
			for(i = 0; i < CTL_MAX_INITIATORS; i++) {
				if (i == residx || ctl_get_prkey(lun, i) == 0)
					continue;

				ctl_clr_prkey(lun, i);
				ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
			}
			/* Only the preemptor's registration remains. */
			lun->pr_key_count = 1;
			lun->res_type = type;
			/*
			 * For non-all-registrants types the requester becomes
			 * the reservation holder; for WR_EX_AR/EX_AC_AR the
			 * existing pr_res_idx (CTL_PR_ALL_REGISTRANTS on this
			 * path) is kept.
			 */
			if (lun->res_type != SPR_TYPE_WR_EX_AR
			 && lun->res_type != SPR_TYPE_EX_AC_AR)
				lun->pr_res_idx = residx;
			lun->PRGeneration++;
			mtx_unlock(&lun->lun_lock);
7715 7716 /* send msg to other side */ 7717 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7718 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7719 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7720 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7721 persis_io.pr.pr_info.res_type = type; 7722 memcpy(persis_io.pr.pr_info.sa_res_key, 7723 param->serv_act_res_key, 7724 sizeof(param->serv_act_res_key)); |
7725 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7726 sizeof(persis_io.pr), M_WAITOK); |
7727 } else { 7728 /* not all registrants */ 7729 mtx_unlock(&lun->lun_lock); 7730 free(ctsio->kern_data_ptr, M_CTL); 7731 ctl_set_invalid_field(ctsio, 7732 /*sks_valid*/ 1, 7733 /*command*/ 0, 7734 /*field*/ 8, --- 24 unchanged lines hidden (view full) --- 7759 /*command*/ 0, 7760 /*field*/ 8, 7761 /*bit_valid*/ 0, 7762 /*bit*/ 0); 7763 ctl_done((union ctl_io *)ctsio); 7764 return (1); 7765 } 7766 |
7767 for (i = 0; i < CTL_MAX_INITIATORS; i++) { |
7768 if (ctl_get_prkey(lun, i) != sa_res_key) 7769 continue; 7770 7771 found = 1; 7772 ctl_clr_prkey(lun, i); 7773 lun->pr_key_count--; |
7774 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); |
7775 } 7776 if (!found) { 7777 mtx_unlock(&lun->lun_lock); 7778 free(ctsio->kern_data_ptr, M_CTL); 7779 ctl_set_reservation_conflict(ctsio); 7780 ctl_done((union ctl_io *)ctsio); 7781 return (CTL_RETVAL_COMPLETE); 7782 } |
7783 lun->PRGeneration++; 7784 mtx_unlock(&lun->lun_lock); 7785 |
7786 /* send msg to other side */ 7787 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7788 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7789 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7790 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7791 persis_io.pr.pr_info.res_type = type; 7792 memcpy(persis_io.pr.pr_info.sa_res_key, 7793 param->serv_act_res_key, 7794 sizeof(param->serv_act_res_key)); |
7795 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7796 sizeof(persis_io.pr), M_WAITOK); |
7797 } else { 7798 /* Reserved but not all registrants */ 7799 /* sa_res_key is res holder */ 7800 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 7801 /* validate scope and type */ 7802 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7803 SPR_LU_SCOPE) { 7804 mtx_unlock(&lun->lun_lock); --- 28 unchanged lines hidden (view full) --- 7833 * reservation generate UA(Reservations 7834 * Preempted) for all other registered nexuses 7835 * if the type has changed. Establish the new 7836 * reservation and holder. If res_key and 7837 * sa_res_key are the same do the above 7838 * except don't unregister the res holder. 7839 */ 7840 |
			/*
			 * sa_res_key matches the current reservation holder:
			 * unregister every other nexus holding that key
			 * (REGISTRATIONS PREEMPTED UA), and if the
			 * reservation type changes away from a
			 * registrants-only type, tell the remaining
			 * registrants the reservation was released.
			 */
			for(i = 0; i < CTL_MAX_INITIATORS; i++) {
				if (i == residx || ctl_get_prkey(lun, i) == 0)
					continue;

				if (sa_res_key == ctl_get_prkey(lun, i)) {
					ctl_clr_prkey(lun, i);
					lun->pr_key_count--;
					ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
				} else if (type != lun->res_type
				 && (lun->res_type == SPR_TYPE_WR_EX_RO
				  || lun->res_type == SPR_TYPE_EX_AC_RO)) {
					ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
				}
			}
			/*
			 * Establish the new reservation: requester becomes
			 * the holder unless the type is all-registrants.
			 */
			lun->res_type = type;
			if (lun->res_type != SPR_TYPE_WR_EX_AR
			 && lun->res_type != SPR_TYPE_EX_AC_AR)
				lun->pr_res_idx = residx;
			else
				lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
			lun->PRGeneration++;
			mtx_unlock(&lun->lun_lock);

			/* Propagate the preempt to the HA peer. */
			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
			persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
			persis_io.pr.pr_info.residx = lun->pr_res_idx;
			persis_io.pr.pr_info.res_type = type;
			memcpy(persis_io.pr.pr_info.sa_res_key,
			    param->serv_act_res_key,
			    sizeof(param->serv_act_res_key));
			ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
			    sizeof(persis_io.pr), M_WAITOK);
		} else {
			/*
			 * sa_res_key is not the res holder just
			 * remove registrants
			 */
			int found=0;

			for (i = 0; i < CTL_MAX_INITIATORS; i++) {
				if (sa_res_key != ctl_get_prkey(lun, i))
					continue;

				found = 1;
				ctl_clr_prkey(lun, i);
				lun->pr_key_count--;
				ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
			}

			if (!found) {
				/*
				 * Nothing matched sa_res_key: fail with
				 * RESERVATION CONFLICT; the ctsio is
				 * completed here, so return non-zero.
				 */
				mtx_unlock(&lun->lun_lock);
				free(ctsio->kern_data_ptr, M_CTL);
				ctl_set_reservation_conflict(ctsio);
				ctl_done((union ctl_io *)ctsio);
				return (1);
			}
			lun->PRGeneration++;
			mtx_unlock(&lun->lun_lock);

			/* Propagate the preempt to the HA peer. */
			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
			persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
			persis_io.pr.pr_info.residx = lun->pr_res_idx;
			persis_io.pr.pr_info.res_type = type;
			memcpy(persis_io.pr.pr_info.sa_res_key,
			    param->serv_act_res_key,
			    sizeof(param->serv_act_res_key));
			ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
			    sizeof(persis_io.pr), M_WAITOK);
		}
	}
	return (0);
7914} 7915 7916static void 7917ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 7918{ 7919 uint64_t sa_res_key; 7920 int i; 7921 7922 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 7923 7924 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7925 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 7926 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 7927 if (sa_res_key == 0) { 7928 /* 7929 * Unregister everybody else and build UA for 7930 * them 7931 */ |
7932 for(i = 0; i < CTL_MAX_INITIATORS; i++) { |
7933 if (i == msg->pr.pr_info.residx || 7934 ctl_get_prkey(lun, i) == 0) 7935 continue; 7936 7937 ctl_clr_prkey(lun, i); |
7938 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); |
7939 } 7940 7941 lun->pr_key_count = 1; 7942 lun->res_type = msg->pr.pr_info.res_type; 7943 if (lun->res_type != SPR_TYPE_WR_EX_AR 7944 && lun->res_type != SPR_TYPE_EX_AC_AR) 7945 lun->pr_res_idx = msg->pr.pr_info.residx; 7946 } else { |
7947 for (i = 0; i < CTL_MAX_INITIATORS; i++) { |
7948 if (sa_res_key == ctl_get_prkey(lun, i)) 7949 continue; 7950 7951 ctl_clr_prkey(lun, i); 7952 lun->pr_key_count--; |
7953 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); |
7954 } 7955 } 7956 } else { |
7957 for (i = 0; i < CTL_MAX_INITIATORS; i++) { |
7958 if (i == msg->pr.pr_info.residx || 7959 ctl_get_prkey(lun, i) == 0) 7960 continue; 7961 7962 if (sa_res_key == ctl_get_prkey(lun, i)) { 7963 ctl_clr_prkey(lun, i); 7964 lun->pr_key_count--; |
7965 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); |
7966 } else if (msg->pr.pr_info.res_type != lun->res_type 7967 && (lun->res_type == SPR_TYPE_WR_EX_RO 7968 || lun->res_type == SPR_TYPE_EX_AC_RO)) { |
7969 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); |
7970 } 7971 } 7972 lun->res_type = msg->pr.pr_info.res_type; 7973 if (lun->res_type != SPR_TYPE_WR_EX_AR 7974 && lun->res_type != SPR_TYPE_EX_AC_AR) 7975 lun->pr_res_idx = msg->pr.pr_info.residx; 7976 else 7977 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 7978 } 7979 lun->PRGeneration++; 7980 7981} 7982 7983 7984int 7985ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 7986{ 7987 int retval; |
7988 u_int32_t param_len; 7989 struct scsi_per_res_out *cdb; 7990 struct ctl_lun *lun; 7991 struct scsi_per_res_out_parms* param; 7992 struct ctl_softc *softc; 7993 uint32_t residx; 7994 uint64_t res_key, sa_res_key, key; 7995 uint8_t type; --- 53 unchanged lines hidden (view full) --- 8049 ctsio->be_move_done = ctl_config_move_done; 8050 ctl_datamove((union ctl_io *)ctsio); 8051 8052 return (CTL_RETVAL_COMPLETE); 8053 } 8054 8055 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 8056 |
8057 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); |
8058 res_key = scsi_8btou64(param->res_key.key); 8059 sa_res_key = scsi_8btou64(param->serv_act_res_key); 8060 8061 /* 8062 * Validate the reservation key here except for SPRO_REG_IGNO 8063 * This must be done for all other service actions 8064 */ 8065 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { --- 98 unchanged lines hidden (view full) --- 8164 /* 8165 * If the reservation is a registrants 8166 * only type we need to generate a UA 8167 * for other registered inits. The 8168 * sense code should be RESERVATIONS 8169 * RELEASED 8170 */ 8171 |
8172 for (i = softc->init_min; i < softc->init_max; i++){ 8173 if (ctl_get_prkey(lun, i) == 0) |
8174 continue; 8175 ctl_est_ua(lun, i, 8176 CTL_UA_RES_RELEASE); 8177 } 8178 } 8179 lun->res_type = 0; 8180 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8181 if (lun->pr_key_count==0) { 8182 lun->flags &= ~CTL_LUN_PR_RESERVED; 8183 lun->res_type = 0; 8184 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8185 } 8186 } |
8187 lun->PRGeneration++; 8188 mtx_unlock(&lun->lun_lock); 8189 |
8190 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8191 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8192 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8193 persis_io.pr.pr_info.residx = residx; |
8194 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8195 sizeof(persis_io.pr), M_WAITOK); |
8196 } else /* sa_res_key != 0 */ { 8197 8198 /* 8199 * If we aren't registered currently then increment 8200 * the key count and set the registered flag. 8201 */ 8202 ctl_alloc_prkey(lun, residx); 8203 if (ctl_get_prkey(lun, residx) == 0) 8204 lun->pr_key_count++; 8205 ctl_set_prkey(lun, residx, sa_res_key); |
8206 lun->PRGeneration++; 8207 mtx_unlock(&lun->lun_lock); |
8208 8209 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8210 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8211 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8212 persis_io.pr.pr_info.residx = residx; 8213 memcpy(persis_io.pr.pr_info.sa_res_key, 8214 param->serv_act_res_key, 8215 sizeof(param->serv_act_res_key)); |
8216 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8217 sizeof(persis_io.pr), M_WAITOK); |
8218 } |
8219 8220 break; 8221 } 8222 case SPRO_RESERVE: 8223#if 0 8224 printf("Reserve executed type %d\n", type); 8225#endif 8226 mtx_lock(&lun->lun_lock); --- 30 unchanged lines hidden (view full) --- 8257 mtx_unlock(&lun->lun_lock); 8258 8259 /* send msg to other side */ 8260 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8261 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8262 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8263 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8264 persis_io.pr.pr_info.res_type = type; |
8265 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8266 sizeof(persis_io.pr), M_WAITOK); |
8267 } 8268 break; 8269 8270 case SPRO_RELEASE: 8271 mtx_lock(&lun->lun_lock); 8272 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8273 /* No reservation exists return good status */ 8274 mtx_unlock(&lun->lun_lock); --- 27 unchanged lines hidden (view full) --- 8302 8303 /* 8304 * if this isn't an exclusive access 8305 * res generate UA for all other 8306 * registrants. 8307 */ 8308 if (type != SPR_TYPE_EX_AC 8309 && type != SPR_TYPE_WR_EX) { |
8310 for (i = softc->init_min; i < softc->init_max; i++) { 8311 if (i == residx || ctl_get_prkey(lun, i) == 0) |
8312 continue; 8313 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8314 } 8315 } 8316 mtx_unlock(&lun->lun_lock); |
8317 |
8318 /* Send msg to other side */ 8319 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8320 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8321 persis_io.pr.pr_info.action = CTL_PR_RELEASE; |
8322 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8323 sizeof(persis_io.pr), M_WAITOK); |
8324 break; 8325 8326 case SPRO_CLEAR: 8327 /* send msg to other side */ 8328 8329 mtx_lock(&lun->lun_lock); 8330 lun->flags &= ~CTL_LUN_PR_RESERVED; 8331 lun->res_type = 0; 8332 lun->pr_key_count = 0; 8333 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8334 8335 ctl_clr_prkey(lun, residx); |
8336 for (i = 0; i < CTL_MAX_INITIATORS; i++) |
8337 if (ctl_get_prkey(lun, i) != 0) { 8338 ctl_clr_prkey(lun, i); |
8339 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); |
8340 } 8341 lun->PRGeneration++; 8342 mtx_unlock(&lun->lun_lock); |
8343 |
8344 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8345 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8346 persis_io.pr.pr_info.action = CTL_PR_CLEAR; |
8347 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8348 sizeof(persis_io.pr), M_WAITOK); |
8349 break; 8350 8351 case SPRO_PREEMPT: 8352 case SPRO_PRE_ABO: { 8353 int nretval; 8354 8355 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 8356 residx, ctsio, cdb, param); --- 58 unchanged lines hidden (view full) --- 8415 /* 8416 * If the reservation is a registrants 8417 * only type we need to generate a UA 8418 * for other registered inits. The 8419 * sense code should be RESERVATIONS 8420 * RELEASED 8421 */ 8422 |
8423 for (i = softc->init_min; i < softc->init_max; i++) { 8424 if (ctl_get_prkey(lun, i) == 0) |
8425 continue; 8426 8427 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8428 } 8429 } 8430 lun->res_type = 0; 8431 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8432 if (lun->pr_key_count==0) { --- 14 unchanged lines hidden (view full) --- 8447 8448 case CTL_PR_RELEASE: 8449 /* 8450 * if this isn't an exclusive access res generate UA for all 8451 * other registrants. 8452 */ 8453 if (lun->res_type != SPR_TYPE_EX_AC 8454 && lun->res_type != SPR_TYPE_WR_EX) { |
8455 for (i = softc->init_min; i < softc->init_max; i++) 8456 if (ctl_get_prkey(lun, i) != 0) |
8457 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8458 } 8459 8460 lun->flags &= ~CTL_LUN_PR_RESERVED; 8461 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8462 lun->res_type = 0; 8463 break; 8464 8465 case CTL_PR_PREEMPT: 8466 ctl_pro_preempt_other(lun, msg); 8467 break; 8468 case CTL_PR_CLEAR: 8469 lun->flags &= ~CTL_LUN_PR_RESERVED; 8470 lun->res_type = 0; 8471 lun->pr_key_count = 0; 8472 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8473 |
8474 for (i=0; i < CTL_MAX_INITIATORS; i++) { |
8475 if (ctl_get_prkey(lun, i) == 0) 8476 continue; 8477 ctl_clr_prkey(lun, i); |
8478 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); |
8479 } 8480 lun->PRGeneration++; 8481 break; 8482 } 8483 8484 mtx_unlock(&lun->lun_lock); 8485} 8486 --- 755 unchanged lines hidden (view full) --- 9242 CTL_DEBUG_PRINT(("ctl_tur\n")); 9243 9244 ctl_set_success(ctsio); 9245 ctl_done((union ctl_io *)ctsio); 9246 9247 return (CTL_RETVAL_COMPLETE); 9248} 9249 |
9250/* 9251 * SCSI VPD page 0x00, the Supported VPD Pages page. 9252 */ 9253static int 9254ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9255{ 9256 struct scsi_vpd_supported_pages *pages; 9257 int sup_page_size; --- 259 unchanged lines hidden (view full) --- 9517 struct ctl_softc *softc; 9518 struct ctl_lun *lun; 9519 struct ctl_port *port; 9520 int data_len; 9521 uint8_t proto; 9522 9523 softc = control_softc; 9524 |
9525 port = ctl_io_port(&ctsio->io_hdr); |
9526 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9527 9528 data_len = sizeof(struct scsi_vpd_device_id) + 9529 sizeof(struct scsi_vpd_id_descriptor) + 9530 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9531 sizeof(struct scsi_vpd_id_descriptor) + 9532 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9533 if (lun && lun->lun_devid) 9534 data_len += lun->lun_devid->len; |
9535 if (port && port->port_devid) |
9536 data_len += port->port_devid->len; |
9537 if (port && port->target_devid) |
9538 data_len += port->target_devid->len; 9539 9540 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9541 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9542 ctsio->kern_sg_entries = 0; 9543 9544 if (data_len < alloc_len) { 9545 ctsio->residual = alloc_len - data_len; --- 15 unchanged lines hidden (view full) --- 9561 if (lun != NULL) 9562 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9563 lun->be_lun->lun_type; 9564 else 9565 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9566 devid_ptr->page_code = SVPD_DEVICE_ID; 9567 scsi_ulto2b(data_len - 4, devid_ptr->length); 9568 |
9569 if (port && port->port_type == CTL_PORT_FC) |
9570 proto = SCSI_PROTO_FC << 4; |
9571 else if (port && port->port_type == CTL_PORT_ISCSI) |
9572 proto = SCSI_PROTO_ISCSI << 4; 9573 else 9574 proto = SCSI_PROTO_SPI << 4; 9575 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9576 9577 /* 9578 * We're using a LUN association here. i.e., this device ID is a 9579 * per-LUN identifier. 9580 */ 9581 if (lun && lun->lun_devid) { 9582 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); 9583 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9584 lun->lun_devid->len); 9585 } 9586 9587 /* 9588 * This is for the WWPN which is a port association. 9589 */ |
9590 if (port && port->port_devid) { |
9591 memcpy(desc, port->port_devid->data, port->port_devid->len); 9592 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9593 port->port_devid->len); 9594 } 9595 9596 /* 9597 * This is for the Relative Target Port(type 4h) identifier 9598 */ --- 7 unchanged lines hidden (view full) --- 9606 9607 /* 9608 * This is for the Target Port Group(type 5h) identifier 9609 */ 9610 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9611 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9612 SVPD_ID_TYPE_TPORTGRP; 9613 desc->length = 4; |
9614 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port / softc->port_cnt + 1, |
9615 &desc->identifier[2]); 9616 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9617 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 9618 9619 /* 9620 * This is for the Target identifier 9621 */ |
9622 if (port && port->target_devid) { |
9623 memcpy(desc, port->target_devid->data, port->target_devid->len); 9624 } 9625 9626 ctl_set_success(ctsio); 9627 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9628 ctsio->be_move_done = ctl_config_move_done; 9629 ctl_datamove((union ctl_io *)ctsio); 9630 return (CTL_RETVAL_COMPLETE); 9631} 9632 9633static int 9634ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 9635{ 9636 struct ctl_softc *softc = control_softc; 9637 struct scsi_vpd_scsi_ports *sp; 9638 struct scsi_vpd_port_designation *pd; 9639 struct scsi_vpd_port_designation_cont *pdc; 9640 struct ctl_lun *lun; 9641 struct ctl_port *port; |
9642 int data_len, num_target_ports, iid_len, id_len; |
9643 9644 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9645 |
9646 num_target_ports = 0; 9647 iid_len = 0; 9648 id_len = 0; 9649 mtx_lock(&softc->ctl_lock); 9650 STAILQ_FOREACH(port, &softc->port_list, links) { 9651 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9652 continue; 9653 if (lun != NULL && 9654 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 9655 continue; 9656 num_target_ports++; 9657 if (port->init_devid) 9658 iid_len += port->init_devid->len; 9659 if (port->port_devid) 9660 id_len += port->port_devid->len; 9661 } 9662 mtx_unlock(&softc->ctl_lock); 9663 |
9664 data_len = sizeof(struct scsi_vpd_scsi_ports) + |
9665 num_target_ports * (sizeof(struct scsi_vpd_port_designation) + 9666 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; 9667 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9668 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; 9669 ctsio->kern_sg_entries = 0; 9670 9671 if (data_len < alloc_len) { 9672 ctsio->residual = alloc_len - data_len; --- 20 unchanged lines hidden (view full) --- 9693 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9694 9695 sp->page_code = SVPD_SCSI_PORTS; 9696 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 9697 sp->page_length); 9698 pd = &sp->design[0]; 9699 9700 mtx_lock(&softc->ctl_lock); |
9701 STAILQ_FOREACH(port, &softc->port_list, links) { 9702 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9703 continue; 9704 if (lun != NULL && 9705 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 9706 continue; 9707 scsi_ulto2b(port->targ_port, pd->relative_port_id); 9708 if (port->init_devid) { 9709 iid_len = port->init_devid->len; 9710 memcpy(pd->initiator_transportid, 9711 port->init_devid->data, port->init_devid->len); 9712 } else 9713 iid_len = 0; 9714 scsi_ulto2b(iid_len, pd->initiator_transportid_length); 9715 pdc = (struct scsi_vpd_port_designation_cont *) 9716 (&pd->initiator_transportid[iid_len]); 9717 if (port->port_devid) { 9718 id_len = port->port_devid->len; 9719 memcpy(pdc->target_port_descriptors, 9720 port->port_devid->data, port->port_devid->len); 9721 } else 9722 id_len = 0; 9723 scsi_ulto2b(id_len, pdc->target_port_descriptors_length); 9724 pd = (struct scsi_vpd_port_designation *) 9725 ((uint8_t *)pdc->target_port_descriptors + id_len); |
9726 } 9727 mtx_unlock(&softc->ctl_lock); 9728 9729 ctl_set_success(ctsio); 9730 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9731 ctsio->be_move_done = ctl_config_move_done; 9732 ctl_datamove((union ctl_io *)ctsio); 9733 return (CTL_RETVAL_COMPLETE); --- 250 unchanged lines hidden (view full) --- 9984 * Standard INQUIRY data. 9985 */ 9986static int 9987ctl_inquiry_std(struct ctl_scsiio *ctsio) 9988{ 9989 struct scsi_inquiry_data *inq_ptr; 9990 struct scsi_inquiry *cdb; 9991 struct ctl_softc *softc; |
9992 struct ctl_port *port; |
9993 struct ctl_lun *lun; 9994 char *val; 9995 uint32_t alloc_len, data_len; 9996 ctl_port_type port_type; 9997 9998 softc = control_softc; 9999 10000 /* 10001 * Figure out whether we're talking to a Fibre Channel port or not. 10002 * We treat the ioctl front end, and any SCSI adapters, as packetized 10003 * SCSI front ends. 10004 */ |
10005 port = ctl_io_port(&ctsio->io_hdr); 10006 if (port != NULL) 10007 port_type = port->port_type; 10008 else 10009 port_type = CTL_PORT_SCSI; |
10010 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) 10011 port_type = CTL_PORT_SCSI; 10012 10013 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10014 cdb = (struct scsi_inquiry *)ctsio->cdb; 10015 alloc_len = scsi_2btoul(cdb->length); 10016 10017 /* --- 13 unchanged lines hidden (view full) --- 10031 ctsio->kern_data_len = data_len; 10032 ctsio->kern_total_len = data_len; 10033 } else { 10034 ctsio->residual = 0; 10035 ctsio->kern_data_len = alloc_len; 10036 ctsio->kern_total_len = alloc_len; 10037 } 10038 |
10039 if (lun != NULL) { 10040 if ((lun->flags & CTL_LUN_PRIMARY_SC) || 10041 softc->ha_link >= CTL_HA_LINK_UNKNOWN) { 10042 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10043 lun->be_lun->lun_type; 10044 } else { 10045 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | 10046 lun->be_lun->lun_type; 10047 } 10048 } else |
10049 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; 10050 10051 /* RMB in byte 2 is 0 */ 10052 inq_ptr->version = SCSI_REV_SPC4; 10053 10054 /* 10055 * According to SAM-3, even if a device only supports a single 10056 * level of LUN addressing, it should still set the HISUP bit: --- 573 unchanged lines hidden (view full) --- 10630/* 10631 * Assumptions: 10632 * - An I/O has just completed, and has been removed from the per-LUN OOA 10633 * queue, so some items on the blocked queue may now be unblocked. 10634 */ 10635static int 10636ctl_check_blocked(struct ctl_lun *lun) 10637{ |
10638 struct ctl_softc *softc = lun->ctl_softc; |
10639 union ctl_io *cur_blocked, *next_blocked; 10640 10641 mtx_assert(&lun->lun_lock, MA_OWNED); 10642 10643 /* 10644 * Run forward from the head of the blocked queue, checking each 10645 * entry against the I/Os prior to it on the OOA queue to see if 10646 * there is still any blockage. --- 29 unchanged lines hidden (view full) --- 10676 /* 10677 * This shouldn't happen! In theory we've already 10678 * checked this command for overlap... 10679 */ 10680 break; 10681 case CTL_ACTION_PASS: 10682 case CTL_ACTION_SKIP: { 10683 const struct ctl_cmd_entry *entry; |
10684 10685 /* 10686 * The skip case shouldn't happen, this transaction 10687 * should have never made it onto the blocked queue. 10688 */ 10689 /* 10690 * This I/O is no longer blocked, we can remove it 10691 * from the blocked queue. Since this is a TAILQ 10692 * (doubly linked list), we can do O(1) removals 10693 * from any place on the list. 10694 */ 10695 TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr, 10696 blocked_links); 10697 cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 10698 |
10699 if ((softc->ha_mode != CTL_HA_MODE_XFER) && 10700 (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)){ |
10701 /* 10702 * Need to send IO back to original side to 10703 * run 10704 */ 10705 union ctl_ha_msg msg_info; 10706 |
10707 cur_blocked->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; |
10708 msg_info.hdr.original_sc = 10709 cur_blocked->io_hdr.original_sc; 10710 msg_info.hdr.serializing_sc = cur_blocked; 10711 msg_info.hdr.msg_type = CTL_MSG_R2R; |
10712 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 10713 sizeof(msg_info.hdr), M_NOWAIT); |
10714 break; 10715 } 10716 entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL); 10717 10718 /* 10719 * Check this I/O for LUN state changes that may 10720 * have happened while this command was blocked. 10721 * The LUN state may have been changed by a command --- 42 unchanged lines hidden (view full) --- 10764 int retval; 10765 uint32_t residx; 10766 10767 retval = 0; 10768 10769 mtx_assert(&lun->lun_lock, MA_OWNED); 10770 10771 /* |
10772 * If this shelf is a secondary shelf controller, we may have to 10773 * reject some commands disallowed by HA mode and link state. |
10774 */ |
10775 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { 10776 if (softc->ha_link == CTL_HA_LINK_OFFLINE && 10777 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 10778 ctl_set_lun_unavail(ctsio); 10779 retval = 1; 10780 goto bailout; 10781 } 10782 if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 && 10783 (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { 10784 ctl_set_lun_transit(ctsio); 10785 retval = 1; 10786 goto bailout; 10787 } 10788 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY && 10789 (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) { 10790 ctl_set_lun_standby(ctsio); 10791 retval = 1; 10792 goto bailout; 10793 } 10794 10795 /* The rest of checks are only done on executing side */ 10796 if (softc->ha_mode == CTL_HA_MODE_XFER) 10797 goto bailout; |
10798 } 10799 10800 if (entry->pattern & CTL_LUN_PAT_WRITE) { 10801 if (lun->be_lun && 10802 lun->be_lun->flags & CTL_LUN_FLAG_READONLY) { 10803 ctl_set_sense(ctsio, /*current_error*/ 1, 10804 /*sense_key*/ SSD_KEY_DATA_PROTECT, 10805 /*asc*/ 0x27, /*ascq*/ 0x01, SSD_ELEM_NONE); --- 10 unchanged lines hidden (view full) --- 10816 } 10817 } 10818 10819 /* 10820 * Check for a reservation conflict. If this command isn't allowed 10821 * even on reserved LUNs, and if this initiator isn't the one who 10822 * reserved us, reject the command with a reservation conflict. 10823 */ |
10824 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); |
10825 if ((lun->flags & CTL_LUN_RESERVED) 10826 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 10827 if (lun->res_idx != residx) { 10828 ctl_set_reservation_conflict(ctsio); 10829 retval = 1; 10830 goto bailout; 10831 } 10832 } --- 13 unchanged lines hidden (view full) --- 10846 * conflict. 10847 */ 10848 if (ctl_get_prkey(lun, residx) == 0 10849 || (residx != lun->pr_res_idx && lun->res_type < 4)) { 10850 ctl_set_reservation_conflict(ctsio); 10851 retval = 1; 10852 goto bailout; 10853 } |
10854 } 10855 10856 if ((lun->flags & CTL_LUN_OFFLINE) |
10857 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0)) { |
10858 ctl_set_lun_not_ready(ctsio); 10859 retval = 1; 10860 goto bailout; 10861 } 10862 |
10863 if ((lun->flags & CTL_LUN_STOPPED) 10864 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) { 10865 /* "Logical unit not ready, initializing cmd. required" */ 10866 ctl_set_lun_stopped(ctsio); 10867 retval = 1; 10868 goto bailout; 10869 } 10870 10871 if ((lun->flags & CTL_LUN_INOPERABLE) 10872 && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) { 10873 /* "Medium format corrupted" */ 10874 ctl_set_medium_format_corrupted(ctsio); 10875 retval = 1; 10876 goto bailout; 10877 } 10878 10879bailout: 10880 return (retval); |
10881} 10882 10883static void 10884ctl_failover_io(union ctl_io *io, int have_lock) 10885{ 10886 ctl_set_busy(&io->scsiio); 10887 ctl_done(io); 10888} 10889 |
10890static void |
10891ctl_failover_lun(struct ctl_lun *lun) |
10892{ |
10893 struct ctl_softc *softc = lun->ctl_softc; 10894 struct ctl_io_hdr *io, *next_io; |
10895 |
10896 CTL_DEBUG_PRINT(("FAILOVER for lun %ju\n", lun->lun)); 10897 if (softc->ha_mode == CTL_HA_MODE_XFER) { 10898 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 10899 /* We are master */ 10900 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 10901 if (io->flags & CTL_FLAG_IO_ACTIVE) { 10902 io->flags |= CTL_FLAG_ABORT; 10903 } else { /* This can be only due to DATAMOVE */ 10904 io->msg_type = CTL_MSG_DATAMOVE_DONE; 10905 io->flags |= CTL_FLAG_IO_ACTIVE; 10906 io->port_status = 31340; 10907 ctl_enqueue_isc((union ctl_io *)io); |
10908 } 10909 } |
10910 /* We are slave */ 10911 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 10912 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 10913 if (io->flags & CTL_FLAG_IO_ACTIVE) { 10914 io->flags |= CTL_FLAG_FAILOVER; |
10915 } else { |
10916 ctl_set_busy(&((union ctl_io *)io)-> 10917 scsiio); 10918 ctl_done((union ctl_io *)io); |
10919 } 10920 } |
10921 } 10922 } else { /* SERIALIZE modes */ 10923 TAILQ_FOREACH_SAFE(io, &lun->blocked_queue, blocked_links, 10924 next_io) { 10925 /* We are master */ 10926 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 10927 TAILQ_REMOVE(&lun->blocked_queue, io, 10928 blocked_links); 10929 io->flags &= ~CTL_FLAG_BLOCKED; 10930 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); 10931 ctl_free_io((union ctl_io *)io); |
10932 } |
10933 } 10934 TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 10935 /* We are master */ 10936 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 10937 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); 10938 ctl_free_io((union ctl_io *)io); |
10939 } |
10940 /* We are slave */ 10941 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 10942 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 10943 if (!(io->flags & CTL_FLAG_IO_ACTIVE)) { 10944 ctl_set_busy(&((union ctl_io *)io)-> 10945 scsiio); 10946 ctl_done((union ctl_io *)io); |
10947 } |
10948 } |
10949 } |
10950 ctl_check_blocked(lun); |
10951 } |
10952} |
10953 10954static int 10955ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio) 10956{ 10957 struct ctl_lun *lun; 10958 const struct ctl_cmd_entry *entry; 10959 uint32_t initidx, targ_lun; 10960 int retval; --- 15 unchanged lines hidden (view full) --- 10976 mtx_unlock(&lun->lun_lock); 10977 lun = NULL; 10978 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL; 10979 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL; 10980 } else { 10981 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun; 10982 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = 10983 lun->be_lun; |
10984 10985 /* 10986 * Every I/O goes into the OOA queue for a 10987 * particular LUN, and stays there until completion. 10988 */ 10989#ifdef CTL_TIME_IO 10990 if (TAILQ_EMPTY(&lun->ooa_queue)) { 10991 lun->idle_time += getsbinuptime() - --- 118 unchanged lines hidden (view full) --- 11110 * XXX CHD this is where we want to send IO to other side if 11111 * this LUN is secondary on this SC. We will need to make a copy 11112 * of the IO and flag the IO on this side as SENT_2OTHER and the flag 11113 * the copy we send as FROM_OTHER. 11114 * We also need to stuff the address of the original IO so we can 11115 * find it easily. Something similar will need be done on the other 11116 * side so when we are done we can find the copy. 11117 */ |
11118 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 11119 (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0) { |
11120 union ctl_ha_msg msg_info; 11121 int isc_retval; 11122 11123 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; |
11124 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11125 mtx_unlock(&lun->lun_lock); |
11126 11127 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 11128 msg_info.hdr.original_sc = (union ctl_io *)ctsio; |
11129 msg_info.hdr.serializing_sc = NULL; 11130 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 11131 msg_info.scsi.tag_num = ctsio->tag_num; 11132 msg_info.scsi.tag_type = ctsio->tag_type; |
11133 msg_info.scsi.cdb_len = ctsio->cdb_len; |
11134 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 11135 |
11136 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11137 sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data), 11138 M_WAITOK)) > CTL_HA_STATUS_SUCCESS) { 11139 ctl_set_busy(ctsio); 11140 ctl_done((union ctl_io *)ctsio); 11141 return (retval); |
11142 } |
11143 return (retval); 11144 } 11145 11146 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 11147 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, 11148 ctl_ooaq, ooa_links))) { 11149 case CTL_ACTION_BLOCK: 11150 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; --- 153 unchanged lines hidden (view full) --- 11304 ctl_ua_type ua_type) 11305{ 11306 struct ctl_lun *lun; 11307 int retval; 11308 11309 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11310 union ctl_ha_msg msg_info; 11311 |
11312 msg_info.hdr.nexus = io->io_hdr.nexus; 11313 if (ua_type==CTL_UA_TARG_RESET) 11314 msg_info.task.task_action = CTL_TASK_TARGET_RESET; 11315 else 11316 msg_info.task.task_action = CTL_TASK_BUS_RESET; 11317 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11318 msg_info.hdr.original_sc = NULL; 11319 msg_info.hdr.serializing_sc = NULL; |
11320 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11321 sizeof(msg_info.task), M_WAITOK); |
11322 } 11323 retval = 0; 11324 11325 mtx_lock(&softc->ctl_lock); 11326 STAILQ_FOREACH(lun, &softc->lun_list, links) 11327 retval += ctl_lun_reset(lun, io, ua_type); 11328 mtx_unlock(&softc->ctl_lock); 11329 --- 101 unchanged lines hidden (view full) --- 11431 11432 msg_info.hdr.nexus = xio->io_hdr.nexus; 11433 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11434 msg_info.task.tag_num = xio->scsiio.tag_num; 11435 msg_info.task.tag_type = xio->scsiio.tag_type; 11436 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11437 msg_info.hdr.original_sc = NULL; 11438 msg_info.hdr.serializing_sc = NULL; |
11439 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11440 sizeof(msg_info.task), M_NOWAIT); |
11441 } 11442 } 11443 } 11444} 11445 11446static int 11447ctl_abort_task_set(union ctl_io *io) 11448{ --- 27 unchanged lines hidden (view full) --- 11476 return (0); 11477} 11478 11479static int 11480ctl_i_t_nexus_reset(union ctl_io *io) 11481{ 11482 struct ctl_softc *softc = control_softc; 11483 struct ctl_lun *lun; |
11484 uint32_t initidx; |
11485 11486 initidx = ctl_get_initindex(&io->io_hdr.nexus); |
11487 mtx_lock(&softc->ctl_lock); 11488 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11489 mtx_lock(&lun->lun_lock); 11490 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11491 io->io_hdr.nexus.initid, 11492 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11493#ifdef CTL_WITH_CA 11494 ctl_clear_mask(lun->have_ca, initidx); 11495#endif |
11496 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx)) |
11497 lun->flags &= ~CTL_LUN_RESERVED; 11498 ctl_est_ua(lun, initidx, CTL_UA_I_T_NEXUS_LOSS); 11499 mtx_unlock(&lun->lun_lock); 11500 } 11501 mtx_unlock(&softc->ctl_lock); 11502 return (0); 11503} 11504 --- 88 unchanged lines hidden (view full) --- 11593 */ 11594 if (xio->scsiio.tag_num == io->taskio.tag_num) { 11595 xio->io_hdr.flags |= CTL_FLAG_ABORT; 11596 found = 1; 11597 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 && 11598 !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11599 union ctl_ha_msg msg_info; 11600 |
11601 msg_info.hdr.nexus = io->io_hdr.nexus; 11602 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11603 msg_info.task.tag_num = io->taskio.tag_num; 11604 msg_info.task.tag_type = io->taskio.tag_type; 11605 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11606 msg_info.hdr.original_sc = NULL; 11607 msg_info.hdr.serializing_sc = NULL; 11608#if 0 11609 printf("Sent Abort to other side\n"); 11610#endif |
11611 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11612 sizeof(msg_info.task), M_NOWAIT); |
11613 } 11614#if 0 11615 printf("ctl_abort_task: found I/O to abort\n"); 11616#endif 11617 } 11618 } 11619 mtx_unlock(&lun->lun_lock); 11620 --- 77 unchanged lines hidden (view full) --- 11698 if ((targ_lun < CTL_MAX_LUNS) 11699 && (softc->ctl_luns[targ_lun] != NULL)) 11700 lun = softc->ctl_luns[targ_lun]; 11701 else { 11702 mtx_unlock(&softc->ctl_lock); 11703 retval = 1; 11704 break; 11705 } |
11706 retval = ctl_lun_reset(lun, io, CTL_UA_LUN_RESET); 11707 mtx_unlock(&softc->ctl_lock); |
11708 |
11709 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) { |
11710 union ctl_ha_msg msg_info; 11711 |
11712 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; |
11713 msg_info.hdr.nexus = io->io_hdr.nexus; |
11714 msg_info.task.task_action = CTL_TASK_LUN_RESET; |
11715 msg_info.hdr.original_sc = NULL; 11716 msg_info.hdr.serializing_sc = NULL; |
11717 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11718 sizeof(msg_info.task), M_WAITOK); |
11719 } |
11720 break; 11721 } 11722 case CTL_TASK_TARGET_RESET: 11723 retval = ctl_target_reset(softc, io, CTL_UA_TARG_RESET); 11724 break; 11725 case CTL_TASK_BUS_RESET: 11726 retval = ctl_bus_reset(softc, io); 11727 break; --- 81 unchanged lines hidden (view full) --- 11809 free_io = 0; 11810 ctl_datamove_remote(io); 11811 break; 11812 case CTL_MSG_DATAMOVE_DONE: 11813 /* Only used in XFER mode */ 11814 free_io = 0; 11815 io->scsiio.be_move_done(io); 11816 break; |
11817 case CTL_MSG_FAILOVER: 11818 mtx_lock(&lun->lun_lock); 11819 ctl_failover_lun(lun); 11820 mtx_unlock(&lun->lun_lock); 11821 free_io = 1; 11822 break; |
11823 default: 11824 free_io = 1; 11825 printf("%s: Invalid message type %d\n", 11826 __func__, io->io_hdr.msg_type); 11827 break; 11828 } 11829 if (free_io) 11830 ctl_free_io(io); --- 185 unchanged lines hidden (view full) --- 12016 (intmax_t)time_uptime - io->io_hdr.start_time); 12017 sbuf_finish(&sb); 12018 printf("%s", sbuf_data(&sb)); 12019 } 12020#endif /* CTL_TIME_IO */ 12021 12022#ifdef CTL_IO_DELAY 12023 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { |
12024 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12025 } else { 12026 struct ctl_lun *lun; 12027 12028 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12029 if ((lun != NULL) 12030 && (lun->delay_info.datamove_delay > 0)) { 12031 --- 60 unchanged lines hidden (view full) --- 12092 * pass by reference, only by value between controllers. 12093 * So we can't pass a pointer to the S/G list, only as many 12094 * S/G entries as we can fit in here. If it's possible for 12095 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, 12096 * then we need to break this up into multiple transfers. 12097 */ 12098 if (io->scsiio.kern_sg_entries == 0) { 12099 msg.dt.kern_sg_entries = 1; |
12100#if 0 |
12101 /* |
12102 * Convert to a physical address if this is a 12103 * virtual address. 12104 */ 12105 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 12106 msg.dt.sg_list[0].addr = 12107 io->scsiio.kern_data_ptr; 12108 } else { 12109 /* 12110 * XXX KDM use busdma here! 12111 */ |
12112 msg.dt.sg_list[0].addr = (void *) 12113 vtophys(io->scsiio.kern_data_ptr); |
12114 } |
12115#else 12116 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 12117 ("HA does not support BUS_ADDR")); 12118 msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; 12119#endif |
12120 12121 msg.dt.sg_list[0].len = io->scsiio.kern_data_len; 12122 do_sg_copy = 0; 12123 } else { |
12124 msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; |
12125 do_sg_copy = 1; |
12126 } 12127 12128 msg.dt.kern_data_len = io->scsiio.kern_data_len; 12129 msg.dt.kern_total_len = io->scsiio.kern_total_len; 12130 msg.dt.kern_data_resid = io->scsiio.kern_data_resid; 12131 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; 12132 msg.dt.sg_sequence = 0; 12133 --- 22 unchanged lines hidden (view full) --- 12156 * case is straightforward. In the write 12157 * case, we want to make sure nothing is 12158 * in the local cache that could overwrite 12159 * the DMAed data. 12160 */ 12161 12162 for (i = sg_entries_sent, j = 0; 12163 i < msg.dt.cur_sg_entries; i++, j++) { |
12164#if 0 |
12165 if ((io->io_hdr.flags & |
12166 CTL_FLAG_BUS_ADDR) == 0) { 12167 /* 12168 * XXX KDM use busdma. 12169 */ |
12170 msg.dt.sg_list[j].addr =(void *) 12171 vtophys(sgl[i].addr); |
12172 } else { 12173 msg.dt.sg_list[j].addr = 12174 sgl[i].addr; 12175 } |
12176#else 12177 KASSERT((io->io_hdr.flags & 12178 CTL_FLAG_BUS_ADDR) == 0, 12179 ("HA does not support BUS_ADDR")); 12180 msg.dt.sg_list[j].addr = sgl[i].addr; 12181#endif |
12182 msg.dt.sg_list[j].len = sgl[i].len; 12183 } 12184 } 12185 12186 sg_entries_sent += msg.dt.cur_sg_entries; 12187 if (sg_entries_sent >= msg.dt.kern_sg_entries) 12188 msg.dt.sg_last = 1; 12189 else 12190 msg.dt.sg_last = 0; 12191 |
12192 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, |
12193 sizeof(msg.dt) - sizeof(msg.dt.sg_list) + 12194 sizeof(struct ctl_sg_entry)*msg.dt.cur_sg_entries, 12195 M_WAITOK) > CTL_HA_STATUS_SUCCESS) { 12196 io->io_hdr.port_status = 31341; 12197 io->scsiio.be_move_done(io); 12198 return; |
12199 } 12200 12201 msg.dt.sent_sg_entries = sg_entries_sent; 12202 } 12203 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; |
12204 } else { 12205 12206 /* 12207 * Lookup the fe_datamove() function for this particular 12208 * front end. 12209 */ |
12210 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove; |
12211 12212 fe_datamove(io); 12213 } 12214} 12215 12216static void 12217ctl_send_datamove_done(union ctl_io *io, int have_lock) 12218{ 12219 union ctl_ha_msg msg; |
12220 12221 memset(&msg, 0, sizeof(msg)); 12222 12223 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 12224 msg.hdr.original_sc = io; 12225 msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 12226 msg.hdr.nexus = io->io_hdr.nexus; 12227 msg.hdr.status = io->io_hdr.status; 12228 msg.scsi.tag_num = io->scsiio.tag_num; 12229 msg.scsi.tag_type = io->scsiio.tag_type; 12230 msg.scsi.scsi_status = io->scsiio.scsi_status; 12231 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, |
12232 io->scsiio.sense_len); |
12233 msg.scsi.sense_len = io->scsiio.sense_len; 12234 msg.scsi.sense_residual = io->scsiio.sense_residual; 12235 msg.scsi.fetd_status = io->io_hdr.port_status; 12236 msg.scsi.residual = io->scsiio.residual; 12237 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12238 12239 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12240 ctl_failover_io(io, /*have_lock*/ have_lock); 12241 return; 12242 } 12243 |
12244 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12245 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 12246 msg.scsi.sense_len, M_WAITOK); |
12247} 12248 12249/* 12250 * The DMA to the remote side is done, now we need to tell the other side 12251 * we're done so it can continue with its data movement. 12252 */ 12253static void 12254ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) 12255{ 12256 union ctl_io *io; |
12257 int i; |
12258 12259 io = rq->context; 12260 12261 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12262 printf("%s: ISC DMA write failed with error %d", __func__, 12263 rq->ret); 12264 ctl_set_internal_failure(&io->scsiio, 12265 /*sks_valid*/ 1, 12266 /*retry_count*/ rq->ret); 12267 } 12268 12269 ctl_dt_req_free(rq); 12270 |
12271 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12272 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12273 free(io->io_hdr.remote_sglist, M_CTL); 12274 io->io_hdr.remote_sglist = NULL; 12275 io->io_hdr.local_sglist = NULL; 12276 |
12277 /* |
 * The data is in local and remote memory, so now we need to send
 * status (good or bad) back to the other side.
 */
	ctl_send_datamove_done(io, /*have_lock*/ 0);
}

/*
 * We've moved the data from the host/controller into local memory. Now we
--- 32 unchanged lines hidden (view full) ---
	io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist;

	/*
	 * Use a custom move done callback, since we need to send completion
	 * back to the other controller, not to the backend on this side.
	 */
	io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb;

12326 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove; |
12327 12328 fe_datamove(io); 12329 12330 return; 12331 12332} 12333 12334static int 12335ctl_datamove_remote_dm_read_cb(union ctl_io *io) 12336{ 12337#if 0 12338 char str[256]; 12339 char path_str[64]; 12340 struct sbuf sb; 12341#endif |
12342 int i; |
12343 |
12344 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12345 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12346 free(io->io_hdr.remote_sglist, M_CTL); 12347 io->io_hdr.remote_sglist = NULL; 12348 io->io_hdr.local_sglist = NULL; |
12349 12350#if 0 12351 scsi_path_string(io, path_str, sizeof(path_str)); 12352 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12353 sbuf_cat(&sb, path_str); 12354 scsi_command_string(&io->scsiio, NULL, &sb); 12355 sbuf_printf(&sb, "\n"); 12356 sbuf_cat(&sb, path_str); --- 20 unchanged lines hidden (view full) --- 12377ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 12378{ 12379 union ctl_io *io; 12380 void (*fe_datamove)(union ctl_io *io); 12381 12382 io = rq->context; 12383 12384 if (rq->ret != CTL_HA_STATUS_SUCCESS) { |
12385 printf("%s: ISC DMA read failed with error %d\n", __func__, |
12386 rq->ret); 12387 ctl_set_internal_failure(&io->scsiio, 12388 /*sks_valid*/ 1, 12389 /*retry_count*/ rq->ret); 12390 } 12391 12392 ctl_dt_req_free(rq); 12393 12394 /* Switch the pointer over so the FETD knows what to do */ 12395 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12396 12397 /* 12398 * Use a custom move done callback, since we need to send completion 12399 * back to the other controller, not to the backend on this side. 12400 */ 12401 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 12402 12403 /* XXX KDM add checks like the ones in ctl_datamove? */ 12404 |
12405 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove; |
12406 12407 fe_datamove(io); 12408} 12409 12410static int 12411ctl_datamove_remote_sgl_setup(union ctl_io *io) 12412{ 12413 struct ctl_sg_entry *local_sglist, *remote_sglist; |
12414 struct ctl_softc *softc; |
12415 uint32_t len_to_go; |
12416 int retval; 12417 int i; 12418 12419 retval = 0; 12420 softc = control_softc; |
12421 local_sglist = io->io_hdr.local_sglist; |
12422 remote_sglist = io->io_hdr.remote_sglist; |
12423 len_to_go = io->scsiio.kern_data_len; |
12424 |
12425 /* 12426 * The difficult thing here is that the size of the various 12427 * S/G segments may be different than the size from the 12428 * remote controller. That'll make it harder when DMAing 12429 * the data back to the other side. 12430 */ 12431 for (i = 0; len_to_go > 0; i++) { 12432 local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT); 12433 local_sglist[i].addr = 12434 malloc(local_sglist[i].len, M_CTL, M_WAITOK); |
12435 |
12436 len_to_go -= local_sglist[i].len; 12437 } 12438 /* 12439 * Reset the number of S/G entries accordingly. The original 12440 * number of S/G entries is available in rem_sg_entries. 12441 */ 12442 io->scsiio.kern_sg_entries = i; |
12443 |
12444#if 0 |
12445 printf("%s: kern_sg_entries = %d\n", __func__, 12446 io->scsiio.kern_sg_entries); 12447 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12448 printf("%s: sg[%d] = %p, %d\n", __func__, i, 12449 local_sglist[i].addr, local_sglist[i].len); |
12450#endif |
12451 |
12452 return (retval); |
12453} 12454 12455static int 12456ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 12457 ctl_ha_dt_cb callback) 12458{ 12459 struct ctl_ha_dt_req *rq; 12460 struct ctl_sg_entry *remote_sglist, *local_sglist; |
12461 uint32_t local_used, remote_used, total_used; |
12462 int i, j, isc_ret; |
12463 |
12464 rq = ctl_dt_req_alloc(); 12465 12466 /* 12467 * If we failed to allocate the request, and if the DMA didn't fail 12468 * anyway, set busy status. This is just a resource allocation 12469 * failure. 12470 */ 12471 if ((rq == NULL) --- 8 unchanged lines hidden (view full) --- 12480 /* 12481 * The data move failed. We need to return status back 12482 * to the other controller. No point in trying to DMA 12483 * data to the remote controller. 12484 */ 12485 12486 ctl_send_datamove_done(io, /*have_lock*/ 0); 12487 |
12488 return (1); |
12489 } 12490 12491 local_sglist = io->io_hdr.local_sglist; |
12492 remote_sglist = io->io_hdr.remote_sglist; |
12493 local_used = 0; 12494 remote_used = 0; 12495 total_used = 0; 12496 |
12497 /* 12498 * Pull/push the data over the wire from/to the other controller. 12499 * This takes into account the possibility that the local and 12500 * remote sglists may not be identical in terms of the size of 12501 * the elements and the number of elements. 12502 * 12503 * One fundamental assumption here is that the length allocated for 12504 * both the local and remote sglists is identical. Otherwise, we've 12505 * essentially got a coding error of some sort. 12506 */ |
12507 isc_ret = CTL_HA_STATUS_SUCCESS; |
12508 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { |
12509 uint32_t cur_len; |
12510 uint8_t *tmp_ptr; 12511 |
12512 rq->command = command; 12513 rq->context = io; 12514 12515 /* 12516 * Both pointers should be aligned. But it is possible 12517 * that the allocation length is not. They should both 12518 * also have enough slack left over at the end, though, 12519 * to round up to the next 8 byte boundary. 12520 */ 12521 cur_len = MIN(local_sglist[i].len - local_used, 12522 remote_sglist[j].len - remote_used); |
12523 rq->size = cur_len; |
12524 |
12525 tmp_ptr = (uint8_t *)local_sglist[i].addr; 12526 tmp_ptr += local_used; 12527 |
12528#if 0 |
12529 /* Use physical addresses when talking to ISC hardware */ 12530 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 12531 /* XXX KDM use busdma */ |
12532 rq->local = vtophys(tmp_ptr); |
12533 } else 12534 rq->local = tmp_ptr; |
12535#else 12536 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 12537 ("HA does not support BUS_ADDR")); 12538 rq->local = tmp_ptr; 12539#endif |
12540 12541 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 12542 tmp_ptr += remote_used; 12543 rq->remote = tmp_ptr; 12544 12545 rq->callback = NULL; 12546 12547 local_used += cur_len; --- 7 unchanged lines hidden (view full) --- 12555 j++; 12556 remote_used = 0; 12557 } 12558 total_used += cur_len; 12559 12560 if (total_used >= io->scsiio.kern_data_len) 12561 rq->callback = callback; 12562 |
12563#if 0 12564 printf("%s: %s: local %#x remote %#x size %d\n", __func__, 12565 (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ", 12566 rq->local, rq->remote, rq->size); 12567#endif 12568 12569 isc_ret = ctl_dt_single(rq); |
12570 if (isc_ret > CTL_HA_STATUS_SUCCESS) 12571 break; 12572 } 12573 if (isc_ret != CTL_HA_STATUS_WAIT) { 12574 rq->ret = isc_ret; |
12575 callback(rq); |
12576 } 12577 |
12578 return (0); |
12579} 12580 12581static void 12582ctl_datamove_remote_read(union ctl_io *io) 12583{ 12584 int retval; 12585 int i; 12586 12587 /* 12588 * This will send an error to the other controller in the case of a 12589 * failure. 12590 */ 12591 retval = ctl_datamove_remote_sgl_setup(io); 12592 if (retval != 0) 12593 return; 12594 12595 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 12596 ctl_datamove_remote_read_cb); |
12597 if (retval != 0) { |
12598 /* 12599 * Make sure we free memory if there was an error.. The 12600 * ctl_datamove_remote_xfer() function will send the 12601 * datamove done message, or call the callback with an 12602 * error if there is a problem. 12603 */ 12604 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12605 free(io->io_hdr.local_sglist[i].addr, M_CTL); |
12606 free(io->io_hdr.remote_sglist, M_CTL); 12607 io->io_hdr.remote_sglist = NULL; 12608 io->io_hdr.local_sglist = NULL; |
12609 } 12610 12611 return; 12612} 12613 12614/* 12615 * Process a datamove request from the other controller. This is used for 12616 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 12617 * first. Once that is complete, the data gets DMAed into the remote 12618 * controller's memory. For reads, we DMA from the remote controller's 12619 * memory into our memory first, and then move it out to the FETD. 12620 */ 12621static void 12622ctl_datamove_remote(union ctl_io *io) 12623{ |
12624 |
12625 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED); |
12626 |
12627 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12628 ctl_failover_io(io, /*have_lock*/ 0); 12629 return; 12630 } |
12631 12632 /* 12633 * Note that we look for an aborted I/O here, but don't do some of 12634 * the other checks that ctl_datamove() normally does. 12635 * We don't need to run the datamove delay code, since that should 12636 * have been done if need be on the other controller. 12637 */ 12638 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12639 printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__, 12640 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12641 io->io_hdr.nexus.targ_port, 12642 io->io_hdr.nexus.targ_lun); 12643 io->io_hdr.port_status = 31338; 12644 ctl_send_datamove_done(io, /*have_lock*/ 0); 12645 return; 12646 } 12647 |
12648 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) |
12649 ctl_datamove_remote_write(io); |
12650 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) |
12651 ctl_datamove_remote_read(io); |
12652 else { 12653 io->io_hdr.port_status = 31339; 12654 ctl_send_datamove_done(io, /*have_lock*/ 0); |
12655 } |
12656} 12657 12658static int 12659ctl_process_done(union ctl_io *io) 12660{ 12661 struct ctl_lun *lun; 12662 struct ctl_softc *softc = control_softc; 12663 void (*fe_done)(union ctl_io *io); |
12664 union ctl_ha_msg msg; 12665 uint32_t targ_port = io->io_hdr.nexus.targ_port; |
12666 12667 CTL_DEBUG_PRINT(("ctl_process_done\n")); 12668 |
12669 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) 12670 fe_done = softc->ctl_ports[targ_port]->fe_done; 12671 else 12672 fe_done = NULL; |
12673 12674#ifdef CTL_TIME_IO 12675 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12676 char str[256]; 12677 char path_str[64]; 12678 struct sbuf sb; 12679 12680 ctl_scsi_path_string(io, path_str, sizeof(path_str)); --- 141 unchanged lines hidden (view full) --- 12822 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS && 12823 (ctl_debug & CTL_DEBUG_INFO) != 0) 12824 ctl_io_error_print(io, NULL); 12825 12826 /* 12827 * Tell the FETD or the other shelf controller we're done with this 12828 * command. Note that only SCSI commands get to this point. Task 12829 * management commands are completed above. |
12830 */ |
12831 if ((softc->ha_mode != CTL_HA_MODE_XFER) && 12832 (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) { 12833 memset(&msg, 0, sizeof(msg)); 12834 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 12835 msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 12836 msg.hdr.nexus = io->io_hdr.nexus; 12837 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12838 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data), 12839 M_WAITOK); 12840 } |
12841 if ((softc->ha_mode == CTL_HA_MODE_XFER) 12842 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { |
12843 memset(&msg, 0, sizeof(msg)); 12844 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 12845 msg.hdr.original_sc = io->io_hdr.original_sc; 12846 msg.hdr.nexus = io->io_hdr.nexus; 12847 msg.hdr.status = io->io_hdr.status; 12848 msg.scsi.scsi_status = io->scsiio.scsi_status; 12849 msg.scsi.tag_num = io->scsiio.tag_num; 12850 msg.scsi.tag_type = io->scsiio.tag_type; 12851 msg.scsi.sense_len = io->scsiio.sense_len; 12852 msg.scsi.sense_residual = io->scsiio.sense_residual; 12853 msg.scsi.residual = io->scsiio.residual; 12854 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, |
12855 io->scsiio.sense_len); |
12856 /* 12857 * We copy this whether or not this is an I/O-related 12858 * command. Otherwise, we'd have to go and check to see 12859 * whether it's a read/write command, and it really isn't 12860 * worth it. 12861 */ 12862 memcpy(&msg.scsi.lbalen, 12863 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 12864 sizeof(msg.scsi.lbalen)); 12865 |
12866 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12867 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 12868 msg.scsi.sense_len, M_WAITOK); |
12869 ctl_free_io(io); 12870 } else 12871 fe_done(io); 12872 12873 return (CTL_RETVAL_COMPLETE); 12874} 12875 12876#ifdef CTL_WITH_CA --- 138 unchanged lines hidden (view full) --- 13015 13016 /* 13017 * This is an internal copy of an I/O, and should not go through 13018 * the normal done processing logic. 13019 */ 13020 if (io->io_hdr.flags & CTL_FLAG_INT_COPY) 13021 return; 13022 |
13023#ifdef CTL_IO_DELAY 13024 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 13025 struct ctl_lun *lun; 13026 13027 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13028 13029 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 13030 } else { --- 14 unchanged lines hidden (view full) --- 13045 return; 13046 } 13047 } 13048#endif /* CTL_IO_DELAY */ 13049 13050 ctl_enqueue_done(io); 13051} 13052 |
13053static void 13054ctl_work_thread(void *arg) 13055{ 13056 struct ctl_thread *thr = (struct ctl_thread *)arg; 13057 struct ctl_softc *softc = thr->ctl_softc; 13058 union ctl_io *io; 13059 int retval; 13060 --- 33 unchanged lines hidden (view full) --- 13094 STAILQ_REMOVE_HEAD(&thr->incoming_queue, links); 13095 mtx_unlock(&thr->queue_lock); 13096 if (io->io_hdr.io_type == CTL_IO_TASK) 13097 ctl_run_task(io); 13098 else 13099 ctl_scsiio_precheck(softc, &io->scsiio); 13100 continue; 13101 } |
13102 io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue); 13103 if (io != NULL) { 13104 STAILQ_REMOVE_HEAD(&thr->rtr_queue, links); 13105 mtx_unlock(&thr->queue_lock); 13106 retval = ctl_scsiio(&io->scsiio); 13107 if (retval != CTL_RETVAL_COMPLETE) 13108 CTL_DEBUG_PRINT(("ctl_scsiio failed\n")); 13109 continue; |
13110 } 13111 13112 /* Sleep until we have something to do. */ 13113 mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0); 13114 } 13115} 13116 13117static void --- 26 unchanged lines hidden (view full) --- 13144ctl_thresh_thread(void *arg) 13145{ 13146 struct ctl_softc *softc = (struct ctl_softc *)arg; 13147 struct ctl_lun *lun; 13148 struct ctl_be_lun *be_lun; 13149 struct scsi_da_rw_recovery_page *rwpage; 13150 struct ctl_logical_block_provisioning_page *page; 13151 const char *attr; |
13152 union ctl_ha_msg msg; |
13153 uint64_t thres, val; |
13154 int i, e, set; |
13155 13156 CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n")); 13157 13158 for (;;) { 13159 mtx_lock(&softc->ctl_lock); 13160 STAILQ_FOREACH(lun, &softc->lun_list, links) { 13161 be_lun = lun->be_lun; 13162 if ((lun->flags & CTL_LUN_DISABLED) || 13163 (lun->flags & CTL_LUN_OFFLINE) || 13164 lun->backend->lun_attr == NULL) 13165 continue; |
13166 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 13167 softc->ha_mode == CTL_HA_MODE_XFER) 13168 continue; |
13169 rwpage = &lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT]; 13170 if ((rwpage->byte8 & SMS_RWER_LBPERE) == 0) 13171 continue; 13172 e = 0; 13173 page = &lun->mode_pages.lbp_page[CTL_PAGE_CURRENT]; 13174 for (i = 0; i < CTL_NUM_LBP_THRESH; i++) { 13175 if ((page->descr[i].flags & SLBPPD_ENABLED) == 0) 13176 continue; --- 28 unchanged lines hidden (view full) --- 13205 e |= (val <= thres); 13206 } 13207 mtx_lock(&lun->lun_lock); 13208 if (e) { 13209 if (lun->lasttpt == 0 || 13210 time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) { 13211 lun->lasttpt = time_uptime; 13212 ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); |
13213 set = 1; 13214 } else 13215 set = 0; |
13216 } else { 13217 lun->lasttpt = 0; 13218 ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); |
13219 set = -1; |
13220 } 13221 mtx_unlock(&lun->lun_lock); |
13222 if (set != 0 && 13223 lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 13224 /* Send msg to other side. */ 13225 bzero(&msg.ua, sizeof(msg.ua)); 13226 msg.hdr.msg_type = CTL_MSG_UA; 13227 msg.hdr.nexus.initid = -1; 13228 msg.hdr.nexus.targ_port = -1; 13229 msg.hdr.nexus.targ_lun = lun->lun; 13230 msg.hdr.nexus.targ_mapped_lun = lun->lun; 13231 msg.ua.ua_all = 1; 13232 msg.ua.ua_set = (set > 0); 13233 msg.ua.ua_type = CTL_UA_THIN_PROV_THRES; 13234 mtx_unlock(&softc->ctl_lock); // XXX 13235 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 13236 sizeof(msg.ua), M_WAITOK); 13237 mtx_lock(&softc->ctl_lock); 13238 } |
13239 } 13240 mtx_unlock(&softc->ctl_lock); 13241 pause("-", CTL_LBP_PERIOD * hz); 13242 } 13243} 13244 13245static void 13246ctl_enqueue_incoming(union ctl_io *io) --- 32 unchanged lines hidden (view full) --- 13279 13280 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 13281 mtx_lock(&thr->queue_lock); 13282 STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links); 13283 mtx_unlock(&thr->queue_lock); 13284 wakeup(thr); 13285} 13286 |
13287static void 13288ctl_enqueue_isc(union ctl_io *io) 13289{ 13290 struct ctl_softc *softc = control_softc; 13291 struct ctl_thread *thr; 13292 13293 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 13294 mtx_lock(&thr->queue_lock); 13295 STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links); 13296 mtx_unlock(&thr->queue_lock); 13297 wakeup(thr); 13298} 13299 |
13300/* |
13301 * vim: ts=8 13302 */ |