--- mrsas_cam.c	(stable/11, r323819)
+++ mrsas_cam.c	(stable/11, r342716)
 /*
  * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
  * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
  * Support: freebsdraid@avagotech.com
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
  * met:

--- 17 unchanged lines hidden ---

  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  *
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: stable/11/sys/dev/mrsas/mrsas_cam.c 323819 2017-09-20 17:49:57Z jkim $");
+__FBSDID("$FreeBSD: stable/11/sys/dev/mrsas/mrsas_cam.c 342716 2019-01-03 07:45:52Z kadesai $");
 
 #include "dev/mrsas/mrsas.h"
 
 #include <cam/cam.h>
 #include <cam/cam_ccb.h>
 #include <cam/cam_sim.h>
 #include <cam/cam_xpt_sim.h>
 #include <cam/cam_debug.h>
 #include <cam/cam_periph.h>
 #include <cam/cam_xpt_periph.h>
 
 #include <cam/scsi/scsi_all.h>
 #include <cam/scsi/scsi_message.h>
 #include <sys/taskqueue.h>
 #include <sys/kernel.h>
 
-
 #include <sys/time.h>		/* XXX for pcpu.h */
 #include <sys/pcpu.h>		/* XXX for PCPU_GET */
 
 #define smp_processor_id()  PCPU_GET(cpuid)
 
 /*
  * Function prototypes
  */

--- 41 unchanged lines hidden ---

 static int mrsas_issue_tm(struct mrsas_softc *sc,
     MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc);
 static void
 mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs,
     int nseg, int error);
 static int32_t
 mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
     union ccb *ccb);
+
+static boolean_t mrsas_is_prp_possible(struct mrsas_mpt_cmd *cmd,
+    bus_dma_segment_t *segs, int nsegs);
+static void mrsas_build_ieee_sgl(struct mrsas_mpt_cmd *cmd,
+    bus_dma_segment_t *segs, int nseg);
+static void mrsas_build_prp_nvme(struct mrsas_mpt_cmd *cmd,
+    bus_dma_segment_t *segs, int nseg);
+
 struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
 MRSAS_REQUEST_DESCRIPTOR_UNION *
     mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index);
 
-extern void
-mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
-    u_int8_t extStatus);
 extern int mrsas_reset_targets(struct mrsas_softc *sc);
 extern u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
 extern u_int32_t
 MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map,
     struct mrsas_softc *sc);
 extern void mrsas_isr(void *arg);
 extern void mrsas_aen_handler(struct mrsas_softc *sc);
 extern u_int8_t
 MR_BuildRaidContext(struct mrsas_softc *sc,
     struct IO_REQUEST_INFO *io_info, RAID_CONTEXT * pRAID_Context,
     MR_DRV_RAID_MAP_ALL * map);
 extern u_int16_t
 MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span,
     MR_DRV_RAID_MAP_ALL * map);
 extern u_int16_t
 mrsas_get_updated_dev_handle(struct mrsas_softc *sc,
     PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info);
-extern u_int8_t
-megasas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm,
-    u_int64_t block, u_int32_t count);
 extern int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
 extern void mrsas_disable_intr(struct mrsas_softc *sc);
 extern void mrsas_enable_intr(struct mrsas_softc *sc);
+void mrsas_prepare_secondRaid1_IO(struct mrsas_softc *sc,
+    struct mrsas_mpt_cmd *cmd);
 
-
 /*
  * mrsas_cam_attach: Main entry to CAM subsystem
  * input: Adapter instance soft state
  *
  * This function is called from mrsas_attach() during initialization to perform
  * SIM allocations and XPT bus registration. If the kernel version is 7.4 or
  * earlier, it would also initiate a bus scan.
  */
 int
 mrsas_cam_attach(struct mrsas_softc *sc)
 {
     struct cam_devq *devq;
     int mrsas_cam_depth;
 
-    mrsas_cam_depth = sc->max_fw_cmds - MRSAS_INTERNAL_CMDS;
+    mrsas_cam_depth = sc->max_scsi_cmds;
 
     if ((devq = cam_simq_alloc(mrsas_cam_depth)) == NULL) {
         device_printf(sc->mrsas_dev, "Cannot allocate SIM queue\n");
         return (ENOMEM);
     }
     /*
      * Create SIM for bus 0 and register, also create path
      */
 
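The queue depth handed to cam_simq_alloc() now comes from the precomputed sc->max_scsi_cmds rather than being rederived in place from max_fw_cmds minus the internal-command reservation. A hypothetical helper showing the sizing intent (names and the reservation are illustrative, not the driver's own derivation, which lives in mrsas.c):

/*
 * Hypothetical: size the CAM SIM queue so CAM can never queue more IOs
 * than mrsas_get_mpt_cmd() can satisfy from the MPT command pool.
 */
static int
pick_cam_depth(int max_fw_cmds, int reserved_internal_cmds)
{
    return (max_fw_cmds - reserved_internal_cmds);
}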
--- 283 unchanged lines hidden ---

  * copies the IO and depending if the IO is read/write or inquiry, it would
  * call mrsas_build_ldio() or mrsas_build_dcdb(), respectively. It returns 0
  * if the command is sent to firmware successfully, otherwise it returns 1.
  */
 static int32_t
 mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
     union ccb *ccb)
 {
-    struct mrsas_mpt_cmd *cmd;
+    struct mrsas_mpt_cmd *cmd, *r1_cmd = NULL;
     struct ccb_hdr *ccb_h = &(ccb->ccb_h);
     struct ccb_scsiio *csio = &(ccb->csio);
     MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
     u_int8_t cmd_type;
 
     if ((csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE &&
         (!sc->fw_sync_cache_support)) {
         ccb->ccb_h.status = CAM_REQ_CMP;
         xpt_done(ccb);
         return (0);
     }
     ccb_h->status |= CAM_SIM_QUEUED;
     cmd = mrsas_get_mpt_cmd(sc);
 
     if (!cmd) {
         ccb_h->status |= CAM_REQUEUE_REQ;
         xpt_done(ccb);
         return (0);
     }
+
     if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
         if (ccb_h->flags & CAM_DIR_IN)
             cmd->flags |= MRSAS_DIR_IN;
         if (ccb_h->flags & CAM_DIR_OUT)
             cmd->flags |= MRSAS_DIR_OUT;
     } else
         cmd->flags = MRSAS_DIR_NONE;	/* no data */

--- 84 unchanged lines hidden ---

     /* Check for IO type READ-WRITE targeted for Logical Volume */
     cmd_type = mrsas_find_io_type(sim, ccb);
     switch (cmd_type) {
     case READ_WRITE_LDIO:
         /* Build READ-WRITE IO for Logical Volume */
         if (mrsas_build_ldio_rw(sc, cmd, ccb)) {
             device_printf(sc->mrsas_dev, "Build RW LDIO failed.\n");
             mtx_unlock(&sc->raidmap_lock);
+            mrsas_release_mpt_cmd(cmd);
             return (1);
         }
         break;
     case NON_READ_WRITE_LDIO:
         /* Build NON READ-WRITE IO for Logical Volume */
         if (mrsas_build_ldio_nonrw(sc, cmd, ccb)) {
             device_printf(sc->mrsas_dev, "Build NON-RW LDIO failed.\n");
             mtx_unlock(&sc->raidmap_lock);
+            mrsas_release_mpt_cmd(cmd);
             return (1);
         }
         break;
     case READ_WRITE_SYSPDIO:
     case NON_READ_WRITE_SYSPDIO:
         if (sc->secure_jbod_support &&
             (cmd_type == NON_READ_WRITE_SYSPDIO)) {
             /* Build NON-RW IO for JBOD */
             if (mrsas_build_syspdio(sc, cmd, ccb, sim, 0)) {
                 device_printf(sc->mrsas_dev,
                     "Build SYSPDIO failed.\n");
                 mtx_unlock(&sc->raidmap_lock);
+                mrsas_release_mpt_cmd(cmd);
                 return (1);
             }
         } else {
             /* Build RW IO for JBOD */
             if (mrsas_build_syspdio(sc, cmd, ccb, sim, 1)) {
                 device_printf(sc->mrsas_dev,
                     "Build SYSPDIO failed.\n");
                 mtx_unlock(&sc->raidmap_lock);
+                mrsas_release_mpt_cmd(cmd);
                 return (1);
             }
         }
     }
     mtx_unlock(&sc->raidmap_lock);
 
     if (cmd->flags == MRSAS_DIR_IN)	/* from device */
         cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_READ;

--- 6 unchanged lines hidden ---

     cmd->io_request->SenseBufferLength = MRSAS_SCSI_SENSE_BUFFERSIZE;
 
     req_desc = cmd->request_desc;
     req_desc->SCSIIO.SMID = cmd->index;
 
     /*
      * Start timer for IO timeout. Default timeout value is 180 seconds.
      */
+    cmd->callout_owner = true;
 #if (__FreeBSD_version >= 1000510)
     callout_reset_sbt(&cmd->cm_callout, SBT_1S * 180, 0,
         mrsas_scsiio_timeout, cmd, 0);
 #else
     callout_reset(&cmd->cm_callout, (180000 * hz) / 1000,
         mrsas_scsiio_timeout, cmd);
 #endif
-    mrsas_atomic_inc(&sc->fw_outstanding);
 
-    if (mrsas_atomic_read(&sc->fw_outstanding) > sc->io_cmds_highwater)
+    if (mrsas_atomic_inc_return(&sc->fw_outstanding) > sc->io_cmds_highwater)
         sc->io_cmds_highwater++;
 
-    mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
+    /*
+     * If the IO is RAID 1/10 fast-path write capable, try to get a second
+     * command from the pool and construct it.  FW has confirmed that the
+     * LBA values of the two PDs corresponding to a single R1/10 LD are
+     * always the same.
+     *
+     * The driver-side count must always stay below max_fw_cmds to be able
+     * to get a new command.
+     */
+    if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
+        mrsas_atomic_inc(&sc->fw_outstanding);
+        mrsas_prepare_secondRaid1_IO(sc, cmd);
+        mrsas_fire_cmd(sc, req_desc->addr.u.low,
+            req_desc->addr.u.high);
+        r1_cmd = cmd->peer_cmd;
+        mrsas_fire_cmd(sc, r1_cmd->request_desc->addr.u.low,
+            r1_cmd->request_desc->addr.u.high);
+    } else {
+        mrsas_fire_cmd(sc, req_desc->addr.u.low,
+            req_desc->addr.u.high);
+    }
+
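Replacing mrsas_atomic_inc() plus a separate mrsas_atomic_read() with mrsas_atomic_inc_return() closes a small race: between the two original calls another CPU could change fw_outstanding, so the high-water comparison could see a stale count. A minimal C11 sketch of the difference (standalone names, not the driver's own helpers):

#include <stdatomic.h>

static atomic_int fw_outstanding;

/*
 * Racy pattern: the load is a second operation, so the counter can move
 * between the increment and the read.
 */
static int
inc_then_read(void)
{
    atomic_fetch_add(&fw_outstanding, 1);
    return (atomic_load(&fw_outstanding));
}

/* Atomic pattern: derive the post-increment value from the fetch itself. */
static int
inc_return(void)
{
    return (atomic_fetch_add(&fw_outstanding, 1) + 1);
}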
     return (0);
 
 done:
     xpt_done(ccb);
     return (0);
 }
 
 /*

--- 47 unchanged lines hidden ---

 
     memset((uint8_t *)cmd->io_request, 0, MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
     cmd->data = NULL;
     cmd->length = 0;
     cmd->flags = 0;
     cmd->error_code = 0;
     cmd->load_balance = 0;
     cmd->ccb_ptr = NULL;
 
 out:
     mtx_unlock(&sc->mpt_cmd_pool_lock);
     return cmd;
 }
 
 /*
  * mrsas_release_mpt_cmd: Return a cmd to free command pool
  * input: Command packet for return to free command pool
  *
  * This function returns an MPT command to the free command list.
  */
 void
 mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd)
 {
     struct mrsas_softc *sc = cmd->sc;
 
     mtx_lock(&sc->mpt_cmd_pool_lock);
+    cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
     cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
+    cmd->peer_cmd = NULL;
+    cmd->cmd_completed = 0;
+    memset((uint8_t *)cmd->io_request, 0,
+        sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
     TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
     mtx_unlock(&sc->mpt_cmd_pool_lock);
 
     return;
 }
 
 /*
  * mrsas_get_request_desc: Get request descriptor from array
  * input: Adapter instance soft state
  *        SMID index
  *
  * This function returns a pointer to the request descriptor.
  */
 MRSAS_REQUEST_DESCRIPTOR_UNION *
 mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index)
 {
     u_int8_t *p;
 
-    if (index >= sc->max_fw_cmds) {
-        device_printf(sc->mrsas_dev, "Invalid SMID (0x%x)request for desc\n", index);
-        return NULL;
-    }
+    KASSERT(index < sc->max_fw_cmds, ("req_desc is out of range"));
     p = sc->req_desc + sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * index;
 
     return (MRSAS_REQUEST_DESCRIPTOR_UNION *) p;
 }
 
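mrsas_get_request_desc() locates a descriptor by flat pointer arithmetic over a contiguous array, so an out-of-range SMID now trips KASSERT (a panic on INVARIANTS kernels, compiled out otherwise) instead of returning a NULL that callers did not check. A standalone sketch of the same indexing, with a plain assert standing in for KASSERT:

#include <assert.h>
#include <stdint.h>

/* Stand-in for MRSAS_REQUEST_DESCRIPTOR_UNION (really a 64-bit union). */
typedef struct { uint64_t Words; } request_desc_t;

static request_desc_t *
get_request_desc(uint8_t *req_desc_base, uint16_t index, uint16_t max_fw_cmds)
{
    assert(index < max_fw_cmds);    /* KASSERT(index < sc->max_fw_cmds, ...) */
    return ((request_desc_t *)(req_desc_base +
        sizeof(request_desc_t) * index));
}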
+/* mrsas_prepare_secondRaid1_IO
+ * It prepares the raid 1 second IO
+ */
+void
+mrsas_prepare_secondRaid1_IO(struct mrsas_softc *sc,
+    struct mrsas_mpt_cmd *cmd)
+{
+    MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL;
+    struct mrsas_mpt_cmd *r1_cmd;
+
+    r1_cmd = cmd->peer_cmd;
+    req_desc = cmd->request_desc;
+
+    /*
+     * copy the io request frame as well as 8 SGEs data for r1
+     * command
+     */
+    memcpy(r1_cmd->io_request, cmd->io_request,
+        (sizeof(MRSAS_RAID_SCSI_IO_REQUEST)));
+    memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL,
+        (sc->max_sge_in_main_msg * sizeof(MPI2_SGE_IO_UNION)));
+
+    /* sense buffer is different for r1 command */
+    r1_cmd->io_request->SenseBufferLowAddress = r1_cmd->sense_phys_addr;
+    r1_cmd->ccb_ptr = cmd->ccb_ptr;
+
+    req_desc2 = mrsas_get_request_desc(sc, r1_cmd->index - 1);
+    req_desc2->addr.Words = 0;
+    r1_cmd->request_desc = req_desc2;
+    req_desc2->SCSIIO.SMID = r1_cmd->index;
+    req_desc2->SCSIIO.RequestFlags = req_desc->SCSIIO.RequestFlags;
+    r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle;
+    r1_cmd->r1_alt_dev_handle = cmd->io_request->DevHandle;
+    r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle;
+    cmd->io_request->RaidContext.raid_context_g35.smid.peerSMID =
+        r1_cmd->index;
+    r1_cmd->io_request->RaidContext.raid_context_g35.smid.peerSMID =
+        cmd->index;
+    /*
+     * MSIxIndex of both commands request descriptors
+     * should be same
+     */
+    r1_cmd->request_desc->SCSIIO.MSIxIndex = cmd->request_desc->SCSIIO.MSIxIndex;
+    /* span arm is different for r1 cmd */
+    r1_cmd->io_request->RaidContext.raid_context_g35.spanArm =
+        cmd->io_request->RaidContext.raid_context_g35.spanArm + 1;
+}
+
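The cloned command addresses the mirror by bumping spanArm. Assuming the usual LSI RAID-map packing, where the low bits of spanArm hold the arm index and the upper bits the span (the exact shift and mask live in the RAID map headers, not in this hunk), the arithmetic looks like this:

#include <stdint.h>

#define SPAN_SHIFT 5    /* assumed packing: high bits = span */
#define ARM_MASK   0x1f /* assumed packing: low 5 bits = arm */

/*
 * For a two-arm RAID 1 row, the peer write lands on the adjacent arm of
 * the same span, which is exactly "spanArm + 1" while the arm bits stay
 * within the low field.
 */
static uint8_t
mirror_span_arm(uint8_t span_arm)
{
    uint8_t span = span_arm >> SPAN_SHIFT;
    uint8_t arm = (uint8_t)((span_arm & ARM_MASK) + 1);

    return ((uint8_t)((span << SPAN_SHIFT) | (arm & ARM_MASK)));
}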
 /*
  * mrsas_build_ldio_rw: Builds an LDIO command
  * input: Adapter instance soft state
  *        Pointer to command packet
  *        Pointer to CCB
  *
  * This function builds the LDIO command packet. It returns 0 if the command is
  * built successfully, otherwise it returns a 1.

--- 5 unchanged lines hidden ---

     struct ccb_hdr *ccb_h = &(ccb->ccb_h);
     struct ccb_scsiio *csio = &(ccb->csio);
     u_int32_t device_id;
     MRSAS_RAID_SCSI_IO_REQUEST *io_request;
 
     device_id = ccb_h->target_id;
 
     io_request = cmd->io_request;
-    io_request->RaidContext.VirtualDiskTgtId = device_id;
-    io_request->RaidContext.status = 0;
-    io_request->RaidContext.exStatus = 0;
+    io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
+    io_request->RaidContext.raid_context.status = 0;
+    io_request->RaidContext.raid_context.exStatus = 0;
 
     /* just the cdb len, other flags zero, and ORed-in later for FP */
     io_request->IoFlags = csio->cdb_len;
 
     if (mrsas_setup_io(sc, cmd, ccb, device_id, io_request) != SUCCESS)
         device_printf(sc->mrsas_dev, "Build ldio or fpio error\n");
 
     io_request->DataLength = cmd->length;
 
     if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
         if (cmd->sge_count > sc->max_num_sge) {
             device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds "
                 "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
             return (FAIL);
         }
-        /*
-         * numSGE stores the lower 8 bits of sge_count; numSGEExt stores
-         * the higher 8 bits.
-         */
-        io_request->RaidContext.numSGE = cmd->sge_count;
-        io_request->RaidContext.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
+        if (sc->is_ventura)
+            io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count;
+        else {
+            /*
+             * numSGE stores the lower 8 bits of sge_count; numSGEExt
+             * stores the higher 8 bits.
+             */
+            io_request->RaidContext.raid_context.numSGE = cmd->sge_count;
+            io_request->RaidContext.raid_context.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
+        }
 
     } else {
         device_printf(sc->mrsas_dev, "Data map/load failed.\n");
         return (FAIL);
     }
     return (0);
 }
 
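On pre-Ventura contexts the 16-bit SGE count is carried in two 8-bit fields, numSGE and numSGEExt; Ventura's raid_context_g35 has a wide numSGE, so the split disappears there. The split itself is plain byte slicing:

#include <stdint.h>

/* Pack a 16-bit SGE count into the legacy numSGE/numSGEExt byte pair. */
static void
pack_sge_count(uint16_t sge_count, uint8_t *numSGE, uint8_t *numSGEExt)
{
    *numSGE = (uint8_t)(sge_count & 0xff);      /* low 8 bits */
    *numSGEExt = (uint8_t)(sge_count >> 8);     /* high 8 bits */
}

/* Reassembly is the mirror image. */
static uint16_t
unpack_sge_count(uint8_t numSGE, uint8_t numSGEExt)
{
    return ((uint16_t)((numSGEExt << 8) | numSGE));
}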
+/* stream detection on read and write IOs */
+static void
+mrsas_stream_detect(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+    struct IO_REQUEST_INFO *io_info)
+{
+    u_int32_t device_id = io_info->ldTgtId;
+    LD_STREAM_DETECT *current_ld_SD = sc->streamDetectByLD[device_id];
+    u_int32_t *track_stream = &current_ld_SD->mruBitMap;
+    u_int32_t streamNum, shiftedValues, unshiftedValues;
+    u_int32_t indexValueMask, shiftedValuesMask;
+    int i;
+    boolean_t isReadAhead = false;
+    STREAM_DETECT *current_SD;
+
+    /* find a possible stream */
+    for (i = 0; i < MAX_STREAMS_TRACKED; ++i) {
+        streamNum = (*track_stream >> (i * BITS_PER_INDEX_STREAM)) &
+            STREAM_MASK;
+        current_SD = &current_ld_SD->streamTrack[streamNum];
+        /*
+         * if we found a stream, update the raid context and
+         * also update the mruBitMap
+         */
+        if (current_SD->nextSeqLBA &&
+            io_info->ldStartBlock >= current_SD->nextSeqLBA &&
+            (io_info->ldStartBlock <= (current_SD->nextSeqLBA + 32)) &&
+            (current_SD->isRead == io_info->isRead)) {
+            if (io_info->ldStartBlock != current_SD->nextSeqLBA &&
+                (!io_info->isRead || !isReadAhead)) {
+                /*
+                 * Once the API is available we need to change this.
+                 * At this point we are not allowing any gap.
+                 */
+                continue;
+            }
+            cmd->io_request->RaidContext.raid_context_g35.streamDetected = TRUE;
+            current_SD->nextSeqLBA = io_info->ldStartBlock + io_info->numBlocks;
+            /*
+             * update the mruBitMap LRU
+             */
+            shiftedValuesMask = (1 << i * BITS_PER_INDEX_STREAM) - 1;
+            shiftedValues = ((*track_stream & shiftedValuesMask) <<
+                BITS_PER_INDEX_STREAM);
+            indexValueMask = STREAM_MASK << i * BITS_PER_INDEX_STREAM;
+            unshiftedValues = (*track_stream) &
+                (~(shiftedValuesMask | indexValueMask));
+            *track_stream =
+                (unshiftedValues | shiftedValues | streamNum);
+            return;
+        }
+    }
+    /*
+     * if we did not find any stream, create a new one from the least
+     * recently used
+     */
+    streamNum = (*track_stream >>
+        ((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) & STREAM_MASK;
+    current_SD = &current_ld_SD->streamTrack[streamNum];
+    current_SD->isRead = io_info->isRead;
+    current_SD->nextSeqLBA = io_info->ldStartBlock + io_info->numBlocks;
+    *track_stream = (((*track_stream & ZERO_LAST_STREAM) << 4) | streamNum);
+    return;
+}
+
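mrsas_stream_detect() keeps the per-LD stream slots in one 32-bit mruBitMap, one slot index per BITS_PER_INDEX_STREAM-bit nibble with the most recently used slot in the lowest nibble. Promoting slot i shifts every nibble below it up one position and writes the slot number into the bottom. A standalone demo of that promotion, assuming the driver's usual constants (8 tracked streams, 4 bits per index; the real values live in mrsas.h):

#include <stdint.h>
#include <stdio.h>

#define MAX_STREAMS_TRACKED   8    /* assumed */
#define BITS_PER_INDEX_STREAM 4    /* assumed */
#define STREAM_MASK           ((1u << BITS_PER_INDEX_STREAM) - 1)

/* Move the stream stored at nibble position i to the MRU (lowest) nibble. */
static uint32_t
promote_to_mru(uint32_t track_stream, int i)
{
    uint32_t streamNum = (track_stream >> (i * BITS_PER_INDEX_STREAM)) & STREAM_MASK;
    uint32_t shiftedValuesMask = (1u << (i * BITS_PER_INDEX_STREAM)) - 1;
    uint32_t indexValueMask = STREAM_MASK << (i * BITS_PER_INDEX_STREAM);
    uint32_t shiftedValues = (track_stream & shiftedValuesMask) << BITS_PER_INDEX_STREAM;
    uint32_t unshiftedValues = track_stream & ~(shiftedValuesMask | indexValueMask);

    return (unshiftedValues | shiftedValues | streamNum);
}

int
main(void)
{
    /* Slot 0 is MRU; promote the slot stored at nibble 3. */
    printf("0x%08x\n", promote_to_mru(0x76543210u, 3));    /* 0x76542103 */
    return (0);
}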
 /*
  * mrsas_setup_io: Set up data including Fast Path I/O
  * input: Adapter instance soft state
  *        Pointer to command packet
  *        Pointer to CCB
  *
  * This function builds the DCDB inquiry command. It returns 0 if the command
  * is built successfully, otherwise it returns a 1.
  */
 int
 mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
     union ccb *ccb, u_int32_t device_id,
     MRSAS_RAID_SCSI_IO_REQUEST * io_request)
 {
     struct ccb_hdr *ccb_h = &(ccb->ccb_h);
     struct ccb_scsiio *csio = &(ccb->csio);
     struct IO_REQUEST_INFO io_info;
     MR_DRV_RAID_MAP_ALL *map_ptr;
+    struct mrsas_mpt_cmd *r1_cmd = NULL;
+
     MR_LD_RAID *raid;
     u_int8_t fp_possible;
     u_int32_t start_lba_hi, start_lba_lo, ld_block_size, ld;
     u_int32_t datalength = 0;
 
+    io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
+
     start_lba_lo = 0;
     start_lba_hi = 0;
     fp_possible = 0;
 
     /*
      * READ_6 (0x08) or WRITE_6 (0x0A) cdb
      */
     if (csio->cdb_len == 6) {

--- 43 unchanged lines hidden ---

             ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
             (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
             ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
     }
     memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
     io_info.ldStartBlock = ((u_int64_t)start_lba_hi << 32) | start_lba_lo;
     io_info.numBlocks = datalength;
     io_info.ldTgtId = device_id;
+    io_info.r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
 
+    io_request->DataLength = cmd->length;
+
     switch (ccb_h->flags & CAM_DIR_MASK) {
     case CAM_DIR_IN:
         io_info.isRead = 1;
         break;
     case CAM_DIR_OUT:
         io_info.isRead = 0;
         break;
     case CAM_DIR_NONE:
     default:
         mrsas_dprint(sc, MRSAS_TRACE, "From %s : DMA Flag is %d \n", __func__, ccb_h->flags & CAM_DIR_MASK);
         break;
     }
 
     map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
     ld_block_size = MR_LdBlockSizeGet(device_id, map_ptr, sc);
 
     ld = MR_TargetIdToLdGet(device_id, map_ptr);
     if ((ld >= MAX_LOGICAL_DRIVES_EXT) || (!sc->fast_path_io)) {
-        io_request->RaidContext.regLockFlags = 0;
+        io_request->RaidContext.raid_context.regLockFlags = 0;
         fp_possible = 0;
     } else {
-        if (MR_BuildRaidContext(sc, &io_info, &io_request->RaidContext, map_ptr))
+        if (MR_BuildRaidContext(sc, &io_info, &io_request->RaidContext.raid_context, map_ptr))
             fp_possible = io_info.fpOkForIo;
     }
 
     raid = MR_LdRaidGet(ld, map_ptr);
     /* Store the TM capability value in cmd */
     cmd->tmCapable = raid->capability.tmCapable;
 
     cmd->request_desc->SCSIIO.MSIxIndex =
         sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;
 
+    if (sc->is_ventura) {
+        if (sc->streamDetectByLD) {
+            mtx_lock(&sc->stream_lock);
+            mrsas_stream_detect(sc, cmd, &io_info);
+            mtx_unlock(&sc->stream_lock);
+            /*
+             * In Ventura, if a stream is detected for a read and it is
+             * read-ahead capable, make this IO an LDIO.
+             */
+            if (io_request->RaidContext.raid_context_g35.streamDetected &&
+                io_info.isRead && io_info.raCapable)
+                fp_possible = FALSE;
+        }
 
+        /*
+         * Set the raid 1/10 fast-path write capable bit in io_info.
+         * Note - reset peer_cmd and r1_alt_dev_handle if fp_possible is
+         * disabled after this point.  Try not to add more fp_possible
+         * toggles after this.
+         */
+        if (fp_possible &&
+            (io_info.r1_alt_dev_handle != MR_DEVHANDLE_INVALID) &&
+            (raid->level == 1) && !io_info.isRead) {
+            r1_cmd = mrsas_get_mpt_cmd(sc);
+            if (!r1_cmd) {
+                fp_possible = FALSE;
+                printf("Avago debug fp disable from %s %d \n",
+                    __func__, __LINE__);
+            } else {
+                cmd->peer_cmd = r1_cmd;
+                r1_cmd->peer_cmd = cmd;
+            }
+        }
+    }
+
     if (fp_possible) {
         mrsas_set_pd_lba(io_request, csio->cdb_len, &io_info, ccb, map_ptr,
             start_lba_lo, ld_block_size);
         io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
         cmd->request_desc->SCSIIO.RequestFlags =
             (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
             MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
         if (sc->mrsas_gen3_ctrl) {
-            if (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED)
+            if (io_request->RaidContext.raid_context.regLockFlags == REGION_TYPE_UNUSED)
                 cmd->request_desc->SCSIIO.RequestFlags =
                     (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
                     MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
-            io_request->RaidContext.Type = MPI2_TYPE_CUDA;
-            io_request->RaidContext.nseg = 0x1;
+            io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA;
+            io_request->RaidContext.raid_context.nseg = 0x1;
             io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
-            io_request->RaidContext.regLockFlags |=
+            io_request->RaidContext.raid_context.regLockFlags |=
                 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
                 MR_RL_FLAGS_SEQ_NUM_ENABLE);
+        } else if (sc->is_ventura) {
+            io_request->RaidContext.raid_context_g35.Type = MPI2_TYPE_CUDA;
+            io_request->RaidContext.raid_context_g35.nseg = 0x1;
+            io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1;
+            io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
+            if (io_request->RaidContext.raid_context_g35.routingFlags.bits.sld) {
+                io_request->RaidContext.raid_context_g35.RAIDFlags =
+                    (MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS
+                    << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
+            }
         }
         if ((sc->load_balance_info[device_id].loadBalanceFlag) &&
             (io_info.isRead)) {
             io_info.devHandle =
                 mrsas_get_updated_dev_handle(sc,
                 &sc->load_balance_info[device_id], &io_info);
             cmd->load_balance = MRSAS_LOAD_BALANCE_FLAG;
             cmd->pd_r1_lb = io_info.pd_after_lb;
+            if (sc->is_ventura)
+                io_request->RaidContext.raid_context_g35.spanArm = io_info.span_arm;
+            else
+                io_request->RaidContext.raid_context.spanArm = io_info.span_arm;
         } else
             cmd->load_balance = 0;
+
+        if (sc->is_ventura)
+            cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
+        else
+            cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
+
         cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
         io_request->DevHandle = io_info.devHandle;
+        cmd->pdInterface = io_info.pdInterface;
     } else {
         /* Not FP IO */
-        io_request->RaidContext.timeoutValue = map_ptr->raidMap.fpPdIoTimeoutSec;
+        io_request->RaidContext.raid_context.timeoutValue = map_ptr->raidMap.fpPdIoTimeoutSec;
         cmd->request_desc->SCSIIO.RequestFlags =
             (MRSAS_REQ_DESCRIPT_FLAGS_LD_IO <<
             MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
         if (sc->mrsas_gen3_ctrl) {
-            if (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED)
+            if (io_request->RaidContext.raid_context.regLockFlags == REGION_TYPE_UNUSED)
                 cmd->request_desc->SCSIIO.RequestFlags =
                     (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
                     MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
-            io_request->RaidContext.Type = MPI2_TYPE_CUDA;
-            io_request->RaidContext.regLockFlags |=
+            io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA;
+            io_request->RaidContext.raid_context.regLockFlags |=
                 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
                 MR_RL_FLAGS_SEQ_NUM_ENABLE);
-            io_request->RaidContext.nseg = 0x1;
+            io_request->RaidContext.raid_context.nseg = 0x1;
+        } else if (sc->is_ventura) {
+            io_request->RaidContext.raid_context_g35.Type = MPI2_TYPE_CUDA;
+            io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1;
+            io_request->RaidContext.raid_context_g35.nseg = 0x1;
         }
         io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
         io_request->DevHandle = device_id;
     }
     return (0);
 }
 
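The MSI-X index chosen above simply hashes the submitting CPU onto the vector range, so completions spread across reply queues while each CPU keeps its IOs on one vector:

#include <stdint.h>

/* Reply-queue selection: vector 0 when MSI-X is off, cpuid mod vectors otherwise. */
static uint8_t
pick_msix_index(uint32_t cpuid, uint32_t msix_vectors)
{
    return (msix_vectors ? (uint8_t)(cpuid % msix_vectors) : 0);
}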
 /*

--- 8 unchanged lines hidden ---

 int
 mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
     union ccb *ccb)
 {
     struct ccb_hdr *ccb_h = &(ccb->ccb_h);
     u_int32_t device_id, ld;
     MR_DRV_RAID_MAP_ALL *map_ptr;
     MR_LD_RAID *raid;
+    RAID_CONTEXT *pRAID_Context;
     MRSAS_RAID_SCSI_IO_REQUEST *io_request;
 
     io_request = cmd->io_request;
     device_id = ccb_h->target_id;
 
     map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
     ld = MR_TargetIdToLdGet(device_id, map_ptr);
     raid = MR_LdRaidGet(ld, map_ptr);
+    /* get RAID_Context pointer */
+    pRAID_Context = &io_request->RaidContext.raid_context;
     /* Store the TM capability value in cmd */
     cmd->tmCapable = raid->capability.tmCapable;
 
     /* FW path for LD Non-RW (SCSI management commands) */
     io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
     io_request->DevHandle = device_id;
     cmd->request_desc->SCSIIO.RequestFlags =
         (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
         MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
 
-    io_request->RaidContext.VirtualDiskTgtId = device_id;
+    io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
     io_request->LUN[1] = ccb_h->target_lun & 0xF;
     io_request->DataLength = cmd->length;
 
     if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
         if (cmd->sge_count > sc->max_num_sge) {
             device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds "
                 "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
             return (1);
         }
-        /*
-         * numSGE stores the lower 8 bits of sge_count; numSGEExt stores
-         * the higher 8 bits.
-         */
-        io_request->RaidContext.numSGE = cmd->sge_count;
-        io_request->RaidContext.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
+        if (sc->is_ventura)
+            io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count;
+        else {
+            /*
+             * numSGE stores the lower 8 bits of sge_count; numSGEExt
+             * stores the higher 8 bits.
+             */
+            io_request->RaidContext.raid_context.numSGE = cmd->sge_count;
+            io_request->RaidContext.raid_context.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
+        }
     } else {
         device_printf(sc->mrsas_dev, "Data map/load failed.\n");
         return (1);
     }
     return (0);
 }
 
 /*

--- 8 unchanged lines hidden ---

 int
 mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
     union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible)
 {
     struct ccb_hdr *ccb_h = &(ccb->ccb_h);
     u_int32_t device_id;
     MR_DRV_RAID_MAP_ALL *local_map_ptr;
     MRSAS_RAID_SCSI_IO_REQUEST *io_request;
+    RAID_CONTEXT *pRAID_Context;
1046
1047 io_request = cmd->io_request;
1268 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1269
1270 io_request = cmd->io_request;
1271 /* get RAID_Context pointer */
1272 pRAID_Context = &io_request->RaidContext.raid_context;
1048 device_id = ccb_h->target_id;
1049 local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
1273 device_id = ccb_h->target_id;
1274 local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
1050 io_request->RaidContext.RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
1275 io_request->RaidContext.raid_context.RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
1051 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
1276 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
1052 io_request->RaidContext.regLockFlags = 0;
1053 io_request->RaidContext.regLockRowLBA = 0;
1054 io_request->RaidContext.regLockLength = 0;
1277 io_request->RaidContext.raid_context.regLockFlags = 0;
1278 io_request->RaidContext.raid_context.regLockRowLBA = 0;
1279 io_request->RaidContext.raid_context.regLockLength = 0;
1055
1280
1281 cmd->pdInterface = sc->target_list[device_id].interface_type;
1282
1056 /* If FW supports PD sequence number */
1057 if (sc->use_seqnum_jbod_fp &&
1058 sc->pd_list[device_id].driveType == 0x00) {
1059 //printf("Using Drv seq num\n");
1060 pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id - 1) & 1];
1061 cmd->tmCapable = pd_sync->seq[device_id].capability.tmCapable;
1283 /* If FW supports PD sequence number */
1284 if (sc->use_seqnum_jbod_fp &&
1285 sc->pd_list[device_id].driveType == 0x00) {
1286 //printf("Using Drv seq num\n");
1287 pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id - 1) & 1];
1288 cmd->tmCapable = pd_sync->seq[device_id].capability.tmCapable;
1062 io_request->RaidContext.VirtualDiskTgtId = device_id + 255;
1063 io_request->RaidContext.configSeqNum = pd_sync->seq[device_id].seqNum;
1289 /* More than 256 PD/JBOD support for Ventura */
1290 if (sc->support_morethan256jbod)
1291 io_request->RaidContext.raid_context.VirtualDiskTgtId =
1292 pd_sync->seq[device_id].pdTargetId;
1293 else
1294 io_request->RaidContext.raid_context.VirtualDiskTgtId =
1295 device_id + 255;
1296 io_request->RaidContext.raid_context.configSeqNum = pd_sync->seq[device_id].seqNum;
1064 io_request->DevHandle = pd_sync->seq[device_id].devHandle;
1297 io_request->DevHandle = pd_sync->seq[device_id].devHandle;
1065 io_request->RaidContext.regLockFlags |=
1066 (MR_RL_FLAGS_SEQ_NUM_ENABLE | MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
1067 io_request->RaidContext.Type = MPI2_TYPE_CUDA;
1068 io_request->RaidContext.nseg = 0x1;
1298 if (sc->is_ventura)
1299 io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1;
1300 else
1301 io_request->RaidContext.raid_context.regLockFlags |=
1302 (MR_RL_FLAGS_SEQ_NUM_ENABLE | MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
1303 /* raid_context.Type = MPI2_TYPE_CUDA is valid only,
1304 * if FW support Jbod Sequence number
1305 */
1306 io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA;
1307 io_request->RaidContext.raid_context.nseg = 0x1;
1069 } else if (sc->fast_path_io) {
1070 //printf("Using LD RAID map\n");
1308 } else if (sc->fast_path_io) {
1309 //printf("Using LD RAID map\n");
1071 io_request->RaidContext.VirtualDiskTgtId = device_id;
1072 io_request->RaidContext.configSeqNum = 0;
1310 io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
1311 io_request->RaidContext.raid_context.configSeqNum = 0;
1073 local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
1074 io_request->DevHandle =
1075 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
1076 } else {
1077 //printf("Using FW PATH\n");
1078 /* Want to send all IO via FW path */
1312 local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
1313 io_request->DevHandle =
1314 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
1315 } else {
1316 //printf("Using FW PATH\n");
1317 /* Want to send all IO via FW path */
1079 io_request->RaidContext.VirtualDiskTgtId = device_id;
1080 io_request->RaidContext.configSeqNum = 0;
1081 io_request->DevHandle = 0xFFFF;
1318 io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
1319 io_request->RaidContext.raid_context.configSeqNum = 0;
1320 io_request->DevHandle = MR_DEVHANDLE_INVALID;
1082 }
1083
1084 cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
1085 cmd->request_desc->SCSIIO.MSIxIndex =
1086 sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;
1087
1088 if (!fp_possible) {
1089 /* system pd firmware path */
1090 io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
1091 cmd->request_desc->SCSIIO.RequestFlags =
1092 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1093 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1321 }
1322
1323 cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
1324 cmd->request_desc->SCSIIO.MSIxIndex =
1325 sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;
1326
1327 if (!fp_possible) {
1328 /* system pd firmware path */
1329 io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
1330 cmd->request_desc->SCSIIO.RequestFlags =
1331 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1332 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1094 io_request->RaidContext.timeoutValue =
1333 io_request->RaidContext.raid_context.timeoutValue =
1095 local_map_ptr->raidMap.fpPdIoTimeoutSec;
1334 local_map_ptr->raidMap.fpPdIoTimeoutSec;
1096 io_request->RaidContext.VirtualDiskTgtId = device_id;
1335 io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
1097 } else {
1098 /* system pd fast path */
1099 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1336 } else {
1337 /* system pd fast path */
1338 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1100 io_request->RaidContext.timeoutValue = local_map_ptr->raidMap.fpPdIoTimeoutSec;
1339 io_request->RaidContext.raid_context.timeoutValue = local_map_ptr->raidMap.fpPdIoTimeoutSec;
1101
1102 /*
1103 * NOTE - For system pd RW cmds only IoFlags will be FAST_PATH
1104 * Because the NON RW cmds will now go via FW Queue
1105 * and not the Exception queue
1106 */
1340
1341 /*
1342 * NOTE - For system pd RW cmds only IoFlags will be FAST_PATH
1343 * Because the NON RW cmds will now go via FW Queue
1344 * and not the Exception queue
1345 */
1107 io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1346 if (sc->mrsas_gen3_ctrl || sc->is_ventura)
1347 io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1108
1109 cmd->request_desc->SCSIIO.RequestFlags =
1110 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
1111 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1112 }
1113
1114 io_request->LUN[1] = ccb_h->target_lun & 0xF;
1115 io_request->DataLength = cmd->length;
1116
1117 if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
1118 if (cmd->sge_count > sc->max_num_sge) {
1119 device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds"
1120 "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
1121 return (1);
1122 }
1348
1349 cmd->request_desc->SCSIIO.RequestFlags =
1350 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
1351 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1352 }
1353
1354 io_request->LUN[1] = ccb_h->target_lun & 0xF;
1355 io_request->DataLength = cmd->length;
1356
1357 if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
1358 if (cmd->sge_count > sc->max_num_sge) {
1359 device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds"
1360 "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
1361 return (1);
1362 }
1123 /*
1124 * numSGE store lower 8 bit of sge_count. numSGEExt store
1125 * higher 8 bit of sge_count
1126 */
1127 io_request->RaidContext.numSGE = cmd->sge_count;
1128 io_request->RaidContext.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
1363 if (sc->is_ventura)
1364 io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count;
1365 else {
1366 /*
1367 * numSGE store lower 8 bit of sge_count. numSGEExt store
1368 * higher 8 bit of sge_count
1369 */
1370 io_request->RaidContext.raid_context.numSGE = cmd->sge_count;
1371 io_request->RaidContext.raid_context.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
1372 }
1129 } else {
1130 device_printf(sc->mrsas_dev, "Data map/load failed.\n");
1131 return (1);
1132 }
1133 return (0);
1134}
1135
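The bare 0xFFFF device handle became the named MR_DEVHANDLE_INVALID, the same sentinel mrsas_startio() tests to decide whether a RAID 1 peer command exists. A sketch of the sentinel and the test, assuming the usual all-ones 16-bit definition from mrsas.h:

#include <stdint.h>

#define MR_DEVHANDLE_INVALID 0xFFFFu    /* assumed value, per mrsas.h */

/* True when a RAID 1 fast-path write received a usable mirror handle. */
static int
has_r1_peer(uint16_t r1_alt_dev_handle)
{
    return (r1_alt_dev_handle != MR_DEVHANDLE_INVALID);
}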
 /*
+ * mrsas_is_prp_possible: This function will tell whether PRPs should be built or not
+ * sc:    Adapter instance soft state
+ * cmd:   MPT command frame pointer
+ * nsegs: Number of OS SGEs
+ *
+ * This function will check whether the IO is qualified to build PRPs.
+ * return: true if PRPs should be built
+ *         false if IEEE SGLs should be built
+ */
+static boolean_t mrsas_is_prp_possible(struct mrsas_mpt_cmd *cmd,
+    bus_dma_segment_t *segs, int nsegs)
+{
+    struct mrsas_softc *sc = cmd->sc;
+    int i;
+    u_int32_t data_length = 0;
+    bool build_prp = false;
+    u_int32_t mr_nvme_pg_size;
+
+    mr_nvme_pg_size = max(sc->nvme_page_size, MR_DEFAULT_NVME_PAGE_SIZE);
+    data_length = cmd->length;
+
+    if (data_length > (mr_nvme_pg_size * 5))
+        build_prp = true;
+    else if ((data_length > (mr_nvme_pg_size * 4)) &&
+        (data_length <= (mr_nvme_pg_size * 5))) {
+        /* check if 1st SG entry size is < residual beyond 4 pages */
+        if ((segs[0].ds_len) < (data_length - (mr_nvme_pg_size * 4)))
+            build_prp = true;
+    }
+
+    /* check for SGE holes here */
+    for (i = 0; i < nsegs; i++) {
+        /* check for mid SGEs */
+        if ((i != 0) && (i != (nsegs - 1))) {
+            if ((segs[i].ds_addr % mr_nvme_pg_size) ||
+                (segs[i].ds_len % mr_nvme_pg_size)) {
+                build_prp = false;
+                mrsas_atomic_inc(&sc->sge_holes);
+                break;
+            }
+        }
+
+        /* check for first SGE */
+        if ((nsegs > 1) && (i == 0)) {
+            if ((segs[i].ds_addr + segs[i].ds_len) % mr_nvme_pg_size) {
+                build_prp = false;
+                mrsas_atomic_inc(&sc->sge_holes);
+                break;
+            }
+        }
+
+        /* check for last SGE */
+        if ((nsegs > 1) && (i == (nsegs - 1))) {
+            if (segs[i].ds_addr % mr_nvme_pg_size) {
+                build_prp = false;
+                mrsas_atomic_inc(&sc->sge_holes);
+                break;
+            }
+        }
+    }
+
+    return build_prp;
+}
+
+/*
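The hole checks above reduce to one rule: every SGE boundary interior to the transfer must fall on an NVMe page boundary - the first SGE must end page-aligned, middle SGEs must start and end page-aligned, and the last must start page-aligned. A standalone checker mirroring just those alignment tests (the size gate on data_length is separate), with 4 KiB pages:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NVME_PG 4096u

struct seg { uint64_t addr; uint32_t len; };

static bool
prp_alignment_ok(const struct seg *s, int n)
{
    for (int i = 0; i < n; i++) {
        if (n > 1 && i == 0 && (s[i].addr + s[i].len) % NVME_PG)
            return (false);    /* first SGE must end on a page */
        if (i != 0 && i != n - 1 &&
            ((s[i].addr % NVME_PG) || (s[i].len % NVME_PG)))
            return (false);    /* middle SGEs fully page-aligned */
        if (n > 1 && i == n - 1 && (s[i].addr % NVME_PG))
            return (false);    /* last SGE must start on a page */
    }
    return (true);
}

int
main(void)
{
    struct seg ok[] = {{0x10000200, 0xE00}, {0x20000000, 0x1000}, {0x30000000, 0x80}};
    struct seg hole[] = {{0x10000200, 0xE00}, {0x20000100, 0x1000}};

    printf("%d %d\n", prp_alignment_ok(ok, 3), prp_alignment_ok(hole, 2)); /* 1 0 */
    return (0);
}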
  * mrsas_map_request: Map and load data
  * input: Adapter instance soft state
  *        Pointer to command packet
  *
  * For data from OS, map and load the data buffer into bus space. The SG list
  * is built in the callback. If the bus dmamap load is not successful,
  * cmd->error_code will contain the error code and a 1 is returned.
  */

--- 45 unchanged lines hidden ---

         if (cmd->flags & MRSAS_DIR_OUT)
             bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTWRITE);
         mtx_lock(&sc->io_lock);
         bus_dmamap_unload(sc->data_tag, cmd->data_dmamap);
         mtx_unlock(&sc->io_lock);
     }
 }
 
-/*
- * mrsas_data_load_cb: Callback entry point
- * input: Pointer to command packet as argument
- *        Pointer to segment
- *        Number of segments Error
- *
- * This is the callback function of the bus dma map load. It builds the SG
- * list.
- */
-static void
-mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
-{
-    struct mrsas_mpt_cmd *cmd = (struct mrsas_mpt_cmd *)arg;
-    struct mrsas_softc *sc = cmd->sc;
-    MRSAS_RAID_SCSI_IO_REQUEST *io_request;
-    pMpi25IeeeSgeChain64_t sgl_ptr;
-    int i = 0, sg_processed = 0;
-
-    if (error) {
-        cmd->error_code = error;
-        device_printf(sc->mrsas_dev, "mrsas_data_load_cb: error=%d\n", error);
-        if (error == EFBIG) {
-            cmd->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
-            return;
-        }
-    }
-    if (cmd->flags & MRSAS_DIR_IN)
-        bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
-            BUS_DMASYNC_PREREAD);
-    if (cmd->flags & MRSAS_DIR_OUT)
-        bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
-            BUS_DMASYNC_PREWRITE);
-    if (nseg > sc->max_num_sge) {
-        device_printf(sc->mrsas_dev, "SGE count is too large or 0.\n");
-        return;
-    }
+/**
+ * mrsas_build_ieee_sgl - Prepare IEEE SGLs
+ * @sc:   Adapter soft state
+ * @segs: OS SGEs pointers
+ * @nseg: Number of OS SGEs
+ * @cmd:  Fusion command frame
+ * return: void
+ */
+static void mrsas_build_ieee_sgl(struct mrsas_mpt_cmd *cmd, bus_dma_segment_t *segs, int nseg)
+{
+    struct mrsas_softc *sc = cmd->sc;
+    MRSAS_RAID_SCSI_IO_REQUEST *io_request;
+    pMpi25IeeeSgeChain64_t sgl_ptr;
+    int i = 0, sg_processed = 0;
+
     io_request = cmd->io_request;
     sgl_ptr = (pMpi25IeeeSgeChain64_t)&io_request->SGL;
 
1237 if (sc->mrsas_gen3_ctrl) {
1526 if (sc->mrsas_gen3_ctrl || sc->is_ventura) {
1238 pMpi25IeeeSgeChain64_t sgl_ptr_end = sgl_ptr;
1239
1240 sgl_ptr_end += sc->max_sge_in_main_msg - 1;
1241 sgl_ptr_end->Flags = 0;
1242 }
1243 if (nseg != 0) {
1244 for (i = 0; i < nseg; i++) {
1245 sgl_ptr->Address = segs[i].ds_addr;
1246 sgl_ptr->Length = segs[i].ds_len;
1247 sgl_ptr->Flags = 0;
1527 pMpi25IeeeSgeChain64_t sgl_ptr_end = sgl_ptr;
1528
1529 sgl_ptr_end += sc->max_sge_in_main_msg - 1;
1530 sgl_ptr_end->Flags = 0;
1531 }
1532 if (nseg != 0) {
1533 for (i = 0; i < nseg; i++) {
1534 sgl_ptr->Address = segs[i].ds_addr;
1535 sgl_ptr->Length = segs[i].ds_len;
1536 sgl_ptr->Flags = 0;
1248 if (sc->mrsas_gen3_ctrl) {
1537 if (sc->mrsas_gen3_ctrl || sc->is_ventura) {
1249 if (i == nseg - 1)
1250 sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
1251 }
1252 sgl_ptr++;
1253 sg_processed = i + 1;
1254 if ((sg_processed == (sc->max_sge_in_main_msg - 1)) &&
1538 if (i == nseg - 1)
1539 sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
1540 }
1541 sgl_ptr++;
1542 sg_processed = i + 1;
1543 if ((sg_processed == (sc->max_sge_in_main_msg - 1)) &&
1255 (nseg > sc->max_sge_in_main_msg)) {
1544 (nseg > sc->max_sge_in_main_msg)) {
1256 pMpi25IeeeSgeChain64_t sg_chain;
1257
1545 pMpi25IeeeSgeChain64_t sg_chain;
1546
1258 if (sc->mrsas_gen3_ctrl) {
1547 if (sc->mrsas_gen3_ctrl || sc->is_ventura) {
1259 if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
1548 if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
1260 != MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
1549 != MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
1261 cmd->io_request->ChainOffset = sc->chain_offset_io_request;
1262 else
1263 cmd->io_request->ChainOffset = 0;
1264 } else
1265 cmd->io_request->ChainOffset = sc->chain_offset_io_request;
1266 sg_chain = sgl_ptr;
1550 cmd->io_request->ChainOffset = sc->chain_offset_io_request;
1551 else
1552 cmd->io_request->ChainOffset = 0;
1553 } else
1554 cmd->io_request->ChainOffset = sc->chain_offset_io_request;
1555 sg_chain = sgl_ptr;
1267 if (sc->mrsas_gen3_ctrl)
1556 if (sc->mrsas_gen3_ctrl || sc->is_ventura)
1268 sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
1269 else
1270 sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
1271 sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) * (nseg - sg_processed));
1272 sg_chain->Address = cmd->chain_frame_phys_addr;
1273 sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->chain_frame;
1274 }
1275 }
1276 }
1557 sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
1558 else
1559 sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
1560 sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) * (nseg - sg_processed));
1561 sg_chain->Address = cmd->chain_frame_phys_addr;
1562 sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->chain_frame;
1563 }
1564 }
1565 }
1566}
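
To make the chaining arithmetic above concrete, here is a worked example; both values are assumed purely for illustration, not taken from any particular controller:

/*
 * Example (assumed: sc->max_sge_in_main_msg = 8, nseg = 20):
 *
 *   i = 0..6   Seven data SGEs land in the main message. After i = 6,
 *              sg_processed == 7 == max_sge_in_main_msg - 1 and
 *              nseg (20) > max_sge_in_main_msg (8), so the eighth and
 *              last slot of the main message becomes the chain SGE:
 *                sg_chain->Address = cmd->chain_frame_phys_addr
 *                sg_chain->Length  = sizeof(MPI2_SGE_IO_UNION) * 13
 *   i = 7..19  The remaining thirteen SGEs are written into the chain
 *              frame; on gen3/Ventura parts the entry for i = 19 also
 *              carries IEEE_SGE_FLAGS_END_OF_LIST.
 */
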
1567
1568 /**
1569  * mrsas_build_prp_nvme - Prepare PRPs (Physical Region Pages) - SGLs specific to NVMe drives only
1570 * @sc: Adapter soft state
1571 * @segs: OS SGEs pointers
1572 * @nseg: Number of OS SGEs
1573 * @cmd: Fusion command frame
1574 * return: void
1575 */
1576 static void mrsas_build_prp_nvme(struct mrsas_mpt_cmd *cmd, bus_dma_segment_t *segs, int nseg)
1577 {
1578 struct mrsas_softc *sc = cmd->sc;
1579 int sge_len, offset, num_prp_in_chain = 0;
1580 pMpi25IeeeSgeChain64_t main_chain_element, ptr_first_sgl, sgl_ptr;
1581 u_int64_t *ptr_sgl, *ptr_sgl_phys;
1582 u_int64_t sge_addr;
1583 u_int32_t page_mask, page_mask_result, i = 0;
1584 u_int32_t first_prp_len;
1585 int data_len = cmd->length;
1586 u_int32_t mr_nvme_pg_size = max(sc->nvme_page_size,
1587 MR_DEFAULT_NVME_PAGE_SIZE);
1588
1589 sgl_ptr = (pMpi25IeeeSgeChain64_t) &cmd->io_request->SGL;
1590 /*
1591 * NVMe has a very convoluted PRP format. One PRP is required
1592 * for each page or partial page. We need to split up OS SG
1593 * entries if they are longer than one page or cross a page
1594 * boundary. We also have to insert a PRP list pointer entry as
1595 * the last entry in each physical page of the PRP list.
1596 *
1597 * NOTE: The first PRP "entry" is actually placed in the first
1598 * SGL entry in the main message in IEEE 64 format. The 2nd
1599 * entry in the main message is the chain element, and the rest
1600 * of the PRP entries are built in the contiguous PCIe buffer.
1601 */
1602 page_mask = mr_nvme_pg_size - 1;
1603 ptr_sgl = (u_int64_t *) cmd->chain_frame;
1604 	ptr_sgl_phys = (u_int64_t *) cmd->chain_frame_phys_addr;
1605
1606 	/* Build the chain frame element, which holds all PRPs except the first */
1607 main_chain_element = (pMpi25IeeeSgeChain64_t)
1608 ((u_int8_t *)sgl_ptr + sizeof(MPI25_IEEE_SGE_CHAIN64));
1609
1610
1611 main_chain_element->Address = (u_int64_t) ptr_sgl_phys;
1612 main_chain_element->NextChainOffset = 0;
1613 main_chain_element->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1614 IEEE_SGE_FLAGS_SYSTEM_ADDR |
1615 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
1616
1617
1618 	/* Build the first PRP; the first SGE need not be page aligned */
1619 ptr_first_sgl = sgl_ptr;
1620 sge_addr = segs[i].ds_addr;
1621 sge_len = segs[i].ds_len;
1622 i++;
1623
1624 offset = (u_int32_t) (sge_addr & page_mask);
1625 first_prp_len = mr_nvme_pg_size - offset;
1626
1627 ptr_first_sgl->Address = sge_addr;
1628 ptr_first_sgl->Length = first_prp_len;
1629
1630 data_len -= first_prp_len;
1631
1632 if (sge_len > first_prp_len) {
1633 sge_addr += first_prp_len;
1634 sge_len -= first_prp_len;
1635 } else if (sge_len == first_prp_len) {
1636 sge_addr = segs[i].ds_addr;
1637 sge_len = segs[i].ds_len;
1638 i++;
1639 }
1640
1641 for (;;) {
1642
1643 offset = (u_int32_t) (sge_addr & page_mask);
1644
1645 		/* Insert a PRP list pointer entry when crossing a page boundary */
1646 page_mask_result = (uintptr_t)(ptr_sgl + 1) & page_mask;
1647 if (!page_mask_result) {
1648 device_printf(sc->mrsas_dev, "BRCM: Put prp pointer as we are at page boundary"
1649 " ptr_sgl: 0x%p\n", ptr_sgl);
1650 ptr_sgl_phys++;
1651 *ptr_sgl = (uintptr_t)ptr_sgl_phys;
1652 ptr_sgl++;
1653 num_prp_in_chain++;
1654 }
1655
1656 *ptr_sgl = sge_addr;
1657 ptr_sgl++;
1658 ptr_sgl_phys++;
1659 num_prp_in_chain++;
1660
1661
1662 sge_addr += mr_nvme_pg_size;
1663 sge_len -= mr_nvme_pg_size;
1664 data_len -= mr_nvme_pg_size;
1665
1666 if (data_len <= 0)
1667 break;
1668
1669 if (sge_len > 0)
1670 continue;
1671
1672 sge_addr = segs[i].ds_addr;
1673 sge_len = segs[i].ds_len;
1674 i++;
1675 }
1676
1677 main_chain_element->Length = num_prp_in_chain * sizeof(u_int64_t);
1678 mrsas_atomic_inc(&sc->prp_count);
1679
1680 }
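
A worked pass through the loop above, with all numbers assumed for illustration only:

/*
 * Example (assumed: mr_nvme_pg_size = 4096, nseg = 1,
 * segs[0].ds_addr = 0x10000600, segs[0].ds_len = 16384):
 *
 *   offset = 0x600 (1536), so first_prp_len = 4096 - 1536 = 2560; the
 *   first SGL entry of the main message becomes PRP1 (Address
 *   0x10000600, Length 2560) - only PRP1 may start mid-page.
 *
 *   The loop then emits page-aligned PRP entries into the chain frame:
 *   0x10001000, 0x10002000, 0x10003000, 0x10004000, covering the
 *   remaining 16384 - 2560 = 13824 bytes (the last page is only
 *   partially used; the device takes the true length from the command).
 *
 *   num_prp_in_chain = 4, so main_chain_element->Length = 4 * 8 = 32.
 */
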
1681
1682 /*
1683  * mrsas_data_load_cb:	Callback entry point to build SGLs
1684  * input:	Pointer to command packet as argument
1685  *		Pointer to segment
1686  *		Number of segments and error status
1687  *
1688  * This is the callback function of the bus dma map load. It builds the SG list.
1689  */
1690 static void
1691 mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1692 {
1693 struct mrsas_mpt_cmd *cmd = (struct mrsas_mpt_cmd *)arg;
1694 struct mrsas_softc *sc = cmd->sc;
1695 boolean_t build_prp = false;
1696
1697 if (error) {
1698 cmd->error_code = error;
1699 		device_printf(sc->mrsas_dev, "mrsas_data_load_cb: error=%d\n", error);
1700 if (error == EFBIG) {
1701 cmd->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
1702 return;
1703 }
1704 }
1705 if (cmd->flags & MRSAS_DIR_IN)
1706 bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
1707 BUS_DMASYNC_PREREAD);
1708 if (cmd->flags & MRSAS_DIR_OUT)
1709 bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
1710 BUS_DMASYNC_PREWRITE);
1711 if (nseg > sc->max_num_sge) {
1712 device_printf(sc->mrsas_dev, "SGE count is too large or 0.\n");
1713 return;
1714 }
1715
1716 	/* Check whether PRPs should be built or IEEE SGLs */
1717 if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) &&
1718 (cmd->pdInterface == NVME_PD))
1719 build_prp = mrsas_is_prp_possible(cmd, segs, nseg);
1720
1721 if (build_prp == true)
1722 mrsas_build_prp_nvme(cmd, segs, nseg);
1723 else
1724 mrsas_build_ieee_sgl(cmd, segs, nseg);
1725 
1726 	cmd->sge_count = nseg;
1727 }
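
mrsas_is_prp_possible is only prototyped earlier in this file; its body lies outside this hunk. The constraint it must enforce follows from the PRP format itself: every PRP entry except the first has to be exactly page aligned. The check below is a sketch derived from that rule, not a copy of the driver's function, so treat the name and details as assumptions:

/* Illustrative only: a segment list maps onto PRPs when each segment
 * except the first starts on an NVMe page boundary and each segment
 * except the last ends on one. */
static boolean_t
example_is_prp_possible(struct mrsas_mpt_cmd *cmd, bus_dma_segment_t *segs,
    int nseg)
{
	u_int32_t pg = max(cmd->sc->nvme_page_size, MR_DEFAULT_NVME_PAGE_SIZE);
	u_int32_t mask = pg - 1;
	int i;

	for (i = 0; i < nseg; i++) {
		if (i > 0 && (segs[i].ds_addr & mask))
			return (false);	/* must start on a page boundary */
		if (i < nseg - 1 &&
		    ((segs[i].ds_addr + segs[i].ds_len) & mask))
			return (false);	/* must end on a page boundary */
	}
	return (true);
}
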
1279
1280 /*
1281 * mrsas_freeze_simq: Freeze SIM queue
1282 * input: Pointer to command packet
1283 * Pointer to SIM
1284 *

--- 30 unchanged lines hidden ---

1315  * This function calls mrsas_unmap_request and releases the MPT command.
1316 */
1317void
1318mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
1319{
1320 mrsas_unmap_request(sc, cmd);
1321
1322 mtx_lock(&sc->sim_lock);
1323 	callout_stop(&cmd->cm_callout);
1324 	xpt_done(cmd->ccb_ptr);
1325 	cmd->ccb_ptr = NULL;
1326 	mtx_unlock(&sc->sim_lock);
1327 	mrsas_release_mpt_cmd(cmd);
1328 }
1329 
1330 /*
1331  * mrsas_cam_poll: Polling entry point

--- 355 unchanged lines hidden ---

1728 
1729 /*
1730  * mrsas_freeze_simq: Freeze SIM queue
1731  * input:	Pointer to command packet
1732  *		Pointer to SIM
1733  *

--- 30 unchanged lines hidden ---

1764  * This function calls mrsas_unmap_request and releases the MPT command.
1765  */
1766 void
1767 mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
1768 {
1769 	mrsas_unmap_request(sc, cmd);
1770 
1771 	mtx_lock(&sc->sim_lock);
1772 	if (cmd->callout_owner) {
1773 		callout_stop(&cmd->cm_callout);
1774 		cmd->callout_owner = false;
1775 	}
1776 	xpt_done(cmd->ccb_ptr);
1777 	cmd->ccb_ptr = NULL;
1778 	mtx_unlock(&sc->sim_lock);
1779 	mrsas_release_mpt_cmd(cmd);
1780 }
1781 
1782 /*
1783  * mrsas_cam_poll: Polling entry point

--- 355 unchanged lines hidden ---
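
The callout_owner flag added to mrsas_cmd_done above closes a race between normal completion and the per-command timeout: both paths run under sc->sim_lock, and whichever arrives first takes the callout with it. A reduced sketch of the timeout side, assuming a handler shaped like this driver's mrsas_scsiio_timeout (the name and body here are illustrative, not the actual function):

static void
example_scsiio_timeout(void *arg)
{
	struct mrsas_mpt_cmd *cmd = arg;

	/* The callout has fired, so the command no longer owns it; a
	 * completion racing in behind us will skip callout_stop() and
	 * leave the CCB to error recovery instead of finishing it twice. */
	cmd->callout_owner = false;
}
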