		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_attach(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	ISP_UNLOCK(isp);
	return (0);

bad:
	if (pcs && pcs->ih) {
		(void) bus_teardown_intr(dev, irq, pcs->ih);
	}
	if (locksetup && isp) {
		mtx_destroy(&isp->isp_osinfo.lock);
	}
	if (irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
	}
	if (regs) {
		(void) bus_release_resource(dev, rtp, rgd, regs);
	}
	if (pcs) {
		if (pcs->pci_isp.isp_param)
			free(pcs->pci_isp.isp_param, M_DEVBUF);
		free(pcs, M_DEVBUF);
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	return (ENXIO);
}

static void
isp_pci_intr(void *arg)
{
	struct ispsoftc *isp = arg;
	u_int16_t isr, sema, mbox;

	ISP_LOCK(isp);
	isp->isp_intcnt++;
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		isp->isp_intbogus++;
	} else {
		int iok = isp->isp_osinfo.intsok;
		isp->isp_osinfo.intsok = 0;
		isp_intr(isp, isr, sema, mbox);
		isp->isp_osinfo.intsok = iok;
	}
	ISP_UNLOCK(isp);
}


#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xff))

#define	BXR2(pcs, off)		\
	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define	BXW2(pcs, off, v)	\
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)


static INLINE int
isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(pcs, IspVirt2Off(isp, off));
		val1 = BXR2(pcs, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}

static int
isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t isr, sema;

	if (IS_2100(isp)) {
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
				return (0);
			}
		} else {
			*mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}
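
/*
 * The 23XX read below uses the 32-bit RISC-to-host status register rather
 * than the separate ISR/semaphore/mailbox reads done above: the low 16
 * bits carry the interrupt status and the high 16 bits carry outgoing
 * mailbox 0, so (illustrative decomposition)
 *
 *	isr   = r2hisr & 0xffff;
 *	mbox0 = r2hisr >> 16;
 *
 * recovers both from a single read.
 */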
static int
isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbox0p)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int32_t r2hisr;

	if ((BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT) == 0) {
		*isrp = 0;
		return (0);
	}
	r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
	    IspVirt2Off(pcs, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISPR2HST_RIO_16:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_RIO1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		return (0);
	}
}

static u_int16_t
isp_pci_rd_reg(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
	return (rv);
}

static void
isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
}
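
/*
 * The 1080/1280-style accessors below differ from the plain ones above in
 * that the SXP register block is banked (SXP0 vs. SXP1, selected by
 * SXP_BANK1_SELECT) and shares the register window with the DMA block, so
 * BIU_CONF1 has to be pointed at the right bank around each access and
 * then restored.
 */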
static u_int16_t
isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
}


struct imush {
	struct ispsoftc *isp;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		struct ispsoftc *isp = imushp->isp;
		bus_addr_t addr = segs->ds_addr;

		isp->isp_rquest_dma = addr;
		addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
		isp->isp_result_dma = addr;
		if (IS_FC(isp)) {
			addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
			FCPARAM(isp)->isp_scdma = addr;
		}
	}
}

/*
 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE
 */
#define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1)
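
/*
 * For example (illustrative values only): with a 128KB MAXPHYS and 4KB
 * pages, ISP_NSEGS works out to (131072 / 4096) + 1, i.e. 33 scatter/gather
 * segments per mapping.
 */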
static int
isp_pci_mbxdma(struct ispsoftc *isp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	caddr_t base;
	u_int32_t len;
	int i, error, ns;
	bus_size_t alim, slim, xlim;
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

#ifdef ISP_DAC_SUPPORTED
	alim = BUS_SPACE_UNRESTRICTED;
	xlim = BUS_SPACE_MAXADDR_32BIT;
#else
	xlim = alim = BUS_SPACE_MAXADDR_32BIT;
#endif
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		slim = BUS_SPACE_MAXADDR_32BIT;
	} else {
		slim = BUS_SPACE_MAXADDR_24BIT;
	}

	ISP_UNLOCK(isp);
	if (bus_dma_tag_create(NULL, 1, slim+1, alim, alim,
	    NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0,
	    busdma_lock_mutex, &Giant, &pcs->dmat)) {
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		ISP_LOCK(isp);
		return (1);
	}

	len = sizeof (XS_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		ISP_LOCK(isp);
		return (1);
	}
#ifdef ISP_TARGET_MODE
	len = sizeof (void **) * isp->isp_maxcmds;
	isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_tgtlist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
		ISP_LOCK(isp);
		return (1);
	}
#endif
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pcs->dmaps == NULL) {
		isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
		free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	/*
	 * Allocate and map the request, result queues, plus FC scratch area.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (IS_FC(isp)) {
		len += ISP2100_SCRLEN;
	}

	ns = (len / PAGE_SIZE) + 1;
	if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, slim+1, xlim, xlim,
	    NULL, NULL, len, ns, slim, 0, busdma_lock_mutex, &Giant,
	    &isp->isp_cdmat)) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot create a dma tag for control spaces");
		free(pcs->dmaps, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
	    &isp->isp_cdmap) != 0) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot allocate %d bytes of CCB memory", len);
		bus_dma_tag_destroy(isp->isp_cdmat);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		free(pcs->dmaps, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
		if (error) {
			isp_prt(isp, ISP_LOGERR,
			    "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
			}
			goto bad;
		}
	}
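
	/*
	 * Load the control space we just allocated. The imc() callback
	 * above gets the single DMA segment describing it and doles the
	 * bus address out to the request queue, result queue and (for FC)
	 * scratch DMA addresses, in that order; the kernel virtual
	 * addresses are carved out of 'base' in the same order below.
	 */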
	im.isp = isp;
	im.error = 0;
	bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR,
		    "error %d loading dma map for control areas", im.error);
		goto bad;
	}

	isp->isp_rquest = base;
	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	isp->isp_result = base;
	if (IS_FC(isp)) {
		base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
		FCPARAM(isp)->isp_scratch = base;
	}
	ISP_LOCK(isp);
	return (0);

bad:
	bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
	bus_dma_tag_destroy(isp->isp_cdmat);
	free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
	free(isp->isp_tgtlist, M_DEVBUF);
#endif
	free(pcs->dmaps, M_DEVBUF);
	ISP_LOCK(isp);
	isp->isp_rquest = NULL;
	return (1);
}

typedef struct {
	struct ispsoftc *isp;
	void *cmd_token;
	void *rq;
	u_int16_t *nxtip;
	u_int16_t optr;
	u_int error;
} mush_t;

#define MUSHERR_NOQENTRIES -2
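
/*
 * A mush_t is the bundle of state handed from isp_pci_dmasetup() to the
 * bus_dma callbacks below: the softc, the CCB being mapped, the partially
 * built request queue entry, a pointer through which the next queue index
 * is passed back, the consumer index used to detect queue overflow, and
 * an error word. MUSHERR_NOQENTRIES signals request queue exhaustion,
 * which isp_pci_dmasetup() turns into CMD_EAGAIN rather than a hard error.
 */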
#ifdef ISP_TARGET_MODE
/*
 * We need to handle DMA for target mode differently from initiator mode.
 *
 * DMA mapping, construction and submission of CTIO Request Entries, and
 * rendezvous for completion are very tightly coupled because we start out
 * by knowing (per platform) how much data we have to move, but we don't
 * know, up front, how many DMA mapping segments will have to be used to
 * cover that data, so we don't know how many CTIO Request Entries we will
 * end up using. Further, for performance reasons we may want to send
 * status too on the last CTIO (for Fibre Channel), if all went well.
 *
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead with the whole transfer
 * mapped and a pointer to a partially filled in, already allocated request
 * queue entry. We finish the job.
 */
static void tdma_mk(void *, bus_dma_segment_t *, int, int);
static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);

#define STATUS_WITH_DATA 1

static void
tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct ispsoftc *isp;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	ct_entry_t *cto, *qe;
	u_int8_t scsi_status;
	u_int16_t curi, nxti, handle;
	u_int32_t sflags;
	int32_t resid;
	int nth_ctio, nctios, send_status;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;
	curi = isp->isp_reqidx;
	qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	cto->ct_xfrlen = 0;
	cto->ct_seg_count = 0;
	cto->ct_header.rqs_entry_count = 1;
	MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));

	if (nseg == 0) {
		cto->ct_header.rqs_seqno = 1;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
		    cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
		    cto->ct_tag_val, cto->ct_flags, cto->ct_status,
		    cto->ct_scsi_status, cto->ct_resid);
		ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
		isp_put_ctio(isp, cto, qe);
		return;
	}

	nctios = nseg / ISP_RQDSEG;
	if (nseg % ISP_RQDSEG) {
		nctios++;
	}

	/*
	 * Save syshandle, and potentially any SCSI status, which we'll
	 * reinsert on the last CTIO we're going to send.
	 */
	handle = cto->ct_syshandle;
	cto->ct_syshandle = 0;
	cto->ct_header.rqs_seqno = 0;
	send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;

	if (send_status) {
		sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
		cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
		/*
		 * Preserve residual.
		 */
		resid = cto->ct_resid;

		/*
		 * Save actual SCSI status.
		 */
		scsi_status = cto->ct_scsi_status;

#ifndef STATUS_WITH_DATA
		sflags |= CT_NO_DATA;
		/*
		 * We can't do a status at the same time as a data CTIO, so
		 * we need to synthesize an extra CTIO at this level.
		 */
		nctios++;
#endif
	} else {
		sflags = scsi_status = resid = 0;
	}

	cto->ct_resid = 0;
	cto->ct_scsi_status = 0;

	pcs = (struct isp_pcisoftc *)isp;
	dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	nxti = *mp->nxtip;

	for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
		int seglim;

		seglim = nseg;
		if (seglim) {
			int seg;

			if (seglim > ISP_RQDSEG)
				seglim = ISP_RQDSEG;

			for (seg = 0; seg < seglim; seg++, nseg--) {
				/*
				 * Unlike normal initiator commands, we don't
				 * do any swizzling here.
				 */
				cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
				cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
				cto->ct_xfrlen += dm_segs->ds_len;
				dm_segs++;
			}
			cto->ct_seg_count = seg;
		} else {
			/*
			 * This case should only happen when we're sending an
			 * extra CTIO with final status.
			 */
			if (send_status == 0) {
				isp_prt(isp, ISP_LOGWARN,
				    "tdma_mk ran out of segments");
				mp->error = EINVAL;
				return;
			}
		}

		/*
		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
		 * ct_tagtype, and ct_timeout have been carried over
		 * unchanged from what our caller had set.
		 *
		 * The dataseg and seg_count fields we have just finished
		 * setting. The data direction we've preserved all along,
		 * and we only clear it if we're now sending status.
		 */

		if (nth_ctio == nctios - 1) {
			/*
			 * We're the last in a sequence of CTIOs, so mark
			 * this CTIO and save the handle to the CCB such that
			 * when this CTIO completes we can free dma resources
			 * and do whatever else we need to do to finish the
			 * rest of the command. We *don't* give this to the
			 * firmware to work on; the caller will do that.
			 */

			cto->ct_syshandle = handle;
			cto->ct_header.rqs_seqno = 1;

			if (send_status) {
				cto->ct_scsi_status = scsi_status;
				cto->ct_flags |= sflags;
				cto->ct_resid = resid;
			}
			if (send_status) {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
				    "scsi status %x resid %d",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
				    cto->ct_scsi_status, cto->ct_resid);
			} else {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val,
				    cto->ct_flags);
			}
			isp_put_ctio(isp, cto, qe);
			ISP_TDQE(isp, "last tdma_mk", curi, cto);
			if (nctios > 1) {
				MEMORYBARRIER(isp, SYNC_REQUEST,
				    curi, QENTRY_LEN);
			}
		} else {
			ct_entry_t *oqe = qe;

			/*
			 * Make sure syshandle fields are clean
			 */
			cto->ct_syshandle = 0;
			cto->ct_header.rqs_seqno = 0;

			isp_prt(isp, ISP_LOGTDEBUG1,
			    "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
			    cto->ct_fwhandle, csio->ccb_h.target_lun,
			    cto->ct_iid, cto->ct_flags);

			/*
			 * Get a new CTIO
			 */
			qe = (ct_entry_t *)
			    ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
			nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
			if (nxti == mp->optr) {
				isp_prt(isp, ISP_LOGTDEBUG0,
				    "Queue Overflow in tdma_mk");
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}

			/*
			 * Now that we're done with the old CTIO,
			 * flush it out to the request queue.
			 */
			ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
			isp_put_ctio(isp, cto, oqe);
			if (nth_ctio != 0) {
				MEMORYBARRIER(isp, SYNC_REQUEST, curi,
				    QENTRY_LEN);
			}
			curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

			/*
			 * Reset some fields in the CTIO so we can reuse
			 * it for the next one we'll flush to the request
			 * queue.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			cto->ct_status = 0;
			cto->ct_scsi_status = 0;
			cto->ct_xfrlen = 0;
			cto->ct_resid = 0;
			cto->ct_seg_count = 0;
			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
		}
	}
	*mp->nxtip = nxti;
}

/*
 * We don't have to do multiple CTIOs here. Instead, we can just do
 * continuation segments as needed. This greatly simplifies the code and
 * improves performance.
 */
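
/*
 * A CTIO2 is built either in MODE0 (data) or MODE1 (status) form. The
 * function below insists on MODE1 for the zero-segment (status only) case
 * and MODE0 whenever there is data to move, and once the ISP_RQDSEG_T2
 * data segments in the CTIO2 itself are used up it chains additional
 * RQSTYPE_DATASEG continuation entries instead of more CTIO2s.
 */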
static void
tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct ispsoftc *isp;
	ct2_entry_t *cto, *qe;
	u_int16_t curi, nxti;
	int segcnt;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;

	curi = isp->isp_reqidx;
	qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	if (nseg == 0) {
		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
			isp_prt(isp, ISP_LOGWARN,
			    "dma2_tgt_fc, a status CTIO2 without MODE1 "
			    "set (0x%x)", cto->ct_flags);
			mp->error = EINVAL;
			return;
		}
		/*
		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
		 * flags to NO DATA and clear relative offset flags.
		 * We preserve the ct_resid and the response area.
		 */
		cto->ct_header.rqs_seqno = 1;
		cto->ct_seg_count = 0;
		cto->ct_reloff = 0;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
		    cto->ct_iid, cto->ct_flags, cto->ct_status,
		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
		isp_put_ctio2(isp, cto, qe);
		ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
		return;
	}

	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
		isp_prt(isp, ISP_LOGERR,
		    "dma2_tgt_fc, a data CTIO2 without MODE0 set "
		    "(0x%x)", cto->ct_flags);
		mp->error = EINVAL;
		return;
	}

	nxti = *mp->nxtip;

	/*
	 * Set up the CTIO2 data segments.
	 */
	for (segcnt = 0; cto->ct_seg_count < ISP_RQDSEG_T2 && segcnt < nseg;
	    cto->ct_seg_count++, segcnt++) {
		cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_base =
		    dm_segs[segcnt].ds_addr;
		cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_count =
		    dm_segs[segcnt].ds_len;
		cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "isp_send_ctio2: ent0[%d]0x%jx:%ju",
		    cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr,
		    (uintmax_t)dm_segs[segcnt].ds_len);
	}

	while (segcnt < nseg) {
		u_int16_t curip;
		int seg;
		ispcontreq_t local, *crq = &local, *qep;

		qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		curip = nxti;
		nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "tdma_mkfc: request queue overflow");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		cto->ct_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
		for (seg = 0; segcnt < nseg && seg < ISP_CDSEG;
		    segcnt++, seg++) {
			crq->req_dataseg[seg].ds_base = dm_segs[segcnt].ds_addr;
			crq->req_dataseg[seg].ds_count = dm_segs[segcnt].ds_len;
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "isp_send_ctio2: ent%d[%d]%jx:%ju",
			    cto->ct_header.rqs_entry_count-1, seg,
			    (uintmax_t)dm_segs[segcnt].ds_addr,
			    (uintmax_t)dm_segs[segcnt].ds_len);
			cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
			cto->ct_seg_count++;
		}
		MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
		isp_put_cont_req(isp, crq, qep);
		ISP_TDQE(isp, "cont entry", curi, qep);
	}

	/*
	 * Now do final twiddling for the CTIO itself.
	 */
	cto->ct_header.rqs_seqno = 1;
	isp_prt(isp, ISP_LOGTDEBUG1,
	    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
	    cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
	    cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
	    cto->ct_resid);
	isp_put_ctio2(isp, cto, qe);
	ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
	*mp->nxtip = nxti;
}
#endif

static void dma2(void *, bus_dma_segment_t *, int, int);

#ifdef PAE
#define LOWD(x)	((uint32_t) x)
#define HIWD(x)	((uint32_t) (x >> 32))
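
/*
 * Example (hypothetical address): for a 64-bit segment address of
 * 0x123456789a, LOWD() yields 0x3456789a and HIWD() yields 0x12; the
 * A64/Type 3 entries built below carry the two halves in ds_base and
 * ds_basehi respectively.
 */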
static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ispsoftc *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq64_t *rq;
	int seglim, datalen;
	u_int16_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}
	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */

	if (IS_FC(isp)) {
		rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
		seglim = ISP_RQDSEG_T3;
		((ispreqt3_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		rq->req_header.rqs_entry_type = RQSTYPE_A64;
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG_A64;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt3_t *rq3 = (ispreqt3_t *)rq;
			rq3->req_dataseg[rq3->req_seg_count].ds_base =
			    LOWD(dm_segs->ds_addr);
			rq3->req_dataseg[rq3->req_seg_count].ds_basehi =
			    HIWD(dm_segs->ds_addr);
			rq3->req_dataseg[rq3->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    LOWD(dm_segs->ds_addr);
			rq->req_dataseg[rq->req_seg_count].ds_basehi =
			    HIWD(dm_segs->ds_addr);
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		u_int16_t onxti;
		ispcontreq64_t local, *crq = &local, *cqe;

		cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    LOWD(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_basehi =
			    HIWD(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			seglim++;
			datalen -= dm_segs->ds_len;
			dm_segs++;
		}
		isp_put_cont64_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}
#else
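/*
 * The non-PAE variant below is structurally identical, but builds 32-bit
 * ispreq_t/ispcontreq_t entries (no ds_basehi words), since all bus
 * addresses fit in 32 bits.
 */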
static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ispsoftc *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq_t *rq;
	int seglim, datalen;
	u_int16_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */

	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		u_int16_t onxti;
		ispcontreq_t local, *crq = &local, *cqe;

		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    dm_segs->ds_addr;
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			seglim++;
			datalen -= dm_segs->ds_len;
			dm_segs++;
		}
		isp_put_cont_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}
#endif

static int
isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
    u_int16_t *nxtip, u_int16_t optr)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	ispreq_t *qep;
	bus_dmamap_t *dp = NULL;
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);

	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
#ifdef ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (IS_FC(isp)) {
			eptr = tdma_mkfc;
		} else {
			eptr = tdma_mk;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
		    (csio->dxfer_len == 0)) {
			mp = &mush;
			mp->isp = isp;
			mp->cmd_token = csio;
			mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
			mp->nxtip = nxtip;
			mp->optr = optr;
			mp->error = 0;
			(*eptr)(mp, NULL, 0, 0);
			goto mbxsync;
		}
	} else
#endif
	eptr = dma2;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
	    (csio->dxfer_len == 0)) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
	 */
	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = rq;
	mp->nxtip = nxtip;
	mp->optr = optr;
	mp->error = 0;

	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			int error, s;
			dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
			s = splsoftvm();
			error = bus_dmamap_load(pcs->dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
			if (error == EINPROGRESS) {
				bus_dmamap_unload(pcs->dmat, *dp);
				mp->error = EINVAL;
				isp_prt(isp, ISP_LOGERR,
				    "deferred dma allocation not supported");
			} else if (error && mp->error == 0) {
#ifdef DIAGNOSTIC
				isp_prt(isp, ISP_LOGERR,
				    "error %d in dma mapping code", error);
#endif
				mp->error = error;
			}
			splx(s);
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Physical segment pointers unsupported");
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Virtual segment addresses unsupported");
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	}
mbxsync:
	switch (rq->req_header.rqs_entry_type) {
	case RQSTYPE_REQUEST:
		isp_put_request(isp, rq, qep);
		break;
	case RQSTYPE_CMDONLY:
		isp_put_extended_request(isp, (ispextreq_t *)rq,
		    (ispextreq_t *)qep);
		break;
	case RQSTYPE_T2RQS:
		isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
		break;
	case RQSTYPE_A64:
	case RQSTYPE_T3RQS:
		isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep);
		break;
	}
	return (CMD_QUEUED);
}

static void
isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pcs->dmat, *dp);
}


static void
isp_pci_reset1(struct ispsoftc *isp)
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	/* and enable interrupts */
	ENABLE_INTS(isp);
}
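
/*
 * Dump the basic BIU/RISC registers plus, for SCSI parts (with the RISC
 * briefly paused), the command/data DMA and SXP registers, then the
 * outgoing mailboxes and the PCI command register; diagnostic use only.
 */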
static void
isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));

	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
		    ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
		    ISP_READ(isp, CDMA_FIFO_STS));
		printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
		    ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
		    ISP_READ(isp, DDMA_FIFO_STS));
		printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
		    ISP_READ(isp, SXP_INTERRUPT),
		    ISP_READ(isp, SXP_GROSS_ERR),
		    ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	printf(" mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	printf(" PCI Status Command/Status=%x\n",
	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
}
| 770 isp->isp_port = pci_get_function(dev); 771 } 772 773 /* 774 * Make sure we're in reset state. 775 */ 776 ISP_LOCK(isp); 777 isp_reset(isp); 778 if (isp->isp_state != ISP_RESETSTATE) { 779 ISP_UNLOCK(isp); 780 goto bad; 781 } 782 isp_init(isp); 783 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) { 784 isp_uninit(isp); 785 ISP_UNLOCK(isp); 786 goto bad; 787 } 788 isp_attach(isp); 789 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) { 790 isp_uninit(isp); 791 ISP_UNLOCK(isp); 792 goto bad; 793 } 794 /* 795 * XXXX: Here is where we might unload the f/w module 796 * XXXX: (or decrease the reference count to it). 797 */ 798 ISP_UNLOCK(isp); 799 return (0); 800 801bad: 802 803 if (pcs && pcs->ih) { 804 (void) bus_teardown_intr(dev, irq, pcs->ih); 805 } 806 807 if (locksetup && isp) { 808 mtx_destroy(&isp->isp_osinfo.lock); 809 } 810 811 if (irq) { 812 (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq); 813 } 814 815 816 if (regs) { 817 (void) bus_release_resource(dev, rtp, rgd, regs); 818 } 819 820 if (pcs) { 821 if (pcs->pci_isp.isp_param) 822 free(pcs->pci_isp.isp_param, M_DEVBUF); 823 free(pcs, M_DEVBUF); 824 } 825 826 /* 827 * XXXX: Here is where we might unload the f/w module 828 * XXXX: (or decrease the reference count to it). 829 */ 830 return (ENXIO); 831} 832 833static void 834isp_pci_intr(void *arg) 835{ 836 struct ispsoftc *isp = arg; 837 u_int16_t isr, sema, mbox; 838 839 ISP_LOCK(isp); 840 isp->isp_intcnt++; 841 if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) { 842 isp->isp_intbogus++; 843 } else { 844 int iok = isp->isp_osinfo.intsok; 845 isp->isp_osinfo.intsok = 0; 846 isp_intr(isp, isr, sema, mbox); 847 isp->isp_osinfo.intsok = iok; 848 } 849 ISP_UNLOCK(isp); 850} 851 852 853#define IspVirt2Off(a, x) \ 854 (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \ 855 _BLK_REG_SHFT] + ((x) & 0xff)) 856 857#define BXR2(pcs, off) \ 858 bus_space_read_2(pcs->pci_st, pcs->pci_sh, off) 859#define BXW2(pcs, off, v) \ 860 bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v) 861 862 863static INLINE int 864isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp) 865{ 866 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 867 u_int16_t val0, val1; 868 int i = 0; 869 870 do { 871 val0 = BXR2(pcs, IspVirt2Off(isp, off)); 872 val1 = BXR2(pcs, IspVirt2Off(isp, off)); 873 } while (val0 != val1 && ++i < 1000); 874 if (val0 != val1) { 875 return (1); 876 } 877 *rp = val0; 878 return (0); 879} 880 881static int 882isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp, 883 u_int16_t *semap, u_int16_t *mbp) 884{ 885 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 886 u_int16_t isr, sema; 887 888 if (IS_2100(isp)) { 889 if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) { 890 return (0); 891 } 892 if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) { 893 return (0); 894 } 895 } else { 896 isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR)); 897 sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA)); 898 } 899 isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema); 900 isr &= INT_PENDING_MASK(isp); 901 sema &= BIU_SEMA_LOCK; 902 if (isr == 0 && sema == 0) { 903 return (0); 904 } 905 *isrp = isr; 906 if ((*semap = sema) != 0) { 907 if (IS_2100(isp)) { 908 if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) { 909 return (0); 910 } 911 } else { 912 *mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0)); 913 } 914 } 915 return (1); 916} 917 918static int 919isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp, 920 u_int16_t *semap, u_int16_t *mbox0p) 
921{ 922 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 923 u_int32_t r2hisr; 924 925 if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) { 926 *isrp = 0; 927 return (0); 928 } 929 r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh, 930 IspVirt2Off(pcs, BIU_R2HSTSLO)); 931 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); 932 if ((r2hisr & BIU_R2HST_INTR) == 0) { 933 *isrp = 0; 934 return (0); 935 } 936 switch (r2hisr & BIU_R2HST_ISTAT_MASK) { 937 case ISPR2HST_ROM_MBX_OK: 938 case ISPR2HST_ROM_MBX_FAIL: 939 case ISPR2HST_MBX_OK: 940 case ISPR2HST_MBX_FAIL: 941 case ISPR2HST_ASYNC_EVENT: 942 *isrp = r2hisr & 0xffff; 943 *mbox0p = (r2hisr >> 16); 944 *semap = 1; 945 return (1); 946 case ISPR2HST_RIO_16: 947 *isrp = r2hisr & 0xffff; 948 *mbox0p = ASYNC_RIO1; 949 *semap = 1; 950 return (1); 951 case ISPR2HST_FPOST: 952 *isrp = r2hisr & 0xffff; 953 *mbox0p = ASYNC_CMD_CMPLT; 954 *semap = 1; 955 return (1); 956 case ISPR2HST_FPOST_CTIO: 957 *isrp = r2hisr & 0xffff; 958 *mbox0p = ASYNC_CTIO_DONE; 959 *semap = 1; 960 return (1); 961 case ISPR2HST_RSPQ_UPDATE: 962 *isrp = r2hisr & 0xffff; 963 *mbox0p = 0; 964 *semap = 0; 965 return (1); 966 default: 967 return (0); 968 } 969} 970 971static u_int16_t 972isp_pci_rd_reg(struct ispsoftc *isp, int regoff) 973{ 974 u_int16_t rv; 975 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 976 int oldconf = 0; 977 978 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 979 /* 980 * We will assume that someone has paused the RISC processor. 981 */ 982 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 983 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 984 oldconf | BIU_PCI_CONF1_SXP); 985 } 986 rv = BXR2(pcs, IspVirt2Off(isp, regoff)); 987 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 988 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf); 989 } 990 return (rv); 991} 992 993static void 994isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val) 995{ 996 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 997 int oldconf = 0; 998 999 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1000 /* 1001 * We will assume that someone has paused the RISC processor. 1002 */ 1003 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1004 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1005 oldconf | BIU_PCI_CONF1_SXP); 1006 } 1007 BXW2(pcs, IspVirt2Off(isp, regoff), val); 1008 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1009 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf); 1010 } 1011} 1012 1013static u_int16_t 1014isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff) 1015{ 1016 u_int16_t rv, oc = 0; 1017 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1018 1019 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 1020 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 1021 u_int16_t tc; 1022 /* 1023 * We will assume that someone has paused the RISC processor. 
1024 */ 1025 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1026 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1027 if (regoff & SXP_BANK1_SELECT) 1028 tc |= BIU_PCI1080_CONF1_SXP1; 1029 else 1030 tc |= BIU_PCI1080_CONF1_SXP0; 1031 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc); 1032 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1033 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1034 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1035 oc | BIU_PCI1080_CONF1_DMA); 1036 } 1037 rv = BXR2(pcs, IspVirt2Off(isp, regoff)); 1038 if (oc) { 1039 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc); 1040 } 1041 return (rv); 1042} 1043 1044static void 1045isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val) 1046{ 1047 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1048 int oc = 0; 1049 1050 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 1051 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 1052 u_int16_t tc; 1053 /* 1054 * We will assume that someone has paused the RISC processor. 1055 */ 1056 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1057 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1058 if (regoff & SXP_BANK1_SELECT) 1059 tc |= BIU_PCI1080_CONF1_SXP1; 1060 else 1061 tc |= BIU_PCI1080_CONF1_SXP0; 1062 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc); 1063 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1064 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1065 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1066 oc | BIU_PCI1080_CONF1_DMA); 1067 } 1068 BXW2(pcs, IspVirt2Off(isp, regoff), val); 1069 if (oc) { 1070 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc); 1071 } 1072} 1073 1074 1075struct imush { 1076 struct ispsoftc *isp; 1077 int error; 1078}; 1079 1080static void imc(void *, bus_dma_segment_t *, int, int); 1081 1082static void 1083imc(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1084{ 1085 struct imush *imushp = (struct imush *) arg; 1086 if (error) { 1087 imushp->error = error; 1088 } else { 1089 struct ispsoftc *isp =imushp->isp; 1090 bus_addr_t addr = segs->ds_addr; 1091 1092 isp->isp_rquest_dma = addr; 1093 addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1094 isp->isp_result_dma = addr; 1095 if (IS_FC(isp)) { 1096 addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1097 FCPARAM(isp)->isp_scdma = addr; 1098 } 1099 } 1100} 1101 1102/* 1103 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE 1104 */ 1105#define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1) 1106 1107static int 1108isp_pci_mbxdma(struct ispsoftc *isp) 1109{ 1110 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1111 caddr_t base; 1112 u_int32_t len; 1113 int i, error, ns; 1114 bus_size_t alim, slim, xlim; 1115 struct imush im; 1116 1117 /* 1118 * Already been here? If so, leave... 
1119 */ 1120 if (isp->isp_rquest) { 1121 return (0); 1122 } 1123 1124#ifdef ISP_DAC_SUPPORTED 1125 alim = BUS_SPACE_UNRESTRICTED; 1126 xlim = BUS_SPACE_MAXADDR_32BIT; 1127#else 1128 xlim = alim = BUS_SPACE_MAXADDR_32BIT; 1129#endif 1130 if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) { 1131 slim = BUS_SPACE_MAXADDR_32BIT; 1132 } else { 1133 slim = BUS_SPACE_MAXADDR_24BIT; 1134 } 1135 1136 ISP_UNLOCK(isp); 1137 if (bus_dma_tag_create(NULL, 1, slim+1, alim, alim, 1138 NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, 1139 busdma_lock_mutex, &Giant, &pcs->dmat)) { 1140 isp_prt(isp, ISP_LOGERR, "could not create master dma tag"); 1141 ISP_LOCK(isp); 1142 return(1); 1143 } 1144 1145 1146 len = sizeof (XS_T **) * isp->isp_maxcmds; 1147 isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1148 if (isp->isp_xflist == NULL) { 1149 isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array"); 1150 ISP_LOCK(isp); 1151 return (1); 1152 } 1153#ifdef ISP_TARGET_MODE 1154 len = sizeof (void **) * isp->isp_maxcmds; 1155 isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1156 if (isp->isp_tgtlist == NULL) { 1157 isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array"); 1158 ISP_LOCK(isp); 1159 return (1); 1160 } 1161#endif 1162 len = sizeof (bus_dmamap_t) * isp->isp_maxcmds; 1163 pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK); 1164 if (pcs->dmaps == NULL) { 1165 isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage"); 1166 free(isp->isp_xflist, M_DEVBUF); 1167#ifdef ISP_TARGET_MODE 1168 free(isp->isp_tgtlist, M_DEVBUF); 1169#endif 1170 ISP_LOCK(isp); 1171 return (1); 1172 } 1173 1174 /* 1175 * Allocate and map the request, result queues, plus FC scratch area. 1176 */ 1177 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1178 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1179 if (IS_FC(isp)) { 1180 len += ISP2100_SCRLEN; 1181 } 1182 1183 ns = (len / PAGE_SIZE) + 1; 1184 if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, slim+1, xlim, xlim, 1185 NULL, NULL, len, ns, slim, 0, busdma_lock_mutex, &Giant, 1186 &isp->isp_cdmat)) { 1187 isp_prt(isp, ISP_LOGERR, 1188 "cannot create a dma tag for control spaces"); 1189 free(pcs->dmaps, M_DEVBUF); 1190 free(isp->isp_xflist, M_DEVBUF); 1191#ifdef ISP_TARGET_MODE 1192 free(isp->isp_tgtlist, M_DEVBUF); 1193#endif 1194 ISP_LOCK(isp); 1195 return (1); 1196 } 1197 1198 if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT, 1199 &isp->isp_cdmap) != 0) { 1200 isp_prt(isp, ISP_LOGERR, 1201 "cannot allocate %d bytes of CCB memory", len); 1202 bus_dma_tag_destroy(isp->isp_cdmat); 1203 free(isp->isp_xflist, M_DEVBUF); 1204#ifdef ISP_TARGET_MODE 1205 free(isp->isp_tgtlist, M_DEVBUF); 1206#endif 1207 free(pcs->dmaps, M_DEVBUF); 1208 ISP_LOCK(isp); 1209 return (1); 1210 } 1211 1212 for (i = 0; i < isp->isp_maxcmds; i++) { 1213 error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]); 1214 if (error) { 1215 isp_prt(isp, ISP_LOGERR, 1216 "error %d creating per-cmd DMA maps", error); 1217 while (--i >= 0) { 1218 bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]); 1219 } 1220 goto bad; 1221 } 1222 } 1223 1224 im.isp = isp; 1225 im.error = 0; 1226 bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0); 1227 if (im.error) { 1228 isp_prt(isp, ISP_LOGERR, 1229 "error %d loading dma map for control areas", im.error); 1230 goto bad; 1231 } 1232 1233 isp->isp_rquest = base; 1234 base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1235 isp->isp_result = base; 1236 if (IS_FC(isp)) { 1237 base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1238 
FCPARAM(isp)->isp_scratch = base; 1239 } 1240 ISP_LOCK(isp); 1241 return (0); 1242 1243bad: 1244 bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap); 1245 bus_dma_tag_destroy(isp->isp_cdmat); 1246 free(isp->isp_xflist, M_DEVBUF); 1247#ifdef ISP_TARGET_MODE 1248 free(isp->isp_tgtlist, M_DEVBUF); 1249#endif 1250 free(pcs->dmaps, M_DEVBUF); 1251 ISP_LOCK(isp); 1252 isp->isp_rquest = NULL; 1253 return (1); 1254} 1255 1256typedef struct { 1257 struct ispsoftc *isp; 1258 void *cmd_token; 1259 void *rq; 1260 u_int16_t *nxtip; 1261 u_int16_t optr; 1262 u_int error; 1263} mush_t; 1264 1265#define MUSHERR_NOQENTRIES -2 1266 1267#ifdef ISP_TARGET_MODE 1268/* 1269 * We need to handle DMA for target mode differently from initiator mode. 1270 * 1271 * DMA mapping and construction and submission of CTIO Request Entries 1272 * and rendevous for completion are very tightly coupled because we start 1273 * out by knowing (per platform) how much data we have to move, but we 1274 * don't know, up front, how many DMA mapping segments will have to be used 1275 * cover that data, so we don't know how many CTIO Request Entries we 1276 * will end up using. Further, for performance reasons we may want to 1277 * (on the last CTIO for Fibre Channel), send status too (if all went well). 1278 * 1279 * The standard vector still goes through isp_pci_dmasetup, but the callback 1280 * for the DMA mapping routines comes here instead with the whole transfer 1281 * mapped and a pointer to a partially filled in already allocated request 1282 * queue entry. We finish the job. 1283 */ 1284static void tdma_mk(void *, bus_dma_segment_t *, int, int); 1285static void tdma_mkfc(void *, bus_dma_segment_t *, int, int); 1286 1287#define STATUS_WITH_DATA 1 1288 1289static void 1290tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1291{ 1292 mush_t *mp; 1293 struct ccb_scsiio *csio; 1294 struct ispsoftc *isp; 1295 struct isp_pcisoftc *pcs; 1296 bus_dmamap_t *dp; 1297 ct_entry_t *cto, *qe; 1298 u_int8_t scsi_status; 1299 u_int16_t curi, nxti, handle; 1300 u_int32_t sflags; 1301 int32_t resid; 1302 int nth_ctio, nctios, send_status; 1303 1304 mp = (mush_t *) arg; 1305 if (error) { 1306 mp->error = error; 1307 return; 1308 } 1309 1310 isp = mp->isp; 1311 csio = mp->cmd_token; 1312 cto = mp->rq; 1313 curi = isp->isp_reqidx; 1314 qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 1315 1316 cto->ct_xfrlen = 0; 1317 cto->ct_seg_count = 0; 1318 cto->ct_header.rqs_entry_count = 1; 1319 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1320 1321 if (nseg == 0) { 1322 cto->ct_header.rqs_seqno = 1; 1323 isp_prt(isp, ISP_LOGTDEBUG1, 1324 "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d", 1325 cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid, 1326 cto->ct_tag_val, cto->ct_flags, cto->ct_status, 1327 cto->ct_scsi_status, cto->ct_resid); 1328 ISP_TDQE(isp, "tdma_mk[no data]", curi, cto); 1329 isp_put_ctio(isp, cto, qe); 1330 return; 1331 } 1332 1333 nctios = nseg / ISP_RQDSEG; 1334 if (nseg % ISP_RQDSEG) { 1335 nctios++; 1336 } 1337 1338 /* 1339 * Save syshandle, and potentially any SCSI status, which we'll 1340 * reinsert on the last CTIO we're going to send. 1341 */ 1342 1343 handle = cto->ct_syshandle; 1344 cto->ct_syshandle = 0; 1345 cto->ct_header.rqs_seqno = 0; 1346 send_status = (cto->ct_flags & CT_SENDSTATUS) != 0; 1347 1348 if (send_status) { 1349 sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR); 1350 cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR); 1351 /* 1352 * Preserve residual. 
1353 */ 1354 resid = cto->ct_resid; 1355 1356 /* 1357 * Save actual SCSI status. 1358 */ 1359 scsi_status = cto->ct_scsi_status; 1360 1361#ifndef STATUS_WITH_DATA 1362 sflags |= CT_NO_DATA; 1363 /* 1364 * We can't do a status at the same time as a data CTIO, so 1365 * we need to synthesize an extra CTIO at this level. 1366 */ 1367 nctios++; 1368#endif 1369 } else { 1370 sflags = scsi_status = resid = 0; 1371 } 1372 1373 cto->ct_resid = 0; 1374 cto->ct_scsi_status = 0; 1375 1376 pcs = (struct isp_pcisoftc *)isp; 1377 dp = &pcs->dmaps[isp_handle_index(handle)]; 1378 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1379 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 1380 } else { 1381 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 1382 } 1383 1384 nxti = *mp->nxtip; 1385 1386 for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) { 1387 int seglim; 1388 1389 seglim = nseg; 1390 if (seglim) { 1391 int seg; 1392 1393 if (seglim > ISP_RQDSEG) 1394 seglim = ISP_RQDSEG; 1395 1396 for (seg = 0; seg < seglim; seg++, nseg--) { 1397 /* 1398 * Unlike normal initiator commands, we don't 1399 * do any swizzling here. 1400 */ 1401 cto->ct_dataseg[seg].ds_count = dm_segs->ds_len; 1402 cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr; 1403 cto->ct_xfrlen += dm_segs->ds_len; 1404 dm_segs++; 1405 } 1406 cto->ct_seg_count = seg; 1407 } else { 1408 /* 1409 * This case should only happen when we're sending an 1410 * extra CTIO with final status. 1411 */ 1412 if (send_status == 0) { 1413 isp_prt(isp, ISP_LOGWARN, 1414 "tdma_mk ran out of segments"); 1415 mp->error = EINVAL; 1416 return; 1417 } 1418 } 1419 1420 /* 1421 * At this point, the fields ct_lun, ct_iid, ct_tagval, 1422 * ct_tagtype, and ct_timeout have been carried over 1423 * unchanged from what our caller had set. 1424 * 1425 * The dataseg fields and the seg_count fields we just got 1426 * through setting. The data direction we've preserved all 1427 * along and only clear it if we're now sending status. 1428 */ 1429 1430 if (nth_ctio == nctios - 1) { 1431 /* 1432 * We're the last in a sequence of CTIOs, so mark 1433 * this CTIO and save the handle to the CCB such that 1434 * when this CTIO completes we can free dma resources 1435 * and do whatever else we need to do to finish the 1436 * rest of the command. We *don't* give this to the 1437 * firmware to work on- the caller will do that. 
	for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
		int seglim;

		seglim = nseg;
		if (seglim) {
			int seg;

			if (seglim > ISP_RQDSEG)
				seglim = ISP_RQDSEG;

			for (seg = 0; seg < seglim; seg++, nseg--) {
				/*
				 * Unlike normal initiator commands, we don't
				 * do any swizzling here.
				 */
				cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
				cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
				cto->ct_xfrlen += dm_segs->ds_len;
				dm_segs++;
			}
			cto->ct_seg_count = seg;
		} else {
			/*
			 * This case should only happen when we're sending an
			 * extra CTIO with final status.
			 */
			if (send_status == 0) {
				isp_prt(isp, ISP_LOGWARN,
				    "tdma_mk ran out of segments");
				mp->error = EINVAL;
				return;
			}
		}

		/*
		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
		 * ct_tagtype, and ct_timeout have been carried over
		 * unchanged from what our caller had set.
		 *
		 * The dataseg fields and the seg_count fields we just got
		 * through setting. The data direction we've preserved all
		 * along and only clear it if we're now sending status.
		 */

		if (nth_ctio == nctios - 1) {
			/*
			 * We're the last in a sequence of CTIOs, so mark
			 * this CTIO and save the handle to the CCB such that
			 * when this CTIO completes we can free dma resources
			 * and do whatever else we need to do to finish the
			 * rest of the command. We *don't* give this to the
			 * firmware to work on- the caller will do that.
			 */
			cto->ct_syshandle = handle;
			cto->ct_header.rqs_seqno = 1;

			if (send_status) {
				cto->ct_scsi_status = scsi_status;
				cto->ct_flags |= sflags;
				cto->ct_resid = resid;
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
				    "scsi status %x resid %d",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
				    cto->ct_scsi_status, cto->ct_resid);
			} else {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val,
				    cto->ct_flags);
			}
			isp_put_ctio(isp, cto, qe);
			ISP_TDQE(isp, "last tdma_mk", curi, cto);
			if (nctios > 1) {
				MEMORYBARRIER(isp, SYNC_REQUEST,
				    curi, QENTRY_LEN);
			}
		} else {
			ct_entry_t *oqe = qe;

			/*
			 * Make sure syshandle fields are clean
			 */
			cto->ct_syshandle = 0;
			cto->ct_header.rqs_seqno = 0;

			isp_prt(isp, ISP_LOGTDEBUG1,
			    "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
			    cto->ct_fwhandle, csio->ccb_h.target_lun,
			    cto->ct_iid, cto->ct_flags);

			/*
			 * Get a new CTIO
			 */
			qe = (ct_entry_t *)
			    ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
			nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
			if (nxti == mp->optr) {
				isp_prt(isp, ISP_LOGTDEBUG0,
				    "Queue Overflow in tdma_mk");
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}

			/*
			 * Now that we're done with the old CTIO,
			 * flush it out to the request queue.
			 */
			ISP_TDQE(isp, "tdma_mk", curi, cto);
			isp_put_ctio(isp, cto, oqe);
			if (nth_ctio != 0) {
				MEMORYBARRIER(isp, SYNC_REQUEST, curi,
				    QENTRY_LEN);
			}
			curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

			/*
			 * Reset some fields in the CTIO so we can reuse
			 * it for the next one we'll flush to the request
			 * queue.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			cto->ct_status = 0;
			cto->ct_scsi_status = 0;
			cto->ct_xfrlen = 0;
			cto->ct_resid = 0;
			cto->ct_seg_count = 0;
			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
		}
	}
	*mp->nxtip = nxti;
}

/*
 * We don't have to do multiple CTIOs here. Instead, we can just do
 * continuation segments as needed. This greatly simplifies the code
 * and improves performance.
 */
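/*
 * Worked example (a sketch, assuming ISP_RQDSEG_T2 is 3 and ISP_CDSEG
 * is 7): a 17-segment transfer puts 3 segments in the CTIO2 itself and
 * 7 + 7 in two continuation entries, so rqs_entry_count ends up as 3
 * and only one CTIO2 is ever built.
 */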
static void
tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct ispsoftc *isp;
	ct2_entry_t *cto, *qe;
	u_int16_t curi, nxti;
	int segcnt;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;

	curi = isp->isp_reqidx;
	qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	if (nseg == 0) {
		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
			isp_prt(isp, ISP_LOGWARN,
			    "tdma_mkfc, a status CTIO2 without MODE1 "
			    "set (0x%x)", cto->ct_flags);
			mp->error = EINVAL;
			return;
		}
		/*
		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
		 * flags to NO DATA and clear relative offset flags.
		 * We preserve the ct_resid and the response area.
		 */
		cto->ct_header.rqs_seqno = 1;
		cto->ct_seg_count = 0;
		cto->ct_reloff = 0;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
		    cto->ct_iid, cto->ct_flags, cto->ct_status,
		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
		isp_put_ctio2(isp, cto, qe);
		ISP_TDQE(isp, "tdma_mkfc[no data]", curi, qe);
		return;
	}

	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
		isp_prt(isp, ISP_LOGERR,
		    "tdma_mkfc, a data CTIO2 without MODE0 set "
		    "(0x%x)", cto->ct_flags);
		mp->error = EINVAL;
		return;
	}

	nxti = *mp->nxtip;

	/*
	 * Set up the CTIO2 data segments.
	 */
	for (segcnt = 0; cto->ct_seg_count < ISP_RQDSEG_T2 && segcnt < nseg;
	    cto->ct_seg_count++, segcnt++) {
		cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_base =
		    dm_segs[segcnt].ds_addr;
		cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_count =
		    dm_segs[segcnt].ds_len;
		cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "tdma_mkfc: ent0[%d]0x%jx:%ju",
		    cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr,
		    (uintmax_t)dm_segs[segcnt].ds_len);
	}

	while (segcnt < nseg) {
		u_int16_t curip;
		int seg;
		ispcontreq_t local, *crq = &local, *qep;

		qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		curip = nxti;
		nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "tdma_mkfc: request queue overflow");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		cto->ct_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
		for (seg = 0; segcnt < nseg && seg < ISP_CDSEG;
		    segcnt++, seg++) {
			crq->req_dataseg[seg].ds_base = dm_segs[segcnt].ds_addr;
			crq->req_dataseg[seg].ds_count = dm_segs[segcnt].ds_len;
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "tdma_mkfc: ent%d[%d]%jx:%ju",
			    cto->ct_header.rqs_entry_count - 1, seg,
			    (uintmax_t)dm_segs[segcnt].ds_addr,
			    (uintmax_t)dm_segs[segcnt].ds_len);
			cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
			cto->ct_seg_count++;
		}
		MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
		isp_put_cont_req(isp, crq, qep);
		ISP_TDQE(isp, "cont entry", curi, qep);
	}
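	/*
	 * Each continuation entry above was synced to the request queue
	 * as it was built; the CTIO2 itself is written last, so the
	 * firmware never sees a partially constructed chain once the
	 * caller advances the request queue in-pointer.
	 */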
	/*
	 * Now do the final twiddling for the CTIO itself.
	 */
	cto->ct_header.rqs_seqno = 1;
	isp_prt(isp, ISP_LOGTDEBUG1,
	    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
	    cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
	    cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
	    cto->ct_resid);
	isp_put_ctio2(isp, cto, qe);
	ISP_TDQE(isp, "last tdma_mkfc", curi, qe);
	*mp->nxtip = nxti;
}
#endif

static void dma2(void *, bus_dma_segment_t *, int, int);

#ifdef PAE
/*
 * Split a 64-bit bus address into the low and high 32-bit halves
 * used by the A64 and type-3 request entries.
 */
#define	LOWD(x)	((uint32_t) (x))
#define	HIWD(x)	((uint32_t) ((x) >> 32))

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ispsoftc *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq64_t *rq;
	int seglim, datalen;
	u_int16_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}
	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */
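	/*
	 * Layout sketch (assuming ISP_RQDSEG_T3 is 2 and ISP_CDSEG64 is
	 * 5): a 9-segment Fibre Channel transfer puts 2 segments in the
	 * type-3 request entry and 5 + 2 in two A64 continuation
	 * entries, giving rqs_entry_count == 3.
	 */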
	if (IS_FC(isp)) {
		rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
		seglim = ISP_RQDSEG_T3;
		((ispreqt3_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		rq->req_header.rqs_entry_type = RQSTYPE_A64;
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG_A64;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt3_t *rq3 = (ispreqt3_t *)rq;
			rq3->req_dataseg[rq3->req_seg_count].ds_base =
			    LOWD(dm_segs->ds_addr);
			rq3->req_dataseg[rq3->req_seg_count].ds_basehi =
			    HIWD(dm_segs->ds_addr);
			rq3->req_dataseg[rq3->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    LOWD(dm_segs->ds_addr);
			rq->req_dataseg[rq->req_seg_count].ds_basehi =
			    HIWD(dm_segs->ds_addr);
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		u_int16_t onxti;
		ispcontreq64_t local, *crq = &local, *cqe;

		cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    LOWD(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_basehi =
			    HIWD(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			/*
			 * Account for this segment before advancing the
			 * pointer (the old order charged the *next*
			 * segment's length against datalen).
			 */
			datalen -= dm_segs->ds_len;
			dm_segs++;
			seglim++;
		}
		isp_put_cont64_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}
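/*
 * The non-PAE variant below is structurally identical but uses 32-bit
 * data segments (ispreq_t and ispcontreq_t entries), so no address
 * splitting is needed.
 */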
#else
static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ispsoftc *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq_t *rq;
	int seglim, datalen;
	u_int16_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */

	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		u_int16_t onxti;
		ispcontreq_t local, *crq = &local, *cqe;

		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    dm_segs->ds_addr;
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			/*
			 * As in the PAE variant, charge this segment's
			 * length against datalen before advancing.
			 */
			datalen -= dm_segs->ds_len;
			dm_segs++;
			seglim++;
		}
		isp_put_cont_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}
#endif

static int
isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
    u_int16_t *nxtip, u_int16_t optr)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	ispreq_t *qep;
	bus_dmamap_t *dp = NULL;
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);

	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
#ifdef ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (IS_FC(isp)) {
			eptr = tdma_mkfc;
		} else {
			eptr = tdma_mk;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
		    (csio->dxfer_len == 0)) {
			mp = &mush;
			mp->isp = isp;
			mp->cmd_token = csio;
			mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
			mp->nxtip = nxtip;
			mp->optr = optr;
			mp->error = 0;
			(*eptr)(mp, NULL, 0, 0);
			goto mbxsync;
		}
	} else
#endif
	eptr = dma2;
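	/*
	 * Commands that move no data need no DMA mapping at all; the
	 * request entry still gets byte-swizzled into the queue at
	 * mbxsync below.
	 */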
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
	    (csio->dxfer_len == 0)) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
	 */
	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = rq;
	mp->nxtip = nxtip;
	mp->optr = optr;
	mp->error = 0;

	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			int error, s;
			dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
			s = splsoftvm();
			error = bus_dmamap_load(pcs->dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
			if (error == EINPROGRESS) {
				bus_dmamap_unload(pcs->dmat, *dp);
				mp->error = EINVAL;
				isp_prt(isp, ISP_LOGERR,
				    "deferred dma allocation not supported");
			} else if (error && mp->error == 0) {
#ifdef DIAGNOSTIC
				isp_prt(isp, ISP_LOGERR,
				    "error %d in dma mapping code", error);
#endif
				mp->error = error;
			}
			splx(s);
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Physical segment pointers unsupported");
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Virtual segment addresses unsupported");
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	}
mbxsync:
	switch (rq->req_header.rqs_entry_type) {
	case RQSTYPE_REQUEST:
		isp_put_request(isp, rq, qep);
		break;
	case RQSTYPE_CMDONLY:
		isp_put_extended_request(isp, (ispextreq_t *)rq,
		    (ispextreq_t *)qep);
		break;
	case RQSTYPE_T2RQS:
		isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
		break;
	case RQSTYPE_A64:
	case RQSTYPE_T3RQS:
		isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep);
		break;
	}
	return (CMD_QUEUED);
}

static void
isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pcs->dmat, *dp);
}

static void
isp_pci_reset1(struct ispsoftc *isp)
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	/* and enable interrupts */
	ENABLE_INTS(isp);
}
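/*
 * Debugging aid: dump the basic handshake registers and, for SCSI
 * adapters, the DMA and SXP register blocks.  The RISC processor is
 * paused around the SXP reads, which require it to be held.
 */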
2076{ 2077 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2078 if (msg) 2079 printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg); 2080 else 2081 printf("%s:\n", device_get_nameunit(isp->isp_dev)); 2082 if (IS_SCSI(isp)) 2083 printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1)); 2084 else 2085 printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR)); 2086 printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR), 2087 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); 2088 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); 2089 2090 2091 if (IS_SCSI(isp)) { 2092 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); 2093 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", 2094 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), 2095 ISP_READ(isp, CDMA_FIFO_STS)); 2096 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", 2097 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), 2098 ISP_READ(isp, DDMA_FIFO_STS)); 2099 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", 2100 ISP_READ(isp, SXP_INTERRUPT), 2101 ISP_READ(isp, SXP_GROSS_ERR), 2102 ISP_READ(isp, SXP_PINS_CTRL)); 2103 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); 2104 } 2105 printf(" mbox regs: %x %x %x %x %x\n", 2106 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), 2107 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), 2108 ISP_READ(isp, OUTMAILBOX4)); 2109 printf(" PCI Status Command/Status=%x\n", 2110 pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1)); 2111}