Lines matching refs:acb — every line referencing the struct AdapterControlBlock handle (acb) in the Linux arcmsr (Areca RAID) SCSI driver source; each entry is prefixed with its line number in the source file.

109 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
111 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
123 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
124 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
125 static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
126 static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
128 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
129 static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
130 static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
133 static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
134 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
136 static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
137 static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb);
138 static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb);
139 static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb);
140 static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
142 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
144 static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb);
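Every routine declared above takes the same struct AdapterControlBlock *acb handle, and the listing below shows the driver repeatedly switching on acb->adapter_type (lines 246, 580, 1343, 2934, ...) to pick the register protocol for the controller family at hand. A minimal compilable sketch of that dispatch idiom, using illustrative stand-in names rather than the driver's real types:

/* Sketch only: per-family dispatch on adapter_type, as seen in
 * arcmsr_flush_adapter_cache, arcmsr_interrupt, etc.  All names here
 * are stand-ins, not the driver's definitions. */
enum adapter_type { TYPE_A, TYPE_B, TYPE_C };

struct adapter {                /* stands in for struct AdapterControlBlock */
    enum adapter_type adapter_type;
};

static void flush_a(struct adapter *acb) { (void)acb; /* HBA-A registers */ }
static void flush_b(struct adapter *acb) { (void)acb; /* HBA-B registers */ }
static void flush_c(struct adapter *acb) { (void)acb; /* HBA-C registers */ }

static void flush_adapter_cache(struct adapter *acb)
{
    switch (acb->adapter_type) {
    case TYPE_A: flush_a(acb); break;
    case TYPE_B: flush_b(acb); break;
    case TYPE_C: flush_c(acb); break;
    }
}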
244 static void arcmsr_free_io_queue(struct AdapterControlBlock *acb)
246 switch (acb->adapter_type) {
251 dma_free_coherent(&acb->pdev->dev, acb->ioqueue_size,
252 acb->dma_coherent2, acb->dma_coherent_handle2);
257 static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
259 struct pci_dev *pdev = acb->pdev;
260 switch (acb->adapter_type){
262 acb->pmuA = ioremap(pci_resource_start(pdev,0), pci_resource_len(pdev,0));
263 if (!acb->pmuA) {
264 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
273 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
279 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
282 acb->mem_base0 = mem_base0;
283 acb->mem_base1 = mem_base1;
287 acb->pmuC = ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
288 if (!acb->pmuC) {
289 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
292 if (readl(&acb->pmuC->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
293 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &acb->pmuC->outbound_doorbell_clear);/*clear interrupt*/
307 acb->host->host_no);
310 acb->mem_base0 = mem_base0;
314 acb->pmuE = ioremap(pci_resource_start(pdev, 1),
316 if (!acb->pmuE) {
318 acb->host->host_no);
321 writel(0, &acb->pmuE->host_int_status); /*clear interrupt*/
322 writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell); /* synchronize doorbell to 0 */
323 acb->in_doorbell = 0;
324 acb->out_doorbell = 0;
328 acb->pmuF = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
329 if (!acb->pmuF) {
331 acb->host->host_no);
334 writel(0, &acb->pmuF->host_int_status); /* clear interrupt */
335 writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell);
336 acb->in_doorbell = 0;
337 acb->out_doorbell = 0;
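Each branch of arcmsr_remap_pciregion maps one BAR with ioremap, logs and bails out on failure, and (for the C/E/F families) clears any stale doorbell interrupt before use; arcmsr_unmap_pciregion below is its mirror image. A hedged kernel-style sketch of the map-or-fail step (the BAR index is whichever the family uses, 0 or 1 in the listing):

/* Sketch of the BAR-mapping pattern in arcmsr_remap_pciregion; the
 * caller treats a NULL return as probe failure and unwinds. */
#include <linux/pci.h>
#include <linux/io.h>

static void __iomem *map_bar(struct pci_dev *pdev, int bar)
{
    void __iomem *regs;

    regs = ioremap(pci_resource_start(pdev, bar),
                   pci_resource_len(pdev, bar));
    if (!regs)
        pr_notice("memory mapping region failed\n");
    return regs;
}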
344 static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
346 switch (acb->adapter_type) {
348 iounmap(acb->pmuA);
351 iounmap(acb->mem_base0);
352 iounmap(acb->mem_base1);
355 iounmap(acb->pmuC);
358 iounmap(acb->mem_base0);
361 iounmap(acb->pmuE);
364 iounmap(acb->pmuF);
372 struct AdapterControlBlock *acb = dev_id;
374 handle_state = arcmsr_interrupt(acb);
401 static uint8_t arcmsr_hbaA_wait_msgint_ready(struct AdapterControlBlock *acb)
403 struct MessageUnit_A __iomem *reg = acb->pmuA;
419 static uint8_t arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock *acb)
421 struct MessageUnit_B *reg = acb->pmuB;
492 static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb)
494 struct MessageUnit_A __iomem *reg = acb->pmuA;
498 if (arcmsr_hbaA_wait_msgint_ready(acb))
503 timeout, retry count down = %d \n", acb->host->host_no, retry_count);
508 static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb)
510 struct MessageUnit_B *reg = acb->pmuB;
514 if (arcmsr_hbaB_wait_msgint_ready(acb))
519 timeout,retry count down = %d \n", acb->host->host_no, retry_count);
577 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
579 switch (acb->adapter_type) {
582 arcmsr_hbaA_flush_cache(acb);
585 arcmsr_hbaB_flush_cache(acb);
588 arcmsr_hbaC_flush_cache(acb);
591 arcmsr_hbaD_flush_cache(acb);
595 arcmsr_hbaE_flush_cache(acb);
600 static void arcmsr_hbaB_assign_regAddr(struct AdapterControlBlock *acb)
602 struct MessageUnit_B *reg = acb->pmuB;
604 if (acb->pdev->device == PCI_DEVICE_ID_ARECA_1203) {
620 static void arcmsr_hbaD_assign_regAddr(struct AdapterControlBlock *acb)
622 struct MessageUnit_D *reg = acb->pmuD;
652 static void arcmsr_hbaF_assign_regAddr(struct AdapterControlBlock *acb)
657 memset(acb->dma_coherent2, 0xff, acb->completeQ_size);
658 acb->message_wbuffer = (uint32_t *)round_up((unsigned long)acb->dma_coherent2 +
659 acb->completeQ_size, 4);
660 acb->message_rbuffer = ((void *)acb->message_wbuffer) + 0x100;
661 acb->msgcode_rwbuffer = ((void *)acb->message_wbuffer) + 0x200;
662 memset((void *)acb->message_wbuffer, 0, MESG_RW_BUFFER_SIZE);
663 host_buffer_dma = round_up(acb->dma_coherent_handle2 + acb->completeQ_size, 4);
664 pmuF = acb->pmuF;
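arcmsr_hbaF_assign_regAddr carves three message windows out of the tail of the single coherent allocation: the write buffer starts 4-byte aligned just past the completion queue, and the read and message-code buffers sit at fixed 0x100 and 0x200 offsets from it (lines 658-663). A runnable userspace sketch of that carving arithmetic, with an assumed queue size and an assumed 0x300 bytes of trailing room:

/* Runnable sketch of the buffer carving at lines 658-663. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ROUND_UP(x, a) ((((uintptr_t)(x)) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

int main(void)
{
    size_t completeQ_size = 4096;               /* assumed queue size */
    unsigned char *base = malloc(completeQ_size + 0x300);
    uint32_t *wbuf, *rbuf, *rwbuf;

    if (!base)
        return 1;
    wbuf  = (uint32_t *)ROUND_UP(base + completeQ_size, 4);
    rbuf  = (uint32_t *)((unsigned char *)wbuf + 0x100);
    rwbuf = (uint32_t *)((unsigned char *)wbuf + 0x200);
    printf("wbuffer=%p rbuffer=%p rwbuffer=%p\n",
           (void *)wbuf, (void *)rbuf, (void *)rwbuf);
    free(base);
    return 0;
}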
673 static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
678 struct pci_dev *pdev = acb->pdev;
680 switch (acb->adapter_type) {
682 acb->ioqueue_size = roundup(sizeof(struct MessageUnit_B), 32);
683 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
686 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
689 acb->dma_coherent_handle2 = dma_coherent_handle;
690 acb->dma_coherent2 = dma_coherent;
691 acb->pmuB = (struct MessageUnit_B *)dma_coherent;
692 arcmsr_hbaB_assign_regAddr(acb);
696 acb->ioqueue_size = roundup(sizeof(struct MessageUnit_D), 32);
697 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
700 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
703 acb->dma_coherent_handle2 = dma_coherent_handle;
704 acb->dma_coherent2 = dma_coherent;
705 acb->pmuD = (struct MessageUnit_D *)dma_coherent;
706 arcmsr_hbaD_assign_regAddr(acb);
712 acb->ioqueue_size = roundup(completeQ_size, 32);
713 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
716 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
719 acb->dma_coherent_handle2 = dma_coherent_handle;
720 acb->dma_coherent2 = dma_coherent;
721 acb->pCompletionQ = dma_coherent;
722 acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ);
723 acb->doneq_index = 0;
730 arcmsr_wait_firmware_ready(acb);
731 QueueDepth = depthTbl[readl(&acb->pmuF->outbound_msgaddr1) & 7];
732 acb->completeQ_size = sizeof(struct deliver_completeQ) * QueueDepth + 128;
733 acb->ioqueue_size = roundup(acb->completeQ_size + MESG_RW_BUFFER_SIZE, 32);
734 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
737 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
740 acb->dma_coherent_handle2 = dma_coherent_handle;
741 acb->dma_coherent2 = dma_coherent;
742 acb->pCompletionQ = dma_coherent;
743 acb->completionQ_entry = acb->completeQ_size / sizeof(struct deliver_completeQ);
744 acb->doneq_index = 0;
745 arcmsr_hbaF_assign_regAddr(acb);
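Every branch of arcmsr_alloc_io_queue follows the same shape: round the queue size up to a 32-byte multiple, grab one dma_alloc_coherent region, and record both the CPU pointer (dma_coherent2) and the bus handle (dma_coherent_handle2); type F additionally sizes the queue from the firmware-reported depth first. A hedged kernel-style sketch of the shared allocate-or-fail step:

/* Sketch of the per-adapter io-queue allocation; the queue size is an
 * illustrative parameter, and failure is reported to the probe path. */
#include <linux/dma-mapping.h>

static void *alloc_io_queue(struct pci_dev *pdev, size_t qsize,
                            dma_addr_t *handle)
{
    void *cpu = dma_alloc_coherent(&pdev->dev, roundup(qsize, 32),
                                   handle, GFP_KERNEL);
    if (!cpu)
        return NULL;        /* probe logs "DMA allocation failed" */
    return cpu;
}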
754 static int arcmsr_alloc_xor_buffer(struct AdapterControlBlock *acb)
757 struct pci_dev *pdev = acb->pdev;
766 xor_ram = (acb->firm_PicStatus >> 24) & 0x0f;
767 acb->xor_mega = (xor_ram - 1) * 32 + 128 + 3;
768 acb->init2cfg_size = sizeof(struct HostRamBuf) +
769 (sizeof(struct XorHandle) * acb->xor_mega);
770 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->init2cfg_size,
772 acb->xorVirt = dma_coherent;
773 acb->xorPhys = dma_coherent_handle;
776 acb->xorVirtOffset = sizeof(struct HostRamBuf) +
777 (sizeof(struct Xor_sg) * acb->xor_mega);
779 (unsigned long)acb->xorVirtOffset);
780 for (i = 0; i < acb->xor_mega; i++) {
792 acb->host->host_no, i);
797 pRamBuf = (struct HostRamBuf *)acb->xorVirt;
805 static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
807 struct pci_dev *pdev = acb->pdev;
820 acb->devstate[i][j] = ARECA_RAID_GONE;
824 firm_config_version = acb->firm_cfg_version;
829 acb->host->max_sectors = max_xfer_len/512;
830 acb->host->sg_tablesize = max_sg_entrys;
832 acb->uncache_size = roundup_ccbsize * acb->maxFreeCCB;
833 if (acb->adapter_type != ACB_ADAPTER_TYPE_F)
834 acb->uncache_size += acb->ioqueue_size;
835 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
837 printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
840 acb->dma_coherent = dma_coherent;
841 acb->dma_coherent_handle = dma_coherent_handle;
842 memset(dma_coherent, 0, acb->uncache_size);
843 acb->ccbsize = roundup_ccbsize;
846 acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
847 for(i = 0; i < acb->maxFreeCCB; i++){
849 switch (acb->adapter_type) {
861 acb->pccb_pool[i] = ccb_tmp;
862 ccb_tmp->acb = acb;
867 acb->maxFreeCCB = i;
868 acb->host->can_queue = i;
872 list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
876 if (acb->adapter_type != ACB_ADAPTER_TYPE_F) {
877 acb->dma_coherent_handle2 = dma_coherent_handle;
878 acb->dma_coherent2 = ccb_tmp;
880 switch (acb->adapter_type) {
882 acb->pmuB = (struct MessageUnit_B *)acb->dma_coherent2;
883 arcmsr_hbaB_assign_regAddr(acb);
886 acb->pmuD = (struct MessageUnit_D *)acb->dma_coherent2;
887 arcmsr_hbaD_assign_regAddr(acb);
890 acb->pCompletionQ = acb->dma_coherent2;
891 acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ);
892 acb->doneq_index = 0;
895 if ((acb->firm_PicStatus >> 24) & 0x0f) {
896 if (arcmsr_alloc_xor_buffer(acb))
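Line 846 stores vir2phy_offset, the difference between the CCB pool's CPU address and its bus address. Completion entries report only a CDB's bus address, so the ISRs recover the CPU pointer by adding this offset back (lines 1543, 2549, 3603, ...). A runnable sketch showing why the unsigned wraparound arithmetic round-trips exactly (the bus address is an assumed value):

/* Runnable sketch of the vir2phy_offset translation trick. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned char pool[1024];                /* stands in for dma_coherent */
    uintptr_t bus_base = 0x80000000u;        /* assumed bus address */
    uintptr_t vir2phy_offset = (uintptr_t)pool - bus_base;

    uintptr_t reported = bus_base + 0x40;    /* address from hardware */
    unsigned char *ccb = (unsigned char *)(vir2phy_offset + reported);

    printf("recovered %p, expected %p\n", (void *)ccb, (void *)(pool + 0x40));
    return 0;
}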
904 struct AdapterControlBlock *acb = container_of(work,
906 char *acb_dev_map = (char *)acb->device_map;
913 switch (acb->adapter_type) {
915 struct MessageUnit_A __iomem *reg = acb->pmuA;
922 struct MessageUnit_B *reg = acb->pmuB;
929 struct MessageUnit_C __iomem *reg = acb->pmuC;
936 struct MessageUnit_D *reg = acb->pmuD;
943 struct MessageUnit_E __iomem *reg = acb->pmuE;
950 signature = (uint32_t __iomem *)(&acb->msgcode_rwbuffer[0]);
951 devicemap = (char __iomem *)(&acb->msgcode_rwbuffer[21]);
967 scsi_add_device(acb->host,
971 psdev = scsi_device_lookup(acb->host,
985 acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG;
989 arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
999 pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
1017 acb->vector_count = nvec;
1020 flags, "arcmsr", acb)) {
1022 acb->host->host_no, pci_irq_vector(pdev, i));
1030 free_irq(pci_irq_vector(pdev, i), acb);
1051 static int arcmsr_set_dma_mask(struct AdapterControlBlock *acb)
1053 struct pci_dev *pcidev = acb->pdev;
1056 if (((acb->adapter_type == ACB_ADAPTER_TYPE_A) && !dma_mask_64) ||
1059 if (acb->adapter_type <= ACB_ADAPTER_TYPE_B)
1081 struct AdapterControlBlock *acb;
1095 acb = (struct AdapterControlBlock *) host->hostdata;
1096 memset(acb,0,sizeof(struct AdapterControlBlock));
1097 acb->pdev = pdev;
1098 acb->adapter_type = id->driver_data;
1099 if (arcmsr_set_dma_mask(acb))
1101 acb->host = host;
1119 spin_lock_init(&acb->eh_lock);
1120 spin_lock_init(&acb->ccblist_lock);
1121 spin_lock_init(&acb->postq_lock);
1122 spin_lock_init(&acb->doneq_lock);
1123 spin_lock_init(&acb->rqbuffer_lock);
1124 spin_lock_init(&acb->wqbuffer_lock);
1125 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
1128 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
1129 INIT_LIST_HEAD(&acb->ccb_free_list);
1130 error = arcmsr_remap_pciregion(acb);
1134 error = arcmsr_alloc_io_queue(acb);
1137 error = arcmsr_get_firmware_spec(acb);
1141 if (acb->adapter_type != ACB_ADAPTER_TYPE_F)
1142 arcmsr_free_io_queue(acb);
1143 error = arcmsr_alloc_ccb_pool(acb);
1151 if (arcmsr_request_irq(pdev, acb) == FAILED)
1153 arcmsr_iop_init(acb);
1154 arcmsr_init_get_devmap_timer(acb);
1156 arcmsr_init_set_datetime_timer(acb);
1157 if(arcmsr_alloc_sysfs_attr(acb))
1163 del_timer_sync(&acb->refresh_timer);
1164 del_timer_sync(&acb->eternal_timer);
1165 flush_work(&acb->arcmsr_do_message_isr_bh);
1166 arcmsr_stop_adapter_bgrb(acb);
1167 arcmsr_flush_adapter_cache(acb);
1168 arcmsr_free_irq(pdev, acb);
1172 arcmsr_free_ccb_pool(acb);
1175 arcmsr_free_io_queue(acb);
1177 arcmsr_unmap_pciregion(acb);
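The probe tail at lines 1130-1177 is the usual kernel unwind ladder: each setup step that fails jumps to a label that tears down everything already built, in reverse order (note the adapter-type-F special case at lines 1141 and 1175, where the io queue's lifetime differs). A compilable userspace reduction of that shape, with illustrative step names and a forced failure to show the cleanup order:

/* Sketch of the error-unwind ladder in arcmsr_probe. */
#include <errno.h>
#include <stdio.h>

static int  remap_region(void)   { return 0; }
static int  alloc_io_queue(void) { return 0; }
static int  alloc_ccb_pool(void) { return -1; }  /* force a failure */
static void free_io_queue(void)  { puts("free io queue"); }
static void unmap_region(void)   { puts("unmap region"); }

static int probe_sketch(void)
{
    if (remap_region())
        goto out;
    if (alloc_io_queue())
        goto unmap;
    if (alloc_ccb_pool())
        goto free_queue;
    return 0;

free_queue:
    free_io_queue();
unmap:
    unmap_region();
out:
    return -ENODEV;
}

int main(void) { return probe_sketch() ? 1 : 0; }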
1188 struct AdapterControlBlock *acb)
1192 for (i = 0; i < acb->vector_count; i++)
1193 free_irq(pci_irq_vector(pdev, i), acb);
1201 struct AdapterControlBlock *acb =
1204 arcmsr_disable_outbound_ints(acb);
1205 arcmsr_free_irq(pdev, acb);
1206 del_timer_sync(&acb->eternal_timer);
1208 del_timer_sync(&acb->refresh_timer);
1209 flush_work(&acb->arcmsr_do_message_isr_bh);
1210 arcmsr_stop_adapter_bgrb(acb);
1211 arcmsr_flush_adapter_cache(acb);
1219 struct AdapterControlBlock *acb =
1222 if (arcmsr_set_dma_mask(acb))
1224 if (arcmsr_request_irq(pdev, acb) == FAILED)
1226 switch (acb->adapter_type) {
1228 struct MessageUnit_B *reg = acb->pmuB;
1239 writel(0, &acb->pmuE->host_int_status);
1240 writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell);
1241 acb->in_doorbell = 0;
1242 acb->out_doorbell = 0;
1243 acb->doneq_index = 0;
1246 writel(0, &acb->pmuF->host_int_status);
1247 writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell);
1248 acb->in_doorbell = 0;
1249 acb->out_doorbell = 0;
1250 acb->doneq_index = 0;
1251 arcmsr_hbaF_assign_regAddr(acb);
1254 arcmsr_iop_init(acb);
1255 arcmsr_init_get_devmap_timer(acb);
1257 arcmsr_init_set_datetime_timer(acb);
1260 arcmsr_stop_adapter_bgrb(acb);
1261 arcmsr_flush_adapter_cache(acb);
1264 arcmsr_free_ccb_pool(acb);
1265 if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
1266 arcmsr_free_io_queue(acb);
1267 arcmsr_unmap_pciregion(acb);
1272 static uint8_t arcmsr_hbaA_abort_allcmd(struct AdapterControlBlock *acb)
1274 struct MessageUnit_A __iomem *reg = acb->pmuA;
1276 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
1279 , acb->host->host_no);
1285 static uint8_t arcmsr_hbaB_abort_allcmd(struct AdapterControlBlock *acb)
1287 struct MessageUnit_B *reg = acb->pmuB;
1290 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
1293 , acb->host->host_no);
1340 static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
1343 switch (acb->adapter_type) {
1345 rtnval = arcmsr_hbaA_abort_allcmd(acb);
1348 rtnval = arcmsr_hbaB_abort_allcmd(acb);
1351 rtnval = arcmsr_hbaC_abort_allcmd(acb);
1354 rtnval = arcmsr_hbaD_abort_allcmd(acb);
1358 rtnval = arcmsr_hbaE_abort_allcmd(acb);
1366 struct AdapterControlBlock *acb = ccb->acb;
1369 atomic_dec(&acb->ccboutstandingcount);
1372 spin_lock_irqsave(&acb->ccblist_lock, flags);
1373 list_add_tail(&ccb->list, &acb->ccb_free_list);
1374 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
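On completion a CCB is returned to acb->ccb_free_list under ccblist_lock, and arcmsr_get_freeccb (lines 3245-3260 below) pops from the same list under the same lock. A kernel-style sketch of both halves of that pattern, with struct CommandControlBlock reduced to its list linkage:

/* Sketch of the spinlock-protected free-CCB list; the struct is an
 * illustrative reduction, not the driver's layout. */
#include <linux/list.h>
#include <linux/spinlock.h>

struct ccb { struct list_head list; };

static LIST_HEAD(ccb_free_list);
static DEFINE_SPINLOCK(ccblist_lock);

static void put_ccb(struct ccb *ccb)
{
    unsigned long flags;

    spin_lock_irqsave(&ccblist_lock, flags);
    list_add_tail(&ccb->list, &ccb_free_list);
    spin_unlock_irqrestore(&ccblist_lock, flags);
}

static struct ccb *get_ccb(void)
{
    struct ccb *ccb = NULL;
    unsigned long flags;

    spin_lock_irqsave(&ccblist_lock, flags);
    if (!list_empty(&ccb_free_list)) {
        ccb = list_first_entry(&ccb_free_list, struct ccb, list);
        list_del_init(&ccb->list);
    }
    spin_unlock_irqrestore(&ccblist_lock, flags);
    return ccb;
}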
1398 static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
1401 switch (acb->adapter_type) {
1403 struct MessageUnit_A __iomem *reg = acb->pmuA;
1410 struct MessageUnit_B *reg = acb->pmuB;
1416 struct MessageUnit_C __iomem *reg = acb->pmuC;
1423 struct MessageUnit_D *reg = acb->pmuD;
1430 struct MessageUnit_E __iomem *reg = acb->pmuE;
1440 static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
1447 if (acb->devstate[id][lun] == ARECA_RAID_GONE)
1448 acb->devstate[id][lun] = ARECA_RAID_GOOD;
1454 acb->devstate[id][lun] = ARECA_RAID_GONE;
1463 acb->devstate[id][lun] = ARECA_RAID_GONE;
1470 acb->devstate[id][lun] = ARECA_RAID_GOOD;
1480 , acb->host->host_no
1484 acb->devstate[id][lun] = ARECA_RAID_GONE;
1492 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
1494 if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
1501 acb->host->host_no, pCCB);
1506 done acb = '0x%p'"
1509 , acb->host->host_no
1510 , acb
1512 , pCCB->acb
1514 , atomic_read(&acb->ccboutstandingcount));
1517 arcmsr_report_ccb_state(acb, pCCB, error);
1520 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
1529 switch (acb->adapter_type) {
1532 struct MessageUnit_A __iomem *reg = acb->pmuA;
1535 acb->outbound_int_enable;
1539 && (i++ < acb->maxOutstanding)) {
1541 if (acb->cdb_phyadd_hipart)
1542 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1543 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
1546 arcmsr_drain_donequeue(acb, pCCB, error);
1552 struct MessageUnit_B *reg = acb->pmuB;
1560 if (acb->cdb_phyadd_hipart)
1561 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1562 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
1565 arcmsr_drain_donequeue(acb, pCCB, error);
1574 struct MessageUnit_C __iomem *reg = acb->pmuC;
1575 while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < acb->maxOutstanding)) {
1579 if (acb->cdb_phyadd_hipart)
1580 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1581 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
1584 arcmsr_drain_donequeue(acb, pCCB, error);
1589 struct MessageUnit_D *pmu = acb->pmuD;
1594 residual = atomic_read(&acb->ccboutstandingcount);
1596 spin_lock_irqsave(&acb->doneq_lock, flags);
1608 spin_unlock_irqrestore(&acb->doneq_lock, flags);
1612 if (acb->cdb_phyadd_hipart)
1613 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1615 (acb->vir2phy_offset + ccb_cdb_phy);
1621 arcmsr_drain_donequeue(acb, pCCB, error);
1625 spin_unlock_irqrestore(&acb->doneq_lock, flags);
1634 arcmsr_hbaE_postqueue_isr(acb);
1637 arcmsr_hbaF_postqueue_isr(acb);
1642 static void arcmsr_remove_scsi_devices(struct AdapterControlBlock *acb)
1644 char *acb_dev_map = (char *)acb->device_map;
1650 for (i = 0; i < acb->maxFreeCCB; i++) {
1651 ccb = acb->pccb_pool[i];
1663 psdev = scsi_device_lookup(acb->host,
1678 static void arcmsr_free_pcidev(struct AdapterControlBlock *acb)
1683 host = acb->host;
1684 arcmsr_free_sysfs_attr(acb);
1686 flush_work(&acb->arcmsr_do_message_isr_bh);
1687 del_timer_sync(&acb->eternal_timer);
1689 del_timer_sync(&acb->refresh_timer);
1690 pdev = acb->pdev;
1691 arcmsr_free_irq(pdev, acb);
1692 arcmsr_free_ccb_pool(acb);
1693 if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
1694 arcmsr_free_io_queue(acb);
1695 arcmsr_unmap_pciregion(acb);
1704 struct AdapterControlBlock *acb =
1711 acb->acb_flags &= ~ACB_F_IOP_INITED;
1712 acb->acb_flags |= ACB_F_ADAPTER_REMOVED;
1713 arcmsr_remove_scsi_devices(acb);
1714 arcmsr_free_pcidev(acb);
1717 arcmsr_free_sysfs_attr(acb);
1719 flush_work(&acb->arcmsr_do_message_isr_bh);
1720 del_timer_sync(&acb->eternal_timer);
1722 del_timer_sync(&acb->refresh_timer);
1723 arcmsr_disable_outbound_ints(acb);
1724 arcmsr_stop_adapter_bgrb(acb);
1725 arcmsr_flush_adapter_cache(acb);
1726 acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
1727 acb->acb_flags &= ~ACB_F_IOP_INITED;
1729 for (poll_count = 0; poll_count < acb->maxOutstanding; poll_count++){
1730 if (!atomic_read(&acb->ccboutstandingcount))
1732 arcmsr_interrupt(acb);/* FIXME: need spinlock */
1736 if (atomic_read(&acb->ccboutstandingcount)) {
1739 arcmsr_abort_allcmd(acb);
1740 arcmsr_done4abort_postqueue(acb);
1741 for (i = 0; i < acb->maxFreeCCB; i++) {
1742 struct CommandControlBlock *ccb = acb->pccb_pool[i];
1750 arcmsr_free_irq(pdev, acb);
1751 arcmsr_free_ccb_pool(acb);
1752 if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
1753 arcmsr_free_io_queue(acb);
1754 arcmsr_unmap_pciregion(acb);
1763 struct AdapterControlBlock *acb =
1765 if (acb->acb_flags & ACB_F_ADAPTER_REMOVED)
1767 del_timer_sync(&acb->eternal_timer);
1769 del_timer_sync(&acb->refresh_timer);
1770 arcmsr_disable_outbound_ints(acb);
1771 arcmsr_free_irq(pdev, acb);
1772 flush_work(&acb->arcmsr_do_message_isr_bh);
1773 arcmsr_stop_adapter_bgrb(acb);
1774 arcmsr_flush_adapter_cache(acb);
1791 static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
1795 switch (acb->adapter_type) {
1798 struct MessageUnit_A __iomem *reg = acb->pmuA;
1803 acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
1808 struct MessageUnit_B *reg = acb->pmuB;
1814 acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
1818 struct MessageUnit_C __iomem *reg = acb->pmuC;
1821 acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
1825 struct MessageUnit_D *reg = acb->pmuD;
1833 struct MessageUnit_E __iomem *reg = acb->pmuE;
1842 static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
1862 if (unlikely(nseg > acb->host->sg_tablesize || nseg < 0))
1897 static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
1901 atomic_inc(&acb->ccboutstandingcount);
1903 switch (acb->adapter_type) {
1905 struct MessageUnit_A __iomem *reg = acb->pmuA;
1916 struct MessageUnit_B *reg = acb->pmuB;
1934 struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
1944 struct MessageUnit_D *pmu = acb->pmuD;
1950 spin_lock_irqsave(&acb->postq_lock, flags);
1963 spin_unlock_irqrestore(&acb->postq_lock, flags);
1967 struct MessageUnit_E __iomem *pmu = acb->pmuE;
1977 struct MessageUnit_F __iomem *pmu = acb->pmuF;
1996 static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb)
1998 struct MessageUnit_A __iomem *reg = acb->pmuA;
1999 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
2001 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
2004 , acb->host->host_no);
2008 static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb)
2010 struct MessageUnit_B *reg = acb->pmuB;
2011 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
2014 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
2017 , acb->host->host_no);
2060 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
2062 switch (acb->adapter_type) {
2064 arcmsr_hbaA_stop_bgrb(acb);
2067 arcmsr_hbaB_stop_bgrb(acb);
2070 arcmsr_hbaC_stop_bgrb(acb);
2073 arcmsr_hbaD_stop_bgrb(acb);
2077 arcmsr_hbaE_stop_bgrb(acb);
2082 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
2084 if (acb->xor_mega) {
2089 pXorPhys = (struct Xor_sg *)(acb->xorVirt +
2091 pXorVirt = (void **)((unsigned long)acb->xorVirt +
2092 (unsigned long)acb->xorVirtOffset);
2093 for (i = 0; i < acb->xor_mega; i++) {
2095 dma_free_coherent(&acb->pdev->dev,
2104 dma_free_coherent(&acb->pdev->dev, acb->init2cfg_size,
2105 acb->xorVirt, acb->xorPhys);
2107 dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle);
2110 static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
2112 switch (acb->adapter_type) {
2114 struct MessageUnit_A __iomem *reg = acb->pmuA;
2119 struct MessageUnit_B *reg = acb->pmuB;
2124 struct MessageUnit_C __iomem *reg = acb->pmuC;
2130 struct MessageUnit_D *reg = acb->pmuD;
2137 struct MessageUnit_E __iomem *reg = acb->pmuE;
2138 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
2139 writel(acb->out_doorbell, &reg->iobound_doorbell);
2145 static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
2147 switch (acb->adapter_type) {
2149 struct MessageUnit_A __iomem *reg = acb->pmuA;
2159 struct MessageUnit_B *reg = acb->pmuB;
2168 struct MessageUnit_C __iomem *reg = acb->pmuC;
2177 struct MessageUnit_D *reg = acb->pmuD;
2184 struct MessageUnit_E __iomem *reg = acb->pmuE;
2185 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK;
2186 writel(acb->out_doorbell, &reg->iobound_doorbell);
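For the E and F families, arcmsr_iop_message_read/wrote do not write a one-shot doorbell value: the driver keeps a shadow word (acb->out_doorbell), XOR-toggles the relevant bit, and writes the whole word back, so the IOP reacts to the bit changing state rather than to a strobe. A hedged sketch of that toggle (the register layout and bit value are assumptions):

/* Sketch of the E/F shadowed-doorbell toggle. */
#include <linux/io.h>

#define DRV2IOP_DATA_READ_OK 0x00000002    /* assumed bit value */

struct hba_regs { u32 iobound_doorbell; };

static u32 out_doorbell;                   /* shadow of last written word */

static void ring_doorbell(struct hba_regs __iomem *reg)
{
    out_doorbell ^= DRV2IOP_DATA_READ_OK;
    writel(out_doorbell, &reg->iobound_doorbell);
}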
2192 struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
2195 switch (acb->adapter_type) {
2198 struct MessageUnit_A __iomem *reg = acb->pmuA;
2203 struct MessageUnit_B *reg = acb->pmuB;
2208 struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
2213 struct MessageUnit_D *reg = acb->pmuD;
2218 struct MessageUnit_E __iomem *reg = acb->pmuE;
2223 qbuffer = (struct QBUFFER __iomem *)acb->message_rbuffer;
2230 static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
2233 switch (acb->adapter_type) {
2236 struct MessageUnit_A __iomem *reg = acb->pmuA;
2241 struct MessageUnit_B *reg = acb->pmuB;
2246 struct MessageUnit_C __iomem *reg = acb->pmuC;
2251 struct MessageUnit_D *reg = acb->pmuD;
2256 struct MessageUnit_E __iomem *reg = acb->pmuE;
2261 pqbuffer = (struct QBUFFER __iomem *)acb->message_wbuffer;
2268 arcmsr_Read_iop_rqbuffer_in_DWORD(struct AdapterControlBlock *acb,
2294 pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
2296 acb->rqbuf_putIndex++;
2298 acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
2304 arcmsr_iop_message_read(acb);
2309 arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
2316 if (acb->adapter_type > ACB_ADAPTER_TYPE_B)
2317 return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer);
2321 pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
2323 acb->rqbuf_putIndex++;
2324 acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
2328 arcmsr_iop_message_read(acb);
2332 static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
2338 spin_lock_irqsave(&acb->rqbuffer_lock, flags);
2339 prbuffer = arcmsr_get_iop_rqbuffer(acb);
2340 if (acb->rqbuf_putIndex >= acb->rqbuf_getIndex) {
2342 (acb->rqbuf_putIndex - acb->rqbuf_getIndex);
2344 buf_empty_len = acb->rqbuf_getIndex - acb->rqbuf_putIndex - 1;
2346 if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
2347 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
2349 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
2350 spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
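The rqbuffer is a classic one-slot-open circular buffer: the producer wraps rqbuf_putIndex modulo ARCMSR_MAX_QBUFFER, and free space is computed differently depending on whether put has wrapped past get (lines 2340-2344). A runnable sketch of exactly that arithmetic, with QBUF_SIZE standing in for ARCMSR_MAX_QBUFFER:

/* Runnable sketch of the rqbuffer index arithmetic; one slot stays
 * empty to distinguish full from empty, matching the "- 1" at 2344. */
#include <stdio.h>

#define QBUF_SIZE 4096

static unsigned int free_space(unsigned int put, unsigned int get)
{
    if (put >= get)
        return (QBUF_SIZE - 1) - (put - get);
    return get - put - 1;
}

int main(void)
{
    unsigned int put = 0, get = 0;

    printf("empty: %u free\n", free_space(put, get));     /* 4095 */
    put = (put + 10) % QBUF_SIZE;                         /* produce 10 */
    printf("after put: %u free\n", free_space(put, get));
    get = (get + 10) % QBUF_SIZE;                         /* consume 10 */
    printf("after get: %u free\n", free_space(put, get));
    return 0;
}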
2353 static void arcmsr_write_ioctldata2iop_in_DWORD(struct AdapterControlBlock *acb)
2361 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
2367 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
2368 pwbuffer = arcmsr_get_iop_wqbuffer(acb);
2370 while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
2372 pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
2374 acb->wqbuf_getIndex++;
2375 acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
2393 arcmsr_iop_message_wrote(acb);
2398 arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb)
2405 if (acb->adapter_type > ACB_ADAPTER_TYPE_B) {
2406 arcmsr_write_ioctldata2iop_in_DWORD(acb);
2409 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
2410 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
2411 pwbuffer = arcmsr_get_iop_wqbuffer(acb);
2413 while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
2415 pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
2417 acb->wqbuf_getIndex++;
2418 acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
2423 arcmsr_iop_message_wrote(acb);
2427 static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
2431 spin_lock_irqsave(&acb->wqbuffer_lock, flags);
2432 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
2433 if (acb->wqbuf_getIndex != acb->wqbuf_putIndex)
2434 arcmsr_write_ioctldata2iop(acb);
2435 if (acb->wqbuf_getIndex == acb->wqbuf_putIndex)
2436 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
2437 spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
2440 static void arcmsr_hbaA_doorbell_isr(struct AdapterControlBlock *acb)
2443 struct MessageUnit_A __iomem *reg = acb->pmuA;
2448 arcmsr_iop2drv_data_wrote_handle(acb);
2450 arcmsr_iop2drv_data_read_handle(acb);
2536 static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb)
2539 struct MessageUnit_A __iomem *reg = acb->pmuA;
2547 if (acb->cdb_phyadd_hipart)
2548 cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart;
2549 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr);
2552 arcmsr_drain_donequeue(acb, pCCB, error);
2555 static void arcmsr_hbaB_postqueue_isr(struct AdapterControlBlock *acb)
2559 struct MessageUnit_B *reg = acb->pmuB;
2568 if (acb->cdb_phyadd_hipart)
2569 cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart;
2570 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr);
2573 arcmsr_drain_donequeue(acb, pCCB, error);
2581 static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb)
2590 phbcmu = acb->pmuC;
2597 if (acb->cdb_phyadd_hipart)
2598 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
2599 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
2606 arcmsr_drain_donequeue(acb, ccb, error);
2616 static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
2626 spin_lock_irqsave(&acb->doneq_lock, flags);
2627 pmu = acb->pmuD;
2641 if (acb->cdb_phyadd_hipart)
2642 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
2643 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
2649 arcmsr_drain_donequeue(acb, ccb, error);
2657 spin_unlock_irqrestore(&acb->doneq_lock, flags);
2660 static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb)
2669 spin_lock_irqsave(&acb->doneq_lock, flags);
2670 doneq_index = acb->doneq_index;
2671 pmu = acb->pmuE;
2673 cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
2674 ccb = acb->pccb_pool[cmdSMID];
2675 error = (acb->pCompletionQ[doneq_index].cmdFlag
2677 arcmsr_drain_donequeue(acb, ccb, error);
2679 if (doneq_index >= acb->completionQ_entry)
2682 acb->doneq_index = doneq_index;
2684 spin_unlock_irqrestore(&acb->doneq_lock, flags);
2687 static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb)
2696 spin_lock_irqsave(&acb->doneq_lock, flags);
2697 doneq_index = acb->doneq_index;
2698 phbcmu = acb->pmuF;
2700 cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
2703 ccb = acb->pccb_pool[cmdSMID];
2704 error = (acb->pCompletionQ[doneq_index].cmdFlag &
2706 arcmsr_drain_donequeue(acb, ccb, error);
2707 acb->pCompletionQ[doneq_index].cmdSMID = 0xffff;
2709 if (doneq_index >= acb->completionQ_entry)
2712 acb->doneq_index = doneq_index;
2714 spin_unlock_irqrestore(&acb->doneq_lock, flags);
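The E/F post-queue ISRs are index-walking consumers: read the cmdSMID at doneq_index, look the CCB up directly in pccb_pool, advance the index, and wrap at completionQ_entry; type F additionally poisons the consumed slot with 0xffff (line 2707) so a stale entry terminates the walk. A runnable reduction of that loop:

/* Runnable sketch of the type-E/F completion-queue walk. */
#include <stdint.h>
#include <stdio.h>

#define QUEUE_ENTRIES 8

struct completeQ_entry { uint16_t cmdSMID; };

int main(void)
{
    struct completeQ_entry q[QUEUE_ENTRIES] = { {3}, {1}, {0xffff} };
    unsigned int doneq_index = 0;

    while (q[doneq_index].cmdSMID != 0xffff) {
        printf("complete CCB %u\n", q[doneq_index].cmdSMID);
        q[doneq_index].cmdSMID = 0xffff;   /* mark slot consumed */
        doneq_index++;
        if (doneq_index >= QUEUE_ENTRIES)
            doneq_index = 0;
    }
    return 0;
}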
2725 static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb)
2727 struct MessageUnit_A __iomem *reg = acb->pmuA;
2730 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2731 schedule_work(&acb->arcmsr_do_message_isr_bh);
2733 static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
2735 struct MessageUnit_B *reg = acb->pmuB;
2739 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2740 schedule_work(&acb->arcmsr_do_message_isr_bh);
2751 static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb)
2753 struct MessageUnit_C __iomem *reg = acb->pmuC;
2756 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2757 schedule_work(&acb->arcmsr_do_message_isr_bh);
2760 static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
2762 struct MessageUnit_D *reg = acb->pmuD;
2766 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2767 schedule_work(&acb->arcmsr_do_message_isr_bh);
2770 static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb)
2772 struct MessageUnit_E __iomem *reg = acb->pmuE;
2775 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2776 schedule_work(&acb->arcmsr_do_message_isr_bh);
2779 static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb)
2782 struct MessageUnit_A __iomem *reg = acb->pmuA;
2784 acb->outbound_int_enable;
2790 arcmsr_hbaA_doorbell_isr(acb);
2792 arcmsr_hbaA_postqueue_isr(acb);
2794 arcmsr_hbaA_message_isr(acb);
2796 acb->outbound_int_enable;
2803 static int arcmsr_hbaB_handle_isr(struct AdapterControlBlock *acb)
2806 struct MessageUnit_B *reg = acb->pmuB;
2808 acb->outbound_int_enable;
2815 arcmsr_iop2drv_data_wrote_handle(acb);
2817 arcmsr_iop2drv_data_read_handle(acb);
2819 arcmsr_hbaB_postqueue_isr(acb);
2821 arcmsr_hbaB_message_isr(acb);
2823 acb->outbound_int_enable;
2932 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
2934 switch (acb->adapter_type) {
2936 return arcmsr_hbaA_handle_isr(acb);
2938 return arcmsr_hbaB_handle_isr(acb);
2940 return arcmsr_hbaC_handle_isr(acb);
2942 return arcmsr_hbaD_handle_isr(acb);
2944 return arcmsr_hbaE_handle_isr(acb);
2946 return arcmsr_hbaF_handle_isr(acb);
2952 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
2954 if (acb) {
2956 if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
2958 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
2959 intmask_org = arcmsr_disable_outbound_ints(acb);
2960 arcmsr_stop_adapter_bgrb(acb);
2961 arcmsr_flush_adapter_cache(acb);
2962 arcmsr_enable_outbound_ints(acb, intmask_org);
2968 void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *acb)
2972 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2974 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2975 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2976 acb->rqbuf_getIndex = 0;
2977 acb->rqbuf_putIndex = 0;
2978 arcmsr_iop_message_read(acb);
2980 } else if (acb->rqbuf_getIndex !=
2981 acb->rqbuf_putIndex) {
2982 acb->rqbuf_getIndex = 0;
2983 acb->rqbuf_putIndex = 0;
2991 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
3031 spin_lock_irqsave(&acb->rqbuffer_lock, flags);
3032 if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) {
3033 unsigned int tail = acb->rqbuf_getIndex;
3034 unsigned int head = acb->rqbuf_putIndex;
3042 memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len);
3044 memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end);
3045 memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end);
3047 acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER;
3051 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
3053 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
3054 prbuffer = arcmsr_get_iop_rqbuffer(acb);
3055 if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
3056 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
3058 spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
3061 if (acb->fw_flag == FW_DEADLOCK)
3090 spin_lock_irqsave(&acb->wqbuffer_lock, flags);
3091 if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) {
3094 arcmsr_write_ioctldata2iop(acb);
3103 pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex];
3104 cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex;
3109 acb->wqbuf_putIndex = 0;
3110 pQbuffer = acb->wqbuffer;
3113 acb->wqbuf_putIndex += user_len;
3114 acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
3115 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
3116 acb->acb_flags &=
3118 arcmsr_write_ioctldata2iop(acb);
3121 spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
3123 if (acb->fw_flag == FW_DEADLOCK)
3132 uint8_t *pQbuffer = acb->rqbuffer;
3134 arcmsr_clear_iop2drv_rqueue_buffer(acb);
3135 spin_lock_irqsave(&acb->rqbuffer_lock, flags);
3136 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
3137 acb->rqbuf_getIndex = 0;
3138 acb->rqbuf_putIndex = 0;
3140 spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
3141 if (acb->fw_flag == FW_DEADLOCK)
3150 uint8_t *pQbuffer = acb->wqbuffer;
3151 spin_lock_irqsave(&acb->wqbuffer_lock, flags);
3152 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
3154 acb->wqbuf_getIndex = 0;
3155 acb->wqbuf_putIndex = 0;
3157 spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
3158 if (acb->fw_flag == FW_DEADLOCK)
3168 arcmsr_clear_iop2drv_rqueue_buffer(acb);
3169 spin_lock_irqsave(&acb->rqbuffer_lock, flags);
3170 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
3171 acb->rqbuf_getIndex = 0;
3172 acb->rqbuf_putIndex = 0;
3173 pQbuffer = acb->rqbuffer;
3175 spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
3176 spin_lock_irqsave(&acb->wqbuffer_lock, flags);
3177 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
3179 acb->wqbuf_getIndex = 0;
3180 acb->wqbuf_putIndex = 0;
3181 pQbuffer = acb->wqbuffer;
3183 spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
3184 if (acb->fw_flag == FW_DEADLOCK)
3193 if (acb->fw_flag == FW_DEADLOCK)
3203 if (acb->fw_flag == FW_DEADLOCK)
3214 if (acb->fw_flag == FW_DEADLOCK)
3220 arcmsr_iop_parking(acb);
3224 if (acb->fw_flag == FW_DEADLOCK)
3230 arcmsr_flush_adapter_cache(acb);
3245 static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
3251 spin_lock_irqsave(&acb->ccblist_lock, flags);
3252 head = &acb->ccb_free_list;
3257 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
3260 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
3264 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
3304 if (arcmsr_iop_message_xfer(acb, cmd))
3317 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
3321 if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) {
3330 arcmsr_handle_virtual_command(acb, cmd);
3333 ccb = arcmsr_get_freeccb(acb);
3336 if (arcmsr_build_ccb( acb, ccb, cmd ) == FAILED) {
3341 arcmsr_post_ccb(acb, ccb);
3404 static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
3406 struct MessageUnit_A __iomem *reg = acb->pmuA;
3408 arcmsr_wait_firmware_ready(acb);
3410 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
3412 miscellaneous data' timeout \n", acb->host->host_no);
3415 arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
3418 static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
3420 struct MessageUnit_B *reg = acb->pmuB;
3422 arcmsr_wait_firmware_ready(acb);
3424 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
3425 printk(KERN_ERR "arcmsr%d: can't set driver mode.\n", acb->host->host_no);
3429 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
3431 miscellaneous data' timeout \n", acb->host->host_no);
3434 arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
3461 static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
3463 struct MessageUnit_D *reg = acb->pmuD;
3465 if (readl(acb->pmuD->outbound_doorbell) &
3468 acb->pmuD->outbound_doorbell);/*clear interrupt*/
3470 arcmsr_wait_firmware_ready(acb);
3474 if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
3476 "miscellaneous data timeout\n", acb->host->host_no);
3479 arcmsr_get_adapter_config(acb, reg->msgcode_rwbuffer);
3534 static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
3538 switch (acb->adapter_type) {
3540 rtn = arcmsr_hbaA_get_config(acb);
3543 rtn = arcmsr_hbaB_get_config(acb);
3546 rtn = arcmsr_hbaC_get_config(acb);
3549 rtn = arcmsr_hbaD_get_config(acb);
3552 rtn = arcmsr_hbaE_get_config(acb);
3555 rtn = arcmsr_hbaF_get_config(acb);
3560 acb->maxOutstanding = acb->firm_numbers_queue - 1;
3561 if (acb->host->can_queue >= acb->firm_numbers_queue)
3562 acb->host->can_queue = acb->maxOutstanding;
3564 acb->maxOutstanding = acb->host->can_queue;
3565 acb->maxFreeCCB = acb->host->can_queue;
3566 if (acb->maxFreeCCB < ARCMSR_MAX_FREECCB_NUM)
3567 acb->maxFreeCCB += 64;
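Lines 3560-3567 clamp the SCSI host queue depth to the firmware-reported depth minus one, then pad the CCB pool with 64 spares when it is below the driver's nominal maximum. A runnable walk-through of that arithmetic (the firmware depth, host default, and ARCMSR_MAX_FREECCB_NUM value are assumed for illustration):

/* Runnable sketch of the can_queue clamp at lines 3560-3567. */
#include <stdio.h>

#define ARCMSR_MAX_FREECCB_NUM 1024    /* assumed nominal pool size */

int main(void)
{
    int firm_numbers_queue = 256;      /* reported by firmware */
    int can_queue = 1024;              /* host default */
    int maxOutstanding, maxFreeCCB;

    maxOutstanding = firm_numbers_queue - 1;
    if (can_queue >= firm_numbers_queue)
        can_queue = maxOutstanding;
    else
        maxOutstanding = can_queue;
    maxFreeCCB = can_queue;
    if (maxFreeCCB < ARCMSR_MAX_FREECCB_NUM)
        maxFreeCCB += 64;

    printf("can_queue=%d maxOutstanding=%d maxFreeCCB=%d\n",
           can_queue, maxOutstanding, maxFreeCCB);
    return 0;
}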
3571 static int arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock *acb,
3574 struct MessageUnit_A __iomem *reg = acb->pmuA;
3584 outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
3601 if (acb->cdb_phyadd_hipart)
3602 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
3603 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
3606 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
3610 , acb->host->host_no
3621 , acb->host->host_no
3623 , atomic_read(&acb->ccboutstandingcount));
3627 arcmsr_report_ccb_state(acb, ccb, error);
3632 static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb,
3635 struct MessageUnit_B *reg = acb->pmuB;
3670 if (acb->cdb_phyadd_hipart)
3671 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
3672 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
3675 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
3679 ,acb->host->host_no
3690 , acb->host->host_no
3692 , atomic_read(&acb->ccboutstandingcount));
3696 arcmsr_report_ccb_state(acb, ccb, error);
3701 static int arcmsr_hbaC_polling_ccbdone(struct AdapterControlBlock *acb,
3704 struct MessageUnit_C __iomem *reg = acb->pmuC;
3731 if (acb->cdb_phyadd_hipart)
3732 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
3733 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
3737 if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
3741 , acb->host->host_no
3752 , acb->host->host_no
3754 , atomic_read(&acb->ccboutstandingcount));
3758 arcmsr_report_ccb_state(acb, pCCB, error);
3763 static int arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock *acb,
3772 struct MessageUnit_D *pmu = acb->pmuD;
3777 spin_lock_irqsave(&acb->doneq_lock, flags);
3781 spin_unlock_irqrestore(&acb->doneq_lock, flags);
3800 spin_unlock_irqrestore(&acb->doneq_lock, flags);
3803 if (acb->cdb_phyadd_hipart)
3804 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
3805 arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset +
3810 if ((pCCB->acb != acb) ||
3816 , acb->host->host_no
3827 , acb->host->host_no
3829 , atomic_read(&acb->ccboutstandingcount));
3834 arcmsr_report_ccb_state(acb, pCCB, error);
3839 static int arcmsr_hbaE_polling_ccbdone(struct AdapterControlBlock *acb,
3848 struct MessageUnit_E __iomem *reg = acb->pmuE;
3853 spin_lock_irqsave(&acb->doneq_lock, flags);
3854 doneq_index = acb->doneq_index;
3857 spin_unlock_irqrestore(&acb->doneq_lock, flags);
3870 cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
3872 if (doneq_index >= acb->completionQ_entry)
3874 acb->doneq_index = doneq_index;
3875 spin_unlock_irqrestore(&acb->doneq_lock, flags);
3876 pCCB = acb->pccb_pool[cmdSMID];
3879 if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
3884 , acb->host->host_no
3895 , acb->host->host_no
3897 , atomic_read(&acb->ccboutstandingcount));
3900 error = (acb->pCompletionQ[doneq_index].cmdFlag &
3902 arcmsr_report_ccb_state(acb, pCCB, error);
3908 static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
3912 switch (acb->adapter_type) {
3915 rtn = arcmsr_hbaA_polling_ccbdone(acb, poll_ccb);
3918 rtn = arcmsr_hbaB_polling_ccbdone(acb, poll_ccb);
3921 rtn = arcmsr_hbaC_polling_ccbdone(acb, poll_ccb);
3924 rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb);
3928 rtn = arcmsr_hbaE_polling_ccbdone(acb, poll_ccb);
4026 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
4037 switch (acb->adapter_type) {
4040 dma_coherent_handle = acb->dma_coherent_handle2;
4044 dma_coherent_handle = acb->dma_coherent_handle +
4048 dma_coherent_handle = acb->dma_coherent_handle;
4053 acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
4054 acb->cdb_phyadd_hipart = ((uint64_t)cdb_phyaddr_hi32) << 32;
4060 switch (acb->adapter_type) {
4064 struct MessageUnit_A __iomem *reg = acb->pmuA;
4070 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
4073 acb->host->host_no);
4083 struct MessageUnit_B *reg = acb->pmuB;
4087 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
4089 acb->host->host_no);
4105 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
4107 timeout \n",acb->host->host_no);
4111 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
4113 acb->host->host_no);
4119 struct MessageUnit_C __iomem *reg = acb->pmuC;
4122 acb->adapter_index, cdb_phyaddr_hi32);
4127 if (!arcmsr_hbaC_wait_msgint_ready(acb)) {
4129 timeout \n", acb->host->host_no);
4136 struct MessageUnit_D *reg = acb->pmuD;
4147 if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
4149 acb->host->host_no);
4155 struct MessageUnit_E __iomem *reg = acb->pmuE;
4160 writel(acb->ccbsize, &reg->msgcode_rwbuffer[4]);
4161 writel(lower_32_bits(acb->dma_coherent_handle2), &reg->msgcode_rwbuffer[5]);
4162 writel(upper_32_bits(acb->dma_coherent_handle2), &reg->msgcode_rwbuffer[6]);
4163 writel(acb->ioqueue_size, &reg->msgcode_rwbuffer[7]);
4165 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
4166 writel(acb->out_doorbell, &reg->iobound_doorbell);
4167 if (!arcmsr_hbaE_wait_msgint_ready(acb)) {
4169 acb->host->host_no);
4175 struct MessageUnit_F __iomem *reg = acb->pmuF;
4177 acb->msgcode_rwbuffer[0] = ARCMSR_SIGNATURE_SET_CONFIG;
4178 acb->msgcode_rwbuffer[1] = ARCMSR_SIGNATURE_1886;
4179 acb->msgcode_rwbuffer[2] = cdb_phyaddr;
4180 acb->msgcode_rwbuffer[3] = cdb_phyaddr_hi32;
4181 acb->msgcode_rwbuffer[4] = acb->ccbsize;
4182 acb->msgcode_rwbuffer[5] = lower_32_bits(acb->dma_coherent_handle2);
4183 acb->msgcode_rwbuffer[6] = upper_32_bits(acb->dma_coherent_handle2);
4184 acb->msgcode_rwbuffer[7] = acb->completeQ_size;
4185 if (acb->xor_mega) {
4186 acb->msgcode_rwbuffer[8] = 0x455AA; //Linux init 2
4187 acb->msgcode_rwbuffer[9] = 0;
4188 acb->msgcode_rwbuffer[10] = lower_32_bits(acb->xorPhys);
4189 acb->msgcode_rwbuffer[11] = upper_32_bits(acb->xorPhys);
4192 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
4193 writel(acb->out_doorbell, &reg->iobound_doorbell);
4194 if (!arcmsr_hbaE_wait_msgint_ready(acb)) {
4196 acb->host->host_no);
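arcmsr_iop_confirm hands bus addresses to the firmware as two 32-bit words (lower_32_bits/upper_32_bits at lines 4161-4162 and 4182-4183), and line 4054 caches the high half as cdb_phyadd_hipart so 32-bit addresses coming back in completion entries can be widened with a single OR. A runnable sketch of the split and rejoin, using an assumed handle value:

/* Runnable sketch of the 32-bit split/rejoin in arcmsr_iop_confirm. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t dma_handle = 0x000000012345A000ULL;   /* assumed handle */
    uint32_t lo = (uint32_t)dma_handle;            /* lower_32_bits() */
    uint32_t hi = (uint32_t)(dma_handle >> 32);    /* upper_32_bits() */
    uint64_t cdb_phyadd_hipart = (uint64_t)hi << 32;

    uint32_t reported = 0x2345A040;                /* from a completion */
    uint64_t full = reported | cdb_phyadd_hipart;

    printf("lo=0x%x hi=0x%x full=0x%llx\n",
           lo, hi, (unsigned long long)full);
    return 0;
}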
4205 static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
4208 switch (acb->adapter_type) {
4211 struct MessageUnit_A __iomem *reg = acb->pmuA;
4213 if (!(acb->acb_flags & ACB_F_IOP_INITED))
4221 struct MessageUnit_B *reg = acb->pmuB;
4223 if (!(acb->acb_flags & ACB_F_IOP_INITED))
4231 struct MessageUnit_C __iomem *reg = acb->pmuC;
4233 if (!(acb->acb_flags & ACB_F_IOP_INITED))
4240 struct MessageUnit_D *reg = acb->pmuD;
4242 if (!(acb->acb_flags & ACB_F_IOP_INITED))
4251 struct MessageUnit_E __iomem *reg = acb->pmuE;
4253 if (!(acb->acb_flags & ACB_F_IOP_INITED))
4264 struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
4265 if (acb->acb_flags & (ACB_F_MSG_GET_CONFIG | ACB_F_BUS_RESET | ACB_F_ABORT)) {
4266 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
4268 acb->fw_flag = FW_NORMAL;
4269 switch (acb->adapter_type) {
4271 struct MessageUnit_A __iomem *reg = acb->pmuA;
4276 struct MessageUnit_B *reg = acb->pmuB;
4281 struct MessageUnit_C __iomem *reg = acb->pmuC;
4287 struct MessageUnit_D *reg = acb->pmuD;
4292 struct MessageUnit_E __iomem *reg = acb->pmuE;
4294 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
4295 writel(acb->out_doorbell, &reg->iobound_doorbell);
4299 struct MessageUnit_F __iomem *reg = acb->pmuF;
4306 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
4307 writel(acb->out_doorbell, &reg->iobound_doorbell);
4313 acb->acb_flags |= ACB_F_MSG_GET_CONFIG;
4315 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
4319 static void arcmsr_hbaA_start_bgrb(struct AdapterControlBlock *acb)
4321 struct MessageUnit_A __iomem *reg = acb->pmuA;
4322 acb->acb_flags |= ACB_F_MSG_START_BGRB;
4324 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
4326 rebuild' timeout \n", acb->host->host_no);
4330 static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb)
4332 struct MessageUnit_B *reg = acb->pmuB;
4333 acb->acb_flags |= ACB_F_MSG_START_BGRB;
4335 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
4337 rebuild' timeout \n",acb->host->host_no);
4380 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
4382 switch (acb->adapter_type) {
4384 arcmsr_hbaA_start_bgrb(acb);
4387 arcmsr_hbaB_start_bgrb(acb);
4390 arcmsr_hbaC_start_bgrb(acb);
4393 arcmsr_hbaD_start_bgrb(acb);
4397 arcmsr_hbaE_start_bgrb(acb);
4402 static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
4404 switch (acb->adapter_type) {
4406 struct MessageUnit_A __iomem *reg = acb->pmuA;
4417 struct MessageUnit_B *reg = acb->pmuB;
4434 struct MessageUnit_C __iomem *reg = acb->pmuC;
4455 struct MessageUnit_D *reg = acb->pmuD;
4478 struct MessageUnit_E __iomem *reg = acb->pmuE;
4481 acb->in_doorbell = readl(&reg->iobound_doorbell);
4483 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
4484 writel(acb->out_doorbell, &reg->iobound_doorbell);
4487 tmp = acb->in_doorbell;
4488 acb->in_doorbell = readl(&reg->iobound_doorbell);
4489 if((tmp ^ acb->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
4491 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
4492 writel(acb->out_doorbell, &reg->iobound_doorbell);
4501 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
4503 switch (acb->adapter_type) {
4508 struct MessageUnit_B *reg = acb->pmuB;
4510 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
4522 static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
4526 struct MessageUnit_A __iomem *pmuA = acb->pmuA;
4527 struct MessageUnit_C __iomem *pmuC = acb->pmuC;
4528 struct MessageUnit_D *pmuD = acb->pmuD;
4531 printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no);
4533 pci_read_config_byte(acb->pdev, i, &value[i]);
4536 if (acb->dev_id == 0x1680) {
4538 } else if (acb->dev_id == 0x1880) {
4549 } else if (acb->dev_id == 0x1884) {
4550 struct MessageUnit_E __iomem *pmuE = acb->pmuE;
4562 } else if (acb->dev_id == 0x1214) {
4565 pci_write_config_byte(acb->pdev, 0x84, 0x20);
4570 pci_write_config_byte(acb->pdev, i, value[i]);
4576 static bool arcmsr_reset_in_progress(struct AdapterControlBlock *acb)
4580 switch(acb->adapter_type) {
4582 struct MessageUnit_A __iomem *reg = acb->pmuA;
4588 struct MessageUnit_B *reg = acb->pmuB;
4594 struct MessageUnit_C __iomem *reg = acb->pmuC;
4599 struct MessageUnit_D *reg = acb->pmuD;
4606 struct MessageUnit_E __iomem *reg = acb->pmuE;
4615 static void arcmsr_iop_init(struct AdapterControlBlock *acb)
4619 intmask_org = arcmsr_disable_outbound_ints(acb);
4620 arcmsr_wait_firmware_ready(acb);
4621 arcmsr_iop_confirm(acb);
4623 arcmsr_start_adapter_bgrb(acb);
4625 arcmsr_clear_doorbell_queue_buffer(acb);
4626 arcmsr_enable_eoi_mode(acb);
4628 arcmsr_enable_outbound_ints(acb, intmask_org);
4629 acb->acb_flags |= ACB_F_IOP_INITED;
4632 static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
4640 if (atomic_read(&acb->ccboutstandingcount) != 0) {
4642 intmask_org = arcmsr_disable_outbound_ints(acb);
4644 rtnval = arcmsr_abort_allcmd(acb);
4646 arcmsr_done4abort_postqueue(acb);
4647 for (i = 0; i < acb->maxFreeCCB; i++) {
4648 ccb = acb->pccb_pool[i];
4653 spin_lock_irqsave(&acb->ccblist_lock, flags);
4654 list_add_tail(&ccb->list, &acb->ccb_free_list);
4655 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
4658 atomic_set(&acb->ccboutstandingcount, 0);
4660 arcmsr_enable_outbound_ints(acb, intmask_org);
4668 struct AdapterControlBlock *acb;
4671 acb = (struct AdapterControlBlock *) cmd->device->host->hostdata;
4672 if (acb->acb_flags & ACB_F_ADAPTER_REMOVED)
4675 " num_aborts = %d \n", acb->num_resets, acb->num_aborts);
4676 acb->num_resets++;
4678 if (acb->acb_flags & ACB_F_BUS_RESET) {
4681 timeout = wait_event_timeout(wait_q, (acb->acb_flags
4686 acb->acb_flags |= ACB_F_BUS_RESET;
4687 if (!arcmsr_iop_reset(acb)) {
4688 arcmsr_hardware_reset(acb);
4689 acb->acb_flags &= ~ACB_F_IOP_INITED;
4692 if (arcmsr_reset_in_progress(acb)) {
4694 acb->fw_flag = FW_DEADLOCK;
4697 acb->host->host_no);
4703 arcmsr_iop_init(acb);
4704 acb->fw_flag = FW_NORMAL;
4705 mod_timer(&acb->eternal_timer, jiffies +
4707 acb->acb_flags &= ~ACB_F_BUS_RESET;
4711 acb->acb_flags &= ~ACB_F_BUS_RESET;
4712 acb->fw_flag = FW_NORMAL;
4713 mod_timer(&acb->eternal_timer, jiffies +
4720 static int arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
4724 rtn = arcmsr_polling_ccbdone(acb, ccb);
4730 struct AdapterControlBlock *acb =
4736 if (acb->acb_flags & ACB_F_ADAPTER_REMOVED)
4740 acb->host->host_no, cmd->device->id, (u32)cmd->device->lun);
4741 acb->acb_flags |= ACB_F_ABORT;
4742 acb->num_aborts++;
4749 if (!atomic_read(&acb->ccboutstandingcount)) {
4750 acb->acb_flags &= ~ACB_F_ABORT;
4754 intmask_org = arcmsr_disable_outbound_ints(acb);
4755 for (i = 0; i < acb->maxFreeCCB; i++) {
4756 struct CommandControlBlock *ccb = acb->pccb_pool[i];
4759 rtn = arcmsr_abort_one_cmd(acb, ccb);
4763 acb->acb_flags &= ~ACB_F_ABORT;
4764 arcmsr_enable_outbound_ints(acb, intmask_org);
4770 struct AdapterControlBlock *acb =
4775 switch (acb->pdev->device) {