Search scope: /netgear-WNDR4500-V1.0.1.40_1.0.68/src/linux/linux-2.6/drivers/scsi/

Lines Matching defs:hba

47 static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag);
48 static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag);
49 static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
77 static void hptiop_request_callback(struct hptiop_hba *hba, u32 tag)
80 return hptiop_host_request_callback(hba,
83 return hptiop_iop_request_callback(hba, tag);
86 static inline void hptiop_drain_outbound_queue(struct hptiop_hba *hba)
90 while ((req = readl(&hba->iop->outbound_queue)) != IOPMU_QUEUE_EMPTY) {
93 hptiop_request_callback(hba, req);
98 ((char __iomem *)hba->iop + req);
102 hptiop_request_callback(hba, req);
107 hptiop_request_callback(hba, req);
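The hptiop_drain_outbound_queue fragments above (source lines 86-107) show the completion-drain pattern: the driver keeps reading request offsets from the outbound_queue register until the hardware returns the IOPMU_QUEUE_EMPTY sentinel, handing each one to hptiop_request_callback. Below is a minimal user-space sketch of that loop shape, not the driver's code: the simulated queue array, the QUEUE_EMPTY value and the dispatch() helper are stand-ins invented for the illustration.

    #include <stdio.h>
    #include <stdint.h>

    #define QUEUE_EMPTY 0xffffffffu   /* stand-in for IOPMU_QUEUE_EMPTY */

    /* Simulated outbound queue: a fixed list of completed request offsets
     * ending with the EMPTY sentinel. In the driver each value would come
     * from readl(&hba->iop->outbound_queue). */
    static uint32_t fake_queue[] = { 0x20, 0x40, 0x60, QUEUE_EMPTY };
    static unsigned int fake_pos;

    static uint32_t read_outbound_queue(void)
    {
        return fake_queue[fake_pos++];
    }

    static void dispatch(uint32_t tag)
    {
        printf("completed request, tag/offset 0x%x\n", (unsigned)tag);
    }

    static void drain_outbound_queue(void)
    {
        uint32_t req;

        /* Keep popping completions until the hardware reports "empty". */
        while ((req = read_outbound_queue()) != QUEUE_EMPTY)
            dispatch(req);
    }

    int main(void)
    {
        drain_outbound_queue();
        return 0;
    }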
112 static int __iop_intr(struct hptiop_hba *hba)
114 struct hpt_iopmu __iomem *iop = hba->iop;
124 hptiop_message_callback(hba, msg);
129 hptiop_drain_outbound_queue(hba);
136 static int iop_send_sync_request(struct hptiop_hba *hba,
147 writel((unsigned long)req - (unsigned long)hba->iop,
148 &hba->iop->inbound_queue);
150 hptiop_pci_posting_flush(hba->iop);
153 __iop_intr(hba);
162 static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
166 hba->msg_done = 0;
168 writel(msg, &hba->iop->inbound_msgaddr0);
170 hptiop_pci_posting_flush(hba->iop);
173 spin_lock_irq(hba->host->host_lock);
174 __iop_intr(hba);
175 spin_unlock_irq(hba->host->host_lock);
176 if (hba->msg_done)
181 return hba->msg_done? 0 : -1;
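iop_send_sync_msg (source lines 162-181) posts a message to inbound_msgaddr0, flushes PCI posting, then repeatedly calls __iop_intr under the host lock until hba->msg_done is set or the millisecond budget runs out, returning 0 on success and -1 on timeout. The sketch below models that poll-with-timeout shape in plain C; poll_interrupt(), the done flag and the fake three-poll completion are assumptions made for the demo and do not reflect the controller's real behaviour.

    #include <stdio.h>

    static int msg_done;
    static int polls;

    /* Stand-in for __iop_intr(): pretend the controller acknowledges the
     * message after a few polls by setting the done flag. */
    static void poll_interrupt(void)
    {
        if (++polls >= 3)
            msg_done = 1;
    }

    /* Returns 0 on success, -1 if the message was not acknowledged within
     * roughly 'millisec' iterations (one poll per "millisecond" here). */
    static int send_sync_msg(unsigned int msg, unsigned int millisec)
    {
        unsigned int i;

        msg_done = 0;
        printf("posting message 0x%x\n", msg);   /* writel(msg, ...) in the driver */

        for (i = 0; i < millisec; i++) {
            poll_interrupt();                    /* __iop_intr() under host_lock */
            if (msg_done)
                break;
            /* the driver sleeps about 1 ms here between polls */
        }

        return msg_done ? 0 : -1;
    }

    int main(void)
    {
        printf("result: %d\n", send_sync_msg(0x5, 10));
        return 0;
    }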
184 static int iop_get_config(struct hptiop_hba *hba,
190 req32 = readl(&hba->iop->inbound_queue);
195 ((unsigned long)hba->iop + req32);
202 if (iop_send_sync_request(hba, req, 20000)) {
208 writel(req32, &hba->iop->outbound_queue);
212 static int iop_set_config(struct hptiop_hba *hba,
218 req32 = readl(&hba->iop->inbound_queue);
223 ((unsigned long)hba->iop + req32);
235 if (iop_send_sync_request(hba, req, 20000)) {
240 writel(req32, &hba->iop->outbound_queue);
244 static int hptiop_initialize_iop(struct hptiop_hba *hba)
246 struct hpt_iopmu __iomem *iop = hba->iop;
252 hba->initialized = 1;
255 if (iop_send_sync_msg(hba,
258 hba->host->host_no);
264 static int hptiop_map_pci_bar(struct hptiop_hba *hba)
268 struct pci_dev *pcidev = hba->pcidev;
272 hba->host->host_no);
282 hba->host->host_no);
286 hba->iop = mem_base_virt;
287 dprintk("hptiop_map_pci_bar: iop=%p\n", hba->iop);
291 static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
295 if (!hba->initialized)
299 atomic_set(&hba->resetting, 0);
300 wake_up(&hba->reset_wq);
303 hba->msg_done = 1;
306 static inline struct hptiop_request *get_req(struct hptiop_hba *hba)
310 dprintk("get_req : req=%p\n", hba->req_list);
312 ret = hba->req_list;
314 hba->req_list = ret->next;
319 static inline void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
322 req->next = hba->req_list;
323 hba->req_list = req;
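get_req and free_req (source lines 306-323) manage the adapter's request slots as a singly linked free list threaded through the slots themselves: get_req pops the list head, free_req pushes a slot back on. Here is a self-contained sketch of that allocator; struct request_slot and the pool size are simplified stand-ins for the driver's struct hptiop_request array.

    #include <stdio.h>
    #include <stddef.h>

    #define NR_REQUESTS 4

    struct request_slot {
        struct request_slot *next;
        int index;
    };

    static struct request_slot pool[NR_REQUESTS];
    static struct request_slot *req_list;   /* head of the free list */

    static struct request_slot *get_req(void)
    {
        struct request_slot *ret = req_list;

        if (ret)
            req_list = ret->next;   /* pop the head */
        return ret;
    }

    static void free_req(struct request_slot *req)
    {
        req->next = req_list;       /* push back onto the head */
        req_list = req;
    }

    int main(void)
    {
        int i;

        /* Build the free list the same way the probe routine does:
         * by freeing every slot once. */
        for (i = 0; i < NR_REQUESTS; i++) {
            pool[i].index = i;
            free_req(&pool[i]);
        }

        struct request_slot *a = get_req();
        struct request_slot *b = get_req();
        printf("got slots %d and %d\n", a->index, b->index);
        free_req(a);
        free_req(b);
        return 0;
    }

Pushing freed slots onto the head keeps both operations O(1) and needs no separate bookkeeping beyond the single req_list pointer.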
326 static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag)
331 req = (struct hpt_iop_request_scsi_command *)hba->reqs[tag].req_virt;
340 scp = hba->reqs[tag].scp;
344 pci_unmap_sg(hba->pcidev,
350 pci_unmap_single(hba->pcidev,
392 free_req(hba, &hba->reqs[tag]);
395 void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag)
402 ((unsigned long)hba->iop + tag);
431 writel(tag, &hba->iop->outbound_queue);
436 struct hptiop_hba *hba = dev_id;
440 spin_lock_irqsave(hba->host->host_lock, flags);
441 handled = __iop_intr(hba);
442 spin_unlock_irqrestore(hba->host->host_lock, flags);
450 struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
460 HPT_SCP(scp)->sgcnt = pci_map_sg(hba->pcidev,
464 BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);
477 hba->pcidev,
494 struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
502 _req = get_req(hba);
523 scp->device->id > hba->max_devices) {
525 free_req(hba, _req);
555 &hba->iop->inbound_queue);
570 static int hptiop_reset_hba(struct hptiop_hba *hba)
572 if (atomic_xchg(&hba->resetting, 1) == 0) {
573 atomic_inc(&hba->reset_count);
575 &hba->iop->inbound_msgaddr0);
576 hptiop_pci_posting_flush(hba->iop);
579 wait_event_timeout(hba->reset_wq,
580 atomic_read(&hba->resetting) == 0, 60 * HZ);
582 if (atomic_read(&hba->resetting)) {
584 printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
588 if (iop_send_sync_msg(hba,
591 hba->host->host_no);
600 struct hptiop_hba * hba = (struct hptiop_hba *)host->hostdata;
606 return hptiop_reset_hba(hba)? FAILED : SUCCESS;
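hptiop_reset_hba (source lines 570-591) guards the reset path with atomic_xchg(&hba->resetting, 1), so only the first caller actually posts the reset message, and every caller then waits on reset_wq until hptiop_message_callback clears the flag. The following sketch shows the same exchange-based "only one resetter" guard using C11 atomics; begin_reset() and reset_done() are invented names, and the wait-queue sleep/wake pairing is only hinted at in comments.

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int resetting;
    static atomic_int reset_count;

    /* Returns 1 if this caller started the reset, 0 if one was already
     * in flight (mirrors the atomic_xchg(&hba->resetting, 1) == 0 test). */
    static int begin_reset(void)
    {
        if (atomic_exchange(&resetting, 1) == 0) {
            atomic_fetch_add(&reset_count, 1);
            /* driver: post the reset message, then wait on reset_wq */
            return 1;
        }
        return 0;
    }

    /* Called when the "reset done" message arrives. */
    static void reset_done(void)
    {
        atomic_store(&resetting, 0);
        /* driver: wake_up(&hba->reset_wq) */
    }

    int main(void)
    {
        printf("first caller starts reset: %d\n", begin_reset());
        printf("second caller starts reset: %d\n", begin_reset());
        reset_done();
        printf("after completion, a new reset starts: %d\n", begin_reset());
        return 0;
    }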
626 struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
629 hba->firmware_version >> 24,
630 (hba->firmware_version >> 16) & 0xff,
631 (hba->firmware_version >> 8) & 0xff,
632 hba->firmware_version & 0xff);
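The proc-info fragment (source lines 626-632) prints hba->firmware_version by splitting the 32-bit value into four bytes, most significant first. The same shift-and-mask decoding, on a made-up sample value:

    #include <stdio.h>

    int main(void)
    {
        unsigned int firmware_version = 0x01020304;   /* sample value only */

        /* Same byte extraction as the driver's proc output. */
        printf("firmware version %u.%u.%u.%u\n",
               firmware_version >> 24,
               (firmware_version >> 16) & 0xff,
               (firmware_version >> 8) & 0xff,
               firmware_version & 0xff);
        return 0;
    }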
677 struct hptiop_hba *hba;
716 hba = (struct hptiop_hba *)host->hostdata;
718 hba->pcidev = pcidev;
719 hba->host = host;
720 hba->initialized = 0;
722 atomic_set(&hba->resetting, 0);
723 atomic_set(&hba->reset_count, 0);
725 init_waitqueue_head(&hba->reset_wq);
726 init_waitqueue_head(&hba->ioctl_wq);
734 if (hptiop_map_pci_bar(hba))
737 if (iop_wait_ready(hba->iop, 20000)) {
739 hba->host->host_no);
743 if (iop_get_config(hba, &iop_config)) {
745 hba->host->host_no);
749 hba->max_requests = min(le32_to_cpu(iop_config.max_requests),
751 hba->max_devices = le32_to_cpu(iop_config.max_devices);
752 hba->max_request_size = le32_to_cpu(iop_config.request_size);
753 hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count);
754 hba->firmware_version = le32_to_cpu(iop_config.firmware_version);
755 hba->sdram_size = le32_to_cpu(iop_config.sdram_size);
767 if (iop_set_config(hba, &set_config)) {
769 hba->host->host_no);
776 driver_name, hba)) {
778 hba->host->host_no, pcidev->irq);
784 + sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1);
788 dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);
790 hba->req_size = req_size;
792 hba->req_size*hba->max_requests + 0x20,
797 hba->host->host_no);
801 hba->dma_coherent = start_virt;
802 hba->dma_coherent_handle = start_phy;
811 hba->req_list = start_virt;
812 for (i = 0; i < hba->max_requests; i++) {
813 hba->reqs[i].next = NULL;
814 hba->reqs[i].req_virt = start_virt;
815 hba->reqs[i].req_shifted_phy = start_phy >> 5;
816 hba->reqs[i].index = i;
817 free_req(hba, &hba->reqs[i]);
818 start_virt = (char *)start_virt + hba->req_size;
819 start_phy = start_phy + hba->req_size;
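In the probe path (source lines 811-819) the driver carves one dma_alloc_coherent region into max_requests fixed-size slots, recording each slot's virtual address and its physical address shifted right by 5 (the IOP addresses requests in 32-byte units, which is also why the allocation is padded by 0x20), then pushes every slot onto the free list. A simplified sketch of that carving loop follows; the fake physical base, the sample request size and the explicit 32-byte round-up are assumptions made for the illustration.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    #define MAX_REQUESTS 4
    #define REQ_SIZE     96            /* sample request size, a multiple of 32 */

    struct request_slot {
        struct request_slot *next;
        void *req_virt;
        uint32_t req_shifted_phy;      /* physical address >> 5 (32-byte units) */
        int index;
    };

    static struct request_slot reqs[MAX_REQUESTS];
    static struct request_slot *req_list;

    static void free_req(struct request_slot *req)
    {
        req->next = req_list;
        req_list = req;
    }

    int main(void)
    {
        /* One contiguous buffer standing in for the dma_alloc_coherent
         * region; the "physical" base is a made-up number for the demo. */
        char *base = malloc(REQ_SIZE * MAX_REQUESTS + 0x20);
        char *start_virt = base;
        uint64_t start_phy = 0x10000001ull;
        int i;

        /* Round the base up to a 32-byte boundary so phys >> 5 is exact. */
        uint64_t offset = ((start_phy + 0x1f) & ~0x1full) - start_phy;
        start_virt += offset;
        start_phy += offset;

        for (i = 0; i < MAX_REQUESTS; i++) {
            reqs[i].req_virt = start_virt;
            reqs[i].req_shifted_phy = (uint32_t)(start_phy >> 5);
            reqs[i].index = i;
            free_req(&reqs[i]);
            start_virt += REQ_SIZE;
            start_phy += REQ_SIZE;
            printf("slot %d: virt=%p shifted_phy=0x%x\n",
                   i, reqs[i].req_virt, (unsigned)reqs[i].req_shifted_phy);
        }

        free(base);
        return 0;
    }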
823 if (hptiop_initialize_iop(hba))
828 hba->host->host_no);
835 dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no);
839 dma_free_coherent(&hba->pcidev->dev,
840 hba->req_size*hba->max_requests + 0x20,
841 hba->dma_coherent, hba->dma_coherent_handle);
844 free_irq(hba->pcidev->irq, hba);
847 iounmap(hba->iop);
865 struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
866 struct hpt_iopmu __iomem *iop = hba->iop;
869 dprintk("hptiop_shutdown(%p)\n", hba);
872 if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
874 hba->host->host_no);
887 struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
889 dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);
895 free_irq(hba->pcidev->irq, hba);
897 dma_free_coherent(&hba->pcidev->dev,
898 hba->req_size * hba->max_requests + 0x20,
899 hba->dma_coherent,
900 hba->dma_coherent_handle);
902 iounmap(hba->iop);
904 pci_release_regions(hba->pcidev);
905 pci_set_drvdata(hba->pcidev, NULL);
906 pci_disable_device(hba->pcidev);