Lines matching refs:hba (cross-reference hits for the per-adapter hba pointer in the FreeBSD hptiop(4) driver, sys/dev/hptiop/hptiop.c)

80 static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
82 static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
84 static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
85 static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
87 static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
88 static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
90 static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
92 static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
94 static int hptiop_rescan_bus(struct hpt_iop_hba *hba);
95 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
96 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
97 static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba);
98 static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
100 static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
102 static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
104 static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
106 static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
108 static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
110 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
111 static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba);
112 static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba);
113 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
114 static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba);
115 static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
117 static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
120 static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
123 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
126 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
129 static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
132 static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
133 static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
134 static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg);
135 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
136 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
137 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba);
138 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
139 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
140 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba);
141 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
142 static int hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
152 static void hptiop_release_resource(struct hpt_iop_hba *hba);
169 #define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
170 hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
171 #define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
172 hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))
174 #define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
175 hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value)
176 #define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
177 hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
178 #define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
179 hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value)
180 #define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
181 hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))
183 #define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\
184 hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), value)
185 #define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\
186 hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset))
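
The BUS_SPACE_* macros above avoid magic register offsets: each register is named once in a memory-map struct (hpt_iopmu_itl, hpt_iopmv_regs, hpt_iopmu_mv, hpt_iopmu_mvfrey) and offsetof() derives the byte offset at compile time. A minimal userland sketch of the same idiom; the demo_regs layout and the mmio_write32() stand-in are hypothetical, not the driver's:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical register layout, standing in for struct hpt_iopmu_itl. */
struct demo_regs {
    uint32_t inbound_msgaddr0;      /* +0x00 */
    uint32_t outbound_msgaddr0;     /* +0x04 */
    uint32_t outbound_intstatus;    /* +0x08 */
};

/* Userland stand-in for bus_space_write_4(); here it only logs. */
static void
mmio_write32(size_t off, uint32_t val)
{
    printf("write32 @ +0x%02zx = 0x%08x\n", off, val);
}

/* Same shape as BUS_SPACE_WRT4_ITL(): name the register, not the offset. */
#define DEMO_WRT4(reg, val) \
    mmio_write32(offsetof(struct demo_regs, reg), (val))

int
main(void)
{
    DEMO_WRT4(outbound_intstatus, 0xffffffffu);
    return (0);
}
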
191 struct hpt_iop_hba *hba = hba_from_dev(dev);
193 if (hba == NULL)
195 if (hba->flag & HPT_IOCTL_FLAG_OPEN)
197 hba->flag |= HPT_IOCTL_FLAG_OPEN;
204 struct hpt_iop_hba *hba = hba_from_dev(dev);
205 hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN;
213 struct hpt_iop_hba *hba = hba_from_dev(dev);
219 ret = hba->ops->do_ioctl(hba,
223 ret = hptiop_rescan_bus(hba);
232 static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba)
239 bus_space_read_region_4(hba->bar2t, hba->bar2h,
255 static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba)
263 bus_space_write_region_4(hba->bar2t, hba->bar2h,
270 static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg)
276 static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg)
285 static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg)
291 static int hptiop_wait_ready_itl(struct hpt_iop_hba * hba, u_int32_t millisec)
312 static int hptiop_wait_ready_mv(struct hpt_iop_hba * hba, u_int32_t millisec)
314 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
320 static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba * hba,
323 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
329 static void hptiop_request_callback_itl(struct hpt_iop_hba * hba,
340 if (hba->firmware_version > 0x01020000 ||
341 hba->interface_version > 0x01020000) {
342 srb = hba->srb[index & ~(u_int32_t)
351 srb = hba->srb[index &
361 temp = bus_space_read_4(hba->bar0t, hba->bar0h, index +
363 result = bus_space_read_4(hba->bar0t, hba->bar0h, index +
369 bus_space_write_region_4(hba->bar0t, hba->bar0h, index +
372 wakeup((void *)((unsigned long)hba->u.itl.mu + index));
377 bus_space_read_region_4(hba->bar0t, hba->bar0h, index +
381 dxfer = bus_space_read_4(hba->bar0t, hba->bar0h,
400 bus_dmamap_sync(hba->io_dmat,
402 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
405 bus_dmamap_sync(hba->io_dmat,
407 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
438 bus_space_read_region_1(hba->bar0t, hba->bar0h,
460 hptiop_free_srb(hba, srb);
466 static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba)
472 hptiop_request_callback_itl(hba, req);
477 ((char *)hba->u.itl.mu + req);
478 temp = bus_space_read_4(hba->bar0t,
479 hba->bar0h, req +
484 bus_space_read_region_4(hba->bar0t,
485 hba->bar0h, req +
490 hptiop_request_callback_itl(hba, req);
493 bus_space_write_region_4(hba->bar0t,
494 hba->bar0h, req +
500 hptiop_request_callback_itl(hba, req);
505 static int hptiop_intr_itl(struct hpt_iop_hba * hba)
516 hptiop_os_message_callback(hba, msg);
521 hptiop_drain_outbound_queue_itl(hba);
528 static void hptiop_request_callback_mv(struct hpt_iop_hba * hba,
539 srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT];
558 bus_dmamap_sync(hba->io_dmat,
560 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
563 bus_dmamap_sync(hba->io_dmat,
565 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
606 hptiop_free_srb(hba, srb);
609 struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr;
611 hba->config_done = 1;
613 hba->config_done = -1;
618 hba->config_done = 1;
620 device_printf(hba->pcidev, "wrong callback type\n");
624 static void hptiop_request_callback_mvfrey(struct hpt_iop_hba * hba,
637 hba->config_done = 1;
641 srb = hba->srb[(_tag >> 4) & 0xff];
665 bus_dmamap_sync(hba->io_dmat,
667 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
670 bus_dmamap_sync(hba->io_dmat,
672 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
713 hptiop_free_srb(hba, srb);
718 hba->config_done = 1;
720 hba->config_done = -1;
721 wakeup((struct hpt_iop_request_ioctl_command *)hba->ctlcfg_ptr);
724 device_printf(hba->pcidev, "wrong callback type\n");
729 static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba)
733 while ((req = hptiop_mv_outbound_read(hba))) {
736 hptiop_request_callback_mv(hba, req);
742 static int hptiop_intr_mv(struct hpt_iop_hba * hba)
755 hptiop_os_message_callback(hba, msg);
760 hptiop_drain_outbound_queue_mv(hba);
767 static int hptiop_intr_mvfrey(struct hpt_iop_hba * hba)
772 if (hba->initialized) {
781 hptiop_os_message_callback(hba, msg);
790 cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
791 while (hba->u.mvfrey.outlist_rptr != cptr) {
792 hba->u.mvfrey.outlist_rptr++;
793 if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count) {
794 hba->u.mvfrey.outlist_rptr = 0;
797 _tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val;
798 hptiop_request_callback_mvfrey(hba, _tag);
801 } while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
804 if (hba->initialized) {
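
The mvfrey path above drains a firmware-owned completion ring: the host chases the counter published in *outlist_cptr with its private outlist_rptr, wrapping at list_count, while the inbound side (lines 875-887 and again at 1332-1344 and 2619-2631) bumps inlist_wptr and XORs in CL_POINTER_TOGGLE on wrap so the firmware can tell a full ring from an empty one. A compressed userland sketch of both pointer updates, with an illustrative ring size and toggle value rather than the hardware's (the wrap also resets the slot index to 0, a detail that falls outside the refs:hba matches):

#include <stdint.h>
#include <stdio.h>

#define LIST_COUNT  8           /* illustrative, not the hardware's */
#define PTR_MASK    0x3fff
#define TOGGLE      0x4000      /* plays the role of CL_POINTER_TOGGLE */

static uint32_t inlist_wptr;    /* producer: host -> firmware */
static uint32_t outlist_rptr;   /* consumer: firmware -> host */

/* Post one slot: bump the write pointer, wrap and flip the toggle bit. */
static uint32_t
ring_post(void)
{
    uint32_t index;

    inlist_wptr++;
    index = inlist_wptr & PTR_MASK;
    if (index == LIST_COUNT) {
        inlist_wptr &= ~PTR_MASK;   /* index back to 0 */
        inlist_wptr ^= TOGGLE;      /* record the wrap */
        index = 0;
    }
    return (index);                 /* slot to fill */
}

/* Drain completions up to the producer's published counter. */
static void
ring_drain(uint32_t cptr)
{
    while (outlist_rptr != cptr) {
        outlist_rptr++;
        if (outlist_rptr == LIST_COUNT)
            outlist_rptr = 0;
        printf("complete slot %u\n", (unsigned)outlist_rptr);
    }
}

int
main(void)
{
    int i;

    for (i = 0; i < 10; i++)
        printf("post -> slot %u\n", (unsigned)ring_post());
    ring_drain(3);
    return (0);
}
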
811 static int hptiop_send_sync_request_itl(struct hpt_iop_hba * hba,
821 hptiop_intr_itl(hba);
822 bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
833 static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba,
838 hba->config_done = 0;
840 phy_addr = hba->ctlcfgcmd_phy |
845 hptiop_mv_inbound_write(phy_addr, hba);
849 hptiop_intr_mv(hba);
850 if (hba->config_done)
857 static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba *hba,
865 hba->config_done = 0;
867 phy_addr = hba->ctlcfgcmd_phy;
875 hba->u.mvfrey.inlist_wptr++;
876 index = hba->u.mvfrey.inlist_wptr & 0x3fff;
878 if (index == hba->u.mvfrey.list_count) {
880 hba->u.mvfrey.inlist_wptr &= ~0x3fff;
881 hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
884 hba->u.mvfrey.inlist[index].addr = phy_addr;
885 hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
887 BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
891 hptiop_intr_mvfrey(hba);
892 if (hba->config_done)
899 static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
904 hba->msg_done = 0;
905 hba->ops->post_msg(hba, msg);
908 hba->ops->iop_intr(hba);
909 if (hba->msg_done)
914 return hba->msg_done ? 0 : -1;
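
hptiop_send_sync_msg() (lines 899-914) makes the asynchronous message mailbox synchronous: clear msg_done, post the message, then poll the interrupt handler once per millisecond until the completion callback flips the flag or the budget expires. A userland sketch of that shape, with hypothetical post_msg()/poll_intr() stand-ins for the hba->ops hooks:

#include <stdio.h>
#include <unistd.h>

static volatile int msg_done;

/* Hypothetical stand-ins for the hba->ops->post_msg/iop_intr hooks. */
static void
post_msg(unsigned msg)
{
    (void)msg;              /* a real driver rings the doorbell here */
}

static void
poll_intr(void)
{
    static int calls;

    if (++calls == 5)       /* pretend the IOP answers on poll 5 */
        msg_done = 1;
}

/* Same shape as hptiop_send_sync_msg(): 0 on completion, -1 on timeout. */
static int
send_sync_msg(unsigned msg, unsigned millisec)
{
    unsigned i;

    msg_done = 0;
    post_msg(msg);
    for (i = 0; i < millisec && !msg_done; i++) {
        poll_intr();        /* driver: hba->ops->iop_intr(hba) */
        usleep(1000);       /* driver: DELAY(1000) */
    }
    return (msg_done ? 0 : -1);
}

int
main(void)
{
    printf("send_sync_msg -> %d\n", send_sync_msg(0, 100));
    return (0);
}
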
917 static int hptiop_get_config_itl(struct hpt_iop_hba * hba,
932 bus_space_write_region_4(hba->bar0t, hba->bar0h,
936 if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
941 bus_space_read_region_4(hba->bar0t, hba->bar0h,
950 static int hptiop_get_config_mv(struct hpt_iop_hba * hba,
955 if (!(req = hba->ctlcfg_ptr))
964 if (hptiop_send_sync_request_mv(hba, req, 20000)) {
973 static int hptiop_get_config_mvfrey(struct hpt_iop_hba * hba,
976 struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;
1004 static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
1020 bus_space_write_region_4(hba->bar0t, hba->bar0h, req32,
1024 if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
1034 static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
1039 if (!(req = hba->ctlcfg_ptr))
1053 if (hptiop_send_sync_request_mv(hba, req, 20000)) {
1061 static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
1066 if (!(req = hba->ctlcfg_ptr))
1078 if (hptiop_send_sync_request_mvfrey(hba, req, 20000)) {
1086 static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
1094 (hba->max_request_size -
1096 device_printf(hba->pcidev, "request size beyond max value\n");
1105 req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu;
1111 bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req,
1114 hptiop_lock_adapter(hba);
1119 bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
1123 if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32),
1126 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1127 bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
1133 hptiop_unlock_adapter(hba);
1137 static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus,
1146 bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
1152 static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus,
1159 byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i);
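
hptiop_bus_space_copyin()/copyout() (lines 1137-1159) move ioctl payloads between user memory and the BAR0 request window one byte at a time, which sidesteps any alignment or access-width assumptions about the window; these are cold ioctl paths, so the byte loop costs nothing that matters. A userland sketch with a plain array standing in for the device window (fake_bar and window_copyin are hypothetical names):

#include <stdint.h>
#include <stdio.h>

/* A plain array stands in for the BAR0 window (device memory in the driver). */
static volatile uint8_t fake_bar[64];

/*
 * Same shape as hptiop_bus_space_copyin(): one byte per iteration, so no
 * alignment or access-width assumptions are made about the window.
 */
static int
window_copyin(const void *src, uint32_t bus, size_t size)
{
    const uint8_t *p = src;
    size_t i;

    for (i = 0; i < size; i++)
        fake_bar[bus + i] = p[i];   /* driver: bus_space_write_1() */
    return (0);
}

int
main(void)
{
    window_copyin("hello", 8, 5);
    printf("%c%c%c%c%c\n", fake_bar[8], fake_bar[9], fake_bar[10],
        fake_bar[11], fake_bar[12]);
    return (0);
}
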
1167 static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
1182 if (hptiop_bus_space_copyin(hba, req32 +
1187 if (hptiop_post_ioctl_command_itl(hba, req32, pParams))
1190 result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 +
1196 if (hptiop_bus_space_copyout(hba, req32 +
1203 if (hptiop_bus_space_copyout(hba, req32 +
1220 static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
1228 (hba->max_request_size -
1230 device_printf(hba->pcidev, "request size beyond max value\n");
1245 req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
1246 hptiop_mv_inbound_write(req_phy, hba);
1250 while (hba->config_done == 0) {
1251 if (hptiop_sleep(hba, req, PPAUSE,
1254 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1259 static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
1268 req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
1269 hba->config_done = 0;
1270 hptiop_lock_adapter(hba);
1275 if (hptiop_post_ioctl_command_mv(hba, req, pParams))
1278 if (hba->config_done == 1) {
1291 hptiop_unlock_adapter(hba);
1295 hptiop_unlock_adapter(hba);
1300 static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
1307 phy_addr = hba->ctlcfgcmd_phy;
1310 (hba->max_request_size -
1312 device_printf(hba->pcidev, "request size beyond max value\n");
1332 hba->u.mvfrey.inlist_wptr++;
1333 index = hba->u.mvfrey.inlist_wptr & 0x3fff;
1335 if (index == hba->u.mvfrey.list_count) {
1337 hba->u.mvfrey.inlist_wptr &= ~0x3fff;
1338 hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
1341 hba->u.mvfrey.inlist[index].addr = phy_addr;
1342 hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
1344 BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
1347 while (hba->config_done == 0) {
1348 if (hptiop_sleep(hba, req, PPAUSE,
1351 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1356 static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
1365 req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
1366 hba->config_done = 0;
1367 hptiop_lock_adapter(hba);
1372 if (hptiop_post_ioctl_command_mvfrey(hba, req, pParams))
1375 if (hba->config_done == 1) {
1388 hptiop_unlock_adapter(hba);
1392 hptiop_unlock_adapter(hba);
1397 static int hptiop_rescan_bus(struct hpt_iop_hba * hba)
1403 if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(hba->sim),
1417 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
1419 hba->bar0_rid = 0x10;
1420 hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1421 SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1423 if (hba->bar0_res == NULL) {
1424 device_printf(hba->pcidev,
1428 hba->bar0t = rman_get_bustag(hba->bar0_res);
1429 hba->bar0h = rman_get_bushandle(hba->bar0_res);
1430 hba->u.itl.mu = (struct hpt_iopmu_itl *)
1431 rman_get_virtual(hba->bar0_res);
1433 if (!hba->u.itl.mu) {
1434 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1435 hba->bar0_rid, hba->bar0_res);
1436 device_printf(hba->pcidev, "alloc mem res failed\n");
1443 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
1445 hba->bar0_rid = 0x10;
1446 hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1447 SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1449 if (hba->bar0_res == NULL) {
1450 device_printf(hba->pcidev, "failed to get iop bar0.\n");
1453 hba->bar0t = rman_get_bustag(hba->bar0_res);
1454 hba->bar0h = rman_get_bushandle(hba->bar0_res);
1455 hba->u.mv.regs = (struct hpt_iopmv_regs *)
1456 rman_get_virtual(hba->bar0_res);
1458 if (!hba->u.mv.regs) {
1459 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1460 hba->bar0_rid, hba->bar0_res);
1461 device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
1465 hba->bar2_rid = 0x18;
1466 hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
1467 SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
1469 if (hba->bar2_res == NULL) {
1470 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1471 hba->bar0_rid, hba->bar0_res);
1472 device_printf(hba->pcidev, "failed to get iop bar2.\n");
1476 hba->bar2t = rman_get_bustag(hba->bar2_res);
1477 hba->bar2h = rman_get_bushandle(hba->bar2_res);
1478 hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res);
1480 if (!hba->u.mv.mu) {
1481 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1482 hba->bar0_rid, hba->bar0_res);
1483 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1484 hba->bar2_rid, hba->bar2_res);
1485 device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
1492 static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba)
1494 hba->bar0_rid = 0x10;
1495 hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1496 SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1498 if (hba->bar0_res == NULL) {
1499 device_printf(hba->pcidev, "failed to get iop bar0.\n");
1502 hba->bar0t = rman_get_bustag(hba->bar0_res);
1503 hba->bar0h = rman_get_bushandle(hba->bar0_res);
1504 hba->u.mvfrey.config = (struct hpt_iop_request_get_config *)
1505 rman_get_virtual(hba->bar0_res);
1507 if (!hba->u.mvfrey.config) {
1508 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1509 hba->bar0_rid, hba->bar0_res);
1510 device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
1514 hba->bar2_rid = 0x18;
1515 hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
1516 SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
1518 if (hba->bar2_res == NULL) {
1519 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1520 hba->bar0_rid, hba->bar0_res);
1521 device_printf(hba->pcidev, "failed to get iop bar2.\n");
1525 hba->bar2t = rman_get_bustag(hba->bar2_res);
1526 hba->bar2h = rman_get_bushandle(hba->bar2_res);
1527 hba->u.mvfrey.mu =
1528 (struct hpt_iopmu_mvfrey *)rman_get_virtual(hba->bar2_res);
1530 if (!hba->u.mvfrey.mu) {
1531 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1532 hba->bar0_rid, hba->bar0_res);
1533 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1534 hba->bar2_rid, hba->bar2_res);
1535 device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
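
All three alloc_pci_res variants follow the same unwind discipline: each later failure releases every resource acquired earlier before returning, so the caller never sees a half-initialized softc. A minimal sketch of the pattern, with malloc()/free() standing in for bus_alloc_resource_any()/bus_release_resource():

#include <stdio.h>
#include <stdlib.h>

/* malloc()/free() stand in for the bus resource alloc/release calls. */
static void *bar0, *bar2;

static int
alloc_pci_res_demo(void)
{
    bar0 = malloc(64);
    if (bar0 == NULL) {
        fprintf(stderr, "failed to get iop bar0\n");
        return (-1);
    }
    bar2 = malloc(64);
    if (bar2 == NULL) {
        /* Release everything acquired so far before failing. */
        free(bar0);
        bar0 = NULL;
        fprintf(stderr, "failed to get iop bar2\n");
        return (-1);
    }
    return (0);
}

int
main(void)
{
    printf("alloc -> %d\n", alloc_pci_res_demo());
    free(bar2);
    free(bar0);
    return (0);
}
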
1542 static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
1544 if (hba->bar0_res)
1545 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1546 hba->bar0_rid, hba->bar0_res);
1549 static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
1551 if (hba->bar0_res)
1552 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1553 hba->bar0_rid, hba->bar0_res);
1554 if (hba->bar2_res)
1555 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1556 hba->bar2_rid, hba->bar2_res);
1559 static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba *hba)
1561 if (hba->bar0_res)
1562 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1563 hba->bar0_rid, hba->bar0_res);
1564 if (hba->bar2_res)
1565 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1566 hba->bar2_rid, hba->bar2_res);
1569 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
1571 if (bus_dma_tag_create(hba->parent_dmat,
1583 &hba->ctlcfg_dmat)) {
1584 device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
1588 if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
1590 &hba->ctlcfg_dmamap) != 0) {
1591 device_printf(hba->pcidev,
1593 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1597 if (bus_dmamap_load(hba->ctlcfg_dmat,
1598 hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
1600 hptiop_mv_map_ctlcfg, hba, 0)) {
1601 device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
1602 if (hba->ctlcfg_dmat) {
1603 bus_dmamem_free(hba->ctlcfg_dmat,
1604 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1605 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1613 static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba)
1623 hba->u.mvfrey.list_count = list_count;
1624 hba->u.mvfrey.internal_mem_size = 0x800
1628 if (bus_dma_tag_create(hba->parent_dmat,
1634 hba->u.mvfrey.internal_mem_size,
1640 &hba->ctlcfg_dmat)) {
1641 device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
1645 if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
1647 &hba->ctlcfg_dmamap) != 0) {
1648 device_printf(hba->pcidev,
1650 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1654 if (bus_dmamap_load(hba->ctlcfg_dmat,
1655 hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
1656 hba->u.mvfrey.internal_mem_size,
1657 hptiop_mvfrey_map_ctlcfg, hba, 0)) {
1658 device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
1659 if (hba->ctlcfg_dmat) {
1660 bus_dmamem_free(hba->ctlcfg_dmat,
1661 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1662 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1670 static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba) {
1674 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
1676 if (hba->ctlcfg_dmat) {
1677 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
1678 bus_dmamem_free(hba->ctlcfg_dmat,
1679 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1680 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1686 static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba)
1688 if (hba->ctlcfg_dmat) {
1689 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
1690 bus_dmamem_free(hba->ctlcfg_dmat,
1691 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1692 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1698 static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba *hba)
1702 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
1711 hba->u.mvfrey.inlist_phy & 0xffffffff);
1713 (hba->u.mvfrey.inlist_phy >> 16) >> 16);
1716 hba->u.mvfrey.outlist_phy & 0xffffffff);
1718 (hba->u.mvfrey.outlist_phy >> 16) >> 16);
1721 hba->u.mvfrey.outlist_cptr_phy & 0xffffffff);
1723 (hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16);
1725 hba->u.mvfrey.inlist_wptr = (hba->u.mvfrey.list_count - 1)
1727 *hba->u.mvfrey.outlist_cptr = (hba->u.mvfrey.list_count - 1)
1729 hba->u.mvfrey.outlist_rptr = hba->u.mvfrey.list_count - 1;
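
hptiop_reset_comm_mvfrey() (lines 1698-1729) programs each 64-bit list base as two 32-bit register writes; note the (phy >> 16) >> 16 form for the high half, which equals phy >> 32 for a 64-bit operand but stays well defined even if the expression were only 32 bits wide, where a single shift by 32 is undefined behavior in C. A small sketch of the split:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint64_t phy = 0x123456789abcdef0ULL;   /* pretend DMA list base */

    /*
     * Low and high 32-bit halves for two register writes.  For a
     * 64-bit operand (x >> 16) >> 16 equals x >> 32, but it remains
     * well defined even where the operand is only 32 bits wide and a
     * single shift by 32 would be undefined behavior.
     */
    uint32_t lo = (uint32_t)(phy & 0xffffffff);
    uint32_t hi = (uint32_t)((phy >> 16) >> 16);

    printf("lo=0x%08x hi=0x%08x\n", lo, hi);
    return (0);
}
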
1811 struct hpt_iop_hba *hba;
1870 hba = (struct hpt_iop_hba *)device_get_softc(dev);
1871 bzero(hba, sizeof(struct hpt_iop_hba));
1872 hba->ops = ops;
1874 KdPrint(("hba->ops=%p\n", hba->ops));
1880 struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
1893 pci_get_function(dev), hba->ops));
1896 hba->pcidev = dev;
1897 hba->pciunit = unit;
1899 if (hba->ops->alloc_pci_res(hba))
1902 if (hba->ops->iop_wait_ready(hba, 2000)) {
1907 mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF);
1921 &hba->parent_dmat /* tag */))
1927 if (hba->ops->family == MV_BASED_IOP) {
1928 if (hba->ops->internal_memalloc(hba)) {
1934 if (hba->ops->get_config(hba, &iop_config)) {
1939 hba->firmware_version = iop_config.firmware_version;
1940 hba->interface_version = iop_config.interface_version;
1941 hba->max_requests = iop_config.max_requests;
1942 hba->max_devices = iop_config.max_devices;
1943 hba->max_request_size = iop_config.request_size;
1944 hba->max_sg_count = iop_config.max_sg_count;
1946 if (hba->ops->family == MVFREY_BASED_IOP) {
1947 if (hba->ops->internal_memalloc(hba)) {
1951 if (hba->ops->reset_comm(hba)) {
1957 if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1963 PAGE_SIZE * (hba->max_sg_count-1), /* maxsize */
1964 hba->max_sg_count, /* nsegments */
1968 &hba->lock, /* lockfuncarg */
1969 &hba->io_dmat /* tag */))
1975 if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1987 &hba->srb_dmat /* tag */))
1993 if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
1995 &hba->srb_dmamap) != 0)
2001 if (bus_dmamap_load(hba->srb_dmat,
2002 hba->srb_dmamap, hba->uncached_ptr,
2004 hptiop_map_srb, hba, 0))
2010 if ((devq = cam_simq_alloc(hba->max_requests - 1)) == NULL) {
2015 hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
2016 hba, unit, &hba->lock, hba->max_requests - 1, 1, devq);
2017 if (!hba->sim) {
2022 hptiop_lock_adapter(hba);
2023 if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS)
2029 if (xpt_create_path(&hba->path, /*periph */ NULL,
2030 cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
2035 hptiop_unlock_adapter(hba);
2039 set_config.vbus_id = cam_sim_path(hba->sim);
2042 if (hba->ops->set_config(hba, &set_config)) {
2047 xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2051 ccb.callback_arg = hba->sim;
2055 if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_IRQ,
2061 if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE,
2062 NULL, hptiop_pci_intr, hba, &hba->irq_handle))
2068 if (hptiop_send_sync_msg(hba,
2074 hba->ops->enable_intr(hba);
2075 hba->initialized = 1;
2077 hba->ioctl_dev = make_dev(&hptiop_cdevsw, unit,
2086 bus_teardown_intr(dev, hba->irq_res, hba->irq_handle);
2089 bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res);
2091 hptiop_lock_adapter(hba);
2093 xpt_free_path(hba->path);
2096 xpt_bus_deregister(cam_sim_path(hba->sim));
2099 cam_sim_free(hba->sim, /*free devq*/ TRUE);
2100 hptiop_unlock_adapter(hba);
2103 if (hba->uncached_ptr)
2104 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2107 if (hba->uncached_ptr)
2108 bus_dmamem_free(hba->srb_dmat,
2109 hba->uncached_ptr, hba->srb_dmamap);
2112 if (hba->srb_dmat)
2113 bus_dma_tag_destroy(hba->srb_dmat);
2116 if (hba->io_dmat)
2117 bus_dma_tag_destroy(hba->io_dmat);
2120 hba->ops->internal_memfree(hba);
2123 if (hba->parent_dmat)
2124 bus_dma_tag_destroy(hba->parent_dmat);
2127 if (hba->ops->release_pci_res)
2128 hba->ops->release_pci_res(hba);
2135 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2139 hptiop_lock_adapter(hba);
2140 for (i = 0; i < hba->max_devices; i++)
2141 if (hptiop_os_query_remove_device(hba, i)) {
2143 hba->pciunit, i);
2149 if (hptiop_send_sync_msg(hba,
2152 hptiop_unlock_adapter(hba);
2154 hptiop_release_resource(hba);
2157 hptiop_unlock_adapter(hba);
2163 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2167 if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
2168 device_printf(dev, "unit %d is busy\n", hba->pciunit);
2172 hba->ops->disable_intr(hba);
2174 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
2182 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2183 hptiop_lock_adapter(hba);
2184 hba->ops->iop_intr(hba);
2185 hptiop_unlock_adapter(hba);
2190 struct hpt_iop_hba *hba;
2192 hba = cam_sim_softc(sim);
2193 hba->ops->iop_intr(hba);
2201 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
2207 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
2218 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba)
2230 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
2241 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
2252 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba)
2266 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)argv;
2267 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000))
2269 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000);
2272 static void *hptiop_get_srb(struct hpt_iop_hba * hba)
2276 if (hba->srb_list) {
2277 srb = hba->srb_list;
2278 hba->srb_list = srb->next;
2285 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
2287 srb->next = hba->srb_list;
2288 hba->srb_list = srb;
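
SRB allocation (lines 2272-2288) is a LIFO free list: get pops the head, free pushes it back, and both run under hba->lock in the driver. A userland sketch of the two list operations (pool layout is illustrative; the locking is omitted):

#include <stdio.h>

struct srb {
    struct srb *next;
    int index;
};

static struct srb *srb_list;    /* LIFO head, like hba->srb_list */

/* Pop a free request block, or NULL when the pool is exhausted. */
static struct srb *
get_srb(void)
{
    struct srb *srb = srb_list;

    if (srb != NULL)
        srb_list = srb->next;
    return (srb);
}

/* Push a finished request block back for reuse. */
static void
free_srb(struct srb *srb)
{
    srb->next = srb_list;
    srb_list = srb;
}

int
main(void)
{
    struct srb pool[4];
    int i;

    for (i = 0; i < 4; i++) {
        pool[i].index = i;
        free_srb(&pool[i]);
    }
    printf("got srb %d\n", get_srb()->index);
    return (0);
}
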
2293 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
2301 ccb->ccb_h.target_id >= hba->max_devices ||
2309 if ((srb = hptiop_get_srb(hba)) == NULL) {
2310 device_printf(hba->pcidev, "srb allocation failed\n");
2317 error = bus_dmamap_load_ccb(hba->io_dmat,
2325 device_printf(hba->pcidev,
2327 hba->pciunit, error);
2328 xpt_freeze_simq(hba->sim, 1);
2330 hptiop_free_srb(hba, srb);
2338 device_printf(hba->pcidev, "reset adapter\n");
2339 hba->msg_done = 0;
2340 hptiop_reset_adapter(hba);
2361 cpi->max_target = hba->max_devices;
2365 cpi->initiator_id = hba->max_devices;
2388 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
2411 device_printf(hba->pcidev, "invalid req offset\n");
2413 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2414 hptiop_free_srb(hba, srb);
2443 bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
2447 bus_dmamap_sync(hba->io_dmat,
2451 bus_dmamap_sync(hba->io_dmat,
2486 bus_dmamap_sync(hba->io_dmat,
2489 bus_dmamap_sync(hba->io_dmat,
2493 if (hba->firmware_version > 0x01020000
2494 || hba->interface_version > 0x01020000) {
2513 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
2551 bus_dmamap_sync(hba->io_dmat,
2555 bus_dmamap_sync(hba->io_dmat,
2564 | imin(3, size), hba);
2567 static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
2605 bus_dmamap_sync(hba->io_dmat,
2609 bus_dmamap_sync(hba->io_dmat,
2619 hba->u.mvfrey.inlist_wptr++;
2620 index = hba->u.mvfrey.inlist_wptr & 0x3fff;
2622 if (index == hba->u.mvfrey.list_count) {
2624 hba->u.mvfrey.inlist_wptr &= ~0x3fff;
2625 hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
2628 hba->u.mvfrey.inlist[index].addr = req_phy;
2629 hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
2631 BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
2635 callout_reset(&srb->timeout, 20 * hz, hptiop_reset_adapter, hba);
2644 struct hpt_iop_hba *hba = srb->hba;
2646 if (error || nsegs > hba->max_sg_count) {
2652 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2653 hptiop_free_srb(hba, srb);
2658 hba->ops->post_req(hba, srb, segs, nsegs);
2664 struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2665 hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F)
2667 hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2674 struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2677 u_int32_t list_count = hba->u.mvfrey.list_count;
2681 p = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2684 hba->ctlcfgcmd_phy = phy;
2685 hba->ctlcfg_ptr = p;
2690 hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
2691 hba->u.mvfrey.inlist_phy = phy;
2696 hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
2697 hba->u.mvfrey.outlist_phy = phy;
2702 hba->u.mvfrey.outlist_cptr = (u_int32_t *)p;
2703 hba->u.mvfrey.outlist_cptr_phy = phy;
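
hptiop_mvfrey_map_ctlcfg() (lines 2674-2703) carves one busdma allocation into aligned pieces, advancing a CPU pointer and the matching DMA address in lockstep: a 32-byte-aligned base, then the ctlcfg request buffer, the inbound list, the outbound list, and finally the outbound copy pointer. A userland sketch of the carving arithmetic, with illustrative region and entry sizes:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ALIGN32(x)  (((uintptr_t)(x) + 0x1F) & ~(uintptr_t)0x1F)

int
main(void)
{
    size_t list_count = 8;          /* illustrative */
    size_t in_sz = 8, out_sz = 4;   /* illustrative entry sizes */
    uint8_t *base, *p;

    base = malloc(0x800 + list_count * (in_sz + out_sz) + 4 + 0x20);
    if (base == NULL)
        return (1);

    /*
     * Advance one pointer through the block; the driver moves the
     * corresponding DMA address (phy) in lockstep with p.
     */
    p = (uint8_t *)ALIGN32(base);   /* 32-byte aligned start */
    uint8_t *ctlcfg = p;    p += 0x800;                 /* request buffer */
    uint8_t *inlist = p;    p += list_count * in_sz;    /* inbound list */
    uint8_t *outlist = p;   p += list_count * out_sz;   /* outbound list */
    uint8_t *cptr = p;                                  /* outbound copy ptr */

    printf("ctlcfg=%p inlist=%p outlist=%p cptr=%p\n",
        (void *)ctlcfg, (void *)inlist, (void *)outlist, (void *)cptr);
    free(base);
    return (0);
}
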
2709 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2715 device_printf(hba->pcidev, "hptiop_map_srb error\n");
2721 (((unsigned long)hba->uncached_ptr + 0x1F)
2728 if (bus_dmamap_create(hba->io_dmat,
2730 device_printf(hba->pcidev, "dmamap create failed\n");
2735 tmp_srb->hba = hba;
2737 if (hba->ctlcfg_ptr == 0) { /* itl iop */
2747 callout_init_mtx(&tmp_srb->timeout, &hba->lock, 0);
2748 hptiop_free_srb(hba, tmp_srb);
2749 hba->srb[i] = tmp_srb;
2753 device_printf(hba->pcidev, "invalid alignment\n");
2759 static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg)
2761 hba->msg_done = 1;
2764 static int hptiop_os_query_remove_device(struct hpt_iop_hba * hba,
2771 status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);
2776 device_printf(hba->pcidev, "%d ,"
2779 hba->pciunit, target_id, periph->refcount);
2788 static void hptiop_release_resource(struct hpt_iop_hba *hba)
2792 if (hba->ioctl_dev)
2793 destroy_dev(hba->ioctl_dev);
2795 if (hba->path) {
2798 xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2802 ccb.callback_arg = hba->sim;
2804 xpt_free_path(hba->path);
2807 if (hba->irq_handle)
2808 bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
2810 if (hba->sim) {
2811 hptiop_lock_adapter(hba);
2812 xpt_bus_deregister(cam_sim_path(hba->sim));
2813 cam_sim_free(hba->sim, TRUE);
2814 hptiop_unlock_adapter(hba);
2817 if (hba->ctlcfg_dmat) {
2818 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
2819 bus_dmamem_free(hba->ctlcfg_dmat,
2820 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
2821 bus_dma_tag_destroy(hba->ctlcfg_dmat);
2825 struct hpt_iop_srb *srb = hba->srb[i];
2827 bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
2831 if (hba->srb_dmat) {
2832 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2833 bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap);
2834 bus_dma_tag_destroy(hba->srb_dmat);
2837 if (hba->io_dmat)
2838 bus_dma_tag_destroy(hba->io_dmat);
2840 if (hba->parent_dmat)
2841 bus_dma_tag_destroy(hba->parent_dmat);
2843 if (hba->irq_res)
2844 bus_release_resource(hba->pcidev, SYS_RES_IRQ,
2845 0, hba->irq_res);
2847 if (hba->bar0_res)
2848 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2849 hba->bar0_rid, hba->bar0_res);
2850 if (hba->bar2_res)
2851 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2852 hba->bar2_rid, hba->bar2_res);
2853 mtx_destroy(&hba->lock);