/freebsd-12-stable/sys/dev/hptiop/

Lines Matching defs:hba

82 static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
84 static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
86 static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
87 static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
89 static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
90 static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
92 static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
94 static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
96 static int hptiop_rescan_bus(struct hpt_iop_hba *hba);
97 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
98 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
99 static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba);
100 static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
102 static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
104 static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
106 static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
108 static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
110 static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
112 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
113 static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba);
114 static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba);
115 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
116 static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba);
117 static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
119 static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
122 static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
125 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
128 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
131 static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
134 static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
135 static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
136 static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg);
137 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
138 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
139 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba);
140 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
141 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
142 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba);
143 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
144 static int hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
154 static void hptiop_release_resource(struct hpt_iop_hba *hba);
171 #define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
172 hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
173 #define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
174 hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))
176 #define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
177 hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value)
178 #define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
179 hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
180 #define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
181 hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value)
182 #define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
183 hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))
185 #define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\
186 hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), value)
187 #define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\
188 hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset))
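
These four macro pairs wrap bus_space_write_4()/bus_space_read_4() against the mapped BAR for each controller family, computing the register offset with offsetof() on that family's MMIO layout. A minimal expansion sketch; the field name inbound_msgaddr0 is assumed for illustration (the real layouts live in hptiop.h):

    /* BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg) expands to roughly: */
    bus_space_write_4(hba->bar0t, hba->bar0h,
        offsetof(struct hpt_iopmu_itl, inbound_msgaddr0), msg);
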
193 struct hpt_iop_hba *hba = hba_from_dev(dev);
195 if (hba==NULL)
197 if (hba->flag & HPT_IOCTL_FLAG_OPEN)
199 hba->flag |= HPT_IOCTL_FLAG_OPEN;
206 struct hpt_iop_hba *hba = hba_from_dev(dev);
207 hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN;
215 struct hpt_iop_hba *hba = hba_from_dev(dev);
221 ret = hba->ops->do_ioctl(hba,
225 ret = hptiop_rescan_bus(hba);
234 static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba)
241 bus_space_read_region_4(hba->bar2t, hba->bar2h,
257 static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba)
265 bus_space_write_region_4(hba->bar2t, hba->bar2h,
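
hptiop_mv_outbound_read() and hptiop_mv_inbound_write() move 64-bit MV queue entries through BAR2 as two consecutive 32-bit words via the bus_space region calls. A sketch of the write side only; the helper name and the inbound_queue offset are hypothetical, and the real function also checks queue-full status:

    /* Sketch: post a 64-bit inbound queue entry as two 32-bit words. */
    static void
    post_inbound_u64(struct hpt_iop_hba *hba, u_int64_t p)
    {
        bus_space_write_region_4(hba->bar2t, hba->bar2h,
            offsetof(struct hpt_iopmu_mv, inbound_queue),  /* assumed field */
            (u_int32_t *)&p, 2);
    }
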
272 static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg)
278 static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg)
287 static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg)
293 static int hptiop_wait_ready_itl(struct hpt_iop_hba * hba, u_int32_t millisec)
314 static int hptiop_wait_ready_mv(struct hpt_iop_hba * hba, u_int32_t millisec)
316 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
322 static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba * hba,
325 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
331 static void hptiop_request_callback_itl(struct hpt_iop_hba * hba,
342 if (hba->firmware_version > 0x01020000 ||
343 hba->interface_version > 0x01020000) {
344 srb = hba->srb[index & ~(u_int32_t)
353 srb = hba->srb[index &
363 temp = bus_space_read_4(hba->bar0t, hba->bar0h, index +
365 result = bus_space_read_4(hba->bar0t, hba->bar0h, index +
371 bus_space_write_region_4(hba->bar0t, hba->bar0h, index +
374 wakeup((void *)((unsigned long)hba->u.itl.mu + index));
379 bus_space_read_region_4(hba->bar0t, hba->bar0h, index +
383 dxfer = bus_space_read_4(hba->bar0t, hba->bar0h,
402 bus_dmamap_sync(hba->io_dmat,
404 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
407 bus_dmamap_sync(hba->io_dmat,
409 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
440 bus_space_read_region_1(hba->bar0t, hba->bar0h,
462 hptiop_free_srb(hba, srb);
468 static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba)
474 hptiop_request_callback_itl(hba, req);
479 ((char *)hba->u.itl.mu + req);
480 temp = bus_space_read_4(hba->bar0t,
481 hba->bar0h,req +
486 bus_space_read_region_4(hba->bar0t,
487 hba->bar0h,req +
492 hptiop_request_callback_itl(hba, req);
495 bus_space_write_region_4(hba->bar0t,
496 hba->bar0h,req +
502 hptiop_request_callback_itl(hba, req);
507 static int hptiop_intr_itl(struct hpt_iop_hba * hba)
518 hptiop_os_message_callback(hba, msg);
523 hptiop_drain_outbound_queue_itl(hba);
530 static void hptiop_request_callback_mv(struct hpt_iop_hba * hba,
541 srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT];
560 bus_dmamap_sync(hba->io_dmat,
562 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
565 bus_dmamap_sync(hba->io_dmat,
567 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
608 hptiop_free_srb(hba, srb);
611 struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr;
613 hba->config_done = 1;
615 hba->config_done = -1;
620 hba->config_done = 1;
622 device_printf(hba->pcidev, "wrong callback type\n");
626 static void hptiop_request_callback_mvfrey(struct hpt_iop_hba * hba,
639 hba->config_done = 1;
643 srb = hba->srb[(_tag >> 4) & 0xff];
667 bus_dmamap_sync(hba->io_dmat,
669 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
672 bus_dmamap_sync(hba->io_dmat,
674 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
715 hptiop_free_srb(hba, srb);
720 hba->config_done = 1;
722 hba->config_done = -1;
723 wakeup((struct hpt_iop_request_ioctl_command *)hba->ctlcfg_ptr);
726 device_printf(hba->pcidev, "wrong callback type\n");
731 static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba)
735 while ((req = hptiop_mv_outbound_read(hba))) {
738 hptiop_request_callback_mv(hba, req);
744 static int hptiop_intr_mv(struct hpt_iop_hba * hba)
757 hptiop_os_message_callback(hba, msg);
762 hptiop_drain_outbound_queue_mv(hba);
769 static int hptiop_intr_mvfrey(struct hpt_iop_hba * hba)
774 if (hba->initialized) {
783 hptiop_os_message_callback(hba, msg);
792 cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
793 while (hba->u.mvfrey.outlist_rptr != cptr) {
794 hba->u.mvfrey.outlist_rptr++;
795 if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count) {
796 hba->u.mvfrey.outlist_rptr = 0;
799 _tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val;
800 hptiop_request_callback_mvfrey(hba, _tag);
803 } while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
806 if (hba->initialized) {
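
These hptiop_intr_mvfrey() fragments show the outbound completion drain: the hardware publishes a completion index through *outlist_cptr, and the driver advances its read pointer to match, wrapping at list_count and re-reading until the two agree. Reassembled from the fragments above (loop body only):

    /* Drain the mvfrey outbound list up to the hardware's pointer. */
    u_int32_t _tag, cptr;

    do {
        cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
        while (hba->u.mvfrey.outlist_rptr != cptr) {
            hba->u.mvfrey.outlist_rptr++;
            if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count)
                hba->u.mvfrey.outlist_rptr = 0;
            _tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val;
            hptiop_request_callback_mvfrey(hba, _tag);
        }
    } while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
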
813 static int hptiop_send_sync_request_itl(struct hpt_iop_hba * hba,
823 hptiop_intr_itl(hba);
824 bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
835 static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba,
840 hba->config_done = 0;
842 phy_addr = hba->ctlcfgcmd_phy |
847 hptiop_mv_inbound_write(phy_addr, hba);
851 hptiop_intr_mv(hba);
852 if (hba->config_done)
859 static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba *hba,
867 hba->config_done = 0;
869 phy_addr = hba->ctlcfgcmd_phy;
877 hba->u.mvfrey.inlist_wptr++;
878 index = hba->u.mvfrey.inlist_wptr & 0x3fff;
880 if (index == hba->u.mvfrey.list_count) {
882 hba->u.mvfrey.inlist_wptr &= ~0x3fff;
883 hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
886 hba->u.mvfrey.inlist[index].addr = phy_addr;
887 hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
889 BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
893 hptiop_intr_mvfrey(hba);
894 if (hba->config_done)
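
The posting sequence in hptiop_send_sync_request_mvfrey() is the same one used by hptiop_post_ioctl_command_mvfrey() and hptiop_post_req_mvfrey() further down: bump the inbound write pointer, take its low 14 bits as the list index, and on wraparound clear those bits and flip CL_POINTER_TOGGLE, presumably so the firmware can tell successive passes over the ring apart. A sketch assembled from the fragments; the index reset on wrap is assumed:

    /* Sketch: queue one request on the mvfrey inbound list. */
    hba->u.mvfrey.inlist_wptr++;
    index = hba->u.mvfrey.inlist_wptr & 0x3fff;
    if (index == hba->u.mvfrey.list_count) {
        index = 0;                          /* assumed: wrap to slot 0 */
        hba->u.mvfrey.inlist_wptr &= ~0x3fff;
        hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
    }
    hba->u.mvfrey.inlist[index].addr = phy_addr;
    hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
    /* Tell the controller about the new entry. */
    BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
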
901 static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
906 hba->msg_done = 0;
907 hba->ops->post_msg(hba, msg);
910 hba->ops->iop_intr(hba);
911 if (hba->msg_done)
916 return hba->msg_done? 0 : -1;
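
hptiop_send_sync_msg() is the synchronous control path shared by all three families: clear msg_done, post the message through the per-family post_msg op, then poll iop_intr() until the message callback sets msg_done or the timeout lapses. A sketch consistent with the fragments above; the 1 ms poll granularity is assumed:

    /* Sketch: post a message and poll for its completion. */
    static int
    hptiop_send_sync_msg(struct hpt_iop_hba *hba, u_int32_t msg,
        u_int32_t millisec)
    {
        u_int32_t i;

        hba->msg_done = 0;
        hba->ops->post_msg(hba, msg);

        for (i = 0; i < millisec; i++) {
            hba->ops->iop_intr(hba);
            if (hba->msg_done)
                break;
            DELAY(1000);        /* assumed: poll every millisecond */
        }

        return hba->msg_done ? 0 : -1;
    }
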
919 static int hptiop_get_config_itl(struct hpt_iop_hba * hba,
934 bus_space_write_region_4(hba->bar0t, hba->bar0h,
938 if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
943 bus_space_read_region_4(hba->bar0t, hba->bar0h,
952 static int hptiop_get_config_mv(struct hpt_iop_hba * hba,
957 if (!(req = hba->ctlcfg_ptr))
966 if (hptiop_send_sync_request_mv(hba, req, 20000)) {
975 static int hptiop_get_config_mvfrey(struct hpt_iop_hba * hba,
978 struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;
1006 static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
1022 bus_space_write_region_4(hba->bar0t, hba->bar0h, req32,
1026 if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
1036 static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
1041 if (!(req = hba->ctlcfg_ptr))
1055 if (hptiop_send_sync_request_mv(hba, req, 20000)) {
1063 static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
1068 if (!(req = hba->ctlcfg_ptr))
1080 if (hptiop_send_sync_request_mvfrey(hba, req, 20000)) {
1088 static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
1096 (hba->max_request_size -
1098 device_printf(hba->pcidev, "request size beyond max value");
1107 req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu;
1113 bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req,
1116 hptiop_lock_adapter(hba);
1121 bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
1125 if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32),
1128 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1129 bus_space_read_region_4(hba->bar0t, hba->bar0h,req32 +
1135 hptiop_unlock_adapter(hba);
1139 static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus,
1148 bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
1154 static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus,
1161 byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i);
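
hptiop_bus_space_copyin()/hptiop_bus_space_copyout() shuttle ioctl payloads between user space and the ITL request window in BAR0 one byte at a time, pairing copyin()/copyout() with bus_space_write_1()/bus_space_read_1(). A sketch of the copyin direction under that assumption:

    /* Sketch: copy `size` bytes from user space into BAR0 at `bus`. */
    static int
    hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus,
        void *user, int size)
    {
        unsigned char byte;
        int i;

        for (i = 0; i < size; i++) {
            if (copyin((u_int8_t *)user + i, &byte, 1))
                return -1;
            bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
        }
        return 0;
    }
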
1169 static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
1184 if (hptiop_bus_space_copyin(hba, req32 +
1189 if (hptiop_post_ioctl_command_itl(hba, req32, pParams))
1192 result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 +
1198 if (hptiop_bus_space_copyout(hba, req32 +
1205 if (hptiop_bus_space_copyout(hba, req32 +
1222 static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
1230 (hba->max_request_size -
1232 device_printf(hba->pcidev, "request size beyond max value");
1247 req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
1248 hptiop_mv_inbound_write(req_phy, hba);
1252 while (hba->config_done == 0) {
1253 if (hptiop_sleep(hba, req, PPAUSE,
1256 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1261 static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
1270 req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
1271 hba->config_done = 0;
1272 hptiop_lock_adapter(hba);
1277 if (hptiop_post_ioctl_command_mv(hba, req, pParams))
1280 if (hba->config_done == 1) {
1293 hptiop_unlock_adapter(hba);
1297 hptiop_unlock_adapter(hba);
1302 static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
1309 phy_addr = hba->ctlcfgcmd_phy;
1312 (hba->max_request_size -
1314 device_printf(hba->pcidev, "request size beyond max value");
1334 hba->u.mvfrey.inlist_wptr++;
1335 index = hba->u.mvfrey.inlist_wptr & 0x3fff;
1337 if (index == hba->u.mvfrey.list_count) {
1339 hba->u.mvfrey.inlist_wptr &= ~0x3fff;
1340 hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
1343 hba->u.mvfrey.inlist[index].addr = phy_addr;
1344 hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
1346 BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
1349 while (hba->config_done == 0) {
1350 if (hptiop_sleep(hba, req, PPAUSE,
1353 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1358 static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
1367 req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
1368 hba->config_done = 0;
1369 hptiop_lock_adapter(hba);
1374 if (hptiop_post_ioctl_command_mvfrey(hba, req, pParams))
1377 if (hba->config_done == 1) {
1390 hptiop_unlock_adapter(hba);
1394 hptiop_unlock_adapter(hba);
1399 static int hptiop_rescan_bus(struct hpt_iop_hba * hba)
1405 if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(hba->sim),
1419 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
1421 hba->bar0_rid = 0x10;
1422 hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1423 SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1425 if (hba->bar0_res == NULL) {
1426 device_printf(hba->pcidev,
1430 hba->bar0t = rman_get_bustag(hba->bar0_res);
1431 hba->bar0h = rman_get_bushandle(hba->bar0_res);
1432 hba->u.itl.mu = (struct hpt_iopmu_itl *)
1433 rman_get_virtual(hba->bar0_res);
1435 if (!hba->u.itl.mu) {
1436 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1437 hba->bar0_rid, hba->bar0_res);
1438 device_printf(hba->pcidev, "alloc mem res failed\n");
1445 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
1447 hba->bar0_rid = 0x10;
1448 hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1449 SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1451 if (hba->bar0_res == NULL) {
1452 device_printf(hba->pcidev, "failed to get iop bar0.\n");
1455 hba->bar0t = rman_get_bustag(hba->bar0_res);
1456 hba->bar0h = rman_get_bushandle(hba->bar0_res);
1457 hba->u.mv.regs = (struct hpt_iopmv_regs *)
1458 rman_get_virtual(hba->bar0_res);
1460 if (!hba->u.mv.regs) {
1461 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1462 hba->bar0_rid, hba->bar0_res);
1463 device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
1467 hba->bar2_rid = 0x18;
1468 hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
1469 SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
1471 if (hba->bar2_res == NULL) {
1472 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1473 hba->bar0_rid, hba->bar0_res);
1474 device_printf(hba->pcidev, "failed to get iop bar2.\n");
1478 hba->bar2t = rman_get_bustag(hba->bar2_res);
1479 hba->bar2h = rman_get_bushandle(hba->bar2_res);
1480 hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res);
1482 if (!hba->u.mv.mu) {
1483 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1484 hba->bar0_rid, hba->bar0_res);
1485 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1486 hba->bar2_rid, hba->bar2_res);
1487 device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
1494 static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba)
1496 hba->bar0_rid = 0x10;
1497 hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1498 SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1500 if (hba->bar0_res == NULL) {
1501 device_printf(hba->pcidev, "failed to get iop bar0.\n");
1504 hba->bar0t = rman_get_bustag(hba->bar0_res);
1505 hba->bar0h = rman_get_bushandle(hba->bar0_res);
1506 hba->u.mvfrey.config = (struct hpt_iop_request_get_config *)
1507 rman_get_virtual(hba->bar0_res);
1509 if (!hba->u.mvfrey.config) {
1510 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1511 hba->bar0_rid, hba->bar0_res);
1512 device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
1516 hba->bar2_rid = 0x18;
1517 hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
1518 SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
1520 if (hba->bar2_res == NULL) {
1521 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1522 hba->bar0_rid, hba->bar0_res);
1523 device_printf(hba->pcidev, "failed to get iop bar2.\n");
1527 hba->bar2t = rman_get_bustag(hba->bar2_res);
1528 hba->bar2h = rman_get_bushandle(hba->bar2_res);
1529 hba->u.mvfrey.mu =
1530 (struct hpt_iopmu_mvfrey *)rman_get_virtual(hba->bar2_res);
1532 if (!hba->u.mvfrey.mu) {
1533 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1534 hba->bar0_rid, hba->bar0_res);
1535 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1536 hba->bar2_rid, hba->bar2_res);
1537 device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
1544 static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
1546 if (hba->bar0_res)
1547 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1548 hba->bar0_rid, hba->bar0_res);
1551 static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
1553 if (hba->bar0_res)
1554 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1555 hba->bar0_rid, hba->bar0_res);
1556 if (hba->bar2_res)
1557 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1558 hba->bar2_rid, hba->bar2_res);
1561 static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba *hba)
1563 if (hba->bar0_res)
1564 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1565 hba->bar0_rid, hba->bar0_res);
1566 if (hba->bar2_res)
1567 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1568 hba->bar2_rid, hba->bar2_res);
1571 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
1573 if (bus_dma_tag_create(hba->parent_dmat,
1585 &hba->ctlcfg_dmat)) {
1586 device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
1590 if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
1592 &hba->ctlcfg_dmamap) != 0) {
1593 device_printf(hba->pcidev,
1595 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1599 if (bus_dmamap_load(hba->ctlcfg_dmat,
1600 hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
1602 hptiop_mv_map_ctlcfg, hba, 0)) {
1603 device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
1604 if (hba->ctlcfg_dmat) {
1605 bus_dmamem_free(hba->ctlcfg_dmat,
1606 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1607 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1615 static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba)
1625 hba->u.mvfrey.list_count = list_count;
1626 hba->u.mvfrey.internal_mem_size = 0x800
1630 if (bus_dma_tag_create(hba->parent_dmat,
1636 hba->u.mvfrey.internal_mem_size,
1642 &hba->ctlcfg_dmat)) {
1643 device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
1647 if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
1649 &hba->ctlcfg_dmamap) != 0) {
1650 device_printf(hba->pcidev,
1652 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1656 if (bus_dmamap_load(hba->ctlcfg_dmat,
1657 hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
1658 hba->u.mvfrey.internal_mem_size,
1659 hptiop_mvfrey_map_ctlcfg, hba, 0)) {
1660 device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
1661 if (hba->ctlcfg_dmat) {
1662 bus_dmamem_free(hba->ctlcfg_dmat,
1663 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1664 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1672 static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba) {
1676 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
1678 if (hba->ctlcfg_dmat) {
1679 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
1680 bus_dmamem_free(hba->ctlcfg_dmat,
1681 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1682 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1688 static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba)
1690 if (hba->ctlcfg_dmat) {
1691 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
1692 bus_dmamem_free(hba->ctlcfg_dmat,
1693 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1694 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1700 static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba *hba)
1704 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
1713 hba->u.mvfrey.inlist_phy & 0xffffffff);
1715 (hba->u.mvfrey.inlist_phy >> 16) >> 16);
1718 hba->u.mvfrey.outlist_phy & 0xffffffff);
1720 (hba->u.mvfrey.outlist_phy >> 16) >> 16);
1723 hba->u.mvfrey.outlist_cptr_phy & 0xffffffff);
1725 (hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16);
1727 hba->u.mvfrey.inlist_wptr = (hba->u.mvfrey.list_count - 1)
1729 *hba->u.mvfrey.outlist_cptr = (hba->u.mvfrey.list_count - 1)
1731 hba->u.mvfrey.outlist_rptr = hba->u.mvfrey.list_count - 1;
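
hptiop_reset_comm_mvfrey() republishes the three list base addresses after the RESET_COMM message, writing each 64-bit bus address as separate low and high 32-bit registers. The high half is extracted with (phy >> 16) >> 16 rather than >> 32, an idiom that stays well defined even when the address type is only 32 bits wide (shifting a 32-bit value by 32 is undefined behavior in C). A sketch of one such pair; the register names are hypothetical:

    /* Sketch: publish a 64-bit list base as a low/high register pair.
     * inbound_base / inbound_base_high are assumed names. */
    BUS_SPACE_WRT4_MVFREY2(inbound_base,
        hba->u.mvfrey.inlist_phy & 0xffffffff);
    BUS_SPACE_WRT4_MVFREY2(inbound_base_high,
        (hba->u.mvfrey.inlist_phy >> 16) >> 16);
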
1813 struct hpt_iop_hba *hba;
1872 hba = (struct hpt_iop_hba *)device_get_softc(dev);
1873 bzero(hba, sizeof(struct hpt_iop_hba));
1874 hba->ops = ops;
1876 KdPrint(("hba->ops=%p\n", hba->ops));
1882 struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
1895 pci_get_function(dev), hba->ops));
1898 hba->pcidev = dev;
1899 hba->pciunit = unit;
1901 if (hba->ops->alloc_pci_res(hba))
1904 if (hba->ops->iop_wait_ready(hba, 2000)) {
1909 mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF);
1923 &hba->parent_dmat /* tag */))
1929 if (hba->ops->family == MV_BASED_IOP) {
1930 if (hba->ops->internal_memalloc(hba)) {
1936 if (hba->ops->get_config(hba, &iop_config)) {
1941 hba->firmware_version = iop_config.firmware_version;
1942 hba->interface_version = iop_config.interface_version;
1943 hba->max_requests = iop_config.max_requests;
1944 hba->max_devices = iop_config.max_devices;
1945 hba->max_request_size = iop_config.request_size;
1946 hba->max_sg_count = iop_config.max_sg_count;
1948 if (hba->ops->family == MVFREY_BASED_IOP) {
1949 if (hba->ops->internal_memalloc(hba)) {
1953 if (hba->ops->reset_comm(hba)) {
1959 if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1965 PAGE_SIZE * (hba->max_sg_count-1), /* maxsize */
1966 hba->max_sg_count, /* nsegments */
1970 &hba->lock, /* lockfuncarg */
1971 &hba->io_dmat /* tag */))
1977 if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1989 &hba->srb_dmat /* tag */))
1995 if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
1997 &hba->srb_dmamap) != 0)
2003 if (bus_dmamap_load(hba->srb_dmat,
2004 hba->srb_dmamap, hba->uncached_ptr,
2006 hptiop_map_srb, hba, 0))
2012 if ((devq = cam_simq_alloc(hba->max_requests - 1 )) == NULL) {
2017 hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
2018 hba, unit, &hba->lock, hba->max_requests - 1, 1, devq);
2019 if (!hba->sim) {
2024 hptiop_lock_adapter(hba);
2025 if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS)
2031 if (xpt_create_path(&hba->path, /*periph */ NULL,
2032 cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
2037 hptiop_unlock_adapter(hba);
2041 set_config.vbus_id = cam_sim_path(hba->sim);
2044 if (hba->ops->set_config(hba, &set_config)) {
2049 xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2053 ccb.callback_arg = hba->sim;
2057 if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_IRQ,
2063 if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE,
2064 NULL, hptiop_pci_intr, hba, &hba->irq_handle))
2070 if (hptiop_send_sync_msg(hba,
2076 hba->ops->enable_intr(hba);
2077 hba->initialized = 1;
2079 hba->ioctl_dev = make_dev(&hptiop_cdevsw, unit,
2088 bus_teardown_intr(dev, hba->irq_res, hba->irq_handle);
2091 bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res);
2093 hptiop_lock_adapter(hba);
2095 xpt_free_path(hba->path);
2098 xpt_bus_deregister(cam_sim_path(hba->sim));
2101 cam_sim_free(hba->sim, /*free devq*/ TRUE);
2102 hptiop_unlock_adapter(hba);
2105 if (hba->uncached_ptr)
2106 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2109 if (hba->uncached_ptr)
2110 bus_dmamem_free(hba->srb_dmat,
2111 hba->uncached_ptr, hba->srb_dmamap);
2114 if (hba->srb_dmat)
2115 bus_dma_tag_destroy(hba->srb_dmat);
2118 if (hba->io_dmat)
2119 bus_dma_tag_destroy(hba->io_dmat);
2122 hba->ops->internal_memfree(hba);
2125 if (hba->parent_dmat)
2126 bus_dma_tag_destroy(hba->parent_dmat);
2129 if (hba->ops->release_pci_res)
2130 hba->ops->release_pci_res(hba);
2137 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2141 hptiop_lock_adapter(hba);
2142 for (i = 0; i < hba->max_devices; i++)
2143 if (hptiop_os_query_remove_device(hba, i)) {
2145 hba->pciunit, i);
2151 if (hptiop_send_sync_msg(hba,
2154 hptiop_unlock_adapter(hba);
2156 hptiop_release_resource(hba);
2159 hptiop_unlock_adapter(hba);
2165 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2169 if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
2170 device_printf(dev, "%d device is busy", hba->pciunit);
2174 hba->ops->disable_intr(hba);
2176 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
2184 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2185 hptiop_lock_adapter(hba);
2186 hba->ops->iop_intr(hba);
2187 hptiop_unlock_adapter(hba);
2192 struct hpt_iop_hba *hba;
2194 hba = cam_sim_softc(sim);
2195 hba->ops->iop_intr(hba);
2203 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
2209 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
2220 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba)
2232 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
2243 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
2254 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba)
2268 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)argv;
2269 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000))
2271 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000);
2274 static void *hptiop_get_srb(struct hpt_iop_hba * hba)
2278 if (hba->srb_list) {
2279 srb = hba->srb_list;
2280 hba->srb_list = srb->next;
2287 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
2289 srb->next = hba->srb_list;
2290 hba->srb_list = srb;
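
SRBs are recycled through an intrusive singly linked free list headed at hba->srb_list: hptiop_get_srb() pops the head (returning NULL when the list is empty, as the check in hptiop_action() just below shows) and hptiop_free_srb() pushes one back. Reconstructed from the fragments:

    /* Pop an SRB from the free list; NULL when exhausted. */
    static void *
    hptiop_get_srb(struct hpt_iop_hba *hba)
    {
        struct hpt_iop_srb *srb;

        if (hba->srb_list) {
            srb = hba->srb_list;
            hba->srb_list = srb->next;
            return srb;
        }
        return NULL;
    }

    /* Push an SRB back onto the free list. */
    static void
    hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
    {
        srb->next = hba->srb_list;
        hba->srb_list = srb;
    }
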
2295 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
2303 ccb->ccb_h.target_id >= hba->max_devices ||
2311 if ((srb = hptiop_get_srb(hba)) == NULL) {
2312 device_printf(hba->pcidev, "srb allocated failed");
2319 error = bus_dmamap_load_ccb(hba->io_dmat,
2327 device_printf(hba->pcidev,
2329 hba->pciunit, error);
2330 xpt_freeze_simq(hba->sim, 1);
2332 hptiop_free_srb(hba, srb);
2340 device_printf(hba->pcidev, "reset adapter");
2341 hba->msg_done = 0;
2342 hptiop_reset_adapter(hba);
2363 cpi->max_target = hba->max_devices;
2367 cpi->initiator_id = hba->max_devices;
2390 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
2413 device_printf(hba->pcidev, "invalid req offset\n");
2415 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2416 hptiop_free_srb(hba, srb);
2445 bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
2449 bus_dmamap_sync(hba->io_dmat,
2453 bus_dmamap_sync(hba->io_dmat,
2488 bus_dmamap_sync(hba->io_dmat,
2491 bus_dmamap_sync(hba->io_dmat,
2495 if (hba->firmware_version > 0x01020000
2496 || hba->interface_version > 0x01020000) {
2515 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
2553 bus_dmamap_sync(hba->io_dmat,
2557 bus_dmamap_sync(hba->io_dmat,
2566 | imin(3, size), hba);
2569 static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
2607 bus_dmamap_sync(hba->io_dmat,
2611 bus_dmamap_sync(hba->io_dmat,
2621 hba->u.mvfrey.inlist_wptr++;
2622 index = hba->u.mvfrey.inlist_wptr & 0x3fff;
2624 if (index == hba->u.mvfrey.list_count) {
2626 hba->u.mvfrey.inlist_wptr &= ~0x3fff;
2627 hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
2630 hba->u.mvfrey.inlist[index].addr = req_phy;
2631 hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
2633 BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
2637 callout_reset(&srb->timeout, 20 * hz, hptiop_reset_adapter, hba);
2646 struct hpt_iop_hba *hba = srb->hba;
2648 if (error || nsegs > hba->max_sg_count) {
2654 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2655 hptiop_free_srb(hba, srb);
2660 hba->ops->post_req(hba, srb, segs, nsegs);
2666 struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2667 hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F)
2669 hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2676 struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2679 u_int32_t list_count = hba->u.mvfrey.list_count;
2683 p = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2686 hba->ctlcfgcmd_phy = phy;
2687 hba->ctlcfg_ptr = p;
2692 hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
2693 hba->u.mvfrey.inlist_phy = phy;
2698 hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
2699 hba->u.mvfrey.outlist_phy = phy;
2704 hba->u.mvfrey.outlist_cptr = (u_int32_t *)p;
2705 hba->u.mvfrey.outlist_cptr_phy = phy;
2711 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2717 device_printf(hba->pcidev, "hptiop_map_srb error");
2723 (((unsigned long)hba->uncached_ptr + 0x1F)
2730 if (bus_dmamap_create(hba->io_dmat,
2732 device_printf(hba->pcidev, "dmamap create failed");
2737 tmp_srb->hba = hba;
2739 if (hba->ctlcfg_ptr == 0) {/*itl iop*/
2749 callout_init_mtx(&tmp_srb->timeout, &hba->lock, 0);
2750 hptiop_free_srb(hba, tmp_srb);
2751 hba->srb[i] = tmp_srb;
2755 device_printf(hba->pcidev, "invalid alignment");
2761 static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg)
2763 hba->msg_done = 1;
2766 static int hptiop_os_query_remove_device(struct hpt_iop_hba * hba,
2773 status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);
2778 device_printf(hba->pcidev, "%d ,"
2781 hba->pciunit, target_id, periph->refcount);
2790 static void hptiop_release_resource(struct hpt_iop_hba *hba)
2794 if (hba->ioctl_dev)
2795 destroy_dev(hba->ioctl_dev);
2797 if (hba->path) {
2800 xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2804 ccb.callback_arg = hba->sim;
2806 xpt_free_path(hba->path);
2809 if (hba->irq_handle)
2810 bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
2812 if (hba->sim) {
2813 hptiop_lock_adapter(hba);
2814 xpt_bus_deregister(cam_sim_path(hba->sim));
2815 cam_sim_free(hba->sim, TRUE);
2816 hptiop_unlock_adapter(hba);
2819 if (hba->ctlcfg_dmat) {
2820 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
2821 bus_dmamem_free(hba->ctlcfg_dmat,
2822 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
2823 bus_dma_tag_destroy(hba->ctlcfg_dmat);
2827 struct hpt_iop_srb *srb = hba->srb[i];
2829 bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
2833 if (hba->srb_dmat) {
2834 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2835 bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap);
2836 bus_dma_tag_destroy(hba->srb_dmat);
2839 if (hba->io_dmat)
2840 bus_dma_tag_destroy(hba->io_dmat);
2842 if (hba->parent_dmat)
2843 bus_dma_tag_destroy(hba->parent_dmat);
2845 if (hba->irq_res)
2846 bus_release_resource(hba->pcidev, SYS_RES_IRQ,
2847 0, hba->irq_res);
2849 if (hba->bar0_res)
2850 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2851 hba->bar0_rid, hba->bar0_res);
2852 if (hba->bar2_res)
2853 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2854 hba->bar2_rid, hba->bar2_res);
2855 mtx_destroy(&hba->lock);