1/* 2 * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD 3 * Copyright (C) 2007-2012 HighPoint Technologies, Inc. All Rights Reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD$"); 29 30#include <sys/param.h> 31#include <sys/types.h> 32#include <sys/cons.h> 33#if (__FreeBSD_version >= 500000) 34#include <sys/time.h> 35#include <sys/systm.h> 36#else 37#include <machine/clock.h> 38#endif 39 40#include <sys/stat.h> 41#include <sys/malloc.h> 42#include <sys/conf.h> 43#include <sys/libkern.h> 44#include <sys/kernel.h> 45 46#if (__FreeBSD_version >= 500000) 47#include <sys/kthread.h> 48#include <sys/mutex.h> 49#include <sys/module.h> 50#endif 51 52#include <sys/eventhandler.h> 53#include <sys/bus.h> 54#include <sys/taskqueue.h> 55#include <sys/ioccom.h> 56 57#include <machine/resource.h> 58#include <machine/bus.h> 59#include <machine/stdarg.h> 60#include <sys/rman.h> 61 62#include <vm/vm.h> 63#include <vm/pmap.h> 64 65#if (__FreeBSD_version >= 500000) 66#include <dev/pci/pcireg.h> 67#include <dev/pci/pcivar.h> 68#else 69#include <pci/pcivar.h> 70#include <pci/pcireg.h> 71#endif 72 73#if (__FreeBSD_version <= 500043) 74#include <sys/devicestat.h> 75#endif 76 77#include <cam/cam.h> 78#include <cam/cam_ccb.h> 79#include <cam/cam_sim.h> 80#include <cam/cam_xpt_sim.h> 81#include <cam/cam_debug.h> 82#include <cam/cam_periph.h> 83#include <cam/scsi/scsi_all.h> 84#include <cam/scsi/scsi_message.h> 85 86#if (__FreeBSD_version < 500043) 87#include <sys/bus_private.h> 88#endif 89 90#include <dev/hptiop/hptiop.h> 91 92static const char driver_name[] = "hptiop"; 93static const char driver_version[] = "v1.9"; 94 95static devclass_t hptiop_devclass; 96 97static int hptiop_send_sync_msg(struct hpt_iop_hba *hba, 98 u_int32_t msg, u_int32_t millisec); 99static void hptiop_request_callback_itl(struct hpt_iop_hba *hba, 100 u_int32_t req); 101static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req); 102static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba, 103 u_int32_t req); 104static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg); 105static int 
hptiop_do_ioctl_itl(struct hpt_iop_hba *hba, 106 struct hpt_iop_ioctl_param *pParams); 107static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba, 108 struct hpt_iop_ioctl_param *pParams); 109static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba, 110 struct hpt_iop_ioctl_param *pParams); 111static int hptiop_rescan_bus(struct hpt_iop_hba *hba); 112static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba); 113static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba); 114static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba); 115static int hptiop_get_config_itl(struct hpt_iop_hba *hba, 116 struct hpt_iop_request_get_config *config); 117static int hptiop_get_config_mv(struct hpt_iop_hba *hba, 118 struct hpt_iop_request_get_config *config); 119static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba, 120 struct hpt_iop_request_get_config *config); 121static int hptiop_set_config_itl(struct hpt_iop_hba *hba, 122 struct hpt_iop_request_set_config *config); 123static int hptiop_set_config_mv(struct hpt_iop_hba *hba, 124 struct hpt_iop_request_set_config *config); 125static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba, 126 struct hpt_iop_request_set_config *config); 127static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba); 128static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba); 129static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba); 130static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba); 131static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba); 132static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba, 133 u_int32_t req32, struct hpt_iop_ioctl_param *pParams); 134static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba, 135 struct hpt_iop_request_ioctl_command *req, 136 struct hpt_iop_ioctl_param *pParams); 137static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba, 138 struct hpt_iop_request_ioctl_command *req, 139 struct hpt_iop_ioctl_param *pParams); 
140static void hptiop_post_req_itl(struct hpt_iop_hba *hba, 141 struct hpt_iop_srb *srb, 142 bus_dma_segment_t *segs, int nsegs); 143static void hptiop_post_req_mv(struct hpt_iop_hba *hba, 144 struct hpt_iop_srb *srb, 145 bus_dma_segment_t *segs, int nsegs); 146static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba, 147 struct hpt_iop_srb *srb, 148 bus_dma_segment_t *segs, int nsegs); 149static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg); 150static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg); 151static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg); 152static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba); 153static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba); 154static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba); 155static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba); 156static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba); 157static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba); 158static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb); 159static int hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid); 160static int hptiop_probe(device_t dev); 161static int hptiop_attach(device_t dev); 162static int hptiop_detach(device_t dev); 163static int hptiop_shutdown(device_t dev); 164static void hptiop_action(struct cam_sim *sim, union ccb *ccb); 165static void hptiop_poll(struct cam_sim *sim); 166static void hptiop_async(void *callback_arg, u_int32_t code, 167 struct cam_path *path, void *arg); 168static void hptiop_pci_intr(void *arg); 169static void hptiop_release_resource(struct hpt_iop_hba *hba); 170static void hptiop_reset_adapter(void *argv); 171static d_open_t hptiop_open; 172static d_close_t hptiop_close; 173static d_ioctl_t hptiop_ioctl; 174 175static struct cdevsw hptiop_cdevsw = { 176 .d_open = hptiop_open, 177 .d_close = hptiop_close, 178 .d_ioctl = hptiop_ioctl, 179 .d_name = driver_name, 180#if 
__FreeBSD_version>=503000 181 .d_version = D_VERSION, 182#endif 183#if (__FreeBSD_version>=503000 && __FreeBSD_version<600034) 184 .d_flags = D_NEEDGIANT, 185#endif 186#if __FreeBSD_version<600034 187#if __FreeBSD_version>=501000 188 .d_maj = MAJOR_AUTO, 189#else 190 .d_maj = HPT_DEV_MAJOR, 191#endif 192#endif 193}; 194 195#if __FreeBSD_version < 503000 196#define hba_from_dev(dev) ((struct hpt_iop_hba *)(dev)->si_drv1) 197#else 198#define hba_from_dev(dev) \ 199 ((struct hpt_iop_hba *)devclass_get_softc(hptiop_devclass, dev2unit(dev))) 200#endif 201 202#define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\ 203 hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value)) 204#define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\ 205 hba->bar0h, offsetof(struct hpt_iopmu_itl, offset)) 206 207#define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\ 208 hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value) 209#define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\ 210 hba->bar0h, offsetof(struct hpt_iopmv_regs, offset)) 211#define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\ 212 hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value) 213#define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\ 214 hba->bar2h, offsetof(struct hpt_iopmu_mv, offset)) 215 216#define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\ 217 hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), value) 218#define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\ 219 hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset)) 220 221static int hptiop_open(ioctl_dev_t dev, int flags, 222 int devtype, ioctl_thread_t proc) 223{ 224 struct hpt_iop_hba *hba = hba_from_dev(dev); 225 226 if (hba==NULL) 227 return ENXIO; 228 if (hba->flag & HPT_IOCTL_FLAG_OPEN) 229 return EBUSY; 230 hba->flag |= HPT_IOCTL_FLAG_OPEN; 231 return 0; 232} 233 234static int hptiop_close(ioctl_dev_t dev, int flags, 235 
int devtype, ioctl_thread_t proc) 236{ 237 struct hpt_iop_hba *hba = hba_from_dev(dev); 238 hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN; 239 return 0; 240} 241 242static int hptiop_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data, 243 int flags, ioctl_thread_t proc) 244{ 245 int ret = EFAULT; 246 struct hpt_iop_hba *hba = hba_from_dev(dev); 247 248#if (__FreeBSD_version >= 500000) 249 mtx_lock(&Giant); 250#endif 251 252 switch (cmd) { 253 case HPT_DO_IOCONTROL: 254 ret = hba->ops->do_ioctl(hba, 255 (struct hpt_iop_ioctl_param *)data); 256 break; 257 case HPT_SCAN_BUS: 258 ret = hptiop_rescan_bus(hba); 259 break; 260 } 261 262#if (__FreeBSD_version >= 500000) 263 mtx_unlock(&Giant); 264#endif 265 266 return ret; 267} 268 269static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba) 270{ 271 u_int64_t p; 272 u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail); 273 u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head); 274 275 if (outbound_tail != outbound_head) { 276 bus_space_read_region_4(hba->bar2t, hba->bar2h, 277 offsetof(struct hpt_iopmu_mv, 278 outbound_q[outbound_tail]), 279 (u_int32_t *)&p, 2); 280 281 outbound_tail++; 282 283 if (outbound_tail == MVIOP_QUEUE_LEN) 284 outbound_tail = 0; 285 286 BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail); 287 return p; 288 } else 289 return 0; 290} 291 292static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba) 293{ 294 u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head); 295 u_int32_t head = inbound_head + 1; 296 297 if (head == MVIOP_QUEUE_LEN) 298 head = 0; 299 300 bus_space_write_region_4(hba->bar2t, hba->bar2h, 301 offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]), 302 (u_int32_t *)&p, 2); 303 BUS_SPACE_WRT4_MV2(inbound_head, head); 304 BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE); 305} 306 307static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg) 308{ 309 BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg); 310 
BUS_SPACE_RD4_ITL(outbound_intstatus); 311} 312 313static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg) 314{ 315 316 BUS_SPACE_WRT4_MV2(inbound_msg, msg); 317 BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG); 318 319 BUS_SPACE_RD4_MV0(outbound_intmask); 320} 321 322static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg) 323{ 324 BUS_SPACE_WRT4_MVFREY2(f0_to_cpu_msg_a, msg); 325 BUS_SPACE_RD4_MVFREY2(f0_to_cpu_msg_a); 326} 327 328static int hptiop_wait_ready_itl(struct hpt_iop_hba * hba, u_int32_t millisec) 329{ 330 u_int32_t req=0; 331 int i; 332 333 for (i = 0; i < millisec; i++) { 334 req = BUS_SPACE_RD4_ITL(inbound_queue); 335 if (req != IOPMU_QUEUE_EMPTY) 336 break; 337 DELAY(1000); 338 } 339 340 if (req!=IOPMU_QUEUE_EMPTY) { 341 BUS_SPACE_WRT4_ITL(outbound_queue, req); 342 BUS_SPACE_RD4_ITL(outbound_intstatus); 343 return 0; 344 } 345 346 return -1; 347} 348 349static int hptiop_wait_ready_mv(struct hpt_iop_hba * hba, u_int32_t millisec) 350{ 351 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec)) 352 return -1; 353 354 return 0; 355} 356 357static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba * hba, 358 u_int32_t millisec) 359{ 360 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec)) 361 return -1; 362 363 return 0; 364} 365 366static void hptiop_request_callback_itl(struct hpt_iop_hba * hba, 367 u_int32_t index) 368{ 369 struct hpt_iop_srb *srb; 370 struct hpt_iop_request_scsi_command *req=0; 371 union ccb *ccb; 372 u_int8_t *cdb; 373 u_int32_t result, temp, dxfer; 374 u_int64_t temp64; 375 376 if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /*host req*/ 377 if (hba->firmware_version > 0x01020000 || 378 hba->interface_version > 0x01020000) { 379 srb = hba->srb[index & ~(u_int32_t) 380 (IOPMU_QUEUE_ADDR_HOST_BIT 381 | IOPMU_QUEUE_REQUEST_RESULT_BIT)]; 382 req = (struct hpt_iop_request_scsi_command *)srb; 383 if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT) 384 result = IOP_RESULT_SUCCESS; 385 
else 386 result = req->header.result; 387 } else { 388 srb = hba->srb[index & 389 ~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT]; 390 req = (struct hpt_iop_request_scsi_command *)srb; 391 result = req->header.result; 392 } 393 dxfer = req->dataxfer_length; 394 goto srb_complete; 395 } 396 397 /*iop req*/ 398 temp = bus_space_read_4(hba->bar0t, hba->bar0h, index + 399 offsetof(struct hpt_iop_request_header, type)); 400 result = bus_space_read_4(hba->bar0t, hba->bar0h, index + 401 offsetof(struct hpt_iop_request_header, result)); 402 switch(temp) { 403 case IOP_REQUEST_TYPE_IOCTL_COMMAND: 404 { 405 temp64 = 0; 406 bus_space_write_region_4(hba->bar0t, hba->bar0h, index + 407 offsetof(struct hpt_iop_request_header, context), 408 (u_int32_t *)&temp64, 2); 409 wakeup((void *)((unsigned long)hba->u.itl.mu + index)); 410 break; 411 } 412 413 case IOP_REQUEST_TYPE_SCSI_COMMAND: 414 bus_space_read_region_4(hba->bar0t, hba->bar0h, index + 415 offsetof(struct hpt_iop_request_header, context), 416 (u_int32_t *)&temp64, 2); 417 srb = (struct hpt_iop_srb *)(unsigned long)temp64; 418 dxfer = bus_space_read_4(hba->bar0t, hba->bar0h, 419 index + offsetof(struct hpt_iop_request_scsi_command, 420 dataxfer_length)); 421srb_complete: 422 ccb = (union ccb *)srb->ccb; 423 if (ccb->ccb_h.flags & CAM_CDB_POINTER) 424 cdb = ccb->csio.cdb_io.cdb_ptr; 425 else 426 cdb = ccb->csio.cdb_io.cdb_bytes; 427 428 if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? 
*/ 429 ccb->ccb_h.status = CAM_REQ_CMP; 430 goto scsi_done; 431 } 432 433 switch (result) { 434 case IOP_RESULT_SUCCESS: 435 switch (ccb->ccb_h.flags & CAM_DIR_MASK) { 436 case CAM_DIR_IN: 437 bus_dmamap_sync(hba->io_dmat, 438 srb->dma_map, BUS_DMASYNC_POSTREAD); 439 bus_dmamap_unload(hba->io_dmat, srb->dma_map); 440 break; 441 case CAM_DIR_OUT: 442 bus_dmamap_sync(hba->io_dmat, 443 srb->dma_map, BUS_DMASYNC_POSTWRITE); 444 bus_dmamap_unload(hba->io_dmat, srb->dma_map); 445 break; 446 } 447 448 ccb->ccb_h.status = CAM_REQ_CMP; 449 break; 450 451 case IOP_RESULT_BAD_TARGET: 452 ccb->ccb_h.status = CAM_DEV_NOT_THERE; 453 break; 454 case IOP_RESULT_BUSY: 455 ccb->ccb_h.status = CAM_BUSY; 456 break; 457 case IOP_RESULT_INVALID_REQUEST: 458 ccb->ccb_h.status = CAM_REQ_INVALID; 459 break; 460 case IOP_RESULT_FAIL: 461 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 462 break; 463 case IOP_RESULT_RESET: 464 ccb->ccb_h.status = CAM_BUSY; 465 break; 466 case IOP_RESULT_CHECK_CONDITION: 467 memset(&ccb->csio.sense_data, 0, 468 sizeof(ccb->csio.sense_data)); 469 if (dxfer < ccb->csio.sense_len) 470 ccb->csio.sense_resid = ccb->csio.sense_len - 471 dxfer; 472 else 473 ccb->csio.sense_resid = 0; 474 if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {/*iop*/ 475 bus_space_read_region_1(hba->bar0t, hba->bar0h, 476 index + offsetof(struct hpt_iop_request_scsi_command, 477 sg_list), (u_int8_t *)&ccb->csio.sense_data, 478 MIN(dxfer, sizeof(ccb->csio.sense_data))); 479 } else { 480 memcpy(&ccb->csio.sense_data, &req->sg_list, 481 MIN(dxfer, sizeof(ccb->csio.sense_data))); 482 } 483 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 484 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 485 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; 486 break; 487 default: 488 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 489 break; 490 } 491scsi_done: 492 if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) 493 BUS_SPACE_WRT4_ITL(outbound_queue, index); 494 495 ccb->csio.resid = ccb->csio.dxfer_len - dxfer; 496 497 
hptiop_free_srb(hba, srb); 498 xpt_done(ccb); 499 break; 500 } 501} 502 503static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba) 504{ 505 u_int32_t req, temp; 506 507 while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) !=IOPMU_QUEUE_EMPTY) { 508 if (req & IOPMU_QUEUE_MASK_HOST_BITS) 509 hptiop_request_callback_itl(hba, req); 510 else { 511 struct hpt_iop_request_header *p; 512 513 p = (struct hpt_iop_request_header *) 514 ((char *)hba->u.itl.mu + req); 515 temp = bus_space_read_4(hba->bar0t, 516 hba->bar0h,req + 517 offsetof(struct hpt_iop_request_header, 518 flags)); 519 if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) { 520 u_int64_t temp64; 521 bus_space_read_region_4(hba->bar0t, 522 hba->bar0h,req + 523 offsetof(struct hpt_iop_request_header, 524 context), 525 (u_int32_t *)&temp64, 2); 526 if (temp64) { 527 hptiop_request_callback_itl(hba, req); 528 } else { 529 temp64 = 1; 530 bus_space_write_region_4(hba->bar0t, 531 hba->bar0h,req + 532 offsetof(struct hpt_iop_request_header, 533 context), 534 (u_int32_t *)&temp64, 2); 535 } 536 } else 537 hptiop_request_callback_itl(hba, req); 538 } 539 } 540} 541 542static int hptiop_intr_itl(struct hpt_iop_hba * hba) 543{ 544 u_int32_t status; 545 int ret = 0; 546 547 status = BUS_SPACE_RD4_ITL(outbound_intstatus); 548 549 if (status & IOPMU_OUTBOUND_INT_MSG0) { 550 u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0); 551 KdPrint(("hptiop: received outbound msg %x\n", msg)); 552 BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0); 553 hptiop_os_message_callback(hba, msg); 554 ret = 1; 555 } 556 557 if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) { 558 hptiop_drain_outbound_queue_itl(hba); 559 ret = 1; 560 } 561 562 return ret; 563} 564 565static void hptiop_request_callback_mv(struct hpt_iop_hba * hba, 566 u_int64_t _tag) 567{ 568 u_int32_t context = (u_int32_t)_tag; 569 570 if (context & MVIOP_CMD_TYPE_SCSI) { 571 struct hpt_iop_srb *srb; 572 struct hpt_iop_request_scsi_command *req; 573 union ccb *ccb; 574 
u_int8_t *cdb; 575 576 srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT]; 577 req = (struct hpt_iop_request_scsi_command *)srb; 578 ccb = (union ccb *)srb->ccb; 579 if (ccb->ccb_h.flags & CAM_CDB_POINTER) 580 cdb = ccb->csio.cdb_io.cdb_ptr; 581 else 582 cdb = ccb->csio.cdb_io.cdb_bytes; 583 584 if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */ 585 ccb->ccb_h.status = CAM_REQ_CMP; 586 goto scsi_done; 587 } 588 if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT) 589 req->header.result = IOP_RESULT_SUCCESS; 590 591 switch (req->header.result) { 592 case IOP_RESULT_SUCCESS: 593 switch (ccb->ccb_h.flags & CAM_DIR_MASK) { 594 case CAM_DIR_IN: 595 bus_dmamap_sync(hba->io_dmat, 596 srb->dma_map, BUS_DMASYNC_POSTREAD); 597 bus_dmamap_unload(hba->io_dmat, srb->dma_map); 598 break; 599 case CAM_DIR_OUT: 600 bus_dmamap_sync(hba->io_dmat, 601 srb->dma_map, BUS_DMASYNC_POSTWRITE); 602 bus_dmamap_unload(hba->io_dmat, srb->dma_map); 603 break; 604 } 605 ccb->ccb_h.status = CAM_REQ_CMP; 606 break; 607 case IOP_RESULT_BAD_TARGET: 608 ccb->ccb_h.status = CAM_DEV_NOT_THERE; 609 break; 610 case IOP_RESULT_BUSY: 611 ccb->ccb_h.status = CAM_BUSY; 612 break; 613 case IOP_RESULT_INVALID_REQUEST: 614 ccb->ccb_h.status = CAM_REQ_INVALID; 615 break; 616 case IOP_RESULT_FAIL: 617 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 618 break; 619 case IOP_RESULT_RESET: 620 ccb->ccb_h.status = CAM_BUSY; 621 break; 622 case IOP_RESULT_CHECK_CONDITION: 623 memset(&ccb->csio.sense_data, 0, 624 sizeof(ccb->csio.sense_data)); 625 if (req->dataxfer_length < ccb->csio.sense_len) 626 ccb->csio.sense_resid = ccb->csio.sense_len - 627 req->dataxfer_length; 628 else 629 ccb->csio.sense_resid = 0; 630 memcpy(&ccb->csio.sense_data, &req->sg_list, 631 MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data))); 632 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 633 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 634 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; 635 break; 636 default: 637 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 
638 break; 639 } 640scsi_done: 641 ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length; 642 643 hptiop_free_srb(hba, srb); 644 xpt_done(ccb); 645 } else if (context & MVIOP_CMD_TYPE_IOCTL) { 646 struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr; 647 if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT) 648 hba->config_done = 1; 649 else 650 hba->config_done = -1; 651 wakeup(req); 652 } else if (context & 653 (MVIOP_CMD_TYPE_SET_CONFIG | 654 MVIOP_CMD_TYPE_GET_CONFIG)) 655 hba->config_done = 1; 656 else { 657 device_printf(hba->pcidev, "wrong callback type\n"); 658 } 659} 660 661static void hptiop_request_callback_mvfrey(struct hpt_iop_hba * hba, 662 u_int32_t _tag) 663{ 664 u_int32_t req_type = _tag & 0xf; 665 666 struct hpt_iop_srb *srb; 667 struct hpt_iop_request_scsi_command *req; 668 union ccb *ccb; 669 u_int8_t *cdb; 670 671 switch (req_type) { 672 case IOP_REQUEST_TYPE_GET_CONFIG: 673 case IOP_REQUEST_TYPE_SET_CONFIG: 674 hba->config_done = 1; 675 break; 676 677 case IOP_REQUEST_TYPE_SCSI_COMMAND: 678 srb = hba->srb[(_tag >> 4) & 0xff]; 679 req = (struct hpt_iop_request_scsi_command *)srb; 680 681 ccb = (union ccb *)srb->ccb; 682 683 untimeout(hptiop_reset_adapter, hba, ccb->ccb_h.timeout_ch); 684 685 if (ccb->ccb_h.flags & CAM_CDB_POINTER) 686 cdb = ccb->csio.cdb_io.cdb_ptr; 687 else 688 cdb = ccb->csio.cdb_io.cdb_bytes; 689 690 if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? 
*/ 691 ccb->ccb_h.status = CAM_REQ_CMP; 692 goto scsi_done; 693 } 694 695 if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT) 696 req->header.result = IOP_RESULT_SUCCESS; 697 698 switch (req->header.result) { 699 case IOP_RESULT_SUCCESS: 700 switch (ccb->ccb_h.flags & CAM_DIR_MASK) { 701 case CAM_DIR_IN: 702 bus_dmamap_sync(hba->io_dmat, 703 srb->dma_map, BUS_DMASYNC_POSTREAD); 704 bus_dmamap_unload(hba->io_dmat, srb->dma_map); 705 break; 706 case CAM_DIR_OUT: 707 bus_dmamap_sync(hba->io_dmat, 708 srb->dma_map, BUS_DMASYNC_POSTWRITE); 709 bus_dmamap_unload(hba->io_dmat, srb->dma_map); 710 break; 711 } 712 ccb->ccb_h.status = CAM_REQ_CMP; 713 break; 714 case IOP_RESULT_BAD_TARGET: 715 ccb->ccb_h.status = CAM_DEV_NOT_THERE; 716 break; 717 case IOP_RESULT_BUSY: 718 ccb->ccb_h.status = CAM_BUSY; 719 break; 720 case IOP_RESULT_INVALID_REQUEST: 721 ccb->ccb_h.status = CAM_REQ_INVALID; 722 break; 723 case IOP_RESULT_FAIL: 724 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 725 break; 726 case IOP_RESULT_RESET: 727 ccb->ccb_h.status = CAM_BUSY; 728 break; 729 case IOP_RESULT_CHECK_CONDITION: 730 memset(&ccb->csio.sense_data, 0, 731 sizeof(ccb->csio.sense_data)); 732 if (req->dataxfer_length < ccb->csio.sense_len) 733 ccb->csio.sense_resid = ccb->csio.sense_len - 734 req->dataxfer_length; 735 else 736 ccb->csio.sense_resid = 0; 737 memcpy(&ccb->csio.sense_data, &req->sg_list, 738 MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data))); 739 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 740 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 741 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; 742 break; 743 default: 744 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 745 break; 746 } 747scsi_done: 748 ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length; 749 750 hptiop_free_srb(hba, srb); 751 xpt_done(ccb); 752 break; 753 case IOP_REQUEST_TYPE_IOCTL_COMMAND: 754 if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT) 755 hba->config_done = 1; 756 else 757 hba->config_done = -1; 758 wakeup((struct 
hpt_iop_request_ioctl_command *)hba->ctlcfg_ptr); 759 break; 760 default: 761 device_printf(hba->pcidev, "wrong callback type\n"); 762 break; 763 } 764} 765 766static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba) 767{ 768 u_int64_t req; 769 770 while ((req = hptiop_mv_outbound_read(hba))) { 771 if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) { 772 if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) { 773 hptiop_request_callback_mv(hba, req); 774 } 775 } 776 } 777} 778 779static int hptiop_intr_mv(struct hpt_iop_hba * hba) 780{ 781 u_int32_t status; 782 int ret = 0; 783 784 status = BUS_SPACE_RD4_MV0(outbound_doorbell); 785 786 if (status) 787 BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status); 788 789 if (status & MVIOP_MU_OUTBOUND_INT_MSG) { 790 u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg); 791 KdPrint(("hptiop: received outbound msg %x\n", msg)); 792 hptiop_os_message_callback(hba, msg); 793 ret = 1; 794 } 795 796 if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) { 797 hptiop_drain_outbound_queue_mv(hba); 798 ret = 1; 799 } 800 801 return ret; 802} 803 804static int hptiop_intr_mvfrey(struct hpt_iop_hba * hba) 805{ 806 u_int32_t status, _tag, cptr; 807 int ret = 0; 808 809 if (hba->initialized) { 810 BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0); 811 } 812 813 status = BUS_SPACE_RD4_MVFREY2(f0_doorbell); 814 if (status) { 815 BUS_SPACE_WRT4_MVFREY2(f0_doorbell, status); 816 if (status & CPU_TO_F0_DRBL_MSG_A_BIT) { 817 u_int32_t msg = BUS_SPACE_RD4_MVFREY2(cpu_to_f0_msg_a); 818 hptiop_os_message_callback(hba, msg); 819 } 820 ret = 1; 821 } 822 823 status = BUS_SPACE_RD4_MVFREY2(isr_cause); 824 if (status) { 825 BUS_SPACE_WRT4_MVFREY2(isr_cause, status); 826 do { 827 cptr = *hba->u.mvfrey.outlist_cptr & 0xff; 828 while (hba->u.mvfrey.outlist_rptr != cptr) { 829 hba->u.mvfrey.outlist_rptr++; 830 if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count) { 831 hba->u.mvfrey.outlist_rptr = 0; 832 } 833 834 _tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val; 
835 hptiop_request_callback_mvfrey(hba, _tag); 836 ret = 2; 837 } 838 } while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff)); 839 } 840 841 if (hba->initialized) { 842 BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010); 843 } 844 845 return ret; 846} 847 848static int hptiop_send_sync_request_itl(struct hpt_iop_hba * hba, 849 u_int32_t req32, u_int32_t millisec) 850{ 851 u_int32_t i; 852 u_int64_t temp64; 853 854 BUS_SPACE_WRT4_ITL(inbound_queue, req32); 855 BUS_SPACE_RD4_ITL(outbound_intstatus); 856 857 for (i = 0; i < millisec; i++) { 858 hptiop_intr_itl(hba); 859 bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 + 860 offsetof(struct hpt_iop_request_header, context), 861 (u_int32_t *)&temp64, 2); 862 if (temp64) 863 return 0; 864 DELAY(1000); 865 } 866 867 return -1; 868} 869 870static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba, 871 void *req, u_int32_t millisec) 872{ 873 u_int32_t i; 874 u_int64_t phy_addr; 875 hba->config_done = 0; 876 877 phy_addr = hba->ctlcfgcmd_phy | 878 (u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT; 879 ((struct hpt_iop_request_get_config *)req)->header.flags |= 880 IOP_REQUEST_FLAG_SYNC_REQUEST | 881 IOP_REQUEST_FLAG_OUTPUT_CONTEXT; 882 hptiop_mv_inbound_write(phy_addr, hba); 883 BUS_SPACE_RD4_MV0(outbound_intmask); 884 885 for (i = 0; i < millisec; i++) { 886 hptiop_intr_mv(hba); 887 if (hba->config_done) 888 return 0; 889 DELAY(1000); 890 } 891 return -1; 892} 893 894static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba *hba, 895 void *req, u_int32_t millisec) 896{ 897 u_int32_t i, index; 898 u_int64_t phy_addr; 899 struct hpt_iop_request_header *reqhdr = 900 (struct hpt_iop_request_header *)req; 901 902 hba->config_done = 0; 903 904 phy_addr = hba->ctlcfgcmd_phy; 905 reqhdr->flags = IOP_REQUEST_FLAG_SYNC_REQUEST 906 | IOP_REQUEST_FLAG_OUTPUT_CONTEXT 907 | IOP_REQUEST_FLAG_ADDR_BITS 908 | ((phy_addr >> 16) & 0xffff0000); 909 reqhdr->context = ((phy_addr & 0xffffffff) << 32 ) 910 | IOPMU_QUEUE_ADDR_HOST_BIT | 
reqhdr->type; 911 912 hba->u.mvfrey.inlist_wptr++; 913 index = hba->u.mvfrey.inlist_wptr & 0x3fff; 914 915 if (index == hba->u.mvfrey.list_count) { 916 index = 0; 917 hba->u.mvfrey.inlist_wptr &= ~0x3fff; 918 hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE; 919 } 920 921 hba->u.mvfrey.inlist[index].addr = phy_addr; 922 hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4; 923 924 BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr); 925 BUS_SPACE_RD4_MVFREY2(inbound_write_ptr); 926 927 for (i = 0; i < millisec; i++) { 928 hptiop_intr_mvfrey(hba); 929 if (hba->config_done) 930 return 0; 931 DELAY(1000); 932 } 933 return -1; 934} 935 936static int hptiop_send_sync_msg(struct hpt_iop_hba *hba, 937 u_int32_t msg, u_int32_t millisec) 938{ 939 u_int32_t i; 940 941 hba->msg_done = 0; 942 hba->ops->post_msg(hba, msg); 943 944 for (i=0; i<millisec; i++) { 945 hba->ops->iop_intr(hba); 946 if (hba->msg_done) 947 break; 948 DELAY(1000); 949 } 950 951 return hba->msg_done? 0 : -1; 952} 953 954static int hptiop_get_config_itl(struct hpt_iop_hba * hba, 955 struct hpt_iop_request_get_config * config) 956{ 957 u_int32_t req32; 958 959 config->header.size = sizeof(struct hpt_iop_request_get_config); 960 config->header.type = IOP_REQUEST_TYPE_GET_CONFIG; 961 config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST; 962 config->header.result = IOP_RESULT_PENDING; 963 config->header.context = 0; 964 965 req32 = BUS_SPACE_RD4_ITL(inbound_queue); 966 if (req32 == IOPMU_QUEUE_EMPTY) 967 return -1; 968 969 bus_space_write_region_4(hba->bar0t, hba->bar0h, 970 req32, (u_int32_t *)config, 971 sizeof(struct hpt_iop_request_header) >> 2); 972 973 if (hptiop_send_sync_request_itl(hba, req32, 20000)) { 974 KdPrint(("hptiop: get config send cmd failed")); 975 return -1; 976 } 977 978 bus_space_read_region_4(hba->bar0t, hba->bar0h, 979 req32, (u_int32_t *)config, 980 sizeof(struct hpt_iop_request_get_config) >> 2); 981 982 BUS_SPACE_WRT4_ITL(outbound_queue, req32); 983 984 
	return 0;
}

/*
 * Fetch the firmware configuration block from a Marvell-based (MV) IOP.
 * Uses the pre-allocated DMA control/config buffer (hba->ctlcfg_ptr)
 * instead of bus_space window writes. Returns 0 on success, -1 on error.
 */
static int hptiop_get_config_mv(struct hpt_iop_hba * hba,
				struct hpt_iop_request_get_config * config)
{
	struct hpt_iop_request_get_config *req;

	if (!(req = hba->ctlcfg_ptr))
		return -1;

	req->header.flags = 0;
	req->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
	req->header.size = sizeof(struct hpt_iop_request_get_config);
	req->header.result = IOP_RESULT_PENDING;
	req->header.context = MVIOP_CMD_TYPE_GET_CONFIG;

	if (hptiop_send_sync_request_mv(hba, req, 20000)) {
		KdPrint(("hptiop: get config send cmd failed"));
		return -1;
	}

	/* Firmware wrote its reply into the shared buffer; copy it out. */
	*config = *req;
	return 0;
}

/*
 * Fetch the firmware configuration from an MVFrey IOP. The firmware
 * deposits a GET_CONFIG structure directly into BAR0 at attach time
 * (hba->u.mvfrey.config); we only validate and field-copy it here —
 * no request needs to be posted. Returns 0 on success, -1 if the
 * structure in BAR0 does not look like a GET_CONFIG reply.
 */
static int hptiop_get_config_mvfrey(struct hpt_iop_hba * hba,
				struct hpt_iop_request_get_config * config)
{
	struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;

	if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
	    info->header.type != IOP_REQUEST_TYPE_GET_CONFIG) {
		KdPrint(("hptiop: header size %x/%x type %x/%x",
			 info->header.size, (int)sizeof(struct hpt_iop_request_get_config),
			 info->header.type, IOP_REQUEST_TYPE_GET_CONFIG));
		return -1;
	}

	/* Field-by-field copy; `info` lives in device memory (BAR0). */
	config->interface_version = info->interface_version;
	config->firmware_version = info->firmware_version;
	config->max_requests = info->max_requests;
	config->request_size = info->request_size;
	config->max_sg_count = info->max_sg_count;
	config->data_transfer_length = info->data_transfer_length;
	config->alignment_mask = info->alignment_mask;
	config->max_devices = info->max_devices;
	config->sdram_size = info->sdram_size;

	KdPrint(("hptiop: maxreq %x reqsz %x datalen %x maxdev %x sdram %x",
		 config->max_requests, config->request_size,
		 config->data_transfer_length, config->max_devices,
		 config->sdram_size));

	return 0;
}

/*
 * Push a SET_CONFIG request to an ITL IOP through a frame popped from the
 * inbound queue. The whole structure is written (unlike get_config, which
 * writes only the header). Returns 0 on success, -1 on error/timeout.
 */
static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	u_int32_t req32;

	req32 = BUS_SPACE_RD4_ITL(inbound_queue);

	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	config->header.size = sizeof(struct hpt_iop_request_set_config);
	config->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
	config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
	config->header.result = IOP_RESULT_PENDING;
	config->header.context = 0;

	bus_space_write_region_4(hba->bar0t, hba->bar0h, req32,
				(u_int32_t *)config,
				sizeof(struct hpt_iop_request_set_config) >> 2);

	if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
		KdPrint(("hptiop: set config send cmd failed"));
		return -1;
	}

	/* Hand the frame back to firmware. */
	BUS_SPACE_WRT4_ITL(outbound_queue, req32);

	return 0;
}

/*
 * Push a SET_CONFIG request to an MV IOP via the shared control/config
 * DMA buffer. The payload (everything past the header) is copied in
 * first, then the header is stamped. Returns 0 on success, -1 on error.
 */
static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req;

	if (!(req = hba->ctlcfg_ptr))
		return -1;

	/* Copy only the payload; the header is rebuilt below. */
	memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
		(u_int8_t *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	req->header.flags = 0;
	req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
	req->header.size = sizeof(struct hpt_iop_request_set_config);
	req->header.result = IOP_RESULT_PENDING;
	req->header.context = MVIOP_CMD_TYPE_SET_CONFIG;

	if (hptiop_send_sync_request_mv(hba, req, 20000)) {
		KdPrint(("hptiop: set config send cmd failed"));
		return -1;
	}

	return 0;
}

/*
 * Push a SET_CONFIG request to an MVFrey IOP via the shared control/config
 * DMA buffer. Returns 0 on success, -1 on error/timeout.
 *
 * NOTE(review): unlike the MV variant this does not clear header.flags or
 * set header.context before posting — presumably
 * hptiop_send_sync_request_mvfrey() fills those in; confirm against that
 * function (outside this chunk).
 */
static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req;

	if (!(req = hba->ctlcfg_ptr))
		return -1;

	memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
		(u_int8_t *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
	req->header.size = sizeof(struct hpt_iop_request_set_config);
	req->header.result = IOP_RESULT_PENDING;

	if (hptiop_send_sync_request_mvfrey(hba, req, 20000)) {
		KdPrint(("hptiop: set config send cmd failed"));
		return -1;
	}

	return 0;
}

/*
 * Build and post an IOCTL_COMMAND request into ITL frame `req32` (the
 * caller already copied the input buffer into the frame) and sleep until
 * the firmware clears header.context. On timeout the IOP is reset and we
 * keep waiting. Always returns 0 once the context is clear.
 */
static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
				u_int32_t req32,
				struct hpt_iop_ioctl_param *pParams)
{
	u_int64_t temp64;
	struct hpt_iop_request_ioctl_command req;

	/* In-buffer (dword padded) + out-buffer must fit inside one frame. */
	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
			(hba->max_request_size -
			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
		device_printf(hba->pcidev, "request size beyond max value");
		return -1;
	}

	req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
		+ pParams->nInBufferSize;
	req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
	req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
	req.header.result = IOP_RESULT_PENDING;
	/* context doubles as the wakeup channel (kernel VA of the frame). */
	req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu;
	req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
	req.inbuf_size = pParams->nInBufferSize;
	req.outbuf_size = pParams->nOutBufferSize;
	req.bytes_returned = 0;

	/* Write header + ioctl fields up to (not including) the data buffer. */
	bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req,
		offsetof(struct hpt_iop_request_ioctl_command, buf)>>2);

	hptiop_lock_adapter(hba);

	BUS_SPACE_WRT4_ITL(inbound_queue, req32);
	/* Read-back flushes the posted queue write. */
	BUS_SPACE_RD4_ITL(outbound_intstatus);

	/* Firmware zeroes header.context (2 dwords) when the ioctl is done. */
	bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
		offsetof(struct hpt_iop_request_ioctl_command, header.context),
		(u_int32_t *)&temp64, 2);
	while (temp64) {
		if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32),
				PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0)
			break;
		/* Sleep timed out: reset the IOP and re-check the context. */
		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
		bus_space_read_region_4(hba->bar0t, hba->bar0h,req32 +
			offsetof(struct hpt_iop_request_ioctl_command,
				header.context),
			(u_int32_t *)&temp64, 2);
	}

	hptiop_unlock_adapter(hba);
	return 0;
}

/*
 * Byte-wise copy from user space into the controller's BAR0 window.
 * Returns 0 on success, -1 if copyin() faults.
 */
static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus,
				void *user, int size)
{
	unsigned char byte;
	int i;

	for (i=0; i<size; i++) {
		if (copyin((u_int8_t *)user + i, &byte, 1))
			return -1;
		bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
	}

	return 0;
}

/*
 * Byte-wise copy from the controller's BAR0 window out to user space.
 * Returns 0 on success, -1 if copyout() faults.
 */
static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus,
				void *user, int size)
{
	unsigned char byte;
	int i;

	for (i=0; i<size; i++) {
		byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i);
		if (copyout(&byte, (u_int8_t *)user + i, 1))
			return -1;
	}

	return 0;
}

/*
 * Management-ioctl entry for ITL adapters: validate the magic, grab a
 * request frame, stage the user's input buffer in it, run the ioctl
 * synchronously and copy results back. The frame is always returned to
 * the outbound queue, on success and failure alike.
 * Returns 0 on success, EFAULT otherwise.
 */
static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param * pParams)
{
	u_int32_t req32;
	u_int32_t result;

	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
		(pParams->Magic != HPT_IOCTL_MAGIC32))
		return EFAULT;

	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return EFAULT;

	if (pParams->nInBufferSize)
		if (hptiop_bus_space_copyin(hba, req32 +
			offsetof(struct hpt_iop_request_ioctl_command, buf),
			(void *)pParams->lpInBuffer, pParams->nInBufferSize))
			goto invalid;

	if (hptiop_post_ioctl_command_itl(hba, req32, pParams))
		goto invalid;

	result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 +
			offsetof(struct hpt_iop_request_ioctl_command,
				header.result));

	if (result == IOP_RESULT_SUCCESS) {
		/* Output data follows the dword-padded input in the frame. */
		if (pParams->nOutBufferSize)
			if (hptiop_bus_space_copyout(hba, req32 +
				offsetof(struct hpt_iop_request_ioctl_command, buf) +
					((pParams->nInBufferSize + 3) & ~3),
				(void *)pParams->lpOutBuffer, pParams->nOutBufferSize))
				goto invalid;

		if (pParams->lpBytesReturned) {
			/*
			 * NOTE(review): copies sizeof(unsigned long) (8 bytes
			 * on LP64) although bytes_returned is a u_int32_t and
			 * the MV/MVFrey paths copy sizeof(u_int32_t) — looks
			 * inconsistent; verify what the userland tool expects
			 * before changing.
			 */
			if (hptiop_bus_space_copyout(hba, req32 +
				offsetof(struct hpt_iop_request_ioctl_command, bytes_returned),
				(void *)pParams->lpBytesReturned, sizeof(unsigned long)))
				goto invalid;
		}

		BUS_SPACE_WRT4_ITL(outbound_queue, req32);

		return 0;
	} else{
invalid:
		/* Return the frame even on failure so it is not leaked. */
		BUS_SPACE_WRT4_ITL(outbound_queue, req32);

		return EFAULT;
	}
}

/*
 * Build and post an IOCTL_COMMAND on an MV adapter using the shared
 * control/config DMA buffer, then sleep until the completion handler
 * sets hba->config_done. On sleep timeout the IOP is reset and the wait
 * continues. Returns 0 once woken, -1 if the request is oversized.
 */
static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_ioctl_command *req,
				struct hpt_iop_ioctl_param *pParams)
{
	u_int64_t req_phy;
	int size = 0;

	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
			(hba->max_request_size -
			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
		device_printf(hba->pcidev, "request size beyond max value");
		return -1;
	}

	req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
	req->inbuf_size = pParams->nInBufferSize;
	req->outbuf_size = pParams->nOutBufferSize;
	req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
					+ pParams->nInBufferSize;
	req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL;
	req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
	req->header.result = IOP_RESULT_PENDING;
	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
	/*
	 * The low bits of the queue entry encode a size hint (capped at 3);
	 * presumably a firmware convention for 256-byte units — TODO confirm
	 * against the MV message-unit spec.
	 */
	size = req->header.size >> 8;
	size = size > 3 ? 3 : size;
	req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
	hptiop_mv_inbound_write(req_phy, hba);

	/* Flush the posted write before sleeping. */
	BUS_SPACE_RD4_MV0(outbound_intmask);

	while (hba->config_done == 0) {
		if (hptiop_sleep(hba, req, PPAUSE,
			"hptctl", HPT_OSM_TIMEOUT)==0)
			continue;
		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
	}
	return 0;
}

/*
 * Management-ioctl entry for MV adapters. Same contract as the ITL
 * variant but the data moves through the coherent ctlcfg DMA buffer
 * instead of a BAR0 window. Returns 0 on success, EFAULT otherwise.
 */
static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams)
{
	struct hpt_iop_request_ioctl_command *req;

	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
		(pParams->Magic != HPT_IOCTL_MAGIC32))
		return EFAULT;

	req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
	hba->config_done = 0;
	hptiop_lock_adapter(hba);
	if (pParams->nInBufferSize)
		if (copyin((void *)pParams->lpInBuffer,
				req->buf, pParams->nInBufferSize))
			goto invalid;
	if (hptiop_post_ioctl_command_mv(hba, req, pParams))
		goto invalid;

	if (hba->config_done == 1) {
		/* Output data follows the dword-padded input buffer. */
		if (pParams->nOutBufferSize)
			if (copyout(req->buf +
				((pParams->nInBufferSize + 3) & ~3),
				(void *)pParams->lpOutBuffer,
				pParams->nOutBufferSize))
				goto invalid;

		if (pParams->lpBytesReturned)
			if (copyout(&req->bytes_returned,
				(void*)pParams->lpBytesReturned,
				sizeof(u_int32_t)))
				goto invalid;
		hptiop_unlock_adapter(hba);
		return 0;
	} else{
invalid:
		hptiop_unlock_adapter(hba);
		return EFAULT;
	}
}

/*
 * Build and post an IOCTL_COMMAND on an MVFrey adapter through the
 * circular inbound list, then sleep until the completion handler sets
 * hba->config_done (resetting the IOP on each sleep timeout).
 * Returns 0 once woken, -1 if the request is oversized.
 */
static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_ioctl_command *req,
				struct hpt_iop_ioctl_param *pParams)
{
	u_int64_t phy_addr;
	u_int32_t index;

	phy_addr = hba->ctlcfgcmd_phy;

	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
			(hba->max_request_size -
			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
		device_printf(hba->pcidev, "request size beyond max value");
		return -1;
	}

	req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
	req->inbuf_size = pParams->nInBufferSize;
	req->outbuf_size = pParams->nOutBufferSize;
	req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
					+ pParams->nInBufferSize;

	req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
	req->header.result = IOP_RESULT_PENDING;

	/*
	 * NOTE(review): the MVFrey ABI splits the 64-bit request address
	 * between flags (ADDR_BITS carries bits 47:32 in the flags' high
	 * half) and context (low 32 bits shifted into the high half).
	 * Looks deliberate but verify against the MVFrey firmware interface
	 * before touching.
	 */
	req->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST
						| IOP_REQUEST_FLAG_OUTPUT_CONTEXT
						| IOP_REQUEST_FLAG_ADDR_BITS
						| ((phy_addr >> 16) & 0xffff0000);
	req->header.context = ((phy_addr & 0xffffffff) << 32 )
						| IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;

	/* Advance the inbound write pointer, wrapping with the toggle bit. */
	hba->u.mvfrey.inlist_wptr++;
	index = hba->u.mvfrey.inlist_wptr & 0x3fff;

	if (index == hba->u.mvfrey.list_count) {
		index = 0;
		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
	}

	hba->u.mvfrey.inlist[index].addr = phy_addr;
	hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;

	/* Doorbell; read-back flushes the posted write. */
	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);

	while (hba->config_done == 0) {
		if (hptiop_sleep(hba, req, PPAUSE,
			"hptctl", HPT_OSM_TIMEOUT)==0)
			continue;
		/* Sleep timed out: reset the IOP and wait again. */
		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
	}
	return 0;
}

/*
 * Management-ioctl entry for MVFrey adapters; identical flow to
 * hptiop_do_ioctl_mv() but posts via the MVFrey inbound list.
 * Returns 0 on success, EFAULT otherwise.
 */
static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams)
{
	struct hpt_iop_request_ioctl_command *req;

	if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
		(pParams->Magic != HPT_IOCTL_MAGIC32))
		return EFAULT;

	req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
	hba->config_done = 0;
	hptiop_lock_adapter(hba);
	if (pParams->nInBufferSize)
		if (copyin((void *)pParams->lpInBuffer,
				req->buf, pParams->nInBufferSize))
			goto invalid;
	if (hptiop_post_ioctl_command_mvfrey(hba, req, pParams))
		goto invalid;

	if (hba->config_done == 1) {
		/* Output data follows the dword-padded input buffer. */
		if (pParams->nOutBufferSize)
			if (copyout(req->buf +
				((pParams->nInBufferSize + 3) & ~3),
				(void *)pParams->lpOutBuffer,
				pParams->nOutBufferSize))
				goto invalid;

		if (pParams->lpBytesReturned)
			if (copyout(&req->bytes_returned,
				(void*)pParams->lpBytesReturned,
				sizeof(u_int32_t)))
				goto invalid;
		hptiop_unlock_adapter(hba);
		return 0;
	} else{
invalid:
		hptiop_unlock_adapter(hba);
		return EFAULT;
	}
}

/*
 * Ask CAM to rescan the whole bus behind our SIM (wildcard target/LUN).
 * Returns 0, ENOMEM if no CCB could be allocated, or EIO if the path
 * could not be created. xpt_rescan() consumes the CCB.
 */
static int hptiop_rescan_bus(struct hpt_iop_hba * hba)
{
	union ccb *ccb;

	if ((ccb = xpt_alloc_ccb()) == NULL)
		return(ENOMEM);
	if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(hba->sim),
		CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(ccb);
		return(EIO);
	}
	xpt_rescan(ccb);
	return(0);
}

/* Forward declarations of busdma load callbacks defined later in the file. */
static bus_dmamap_callback_t hptiop_map_srb;
static bus_dmamap_callback_t hptiop_post_scsi_command;
static bus_dmamap_callback_t hptiop_mv_map_ctlcfg;
static bus_dmamap_callback_t hptiop_mvfrey_map_ctlcfg;

/*
 * Map the ITL adapter's single memory BAR (rid 0x10) and record the
 * bus tag/handle plus the kernel-virtual message-unit pointer.
 * Returns 0 on success, -1 on failure (resources released).
 */
static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
{
	hba->bar0_rid = 0x10;
	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);

	if (hba->bar0_res == NULL) {
		device_printf(hba->pcidev,
			"failed to get iop base adrress.\n");	/* sic: typo lives in the message */
		return -1;
	}
	hba->bar0t = rman_get_bustag(hba->bar0_res);
	hba->bar0h = rman_get_bushandle(hba->bar0_res);
	hba->u.itl.mu = (struct hpt_iopmu_itl *)
				rman_get_virtual(hba->bar0_res);

	if (!hba->u.itl.mu) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar0_rid, hba->bar0_res);
		device_printf(hba->pcidev, "alloc mem res failed\n");
		return -1;
	}

	return 0;
}

/*
 * Map the MV adapter's two memory BARs: BAR0 (rid 0x10, register block)
 * and BAR2 (rid 0x18, message unit). On any failure every resource
 * acquired so far is released. Returns 0 on success, -1 on failure.
 */
static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
{
	hba->bar0_rid = 0x10;
	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);

	if (hba->bar0_res == NULL) {
		device_printf(hba->pcidev, "failed to get iop bar0.\n");
		return -1;
	}
	hba->bar0t = rman_get_bustag(hba->bar0_res);
	hba->bar0h = rman_get_bushandle(hba->bar0_res);
	hba->u.mv.regs = (struct hpt_iopmv_regs *)
				rman_get_virtual(hba->bar0_res);

	if (!hba->u.mv.regs) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar0_rid, hba->bar0_res);
		device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
		return -1;
	}

	hba->bar2_rid = 0x18;
	hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
			SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);

	if (hba->bar2_res == NULL) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar0_rid, hba->bar0_res);
		device_printf(hba->pcidev, "failed to get iop bar2.\n");
		return -1;
	}

	hba->bar2t = rman_get_bustag(hba->bar2_res);
	hba->bar2h = rman_get_bushandle(hba->bar2_res);
	hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res);

	if (!hba->u.mv.mu) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar0_rid, hba->bar0_res);
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar2_rid, hba->bar2_res);
		device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
		return -1;
	}

	return 0;
}

/*
 * Map the MVFrey adapter's two memory BARs: BAR0 (rid 0x10, holds the
 * firmware-deposited GET_CONFIG block) and BAR2 (rid 0x18, message unit).
 * Returns 0 on success, -1 on failure (resources released).
 */
static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba)
{
	hba->bar0_rid = 0x10;
	hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
			SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);

	if (hba->bar0_res == NULL) {
		device_printf(hba->pcidev, "failed to get iop bar0.\n");
		return -1;
	}
	hba->bar0t = rman_get_bustag(hba->bar0_res);
	hba->bar0h = rman_get_bushandle(hba->bar0_res);
	hba->u.mvfrey.config = (struct hpt_iop_request_get_config *)
				rman_get_virtual(hba->bar0_res);

	if (!hba->u.mvfrey.config) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar0_rid, hba->bar0_res);
		device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
		return -1;
	}

	hba->bar2_rid = 0x18;
	hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
			SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);

	if (hba->bar2_res == NULL) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar0_rid, hba->bar0_res);
		device_printf(hba->pcidev, "failed to get iop bar2.\n");
		return -1;
	}

	hba->bar2t = rman_get_bustag(hba->bar2_res);
	hba->bar2h = rman_get_bushandle(hba->bar2_res);
	hba->u.mvfrey.mu =
		(struct hpt_iopmu_mvfrey *)rman_get_virtual(hba->bar2_res);

	if (!hba->u.mvfrey.mu) {
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar0_rid, hba->bar0_res);
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar2_rid, hba->bar2_res);
		device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
		return -1;
	}

	return 0;
}

/* Release the ITL adapter's BAR0 mapping (if it was acquired). */
static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
{
	if (hba->bar0_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar0_rid, hba->bar0_res);
}

/* Release the MV adapter's BAR0/BAR2 mappings (if acquired). */
static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
{
	if (hba->bar0_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar0_rid, hba->bar0_res);
	if (hba->bar2_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar2_rid, hba->bar2_res);
}

/* Release the MVFrey adapter's BAR0/BAR2 mappings (if acquired). */
static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba *hba)
{
	if (hba->bar0_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar0_rid, hba->bar0_res);
	if (hba->bar2_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
			hba->bar2_rid, hba->bar2_res);
}

/*
 * Allocate the MV adapter's coherent control/config DMA buffer
 * (0x800 - 0x8 bytes, 32-bit addressable). The physical address is
 * captured by the hptiop_mv_map_ctlcfg load callback.
 * Returns 0 on success, -1 on failure (tag/memory cleaned up).
 */
static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
{
	if (bus_dma_tag_create(hba->parent_dmat,
			1,
			0,
			BUS_SPACE_MAXADDR_32BIT,
			BUS_SPACE_MAXADDR,
			NULL, NULL,
			0x800 - 0x8,
			1,
			BUS_SPACE_MAXSIZE_32BIT,
			BUS_DMA_ALLOCNOW,
#if __FreeBSD_version > 502000
			NULL,
			NULL,
#endif
			&hba->ctlcfg_dmat)) {
		device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
		return -1;
	}

	if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
#if __FreeBSD_version>501000
		BUS_DMA_WAITOK | BUS_DMA_COHERENT,
#else
		BUS_DMA_WAITOK,
#endif
		&hba->ctlcfg_dmamap) != 0) {
			device_printf(hba->pcidev,
					"bus_dmamem_alloc failed!\n");
			bus_dma_tag_destroy(hba->ctlcfg_dmat);
			return -1;
	}

	if (bus_dmamap_load(hba->ctlcfg_dmat,
			hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
			MVIOP_IOCTLCFG_SIZE,
			hptiop_mv_map_ctlcfg, hba, 0)) {
		device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
		if (hba->ctlcfg_dmat)
			bus_dmamem_free(hba->ctlcfg_dmat,
				hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
		bus_dma_tag_destroy(hba->ctlcfg_dmat);
		return -1;
	}

	return 0;
}

/*
 * Allocate the MVFrey adapter's internal DMA region: a 0x800-byte
 * control/config area followed by the inbound list, the outbound list,
 * and the outbound-copy pointer word. The list depth is read from the
 * high half of inbound_conf_ctl. Returns 0 on success, -1 on failure.
 */
static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba)
{
	u_int32_t list_count = BUS_SPACE_RD4_MVFREY2(inbound_conf_ctl);

	list_count >>= 16;

	if (list_count == 0) {
		return -1;
	}

	hba->u.mvfrey.list_count = list_count;
	hba->u.mvfrey.internal_mem_size = 0x800
		+ list_count * sizeof(struct mvfrey_inlist_entry)
		+ list_count * sizeof(struct mvfrey_outlist_entry)
		+ sizeof(int);
	if (bus_dma_tag_create(hba->parent_dmat,
			1,
			0,
			BUS_SPACE_MAXADDR_32BIT,
			BUS_SPACE_MAXADDR,
			NULL, NULL,
			hba->u.mvfrey.internal_mem_size,
			1,
			BUS_SPACE_MAXSIZE_32BIT,
			BUS_DMA_ALLOCNOW,
#if __FreeBSD_version > 502000
			NULL,
			NULL,
#endif
			&hba->ctlcfg_dmat)) {
		device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
		return -1;
	}

	if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
#if __FreeBSD_version>501000
		BUS_DMA_WAITOK | BUS_DMA_COHERENT,
#else
		BUS_DMA_WAITOK,
#endif
		&hba->ctlcfg_dmamap) != 0) {
			device_printf(hba->pcidev,
					"bus_dmamem_alloc failed!\n");
			bus_dma_tag_destroy(hba->ctlcfg_dmat);
			return -1;
	}

	if (bus_dmamap_load(hba->ctlcfg_dmat,
			hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
			hba->u.mvfrey.internal_mem_size,
			hptiop_mvfrey_map_ctlcfg, hba, 0)) {
		device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
		if (hba->ctlcfg_dmat) {
			bus_dmamem_free(hba->ctlcfg_dmat,
					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
			bus_dma_tag_destroy(hba->ctlcfg_dmat);
		}
		return -1;
	}

	return 0;
}

/* ITL adapters have no internal DMA buffer; nothing to free. */
static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba) {
	return 0;
}

/* Unload, free and destroy the MV control/config DMA buffer, if any. */
static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
{
	if (hba->ctlcfg_dmat) {
		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
		bus_dmamem_free(hba->ctlcfg_dmat,
				hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
		bus_dma_tag_destroy(hba->ctlcfg_dmat);
	}

	return 0;
}

/* Unload, free and destroy the MVFrey internal DMA region, if any. */
static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba)
{
	if (hba->ctlcfg_dmat) {
		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
		bus_dmamem_free(hba->ctlcfg_dmat,
				hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
		bus_dma_tag_destroy(hba->ctlcfg_dmat);
	}

	return 0;
}

/*
 * Reset the MVFrey communication channel: issue RESET_COMM, give the MCU
 * 100ms, program the inbound/outbound list base addresses into the MU,
 * and re-arm the circular-list pointers with the toggle bit set.
 * Returns 0 on success, -1 if the reset message times out.
 */
static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba *hba)
{
	u_int32_t i = 100;

	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
		return -1;

	/* wait 100ms for MCU ready */
	while(i--) {
		DELAY(1000);
	}

	/*
	 * The double 16-bit shift extracts the high dword without invoking
	 * undefined behaviour when the physical address type is 32 bits.
	 */
	BUS_SPACE_WRT4_MVFREY2(inbound_base,
			hba->u.mvfrey.inlist_phy & 0xffffffff);
	BUS_SPACE_WRT4_MVFREY2(inbound_base_high,
			(hba->u.mvfrey.inlist_phy >> 16) >> 16);

	BUS_SPACE_WRT4_MVFREY2(outbound_base,
			hba->u.mvfrey.outlist_phy & 0xffffffff);
	BUS_SPACE_WRT4_MVFREY2(outbound_base_high,
			(hba->u.mvfrey.outlist_phy >> 16) >> 16);

	BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base,
			hba->u.mvfrey.outlist_cptr_phy & 0xffffffff);
	BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base_high,
			(hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16);

	hba->u.mvfrey.inlist_wptr = (hba->u.mvfrey.list_count - 1)
		| CL_POINTER_TOGGLE;
	*hba->u.mvfrey.outlist_cptr = (hba->u.mvfrey.list_count - 1)
		| CL_POINTER_TOGGLE;
	hba->u.mvfrey.outlist_rptr = hba->u.mvfrey.list_count - 1;

	return 0;
}

/*
 * CAM driver interface
 */
static device_method_t driver_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,     hptiop_probe),
	DEVMETHOD(device_attach,    hptiop_attach),
	DEVMETHOD(device_detach,    hptiop_detach),
	DEVMETHOD(device_shutdown,  hptiop_shutdown),
	{ 0, 0 }
};

/* Per-family operation tables; probe() picks one based on the PCI ID. */
static struct hptiop_adapter_ops hptiop_itl_ops = {
	.family	           = INTEL_BASED_IOP,
	.iop_wait_ready    = hptiop_wait_ready_itl,
	.internal_memalloc = 0,		/* ITL needs no internal DMA buffer */
	.internal_memfree  = hptiop_internal_memfree_itl,
	.alloc_pci_res     = hptiop_alloc_pci_res_itl,
	.release_pci_res   = hptiop_release_pci_res_itl,
	.enable_intr       = hptiop_enable_intr_itl,
	.disable_intr      = hptiop_disable_intr_itl,
	.get_config        = hptiop_get_config_itl,
	.set_config        = hptiop_set_config_itl,
	.iop_intr          = hptiop_intr_itl,
	.post_msg          = hptiop_post_msg_itl,
	.post_req          = hptiop_post_req_itl,
	.do_ioctl          = hptiop_do_ioctl_itl,
	.reset_comm        = 0,
};

static struct hptiop_adapter_ops hptiop_mv_ops = {
	.family	           = MV_BASED_IOP,
	.iop_wait_ready    = hptiop_wait_ready_mv,
	.internal_memalloc = hptiop_internal_memalloc_mv,
	.internal_memfree  = hptiop_internal_memfree_mv,
	.alloc_pci_res     = hptiop_alloc_pci_res_mv,
	.release_pci_res   = hptiop_release_pci_res_mv,
	.enable_intr       = hptiop_enable_intr_mv,
	.disable_intr      = hptiop_disable_intr_mv,
	.get_config        = hptiop_get_config_mv,
	.set_config        = hptiop_set_config_mv,
	.iop_intr          = hptiop_intr_mv,
	.post_msg          = hptiop_post_msg_mv,
	.post_req          = hptiop_post_req_mv,
	.do_ioctl          = hptiop_do_ioctl_mv,
	.reset_comm        = 0,
};

static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
	.family	           = MVFREY_BASED_IOP,
	.iop_wait_ready    = hptiop_wait_ready_mvfrey,
	.internal_memalloc = hptiop_internal_memalloc_mvfrey,
	.internal_memfree  = hptiop_internal_memfree_mvfrey,
	.alloc_pci_res     = hptiop_alloc_pci_res_mvfrey,
	.release_pci_res   = hptiop_release_pci_res_mvfrey,
	.enable_intr       = hptiop_enable_intr_mvfrey,
	.disable_intr      = hptiop_disable_intr_mvfrey,
	.get_config        = hptiop_get_config_mvfrey,
	.set_config        = hptiop_set_config_mvfrey,
	.iop_intr          = hptiop_intr_mvfrey,
	.post_msg          = hptiop_post_msg_mvfrey,
	.post_req          = hptiop_post_req_mvfrey,
	.do_ioctl          = hptiop_do_ioctl_mvfrey,
	.reset_comm        = hptiop_reset_comm_mvfrey,
};

static driver_t hptiop_pci_driver = {
	driver_name,
	driver_methods,
	sizeof(struct hpt_iop_hba)
};

DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, 0, 0);
MODULE_DEPEND(hptiop, cam, 1, 1, 1);

/*
 * Newbus probe: match HighPoint (vendor 0x1103) device IDs, select the
 * per-family ops table, and stash it in the softc for attach().
 * Returns 0 on a match, ENXIO otherwise.
 */
static int hptiop_probe(device_t dev)
{
	struct hpt_iop_hba *hba;
	u_int32_t id;
	static char buf[256];
	int sas = 0;
	struct hptiop_adapter_ops *ops;

	if (pci_get_vendor(dev) != 0x1103)
		return (ENXIO);

	id = pci_get_device(dev);

	switch (id) {
	case 0x4520:
	case 0x4521:
	case 0x4522:
		sas = 1;
		/* FALLTHROUGH: SAS 45xx models share the MVFrey ops table */
	case 0x3620:
	case 0x3622:
	case 0x3640:
		ops = &hptiop_mvfrey_ops;
		break;
	case 0x4210:
	case 0x4211:
	case 0x4310:
	case 0x4311:
	case 0x4320:
	case 0x4321:
	case 0x4322:
		sas = 1;
		/* FALLTHROUGH: SAS 4xxx models share the ITL ops table */
	case 0x3220:
	case 0x3320:
	case 0x3410:
	case 0x3520:
	case 0x3510:
	case 0x3511:
	case 0x3521:
	case 0x3522:
	case 0x3530:
	case 0x3540:
	case 0x3560:
		ops = &hptiop_itl_ops;
		break;
	case 0x3020:
	case 0x3120:
	case 0x3122:
		ops = &hptiop_mv_ops;
		break;
	default:
		return (ENXIO);
	}

	device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n",
		pci_get_bus(dev), pci_get_slot(dev),
		pci_get_function(dev), pci_get_irq(dev));

	sprintf(buf, "RocketRAID %x %s Controller\n",
		id, sas ? "SAS" : "SATA");
	device_set_desc_copy(dev, buf);

	/* Record the ops table now so attach() can use it immediately. */
	hba = (struct hpt_iop_hba *)device_get_softc(dev);
	bzero(hba, sizeof(struct hpt_iop_hba));
	hba->ops = ops;

	KdPrint(("hba->ops=%p\n", hba->ops));
	return 0;
}

/*
 * Newbus attach: map BARs, wait for the IOP, create the DMA tags
 * (parent / per-I/O / SRB pool), fetch and apply the firmware config,
 * register with CAM, hook the interrupt, start the firmware background
 * task and create the /dev/hptiopN management node.
 * Unwinds everything acquired via the goto-label chain at the bottom.
 * Returns 0 on success, ENXIO on any failure.
 */
static int hptiop_attach(device_t dev)
{
	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
	struct hpt_iop_request_get_config  iop_config;
	struct hpt_iop_request_set_config set_config;
	int rid = 0;
	struct cam_devq *devq;
	struct ccb_setasync ccb;
	u_int32_t unit = device_get_unit(dev);

	device_printf(dev, "%d RocketRAID 3xxx/4xxx controller driver %s\n",
			unit, driver_version);

	KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit,
		pci_get_bus(dev), pci_get_slot(dev),
		pci_get_function(dev), hba->ops));

#if __FreeBSD_version >=440000
	pci_enable_busmaster(dev);
#endif
	hba->pcidev = dev;
	hba->pciunit = unit;

	if (hba->ops->alloc_pci_res(hba))
		return ENXIO;

	if (hba->ops->iop_wait_ready(hba, 2000)) {
		device_printf(dev, "adapter is not ready\n");
		goto release_pci_res;
	}

#if (__FreeBSD_version >= 500000)
	mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF);
#endif

	/* Parent tag: full 64-bit address range, effectively unrestricted. */
	if (bus_dma_tag_create(bus_get_dma_tag(dev),/* PCI parent */
			1,  /* alignment */
			0, /* boundary */
			BUS_SPACE_MAXADDR,  /* lowaddr */
			BUS_SPACE_MAXADDR,  /* highaddr */
			NULL, NULL,         /* filter, filterarg */
			BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
			BUS_SPACE_UNRESTRICTED, /* nsegments */
			BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
			0,      /* flags */
#if __FreeBSD_version>502000
			NULL,   /* lockfunc */
			NULL,       /* lockfuncarg */
#endif
			&hba->parent_dmat   /* tag */))
	{
		device_printf(dev, "alloc parent_dmat failed\n");
		goto release_pci_res;
	}

	/* MV needs its ctlcfg buffer before get_config can run. */
	if (hba->ops->family == MV_BASED_IOP) {
		if (hba->ops->internal_memalloc(hba)) {
			device_printf(dev, "alloc srb_dmat failed\n");
			goto destroy_parent_tag;
		}
	}

	if (hba->ops->get_config(hba, &iop_config)) {
		device_printf(dev, "get iop config failed.\n");
		goto get_config_failed;
	}

	hba->firmware_version = iop_config.firmware_version;
	hba->interface_version = iop_config.interface_version;
	hba->max_requests = iop_config.max_requests;
	hba->max_devices = iop_config.max_devices;
	hba->max_request_size = iop_config.request_size;
	hba->max_sg_count = iop_config.max_sg_count;

	/*
	 * MVFrey sizes its internal lists from the config just read, so its
	 * allocation (and comm reset) happens after get_config, not before.
	 */
	if (hba->ops->family == MVFREY_BASED_IOP) {
		if (hba->ops->internal_memalloc(hba)) {
			device_printf(dev, "alloc srb_dmat failed\n");
			goto destroy_parent_tag;
		}
		if (hba->ops->reset_comm(hba)) {
			device_printf(dev, "reset comm failed\n");
			goto get_config_failed;
		}
	}

	/* Per-I/O tag: 4GB boundary, segment count from firmware config. */
	if (bus_dma_tag_create(hba->parent_dmat,/* parent */
			4,  /* alignment */
			BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
			BUS_SPACE_MAXADDR,  /* lowaddr */
			BUS_SPACE_MAXADDR,  /* highaddr */
			NULL, NULL,         /* filter, filterarg */
			PAGE_SIZE * (hba->max_sg_count-1),  /* maxsize */
			hba->max_sg_count,  /* nsegments */
			0x20000,    /* maxsegsize */
			BUS_DMA_ALLOCNOW,       /* flags */
#if __FreeBSD_version>502000
			busdma_lock_mutex,  /* lockfunc */
			&hba->lock,     /* lockfuncarg */
#endif
			&hba->io_dmat   /* tag */))
	{
		device_printf(dev, "alloc io_dmat failed\n");
		goto get_config_failed;
	}

	/* SRB pool tag: one contiguous 32-bit-addressable allocation. */
	if (bus_dma_tag_create(hba->parent_dmat,/* parent */
			1,  /* alignment */
			0, /* boundary */
			BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
			BUS_SPACE_MAXADDR,  /* highaddr */
			NULL, NULL,         /* filter, filterarg */
			HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20,
			1,  /* nsegments */
			BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
			0,      /* flags */
#if __FreeBSD_version>502000
			NULL,   /* lockfunc */
			NULL,       /* lockfuncarg */
#endif
			&hba->srb_dmat  /* tag */))
	{
		device_printf(dev, "alloc srb_dmat failed\n");
		goto destroy_io_dmat;
	}

	if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
#if __FreeBSD_version>501000
			BUS_DMA_WAITOK | BUS_DMA_COHERENT,
#else
			BUS_DMA_WAITOK,
#endif
			&hba->srb_dmamap) != 0)
	{
		device_printf(dev, "srb bus_dmamem_alloc failed!\n");
		goto destroy_srb_dmat;
	}

	/* +0x20 slack allows hptiop_map_srb to align the pool. */
	if (bus_dmamap_load(hba->srb_dmat,
			hba->srb_dmamap, hba->uncached_ptr,
			(HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20,
			hptiop_map_srb, hba, 0))
	{
		device_printf(dev, "bus_dmamap_load failed!\n");
		goto srb_dmamem_free;
	}

	/* One queue slot is reserved, hence max_requests - 1. */
	if ((devq = cam_simq_alloc(hba->max_requests - 1 )) == NULL) {
		device_printf(dev, "cam_simq_alloc failed\n");
		goto srb_dmamap_unload;
	}

#if __FreeBSD_version <700000
	hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
			hba, unit, hba->max_requests - 1, 1, devq);
#else
	hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
			hba, unit, &Giant, hba->max_requests - 1, 1, devq);
#endif
	if (!hba->sim) {
		device_printf(dev, "cam_sim_alloc failed\n");
		cam_simq_free(devq);
		goto srb_dmamap_unload;
	}
#if __FreeBSD_version <700000
	if (xpt_bus_register(hba->sim, 0) != CAM_SUCCESS)
#else
	if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS)
#endif
	{
		device_printf(dev, "xpt_bus_register failed\n");
		goto free_cam_sim;
	}

	if (xpt_create_path(&hba->path, /*periph */ NULL,
			cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
			CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		device_printf(dev, "xpt_create_path failed\n");
		goto deregister_xpt_bus;
	}

	/* Tell the firmware who we are and our maximum request size. */
	bzero(&set_config, sizeof(set_config));
	set_config.iop_id = unit;
	set_config.vbus_id = cam_sim_path(hba->sim);
	set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE;

	if (hba->ops->set_config(hba, &set_config)) {
		device_printf(dev, "set iop config failed.\n");
		goto free_hba_path;
	}

	/* Register for device arrival/departure async callbacks. */
	xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
	ccb.ccb_h.func_code = XPT_SASYNC_CB;
	ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE);
	ccb.callback = hptiop_async;
	ccb.callback_arg = hba->sim;
	xpt_action((union ccb *)&ccb);

	rid = 0;
	if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ,
			&rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		device_printf(dev, "allocate irq failed!\n");
		goto free_hba_path;
	}

#if __FreeBSD_version <700000
	if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM,
				hptiop_pci_intr, hba, &hba->irq_handle))
#else
	if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM,
				NULL, hptiop_pci_intr, hba, &hba->irq_handle))
#endif
	{
		device_printf(dev, "allocate intr function failed!\n");
		goto free_irq_resource;
	}

	if (hptiop_send_sync_msg(hba,
			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		device_printf(dev, "fail to start background task\n");
		goto teartown_irq_resource;	/* sic: label typo is pre-existing */
	}

	hba->ops->enable_intr(hba);
	hba->initialized = 1;

	/* Management node used by the hptiop userland tools. */
	hba->ioctl_dev = make_dev(&hptiop_cdevsw, unit,
				UID_ROOT, GID_WHEEL /*GID_OPERATOR*/,
				S_IRUSR | S_IWUSR, "%s%d", driver_name, unit);

#if __FreeBSD_version < 503000
	hba->ioctl_dev->si_drv1 = hba;
#endif

	return 0;


	/* Error unwind: strict reverse order of acquisition. */
teartown_irq_resource:
	bus_teardown_intr(dev, hba->irq_res, hba->irq_handle);

free_irq_resource:
	bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res);

free_hba_path:
	xpt_free_path(hba->path);

deregister_xpt_bus:
	xpt_bus_deregister(cam_sim_path(hba->sim));

free_cam_sim:
	cam_sim_free(hba->sim, /*free devq*/ TRUE);

srb_dmamap_unload:
	if (hba->uncached_ptr)
		bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);

srb_dmamem_free:
	if (hba->uncached_ptr)
		bus_dmamem_free(hba->srb_dmat,
			hba->uncached_ptr, hba->srb_dmamap);

destroy_srb_dmat:
	if (hba->srb_dmat)
		bus_dma_tag_destroy(hba->srb_dmat);

destroy_io_dmat:
	if (hba->io_dmat)
		bus_dma_tag_destroy(hba->io_dmat);

get_config_failed:
	/* Safe for ITL too: its internal_memfree is a no-op. */
	hba->ops->internal_memfree(hba);

destroy_parent_tag:
	if (hba->parent_dmat)
		bus_dma_tag_destroy(hba->parent_dmat);

release_pci_res:
	if (hba->ops->release_pci_res)
		hba->ops->release_pci_res(hba);

	return ENXIO;
}

/*
 * Newbus detach: refuse (EBUSY) while any attached device is still in
 * use. (Function continues beyond this chunk.)
 */
static int hptiop_detach(device_t dev)
{
	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
	int i;
	int error = EBUSY;

	hptiop_lock_adapter(hba);
	for (i = 0; i < hba->max_devices; i++)
		if (hptiop_os_query_remove_device(hba, i)) {
			device_printf(dev, "%d file system is 
busy. id=%d",
				hba->pciunit, i);
			goto out;
		}

	if ((error = hptiop_shutdown(dev)) != 0)
		goto out;
	if (hptiop_send_sync_msg(hba,
		IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000))
		goto out;

	hptiop_release_resource(hba);
	error = 0;
out:
	hptiop_unlock_adapter(hba);
	return error;
}

/*
 * Quiesce the adapter prior to detach or system shutdown: refuse while
 * the ioctl device is held open, mask controller interrupts, then ask
 * the firmware to shut down (synchronous message, 60s timeout).
 * Returns 0 on success or EBUSY.
 */
static int hptiop_shutdown(device_t dev)
{
	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);

	int error = 0;

	if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
		device_printf(dev, "%d device is busy", hba->pciunit);
		return EBUSY;
	}

	hba->ops->disable_intr(hba);

	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
		error = EBUSY;

	return error;
}

/*
 * PCI interrupt handler: dispatch to the chip-specific interrupt service
 * routine with the adapter lock held.
 */
static void hptiop_pci_intr(void *arg)
{
	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
	hptiop_lock_adapter(hba);
	hba->ops->iop_intr(hba);
	hptiop_unlock_adapter(hba);
}

/* CAM poll entry point: service the controller as if an interrupt fired. */
static void hptiop_poll(struct cam_sim *sim)
{
	hptiop_pci_intr(cam_sim_softc(sim));
}

/* Async event callback registered with CAM; no events are acted upon. */
static void hptiop_async(void * callback_arg, u_int32_t code,
	struct cam_path * path, void * arg)
{
}

/* Unmask post-queue and message interrupts on Intel-style (ITL) adapters. */
static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
{
	BUS_SPACE_WRT4_ITL(outbound_intmask,
		~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0));
}

/* Unmask post-queue and message interrupts on Marvell (MV) adapters. */
static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
{
	u_int32_t int_mask;

	int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);

	int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE
			| MVIOP_MU_OUTBOUND_INT_MSG;
	BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask);
}

/*
 * Enable doorbell, ISR and PCIe interrupt sources on Marvell Frey
 * adapters.  Each write is followed by a read-back to flush the posted
 * write to the device.
 */
static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba)
{
	BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, CPU_TO_F0_DRBL_MSG_A_BIT);
	BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);

	BUS_SPACE_WRT4_MVFREY2(isr_enable, 0x1);
	BUS_SPACE_RD4_MVFREY2(isr_enable);

	BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
	BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
}

/* Mask post-queue and message interrupts on ITL adapters. */
static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
{
	u_int32_t int_mask;

	int_mask = BUS_SPACE_RD4_ITL(outbound_intmask);

	int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0;
	BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask);
	BUS_SPACE_RD4_ITL(outbound_intstatus);
}

/* Mask post-queue and message interrupts on MV adapters. */
static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
{
	u_int32_t int_mask;
	int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);

	int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG
			| MVIOP_MU_OUTBOUND_INT_POSTQUEUE);
	BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask);
	BUS_SPACE_RD4_MV0(outbound_intmask);
}

/* Disable all interrupt sources on Marvell Frey adapters. */
static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba)
{
	BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, 0);
	BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);

	BUS_SPACE_WRT4_MVFREY2(isr_enable, 0);
	BUS_SPACE_RD4_MVFREY2(isr_enable);

	BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
	BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
}

/*
 * Reset the controller (synchronous RESET message, 60s timeout) and, on
 * success, restart its background task.
 */
static void hptiop_reset_adapter(void *argv)
{
	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)argv;
	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000))
		return;
	hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000);
}

/* Pop a free SRB from the singly-linked free list; NULL if exhausted. */
static void *hptiop_get_srb(struct hpt_iop_hba * hba)
{
	struct hpt_iop_srb * srb;

	if (hba->srb_list) {
		srb = hba->srb_list;
		hba->srb_list = srb->next;
		return srb;
	}

	return NULL;
}

/* Push an SRB back onto the head of the free list. */
static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
{
	srb->next = hba->srb_list;
	hba->srb_list = srb;
}

/* CAM action entry point: dispatch CCBs from the SIM to the controller. */
static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
{
	struct
hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
	struct hpt_iop_srb * srb;
	int error;

	switch (ccb->ccb_h.func_code) {

	case XPT_SCSI_IO:
		hptiop_lock_adapter(hba);
		/* Only LUN 0 of a valid target id is supported, and CDBs
		 * passed by physical address are rejected. */
		if (ccb->ccb_h.target_lun != 0 ||
			ccb->ccb_h.target_id >= hba->max_devices ||
			(ccb->ccb_h.flags & CAM_CDB_PHYS))
		{
			ccb->ccb_h.status = CAM_TID_INVALID;
			xpt_done(ccb);
			goto scsi_done;
		}

		if ((srb = hptiop_get_srb(hba)) == NULL) {
			device_printf(hba->pcidev, "srb allocated failed");
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			xpt_done(ccb);
			goto scsi_done;
		}

		srb->ccb = ccb;
		/* Map the CCB's data; hptiop_post_scsi_command posts the
		 * request once the S/G list is built (possibly deferred). */
		error = bus_dmamap_load_ccb(hba->io_dmat,
				srb->dma_map,
				ccb,
				hptiop_post_scsi_command,
				srb,
				0);

		if (error && error != EINPROGRESS) {
			device_printf(hba->pcidev,
				"%d bus_dmamap_load error %d",
				hba->pciunit, error);
			xpt_freeze_simq(hba->sim, 1);
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			hptiop_free_srb(hba, srb);
			xpt_done(ccb);
			goto scsi_done;
		}

scsi_done:
		hptiop_unlock_adapter(hba);
		return;

	case XPT_RESET_BUS:
		device_printf(hba->pcidev, "reset adapter");
		hptiop_lock_adapter(hba);
		hba->msg_done = 0;
		hptiop_reset_adapter(hba);
		hptiop_unlock_adapter(hba);
		break;

	case XPT_GET_TRAN_SETTINGS:
	case XPT_SET_TRAN_SETTINGS:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;

	case XPT_CALC_GEOMETRY:
#if __FreeBSD_version >= 500000
		cam_calc_geometry(&ccb->ccg, 1);
#else
		ccb->ccg.heads = 255;
		ccb->ccg.secs_per_track = 63;
		ccb->ccg.cylinders = ccb->ccg.volume_size /
		    (ccb->ccg.heads * ccb->ccg.secs_per_track);
		ccb->ccb_h.status = CAM_REQ_CMP;
#endif
		break;

	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = hba->max_devices;
		cpi->max_lun = 0;
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->initiator_id = hba->max_devices;
		cpi->base_transfer_speed = 3300;

		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}

	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}

	xpt_done(ccb);
	return;
}

/*
 * Build and post a SCSI command to an ITL (Intel IOP) adapter.  If the
 * SRB's bus address lies outside the controller's direct-access window
 * (HPT_SRB_FLAG_HIGH_MEM_ACESS), the request is copied into an
 * IOP-supplied slot over the BAR; otherwise it is posted from host
 * memory by physical address.
 */
static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_srb *srb,
				bus_dma_segment_t *segs, int nsegs)
{
	int idx;
	union ccb *ccb = srb->ccb;
	u_int8_t *cdb;

	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
		cdb = ccb->csio.cdb_io.cdb_ptr;
	else
		cdb = ccb->csio.cdb_io.cdb_bytes;

	KdPrint(("ccb=%p %x-%x-%x\n",
		ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2)));

	if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {
		u_int32_t iop_req32;
		struct hpt_iop_request_scsi_command req;

		/* Ask the IOP for a request slot offset in its BAR window. */
		iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue);

		if (iop_req32 == IOPMU_QUEUE_EMPTY) {
			device_printf(hba->pcidev, "invaild req offset\n");
			ccb->ccb_h.status = CAM_BUSY;
			bus_dmamap_unload(hba->io_dmat, srb->dma_map);
			hptiop_free_srb(hba, srb);
			xpt_done(ccb);
			return;
		}

		if (ccb->csio.dxfer_len && nsegs > 0) {
			struct hpt_iopsg *psg = req.sg_list;
			for (idx = 0; idx < nsegs; idx++, psg++) {
				psg->pci_address = (u_int64_t)segs[idx].ds_addr;
				psg->size = segs[idx].ds_len;
				psg->eot = 0;
			}
			/* Mark the last scatter/gather entry. */
			psg[-1].eot = 1;
		}

		bcopy(cdb,
req.cdb, ccb->csio.cdb_len);

		req.header.size =
			offsetof(struct hpt_iop_request_scsi_command, sg_list)
			+ nsegs*sizeof(struct hpt_iopsg);
		req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
		req.header.flags = 0;
		req.header.result = IOP_RESULT_PENDING;
		req.header.context = (u_int64_t)(unsigned long)srb;
		req.dataxfer_length = ccb->csio.dxfer_len;
		req.channel = 0;
		req.target = ccb->ccb_h.target_id;
		req.lun = ccb->ccb_h.target_lun;

		/* Copy the assembled request into the IOP-resident slot. */
		bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
			(u_int8_t *)&req, req.header.size);

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			bus_dmamap_sync(hba->io_dmat,
				srb->dma_map, BUS_DMASYNC_PREREAD);
		}
		else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
			bus_dmamap_sync(hba->io_dmat,
				srb->dma_map, BUS_DMASYNC_PREWRITE);

		BUS_SPACE_WRT4_ITL(inbound_queue,iop_req32);
	} else {
		struct hpt_iop_request_scsi_command *req;

		/* The request is built in place inside the host SRB. */
		req = (struct hpt_iop_request_scsi_command *)srb;
		if (ccb->csio.dxfer_len && nsegs > 0) {
			struct hpt_iopsg *psg = req->sg_list;
			for (idx = 0; idx < nsegs; idx++, psg++) {
				psg->pci_address =
					(u_int64_t)segs[idx].ds_addr;
				psg->size = segs[idx].ds_len;
				psg->eot = 0;
			}
			psg[-1].eot = 1;
		}

		bcopy(cdb, req->cdb, ccb->csio.cdb_len);

		req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
		req->header.result = IOP_RESULT_PENDING;
		req->dataxfer_length = ccb->csio.dxfer_len;
		req->channel = 0;
		req->target = ccb->ccb_h.target_id;
		req->lun = ccb->ccb_h.target_lun;
		req->header.size =
			offsetof(struct hpt_iop_request_scsi_command, sg_list)
			+ nsegs*sizeof(struct hpt_iopsg);
		req->header.context = (u_int64_t)srb->index |
						IOPMU_QUEUE_ADDR_HOST_BIT;
		req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			bus_dmamap_sync(hba->io_dmat,
				srb->dma_map, BUS_DMASYNC_PREREAD);
		}else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			bus_dmamap_sync(hba->io_dmat,
				srb->dma_map, BUS_DMASYNC_PREWRITE);
		}

		/* Newer firmware/interface encodes the request size into the
		 * low bits of the posted address. */
		if (hba->firmware_version > 0x01020000
			|| hba->interface_version > 0x01020000) {
			u_int32_t size_bits;

			if (req->header.size < 256)
				size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
			else if (req->header.size < 512)
				size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
			else
				size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT
						| IOPMU_QUEUE_ADDR_HOST_BIT;

			BUS_SPACE_WRT4_ITL(inbound_queue,
				(u_int32_t)srb->phy_addr | size_bits);
		} else
			BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr
				|IOPMU_QUEUE_ADDR_HOST_BIT);
	}
}

/*
 * Build and post a SCSI command to a Marvell (MV) adapter.  The request
 * is built in the host SRB; its bus address, with the encoded size in
 * the low bits, is written to the inbound queue.
 */
static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_srb *srb,
				bus_dma_segment_t *segs, int nsegs)
{
	int idx, size;
	union ccb *ccb = srb->ccb;
	u_int8_t *cdb;
	struct hpt_iop_request_scsi_command *req;
	u_int64_t req_phy;

	req = (struct hpt_iop_request_scsi_command *)srb;
	req_phy = srb->phy_addr;

	if (ccb->csio.dxfer_len && nsegs > 0) {
		struct hpt_iopsg *psg = req->sg_list;
		for (idx = 0; idx < nsegs; idx++, psg++) {
			psg->pci_address = (u_int64_t)segs[idx].ds_addr;
			psg->size = segs[idx].ds_len;
			psg->eot = 0;
		}
		/* Mark the last scatter/gather entry. */
		psg[-1].eot = 1;
	}
	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
		cdb = ccb->csio.cdb_io.cdb_ptr;
	else
		cdb = ccb->csio.cdb_io.cdb_bytes;

	bcopy(cdb, req->cdb, ccb->csio.cdb_len);
	req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
	req->header.result = IOP_RESULT_PENDING;
	req->dataxfer_length = ccb->csio.dxfer_len;
	req->channel = 0;
	req->target = ccb->ccb_h.target_id;
	req->lun = ccb->ccb_h.target_lun;
	req->header.size = sizeof(struct hpt_iop_request_scsi_command)
				- sizeof(struct
hpt_iopsg)
				+ nsegs * sizeof(struct hpt_iopsg);
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(hba->io_dmat,
			srb->dma_map, BUS_DMASYNC_PREREAD);
	}
	else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
		bus_dmamap_sync(hba->io_dmat,
			srb->dma_map, BUS_DMASYNC_PREWRITE);
	req->header.context = (u_int64_t)srb->index
					<< MVIOP_REQUEST_NUMBER_START_BIT
					| MVIOP_CMD_TYPE_SCSI;
	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
	/* Encode the request size (in 256-byte units, capped at 3) into
	 * the low bits of the posted address. */
	size = req->header.size >> 8;
	hptiop_mv_inbound_write(req_phy
			| MVIOP_MU_QUEUE_ADDR_HOST_BIT
			| (size > 3 ? 3 : size), hba);
}

/*
 * Build and post a SCSI command to a Marvell Frey adapter via the
 * inbound list ring; the write-pointer update is read back to flush the
 * posted write.
 */
static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_srb *srb,
				bus_dma_segment_t *segs, int nsegs)
{
	int idx, index;
	union ccb *ccb = srb->ccb;
	u_int8_t *cdb;
	struct hpt_iop_request_scsi_command *req;
	u_int64_t req_phy;

	req = (struct hpt_iop_request_scsi_command *)srb;
	req_phy = srb->phy_addr;

	if (ccb->csio.dxfer_len && nsegs > 0) {
		struct hpt_iopsg *psg = req->sg_list;
		for (idx = 0; idx < nsegs; idx++, psg++) {
			psg->pci_address = (u_int64_t)segs[idx].ds_addr | 1;
			psg->size = segs[idx].ds_len;
			psg->eot = 0;
		}
		/* Mark the last scatter/gather entry. */
		psg[-1].eot = 1;
	}
	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
		cdb = ccb->csio.cdb_io.cdb_ptr;
	else
		cdb = ccb->csio.cdb_io.cdb_bytes;

	bcopy(cdb, req->cdb, ccb->csio.cdb_len);
	req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
	req->header.result = IOP_RESULT_PENDING;
	req->dataxfer_length = ccb->csio.dxfer_len;
	req->channel = 0;
	req->target = ccb->ccb_h.target_id;
	req->lun = ccb->ccb_h.target_lun;
	req->header.size = sizeof(struct hpt_iop_request_scsi_command)
				- sizeof(struct hpt_iopsg)
				+ nsegs * sizeof(struct hpt_iopsg);
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(hba->io_dmat,
			srb->dma_map, BUS_DMASYNC_PREREAD);
	}
	else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
		bus_dmamap_sync(hba->io_dmat,
			srb->dma_map, BUS_DMASYNC_PREWRITE);

	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT
			| IOP_REQUEST_FLAG_ADDR_BITS
			| ((req_phy >> 16) & 0xffff0000);
	req->header.context = ((req_phy & 0xffffffff) << 32 )
			| srb->index << 4
			| IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;

	/* Advance the inbound list write pointer, wrapping and toggling
	 * the cycle bit at the end of the ring. */
	hba->u.mvfrey.inlist_wptr++;
	index = hba->u.mvfrey.inlist_wptr & 0x3fff;

	if (index == hba->u.mvfrey.list_count) {
		index = 0;
		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
	}

	hba->u.mvfrey.inlist[index].addr = req_phy;
	hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;

	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);

	if (req->header.type == IOP_REQUEST_TYPE_SCSI_COMMAND) {
		/* Arm a 20 second watchdog that resets the adapter. */
		ccb->ccb_h.timeout_ch = timeout(hptiop_reset_adapter, hba, 20*hz);
	}
}

/*
 * bus_dmamap_load callback for a SCSI CCB: validate the mapping result
 * and hand the request to the chip-specific post routine.  On error the
 * CCB is completed with CAM_BUSY and the SRB is recycled.
 */
static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
					int nsegs, int error)
{
	struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg;
	union ccb *ccb = srb->ccb;
	struct hpt_iop_hba *hba = srb->hba;

	if (error || nsegs > hba->max_sg_count) {
		KdPrint(("hptiop: func_code=%x tid=%x lun=%x nsegs=%d\n",
			ccb->ccb_h.func_code,
			ccb->ccb_h.target_id,
			ccb->ccb_h.target_lun, nsegs));
		ccb->ccb_h.status = CAM_BUSY;
		bus_dmamap_unload(hba->io_dmat, srb->dma_map);
		hptiop_free_srb(hba, srb);
		xpt_done(ccb);
		return;
	}

	hba->ops->post_req(hba, srb, segs, nsegs);
}

/*
 * bus_dmamap_load callback for the MV control/config buffer: record the
 * 32-byte-aligned bus and kernel virtual addresses.
 */
static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
				int nsegs, int error)
{
	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
	hba->ctlcfgcmd_phy =
((u_int64_t)segs->ds_addr + 0x1F)
				& ~(u_int64_t)0x1F;
	hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
				& ~0x1F);
}

/*
 * bus_dmamap_load callback for the MV Frey control buffer: carve the
 * 32-byte-aligned region into the 0x800-byte config area, the inbound
 * list, the outbound list and the outbound copy pointer, keeping the
 * virtual and bus addresses in step.
 */
static void hptiop_mvfrey_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
				int nsegs, int error)
{
	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
	char *p;
	u_int64_t phy;
	u_int32_t list_count = hba->u.mvfrey.list_count;

	phy = ((u_int64_t)segs->ds_addr + 0x1F)
			& ~(u_int64_t)0x1F;
	p = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
			& ~0x1F);

	hba->ctlcfgcmd_phy = phy;
	hba->ctlcfg_ptr = p;

	p += 0x800;
	phy += 0x800;

	hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
	hba->u.mvfrey.inlist_phy = phy;

	p += list_count * sizeof(struct mvfrey_inlist_entry);
	phy += list_count * sizeof(struct mvfrey_inlist_entry);

	hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
	hba->u.mvfrey.outlist_phy = phy;

	p += list_count * sizeof(struct mvfrey_outlist_entry);
	phy += list_count * sizeof(struct mvfrey_outlist_entry);

	hba->u.mvfrey.outlist_cptr = (u_int32_t *)p;
	hba->u.mvfrey.outlist_cptr_phy = phy;
}

/*
 * bus_dmamap_load callback for the SRB pool: carve the 32-byte-aligned
 * uncached area into fixed-size SRBs, create a per-SRB DMA map and put
 * each SRB on the free list.
 */
static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
				int nsegs, int error)
{
	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
	bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F;
	struct hpt_iop_srb *srb, *tmp_srb;
	int i;

	if (error || nsegs == 0) {
		device_printf(hba->pcidev, "hptiop_map_srb error");
		return;
	}

	/* map srb */
	srb = (struct hpt_iop_srb *)
		(((unsigned long)hba->uncached_ptr + 0x1F)
		& ~(unsigned long)0x1F);

	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
		tmp_srb = (struct hpt_iop_srb *)
					((char *)srb + i * HPT_SRB_MAX_SIZE);
		if (((unsigned long)tmp_srb & 0x1F) == 0) {
			if (bus_dmamap_create(hba->io_dmat,
						0, &tmp_srb->dma_map)) {
				device_printf(hba->pcidev, "dmamap create failed");
				return;
			}

			bzero(tmp_srb, sizeof(struct hpt_iop_srb));
			tmp_srb->hba = hba;
			tmp_srb->index = i;
			if (hba->ctlcfg_ptr == 0) {/*itl iop*/
				/* ITL posts the address right-shifted by 5;
				 * flag SRBs above the 32G direct window. */
				tmp_srb->phy_addr = (u_int64_t)(u_int32_t)
							(phy_addr >> 5);
				if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G)
					tmp_srb->srb_flag =
						HPT_SRB_FLAG_HIGH_MEM_ACESS;
			} else {
				tmp_srb->phy_addr = phy_addr;
			}

			hptiop_free_srb(hba, tmp_srb);
			hba->srb[i] = tmp_srb;
			phy_addr += HPT_SRB_MAX_SIZE;
		}
		else {
			device_printf(hba->pcidev, "invalid alignment");
			return;
		}
	}
}

/* IOP->host message completion: mark the synchronous message as done. */
static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg)
{
	hba->msg_done = 1;
}

/*
 * Check whether a target can safely be removed: fail (-1) if a "da"
 * periph exists on the target's path and is still referenced.
 */
static int hptiop_os_query_remove_device(struct hpt_iop_hba * hba,
						int target_id)
{
	struct cam_periph *periph = NULL;
	struct cam_path *path;
	int status, retval = 0;

	status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);

	if (status == CAM_REQ_CMP) {
		if ((periph = cam_periph_find(path, "da")) != NULL) {
			if (periph->refcount >= 1) {
				device_printf(hba->pcidev, "%d ,"
					"target_id=0x%x,"
					"refcount=%d",
					hba->pciunit, target_id, periph->refcount);
				retval = -1;
			}
		}
		xpt_free_path(path);
	}
	return retval;
}

/*
 * Release every resource acquired at attach time, roughly in reverse
 * order of acquisition.  Each step is guarded so this is also safe from
 * a partially failed attach.
 */
static void hptiop_release_resource(struct hpt_iop_hba *hba)
{
	int i;
	if (hba->path) {
		struct ccb_setasync ccb;

		/* Cancel the async callback registration before freeing
		 * the path. */
		xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
		ccb.ccb_h.func_code = XPT_SASYNC_CB;
		ccb.event_enable = 0;
		ccb.callback = hptiop_async;
		ccb.callback_arg = hba->sim;
		xpt_action((union ccb *)&ccb);
		xpt_free_path(hba->path);
	}

	if (hba->sim) {
		xpt_bus_deregister(cam_sim_path(hba->sim));
cam_sim_free(hba->sim, TRUE);
	}

	if (hba->ctlcfg_dmat) {
		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
		bus_dmamem_free(hba->ctlcfg_dmat,
			hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
		bus_dma_tag_destroy(hba->ctlcfg_dmat);
	}

	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
		struct hpt_iop_srb *srb = hba->srb[i];
		/* NOTE(review): srb may be NULL if hptiop_map_srb bailed out
		 * before populating every slot — verify before dereference. */
		if (srb->dma_map)
			bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
	}

	if (hba->srb_dmat) {
		bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
		bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap);
		bus_dma_tag_destroy(hba->srb_dmat);
	}

	if (hba->io_dmat)
		bus_dma_tag_destroy(hba->io_dmat);

	if (hba->parent_dmat)
		bus_dma_tag_destroy(hba->parent_dmat);

	if (hba->irq_handle)
		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);

	if (hba->irq_res)
		bus_release_resource(hba->pcidev, SYS_RES_IRQ,
					0, hba->irq_res);

	if (hba->bar0_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar0_rid, hba->bar0_res);
	if (hba->bar2_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar2_rid, hba->bar2_res);
	if (hba->ioctl_dev)
		destroy_dev(hba->ioctl_dev);
}