/*
 * AMCC'S 3ware driver for 9000 series storage controllers.
 *
 * Author: Vinod Kashyap
 * Modifications by: Adam Radford
 * Modifications by: Manjunath Ranganathaiah
 */

/*
 * FreeBSD specific functions not related to CAM, and other
 * miscellaneous functions.
 */

#include <dev/twa/tw_osl_includes.h>
#include <dev/twa/tw_cl_fwif.h>
#include <dev/twa/tw_cl_ioctl.h>
#include <dev/twa/tw_osl_ioctl.h>

#ifdef TW_OSL_DEBUG
TW_INT32	TW_DEBUG_LEVEL_FOR_OSL = TW_OSL_DEBUG;
TW_INT32	TW_OSL_DEBUG_LEVEL_FOR_CL = TW_OSL_DEBUG;
#endif /* TW_OSL_DEBUG */

/* Malloc type under which all of this driver's allocations are made. */
static MALLOC_DEFINE(TW_OSLI_MALLOC_CLASS, "twa_commands", "twa commands");

static d_open_t		twa_open;
static d_close_t	twa_close;
static d_ioctl_t	twa_ioctl;

/* Entry points for the /dev/twaX control (ioctl) device. */
static struct cdevsw twa_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	twa_open,
	.d_close =	twa_close,
	.d_ioctl =	twa_ioctl,
	.d_name =	"twa",
};

static devclass_t	twa_devclass;


/*
 * Function name:	twa_open
 * Description:		Called when the controller is opened.
 *			Simply marks the controller as open.
 *
 * Input:		dev	-- control device corresponding to the ctlr
 *			flags	-- mode of open
 *			fmt	-- device type (character/block etc.)
 *			proc	-- current process
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 *
 * NOTE(review): sc->open is set without taking a driver lock -- presumably
 * serialized by the cdev layer; confirm.
 */
static TW_INT32
twa_open(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, struct thread *proc)
{
	struct twa_softc	*sc = (struct twa_softc *)(dev->si_drv1);

	tw_osli_dbg_dprintf(5, sc, "entered");
	sc->open = TW_CL_TRUE;
	return(0);
}



/*
 * Function name:	twa_close
 * Description:		Called when the controller is closed.
 *			Simply marks the controller as not open.
 *
 * Input:		dev	-- control device corresponding to the ctlr
 *			flags	-- mode of corresponding open
 *			fmt	-- device type (character/block etc.)
 *			proc	-- current process
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
twa_close(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, struct thread *proc)
{
	struct twa_softc	*sc = (struct twa_softc *)(dev->si_drv1);

	tw_osli_dbg_dprintf(5, sc, "entered");
	sc->open = TW_CL_FALSE;
	return(0);
}



/*
 * Function name:	twa_ioctl
 * Description:		Called when an ioctl is posted to the controller.
 *			Handles any OS Layer specific cmds, passes the rest
 *			on to the Common Layer.
 *
 * Input:		dev	-- control device corresponding to the ctlr
 *			cmd	-- ioctl cmd
 *			buf	-- ptr to buffer in kernel memory, which is
 *				   a copy of the input buffer in user-space
 *			flags	-- mode of corresponding open
 *			proc	-- current process
 * Output:		buf	-- ptr to buffer in kernel memory, which will
 *				   be copied to the output buffer in user-space
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
twa_ioctl(struct cdev *dev, u_long cmd, caddr_t buf, TW_INT32 flags, struct thread *proc)
{
	struct twa_softc	*sc = (struct twa_softc *)(dev->si_drv1);
	TW_INT32		error;

	tw_osli_dbg_dprintf(5, sc, "entered");

	switch (cmd) {
	case TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH:
		/* Build a firmware passthru command and submit it to the CL. */
		tw_osli_dbg_dprintf(6, sc, "ioctl: fw_passthru");
		error = tw_osli_fw_passthru(sc, (TW_INT8 *)buf);
		break;

	case TW_OSL_IOCTL_SCAN_BUS:
		/* Request CAM for a bus scan. */
		tw_osli_dbg_dprintf(6, sc, "ioctl: scan bus");
		error = tw_osli_request_bus_scan(sc);
		break;

	default:
		/* Anything else is handled by the Common Layer. */
		tw_osli_dbg_dprintf(6, sc, "ioctl: 0x%lx", cmd);
		error = tw_cl_ioctl(&sc->ctlr_handle, cmd, buf);
		break;
	}
	return(error);
}



static TW_INT32	twa_probe(device_t dev);
static TW_INT32	twa_attach(device_t dev);
static TW_INT32	twa_detach(device_t dev);
static TW_INT32	twa_shutdown(device_t dev);
static TW_VOID	twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op);
static TW_VOID	twa_pci_intr(TW_VOID *arg);
static TW_VOID	twa_watchdog(TW_VOID *arg);
int twa_setup_intr(struct twa_softc *sc);
int twa_teardown_intr(struct twa_softc *sc);

static TW_INT32	tw_osli_alloc_mem(struct twa_softc *sc);
static TW_VOID	tw_osli_free_resources(struct twa_softc *sc);

static TW_VOID	twa_map_load_data_callback(TW_VOID *arg,
	bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
static TW_VOID	twa_map_load_callback(TW_VOID *arg,
	bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);


static device_method_t	twa_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		twa_probe),
	DEVMETHOD(device_attach,	twa_attach),
	DEVMETHOD(device_detach,	twa_detach),
	DEVMETHOD(device_shutdown,	twa_shutdown),

	DEVMETHOD_END
};

static driver_t	twa_pci_driver = {
	"twa",
	twa_methods,
	sizeof(struct twa_softc)
};

DRIVER_MODULE(twa, pci, twa_pci_driver, twa_devclass, 0, 0);
MODULE_DEPEND(twa, cam, 1, 1, 1);
MODULE_DEPEND(twa, pci, 1, 1, 1);


/*
 * Function name:	twa_probe
 * Description:		Called at driver load time. Claims 9000 ctlrs.
 *
 * Input:		dev	-- bus device corresponding to the ctlr
 * Output:		None
 * Return value:	<= 0	-- success
 *			> 0	-- failure
 */
static TW_INT32
twa_probe(device_t dev)
{
	/* Static so the version banner is printed only for the first ctlr. */
	static TW_UINT8	first_ctlr = 1;

	tw_osli_dbg_printf(3, "entered");

	if (tw_cl_ctlr_supported(pci_get_vendor(dev), pci_get_device(dev))) {
		device_set_desc(dev, TW_OSLI_DEVICE_NAME);
		/* Print the driver version only once. */
		if (first_ctlr) {
			printf("3ware device driver for 9000 series storage "
				"controllers, version: %s\n",
				TW_OSL_DRIVER_VERSION_STRING);
			first_ctlr = 0;
		}
		return(0);
	}
	return(ENXIO);
}

/*
 * Hook up the interrupt handler for this controller, if it is not already
 * hooked up and the IRQ resource has been allocated.
 */
int twa_setup_intr(struct twa_softc *sc)
{
	int error = 0;

	if (!(sc->intr_handle) && (sc->irq_res)) {
		error = bus_setup_intr(sc->bus_dev, sc->irq_res,
			INTR_TYPE_CAM | INTR_MPSAFE,
			NULL, twa_pci_intr,
			sc, &sc->intr_handle);
	}
	return( error );
}


/*
 * Disconnect the interrupt handler, if one is currently hooked up.
 * Clears intr_handle so a later twa_setup_intr() can re-register.
 */
int twa_teardown_intr(struct twa_softc *sc)
{
	int error = 0;

	if ((sc->intr_handle) && (sc->irq_res)) {
		error = bus_teardown_intr(sc->bus_dev,
			sc->irq_res, sc->intr_handle);
		sc->intr_handle = NULL;
	}
	return( error );
}



/*
 * Function name:	twa_attach
 * Description:		Allocates pci resources; updates sc; adds a node to the
 *			sysctl tree to expose the driver version; makes calls
 *			(to the Common Layer) to initialize ctlr, and to
 *			attach to CAM.
 *
 * Input:		dev	-- bus device corresponding to the ctlr
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
twa_attach(device_t dev)
{
	struct twa_softc	*sc = device_get_softc(dev);
	TW_UINT32		command;
	TW_INT32		bar_num;
	TW_INT32		bar0_offset;
	TW_INT32		bar_size;
	TW_INT32		error;

	tw_osli_dbg_dprintf(3, sc, "entered");

	sc->ctlr_handle.osl_ctlr_ctxt = sc;

	/* Initialize the softc structure.
	 */
	sc->bus_dev = dev;
	sc->device_id = pci_get_device(dev);

	/* Initialize the mutexes right here. */
	sc->io_lock = &(sc->io_lock_handle);
	mtx_init(sc->io_lock, "tw_osl_io_lock", NULL, MTX_SPIN);
	sc->q_lock = &(sc->q_lock_handle);
	mtx_init(sc->q_lock, "tw_osl_q_lock", NULL, MTX_SPIN);
	sc->sim_lock = &(sc->sim_lock_handle);
	mtx_init(sc->sim_lock, "tw_osl_sim_lock", NULL, MTX_DEF | MTX_RECURSE);

	/* Expose the driver version under hw.twaN in the sysctl tree. */
	sysctl_ctx_init(&sc->sysctl_ctxt);
	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctxt,
		SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
		device_get_nameunit(dev), CTLFLAG_RD, 0, "");
	if (sc->sysctl_tree == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2000,
			"Cannot add sysctl tree node",
			ENXIO);
		return(ENXIO);
	}
	SYSCTL_ADD_STRING(&sc->sysctl_ctxt, SYSCTL_CHILDREN(sc->sysctl_tree),
		OID_AUTO, "driver_version", CTLFLAG_RD,
		TW_OSL_DRIVER_VERSION_STRING, 0, "TWA driver version");

	/* Make sure we are going to be able to talk to this board. */
	command = pci_read_config(dev, PCIR_COMMAND, 2);
	if ((command & PCIM_CMD_PORTEN) == 0) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2001,
			"Register window not available",
			ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}

	/* Force the busmaster enable bit on, in case the BIOS forgot. */
	command |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, command, 2);

	/* Allocate the PCI register window. */
	if ((error = tw_cl_get_pci_bar_info(sc->device_id, TW_CL_BAR_TYPE_MEM,
		&bar_num, &bar0_offset, &bar_size))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201F,
			"Can't get PCI BAR info",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}
	sc->reg_res_id = PCIR_BARS + bar0_offset;
	if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
		&(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
		== NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2002,
			"Can't allocate register window",
			ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Allocate and register our interrupt. */
	sc->irq_res_id = 0;
	if ((sc->irq_res = bus_alloc_resource(sc->bus_dev, SYS_RES_IRQ,
		&(sc->irq_res_id), 0, ~0, 1,
		RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2003,
			"Can't allocate interrupt",
			ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}
	if ((error = twa_setup_intr(sc))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2004,
			"Can't set up interrupt",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

	if ((error = tw_osli_alloc_mem(sc))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2005,
			"Memory allocation failure",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

	/* Initialize the Common Layer for this controller. */
	if ((error = tw_cl_init_ctlr(&sc->ctlr_handle, sc->flags, sc->device_id,
			TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS,
			sc->non_dma_mem, sc->dma_mem,
			sc->dma_mem_phys
			))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2006,
			"Failed to initialize Common Layer/controller",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

	/* Create the control device. */
	sc->ctrl_dev = make_dev(&twa_cdevsw, device_get_unit(sc->bus_dev),
		UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
		"twa%d", device_get_unit(sc->bus_dev));
	sc->ctrl_dev->si_drv1 = sc;

	if ((error = tw_osli_cam_attach(sc))) {
		tw_osli_free_resources(sc);
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2007,
			"Failed to initialize CAM",
			error);
		return(error);
	}

	/*
	 * Two watchdog callouts are alternated between (see twa_watchdog);
	 * start the first one now.
	 */
	sc->watchdog_index = 0;
	callout_init(&(sc->watchdog_callout[0]), CALLOUT_MPSAFE);
	callout_init(&(sc->watchdog_callout[1]), CALLOUT_MPSAFE);
	callout_reset(&(sc->watchdog_callout[0]), 5*hz, twa_watchdog, &sc->ctlr_handle);

	return(0);
}


/*
 * Periodic watchdog: scans the request buffer for busy requests whose
 * deadline has passed; if one is found, flags the controller for reset
 * and (below) performs the reset and reschedules itself.
 */
static TW_VOID
twa_watchdog(TW_VOID *arg)
{
	struct tw_cl_ctlr_handle *ctlr_handle =
		(struct tw_cl_ctlr_handle *)arg;
	struct twa_softc		*sc = ctlr_handle->osl_ctlr_ctxt;
	int				i;
	int				i_need_a_reset = 0;
	int				driver_is_active = 0;
	int				my_watchdog_was_pending = 1234;
	TW_UINT64			current_time;
	struct tw_osli_req_context	*my_req;


//==============================================================================
	current_time = (TW_UINT64) (tw_osl_get_local_time());

	for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) {
		my_req = &(sc->req_ctx_buf[i]);

		/* A busy request with a non-zero, expired deadline timed out. */
		if ((my_req->state == TW_OSLI_REQ_STATE_BUSY) &&
			(my_req->deadline) &&
			(my_req->deadline < current_time)) {
			tw_cl_set_reset_needed(ctlr_handle);
473#ifdef TW_OSL_DEBUG 474 device_printf((sc)->bus_dev, "Request %d timed out! d = %llu, c = %llu\n", i, my_req->deadline, current_time); 475#else /* TW_OSL_DEBUG */ 476 device_printf((sc)->bus_dev, "Request %d timed out!\n", i); 477#endif /* TW_OSL_DEBUG */ 478 break; 479 } 480 } 481//============================================================================== 482 483 i_need_a_reset = tw_cl_is_reset_needed(ctlr_handle); 484 485 i = (int) ((sc->watchdog_index++) & 1); 486 487 driver_is_active = tw_cl_is_active(ctlr_handle); 488 489 if (i_need_a_reset) { 490#ifdef TW_OSL_DEBUG 491 device_printf((sc)->bus_dev, "Watchdog rescheduled in 70 seconds\n"); 492#endif /* TW_OSL_DEBUG */ 493 my_watchdog_was_pending = 494 callout_reset(&(sc->watchdog_callout[i]), 70*hz, twa_watchdog, &sc->ctlr_handle); 495 tw_cl_reset_ctlr(ctlr_handle); 496#ifdef TW_OSL_DEBUG 497 device_printf((sc)->bus_dev, "Watchdog reset completed!\n"); 498#endif /* TW_OSL_DEBUG */ 499 } else if (driver_is_active) { 500 my_watchdog_was_pending = 501 callout_reset(&(sc->watchdog_callout[i]), 5*hz, twa_watchdog, &sc->ctlr_handle); 502 } 503#ifdef TW_OSL_DEBUG 504 if (i_need_a_reset || my_watchdog_was_pending) 505 device_printf((sc)->bus_dev, "i_need_a_reset = %d, " 506 "driver_is_active = %d, my_watchdog_was_pending = %d\n", 507 i_need_a_reset, driver_is_active, my_watchdog_was_pending); 508#endif /* TW_OSL_DEBUG */ 509} 510 511 512/* 513 * Function name: tw_osli_alloc_mem 514 * Description: Allocates memory needed both by CL and OSL. 515 * 516 * Input: sc -- OSL internal controller context 517 * Output: None 518 * Return value: 0 -- success 519 * non-zero-- failure 520 */ 521static TW_INT32 522tw_osli_alloc_mem(struct twa_softc *sc) 523{ 524 struct tw_osli_req_context *req; 525 TW_UINT32 max_sg_elements; 526 TW_UINT32 non_dma_mem_size; 527 TW_UINT32 dma_mem_size; 528 TW_INT32 error; 529 TW_INT32 i; 530 531 tw_osli_dbg_dprintf(3, sc, "entered"); 532 533 sc->flags |= (sizeof(bus_addr_t) == 8) ? 
TW_CL_64BIT_ADDRESSES : 0; 534 sc->flags |= (sizeof(bus_size_t) == 8) ? TW_CL_64BIT_SG_LENGTH : 0; 535 536 max_sg_elements = (sizeof(bus_addr_t) == 8) ? 537 TW_CL_MAX_64BIT_SG_ELEMENTS : TW_CL_MAX_32BIT_SG_ELEMENTS; 538 539 if ((error = tw_cl_get_mem_requirements(&sc->ctlr_handle, sc->flags, 540 sc->device_id, TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS, 541 &(sc->alignment), &(sc->sg_size_factor), 542 &non_dma_mem_size, &dma_mem_size 543 ))) { 544 tw_osli_printf(sc, "error = %d", 545 TW_CL_SEVERITY_ERROR_STRING, 546 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 547 0x2008, 548 "Can't get Common Layer's memory requirements", 549 error); 550 return(error); 551 } 552 553 if ((sc->non_dma_mem = malloc(non_dma_mem_size, TW_OSLI_MALLOC_CLASS, 554 M_WAITOK)) == NULL) { 555 tw_osli_printf(sc, "error = %d", 556 TW_CL_SEVERITY_ERROR_STRING, 557 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 558 0x2009, 559 "Can't allocate non-dma memory", 560 ENOMEM); 561 return(ENOMEM); 562 } 563 564 /* Create the parent dma tag. */ 565 if (bus_dma_tag_create(bus_get_dma_tag(sc->bus_dev), /* parent */ 566 sc->alignment, /* alignment */ 567 0, /* boundary */ 568 BUS_SPACE_MAXADDR, /* lowaddr */ 569 BUS_SPACE_MAXADDR, /* highaddr */ 570 NULL, NULL, /* filter, filterarg */ 571 TW_CL_MAX_IO_SIZE, /* maxsize */ 572 max_sg_elements, /* nsegments */ 573 TW_CL_MAX_IO_SIZE, /* maxsegsize */ 574 0, /* flags */ 575 NULL, /* lockfunc */ 576 NULL, /* lockfuncarg */ 577 &sc->parent_tag /* tag */)) { 578 tw_osli_printf(sc, "error = %d", 579 TW_CL_SEVERITY_ERROR_STRING, 580 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 581 0x200A, 582 "Can't allocate parent DMA tag", 583 ENOMEM); 584 return(ENOMEM); 585 } 586 587 /* Create a dma tag for Common Layer's DMA'able memory (dma_mem). 
*/ 588 if (bus_dma_tag_create(sc->parent_tag, /* parent */ 589 sc->alignment, /* alignment */ 590 0, /* boundary */ 591 BUS_SPACE_MAXADDR, /* lowaddr */ 592 BUS_SPACE_MAXADDR, /* highaddr */ 593 NULL, NULL, /* filter, filterarg */ 594 dma_mem_size, /* maxsize */ 595 1, /* nsegments */ 596 BUS_SPACE_MAXSIZE, /* maxsegsize */ 597 0, /* flags */ 598 NULL, /* lockfunc */ 599 NULL, /* lockfuncarg */ 600 &sc->cmd_tag /* tag */)) { 601 tw_osli_printf(sc, "error = %d", 602 TW_CL_SEVERITY_ERROR_STRING, 603 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 604 0x200B, 605 "Can't allocate DMA tag for Common Layer's " 606 "DMA'able memory", 607 ENOMEM); 608 return(ENOMEM); 609 } 610 611 if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem, 612 BUS_DMA_NOWAIT, &sc->cmd_map)) { 613 /* Try a second time. */ 614 if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem, 615 BUS_DMA_NOWAIT, &sc->cmd_map)) { 616 tw_osli_printf(sc, "error = %d", 617 TW_CL_SEVERITY_ERROR_STRING, 618 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 619 0x200C, 620 "Can't allocate DMA'able memory for the" 621 "Common Layer", 622 ENOMEM); 623 return(ENOMEM); 624 } 625 } 626 627 bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem, 628 dma_mem_size, twa_map_load_callback, 629 &sc->dma_mem_phys, 0); 630 631 /* 632 * Create a dma tag for data buffers; size will be the maximum 633 * possible I/O size (128kB). 
634 */ 635 if (bus_dma_tag_create(sc->parent_tag, /* parent */ 636 sc->alignment, /* alignment */ 637 0, /* boundary */ 638 BUS_SPACE_MAXADDR, /* lowaddr */ 639 BUS_SPACE_MAXADDR, /* highaddr */ 640 NULL, NULL, /* filter, filterarg */ 641 TW_CL_MAX_IO_SIZE, /* maxsize */ 642 max_sg_elements, /* nsegments */ 643 TW_CL_MAX_IO_SIZE, /* maxsegsize */ 644 BUS_DMA_ALLOCNOW, /* flags */ 645 twa_busdma_lock, /* lockfunc */ 646 sc->io_lock, /* lockfuncarg */ 647 &sc->dma_tag /* tag */)) { 648 tw_osli_printf(sc, "error = %d", 649 TW_CL_SEVERITY_ERROR_STRING, 650 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 651 0x200F, 652 "Can't allocate DMA tag for data buffers", 653 ENOMEM); 654 return(ENOMEM); 655 } 656 657 /* 658 * Create a dma tag for ioctl data buffers; size will be the maximum 659 * possible I/O size (128kB). 660 */ 661 if (bus_dma_tag_create(sc->parent_tag, /* parent */ 662 sc->alignment, /* alignment */ 663 0, /* boundary */ 664 BUS_SPACE_MAXADDR, /* lowaddr */ 665 BUS_SPACE_MAXADDR, /* highaddr */ 666 NULL, NULL, /* filter, filterarg */ 667 TW_CL_MAX_IO_SIZE, /* maxsize */ 668 max_sg_elements, /* nsegments */ 669 TW_CL_MAX_IO_SIZE, /* maxsegsize */ 670 BUS_DMA_ALLOCNOW, /* flags */ 671 twa_busdma_lock, /* lockfunc */ 672 sc->io_lock, /* lockfuncarg */ 673 &sc->ioctl_tag /* tag */)) { 674 tw_osli_printf(sc, "error = %d", 675 TW_CL_SEVERITY_ERROR_STRING, 676 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 677 0x2010, 678 "Can't allocate DMA tag for ioctl data buffers", 679 ENOMEM); 680 return(ENOMEM); 681 } 682 683 /* Create just one map for all ioctl request data buffers. */ 684 if (bus_dmamap_create(sc->ioctl_tag, 0, &sc->ioctl_map)) { 685 tw_osli_printf(sc, "error = %d", 686 TW_CL_SEVERITY_ERROR_STRING, 687 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 688 0x2011, 689 "Can't create ioctl map", 690 ENOMEM); 691 return(ENOMEM); 692 } 693 694 695 /* Initialize request queues. 
*/ 696 tw_osli_req_q_init(sc, TW_OSLI_FREE_Q); 697 tw_osli_req_q_init(sc, TW_OSLI_BUSY_Q); 698 699 if ((sc->req_ctx_buf = (struct tw_osli_req_context *) 700 malloc((sizeof(struct tw_osli_req_context) * 701 TW_OSLI_MAX_NUM_REQUESTS), 702 TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) { 703 tw_osli_printf(sc, "error = %d", 704 TW_CL_SEVERITY_ERROR_STRING, 705 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 706 0x2012, 707 "Failed to allocate request packets", 708 ENOMEM); 709 return(ENOMEM); 710 } 711 bzero(sc->req_ctx_buf, 712 sizeof(struct tw_osli_req_context) * TW_OSLI_MAX_NUM_REQUESTS); 713 714 for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) { 715 req = &(sc->req_ctx_buf[i]); 716 req->ctlr = sc; 717 if (bus_dmamap_create(sc->dma_tag, 0, &req->dma_map)) { 718 tw_osli_printf(sc, "request # = %d, error = %d", 719 TW_CL_SEVERITY_ERROR_STRING, 720 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 721 0x2013, 722 "Can't create dma map", 723 i, ENOMEM); 724 return(ENOMEM); 725 } 726 727 /* Initialize the ioctl wakeup/ timeout mutex */ 728 req->ioctl_wake_timeout_lock = &(req->ioctl_wake_timeout_lock_handle); 729 mtx_init(req->ioctl_wake_timeout_lock, "tw_ioctl_wake_timeout_lock", NULL, MTX_DEF); 730 731 /* Insert request into the free queue. */ 732 tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q); 733 } 734 735 return(0); 736} 737 738 739 740/* 741 * Function name: tw_osli_free_resources 742 * Description: Performs clean-up at the time of going down. 
 *
 * Input:		sc	-- ptr to OSL internal ctlr context
 * Output:		None
 * Return value:	None
 */
static TW_VOID
tw_osli_free_resources(struct twa_softc *sc)
{
	struct tw_osli_req_context	*req;
	TW_INT32			error = 0;

	tw_osli_dbg_dprintf(3, sc, "entered");

	/* Detach from CAM */
	tw_osli_cam_detach(sc);

	/* Drain the free queue, tearing down each request's lock and map. */
	if (sc->req_ctx_buf)
		while ((req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q)) !=
			NULL) {
			mtx_destroy(req->ioctl_wake_timeout_lock);

			if ((error = bus_dmamap_destroy(sc->dma_tag,
					req->dma_map)))
				tw_osli_dbg_dprintf(1, sc,
					"dmamap_destroy(dma) returned %d",
					error);
		}

	if ((sc->ioctl_tag) && (sc->ioctl_map))
		if ((error = bus_dmamap_destroy(sc->ioctl_tag, sc->ioctl_map)))
			tw_osli_dbg_dprintf(1, sc,
				"dmamap_destroy(ioctl) returned %d", error);

	/* Free all memory allocated so far. */
	if (sc->req_ctx_buf)
		free(sc->req_ctx_buf, TW_OSLI_MALLOC_CLASS);

	if (sc->non_dma_mem)
		free(sc->non_dma_mem, TW_OSLI_MALLOC_CLASS);

	/* Unload and free the Common Layer's DMA'able memory. */
	if (sc->dma_mem) {
		bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
		bus_dmamem_free(sc->cmd_tag, sc->dma_mem,
			sc->cmd_map);
	}
	if (sc->cmd_tag)
		if ((error = bus_dma_tag_destroy(sc->cmd_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(cmd) returned %d", error);

	if (sc->dma_tag)
		if ((error = bus_dma_tag_destroy(sc->dma_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(dma) returned %d", error);

	if (sc->ioctl_tag)
		if ((error = bus_dma_tag_destroy(sc->ioctl_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(ioctl) returned %d", error);

	/* The parent tag is destroyed last, after all of its children. */
	if (sc->parent_tag)
		if ((error = bus_dma_tag_destroy(sc->parent_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(parent) returned %d", error);


	/* Disconnect the interrupt handler.
	 */
	if ((error = twa_teardown_intr(sc)))
		tw_osli_dbg_dprintf(1, sc,
			"teardown_intr returned %d", error);

	if (sc->irq_res != NULL)
		if ((error = bus_release_resource(sc->bus_dev,
				SYS_RES_IRQ, sc->irq_res_id, sc->irq_res)))
			tw_osli_dbg_dprintf(1, sc,
				"release_resource(irq) returned %d", error);


	/* Release the register window mapping. */
	if (sc->reg_res != NULL)
		if ((error = bus_release_resource(sc->bus_dev,
				SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res)))
			tw_osli_dbg_dprintf(1, sc,
				"release_resource(io) returned %d", error);


	/* Destroy the control device. */
	if (sc->ctrl_dev != (struct cdev *)NULL)
		destroy_dev(sc->ctrl_dev);

	if ((error = sysctl_ctx_free(&sc->sysctl_ctxt)))
		tw_osli_dbg_dprintf(1, sc,
			"sysctl_ctx_free returned %d", error);

}



/*
 * Function name:	twa_detach
 * Description:		Called when the controller is being detached from
 *			the pci bus.
 *
 * Input:		dev	-- bus device corresponding to the ctlr
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
twa_detach(device_t dev)
{
	struct twa_softc	*sc = device_get_softc(dev);
	TW_INT32		error;

	tw_osli_dbg_dprintf(3, sc, "entered");

	/* Refuse to detach while the control device is open. */
	error = EBUSY;
	if (sc->open) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2014,
			"Device open",
			error);
		goto out;
	}

	/* Shut the controller down. */
	if ((error = twa_shutdown(dev)))
		goto out;

	/* Free all resources associated with this controller. */
	tw_osli_free_resources(sc);
	error = 0;

out:
	return(error);
}



/*
 * Function name:	twa_shutdown
 * Description:		Called at unload/shutdown time. Lets the controller
 *			know that we are going down.
 *
 * Input:		dev	-- bus device corresponding to the ctlr
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
twa_shutdown(device_t dev)
{
	struct twa_softc	*sc = device_get_softc(dev);
	TW_INT32		error = 0;

	tw_osli_dbg_dprintf(3, sc, "entered");

	/* Disconnect interrupts. */
	error = twa_teardown_intr(sc);

	/* Stop watchdog task. */
	callout_drain(&(sc->watchdog_callout[0]));
	callout_drain(&(sc->watchdog_callout[1]));

	/* Disconnect from the controller. */
	if ((error = tw_cl_shutdown_ctlr(&(sc->ctlr_handle), 0))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2015,
			"Failed to shutdown Common Layer/controller",
			error);
	}
	return(error);
}



/*
 * Function name:	twa_busdma_lock
 * Description:		Function to provide synchronization during busdma_swi.
 *
 * Input:		lock_arg -- lock mutex sent as argument
 *			op -- operation (lock/unlock) expected of the function
 * Output:		None
 * Return value:	None
 *
 * Uses the spin-lock variants: the mutex passed in (sc->io_lock) is
 * initialized with MTX_SPIN in twa_attach.
 */
TW_VOID
twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op)
{
	struct mtx	*lock;

	lock = (struct mtx *)lock_arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock_spin(lock);
		break;

	case BUS_DMA_UNLOCK:
		mtx_unlock_spin(lock);
		break;

	default:
		panic("Unknown operation 0x%x for twa_busdma_lock!", op);
	}
}


/*
 * Function name:	twa_pci_intr
 * Description:		Interrupt handler. Wrapper for twa_interrupt.
 *
 * Input:		arg	-- ptr to OSL internal ctlr context
 * Output:		None
 * Return value:	None
 */
static TW_VOID
twa_pci_intr(TW_VOID *arg)
{
	struct twa_softc	*sc = (struct twa_softc *)arg;

	tw_osli_dbg_dprintf(10, sc, "entered");
	tw_cl_interrupt(&(sc->ctlr_handle));
}


/*
 * Function name:	tw_osli_fw_passthru
 * Description:		Builds a fw passthru cmd pkt, and submits it to CL.
 *
 * Input:		sc	-- ptr to OSL internal ctlr context
 *			buf	-- ptr to ioctl pkt understood by CL
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_osli_fw_passthru(struct twa_softc *sc, TW_INT8 *buf)
{
	struct tw_osli_req_context		*req;
	struct tw_osli_ioctl_no_data_buf	*user_buf =
		(struct tw_osli_ioctl_no_data_buf *)buf;
	TW_TIME				end_time;
	TW_UINT32			timeout = 60;	/* seconds */
	TW_UINT32			data_buf_size_adjusted;
	struct tw_cl_req_packet		*req_pkt;
	struct tw_cl_passthru_req_packet	*pt_req;
	TW_INT32			error;

	tw_osli_dbg_dprintf(5, sc, "ioctl: passthru");

	if ((req = tw_osli_get_request(sc)) == NULL)
		return(EBUSY);

	req->req_handle.osl_req_ctxt = req;
	req->orig_req = buf;
	req->flags |= TW_OSLI_REQ_FLAGS_PASSTHRU;

	req_pkt = &(req->req_pkt);
	req_pkt->status = 0;
	req_pkt->tw_osl_callback = tw_osl_complete_passthru;
	/* Let the Common Layer retry the request on cmd queue full. */
	req_pkt->flags |= TW_CL_REQ_RETRY_ON_BUSY;

	pt_req = &(req_pkt->gen_req_pkt.pt_req);
	/*
	 * Make sure that the data buffer sent to firmware is a
	 * 512 byte multiple in size.
	 */
	data_buf_size_adjusted =
		(user_buf->driver_pkt.buffer_length +
		(sc->sg_size_factor - 1)) & ~(sc->sg_size_factor - 1);
	if ((req->length = data_buf_size_adjusted)) {
		if ((req->data = malloc(data_buf_size_adjusted,
			TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
			/* NOTE(review): with M_WAITOK, malloc(9) cannot
			   return NULL; this path is effectively dead. */
			error = ENOMEM;
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2016,
				"Could not alloc mem for "
				"fw_passthru data_buf",
				error);
			goto fw_passthru_err;
		}
		/* Copy the payload. */
		if ((error = copyin((TW_VOID *)(user_buf->pdata),
			req->data,
			user_buf->driver_pkt.buffer_length)) != 0) {
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2017,
				"Could not copyin fw_passthru data_buf",
				error);
			goto fw_passthru_err;
		}
		pt_req->sgl_entries = 1; /* will be updated during mapping */
		req->flags |= (TW_OSLI_REQ_FLAGS_DATA_IN |
			TW_OSLI_REQ_FLAGS_DATA_OUT);
	} else
		pt_req->sgl_entries = 0; /* no payload */

	pt_req->cmd_pkt = (TW_VOID *)(&(user_buf->cmd_pkt));
	pt_req->cmd_pkt_length = sizeof(struct tw_cl_command_packet);

	if ((error = tw_osli_map_request(req)))
		goto fw_passthru_err;

	/* Sleep until the request completes, times out, or is interrupted. */
	end_time = tw_osl_get_local_time() + timeout;
	while (req->state != TW_OSLI_REQ_STATE_COMPLETE) {
		mtx_lock(req->ioctl_wake_timeout_lock);
		req->flags |= TW_OSLI_REQ_FLAGS_SLEEPING;

		error = mtx_sleep(req, req->ioctl_wake_timeout_lock, 0,
			    "twa_passthru", timeout*hz);
		mtx_unlock(req->ioctl_wake_timeout_lock);

		/*
		 * If the completion callback already cleared SLEEPING, the
		 * request finished regardless of what mtx_sleep returned.
		 */
		if (!(req->flags & TW_OSLI_REQ_FLAGS_SLEEPING))
			error = 0;
		req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;

		if (!error) {
			/* Fold any request/CL error into the return value. */
			if (((error = req->error_code)) ||
				((error = (req->state !=
				TW_OSLI_REQ_STATE_COMPLETE))) ||
				((error = req_pkt->status)))
				goto fw_passthru_err;
			break;
		}

		if (req_pkt->status) {
			error = req_pkt->status;
			goto fw_passthru_err;
		}

		if (error == EWOULDBLOCK) {
			/* Time out! */
			if ((!(req->error_code))			   &&
			    (req->state == TW_OSLI_REQ_STATE_COMPLETE) &&
			    (!(req_pkt->status))			  ) {
#ifdef    TW_OSL_DEBUG
				tw_osli_printf(sc, "request = %p",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x7777,
					"FALSE Passthru timeout!",
					req);
#endif /* TW_OSL_DEBUG */
				error = 0; /* False error */
				break;
			}
			if (!(tw_cl_is_reset_needed(&(req->ctlr->ctlr_handle)))) {
#ifdef    TW_OSL_DEBUG
				tw_osli_printf(sc, "request = %p",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x2018,
					"Passthru request timed out!",
					req);
#else  /* TW_OSL_DEBUG */
			device_printf((sc)->bus_dev, "Passthru request timed out!\n");
#endif /* TW_OSL_DEBUG */
				tw_cl_reset_ctlr(&(req->ctlr->ctlr_handle));
			}

			error = 0;
			end_time = tw_osl_get_local_time() + timeout;
			continue;
			/*
			 * Don't touch req after a reset. It (and any
			 * associated data) will be
			 * unmapped by the callback.
			 */
		}
		/*
		 * Either the request got completed, or we were woken up by a
		 * signal. Calculate the new timeout, in case it was the latter.
		 */
		timeout = (end_time - tw_osl_get_local_time());
	} /* End of while loop */

	/* If there was a payload, copy it back. */
	if ((!error) && (req->length))
		if ((error = copyout(req->data, user_buf->pdata,
			user_buf->driver_pkt.buffer_length)))
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2019,
				"Could not copyout fw_passthru data_buf",
				error);

fw_passthru_err:

	if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET)
		error = EBUSY;

	user_buf->driver_pkt.os_status = error;
	/* Free resources. */
	if (req->data)
		free(req->data, TW_OSLI_MALLOC_CLASS);
	tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	return(error);
}



/*
 * Function name:	tw_osl_complete_passthru
 * Description:		Called to complete passthru requests.
 *
 * Input:		req_handle -- ptr to request handle
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_osl_complete_passthru(struct tw_cl_req_handle *req_handle)
{
	struct tw_osli_req_context	*req = req_handle->osl_req_ctxt;
	struct tw_cl_req_packet		*req_pkt =
		(struct tw_cl_req_packet *)(&req->req_pkt);
	struct twa_softc		*sc = req->ctlr;

	tw_osli_dbg_dprintf(5, sc, "entered");

	if (req->state != TW_OSLI_REQ_STATE_BUSY) {
		tw_osli_printf(sc, "request = %p, status = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201B,
			"Unposted command completed!!",
			req, req->state);
	}

	/*
	 * Remove request from the busy queue.  Just mark it complete.
	 * There's no need to move it into the complete queue as we are
	 * going to be done with it right now.
	 */
	req->state = TW_OSLI_REQ_STATE_COMPLETE;
	tw_osli_req_q_remove_item(req, TW_OSLI_BUSY_Q);

	tw_osli_unmap_request(req);

	/*
	 * Don't do a wake up if there was an error even before the request
	 * was sent down to the Common Layer, and we hadn't gotten an
	 * EINPROGRESS.
The request originator will then be returned an 1194 * error, and he can do the clean-up. 1195 */ 1196 if ((req->error_code) && (!(req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS))) 1197 return; 1198 1199 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) { 1200 if (req->flags & TW_OSLI_REQ_FLAGS_SLEEPING) { 1201 /* Wake up the sleeping command originator. */ 1202 tw_osli_dbg_dprintf(5, sc, 1203 "Waking up originator of request %p", req); 1204 req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING; 1205 wakeup_one(req); 1206 } else { 1207 /* 1208 * If the request completed even before mtx_sleep 1209 * was called, simply return. 1210 */ 1211 if (req->flags & TW_OSLI_REQ_FLAGS_MAPPED) 1212 return; 1213 1214 if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET) 1215 return; 1216 1217 tw_osli_printf(sc, "request = %p", 1218 TW_CL_SEVERITY_ERROR_STRING, 1219 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 1220 0x201C, 1221 "Passthru callback called, " 1222 "and caller not sleeping", 1223 req); 1224 } 1225 } else { 1226 tw_osli_printf(sc, "request = %p", 1227 TW_CL_SEVERITY_ERROR_STRING, 1228 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 1229 0x201D, 1230 "Passthru callback called for non-passthru request", 1231 req); 1232 } 1233} 1234 1235 1236 1237/* 1238 * Function name: tw_osli_get_request 1239 * Description: Gets a request pkt from the free queue. 1240 * 1241 * Input: sc -- ptr to OSL internal ctlr context 1242 * Output: None 1243 * Return value: ptr to request pkt -- success 1244 * NULL -- failure 1245 */ 1246struct tw_osli_req_context * 1247tw_osli_get_request(struct twa_softc *sc) 1248{ 1249 struct tw_osli_req_context *req; 1250 1251 tw_osli_dbg_dprintf(4, sc, "entered"); 1252 1253 /* Get a free request packet. */ 1254 req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q); 1255 1256 /* Initialize some fields to their defaults. 
*/ 1257 if (req) { 1258 req->req_handle.osl_req_ctxt = NULL; 1259 req->req_handle.cl_req_ctxt = NULL; 1260 req->req_handle.is_io = 0; 1261 req->data = NULL; 1262 req->length = 0; 1263 req->deadline = 0; 1264 req->real_data = NULL; 1265 req->real_length = 0; 1266 req->state = TW_OSLI_REQ_STATE_INIT;/* req being initialized */ 1267 req->flags = 0; 1268 req->error_code = 0; 1269 req->orig_req = NULL; 1270 1271 bzero(&(req->req_pkt), sizeof(struct tw_cl_req_packet)); 1272 1273 } 1274 return(req); 1275} 1276 1277 1278 1279/* 1280 * Function name: twa_map_load_data_callback 1281 * Description: Callback of bus_dmamap_load for the buffer associated 1282 * with data. Updates the cmd pkt (size/sgl_entries 1283 * fields, as applicable) to reflect the number of sg 1284 * elements. 1285 * 1286 * Input: arg -- ptr to OSL internal request context 1287 * segs -- ptr to a list of segment descriptors 1288 * nsegments--# of segments 1289 * error -- 0 if no errors encountered before callback, 1290 * non-zero if errors were encountered 1291 * Output: None 1292 * Return value: None 1293 */ 1294static TW_VOID 1295twa_map_load_data_callback(TW_VOID *arg, bus_dma_segment_t *segs, 1296 TW_INT32 nsegments, TW_INT32 error) 1297{ 1298 struct tw_osli_req_context *req = 1299 (struct tw_osli_req_context *)arg; 1300 struct twa_softc *sc = req->ctlr; 1301 struct tw_cl_req_packet *req_pkt = &(req->req_pkt); 1302 1303 tw_osli_dbg_dprintf(10, sc, "entered"); 1304 1305 if (error == EINVAL) { 1306 req->error_code = error; 1307 return; 1308 } 1309 1310 /* Mark the request as currently being processed. */ 1311 req->state = TW_OSLI_REQ_STATE_BUSY; 1312 /* Move the request into the busy queue. 
*/ 1313 tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q); 1314 1315 req->flags |= TW_OSLI_REQ_FLAGS_MAPPED; 1316 1317 if (error == EFBIG) { 1318 req->error_code = error; 1319 goto out; 1320 } 1321 1322 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) { 1323 struct tw_cl_passthru_req_packet *pt_req; 1324 1325 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) 1326 bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map, 1327 BUS_DMASYNC_PREREAD); 1328 1329 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) { 1330 /* 1331 * If we're using an alignment buffer, and we're 1332 * writing data, copy the real data out. 1333 */ 1334 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) 1335 bcopy(req->real_data, req->data, req->real_length); 1336 bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map, 1337 BUS_DMASYNC_PREWRITE); 1338 } 1339 1340 pt_req = &(req_pkt->gen_req_pkt.pt_req); 1341 pt_req->sg_list = (TW_UINT8 *)segs; 1342 pt_req->sgl_entries += (nsegments - 1); 1343 error = tw_cl_fw_passthru(&(sc->ctlr_handle), req_pkt, 1344 &(req->req_handle)); 1345 } else { 1346 struct tw_cl_scsi_req_packet *scsi_req; 1347 1348 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) 1349 bus_dmamap_sync(sc->dma_tag, req->dma_map, 1350 BUS_DMASYNC_PREREAD); 1351 1352 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) { 1353 /* 1354 * If we're using an alignment buffer, and we're 1355 * writing data, copy the real data out. 
1356 */ 1357 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) 1358 bcopy(req->real_data, req->data, req->real_length); 1359 bus_dmamap_sync(sc->dma_tag, req->dma_map, 1360 BUS_DMASYNC_PREWRITE); 1361 } 1362 1363 scsi_req = &(req_pkt->gen_req_pkt.scsi_req); 1364 scsi_req->sg_list = (TW_UINT8 *)segs; 1365 scsi_req->sgl_entries += (nsegments - 1); 1366 error = tw_cl_start_io(&(sc->ctlr_handle), req_pkt, 1367 &(req->req_handle)); 1368 } 1369 1370out: 1371 if (error) { 1372 req->error_code = error; 1373 req_pkt->tw_osl_callback(&(req->req_handle)); 1374 /* 1375 * If the caller had been returned EINPROGRESS, and he has 1376 * registered a callback for handling completion, the callback 1377 * will never get called because we were unable to submit the 1378 * request. So, free up the request right here. 1379 */ 1380 if (req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS) 1381 tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q); 1382 } 1383} 1384 1385 1386 1387/* 1388 * Function name: twa_map_load_callback 1389 * Description: Callback of bus_dmamap_load for the buffer associated 1390 * with a cmd pkt. 1391 * 1392 * Input: arg -- ptr to variable to hold phys addr 1393 * segs -- ptr to a list of segment descriptors 1394 * nsegments--# of segments 1395 * error -- 0 if no errors encountered before callback, 1396 * non-zero if errors were encountered 1397 * Output: None 1398 * Return value: None 1399 */ 1400static TW_VOID 1401twa_map_load_callback(TW_VOID *arg, bus_dma_segment_t *segs, 1402 TW_INT32 nsegments, TW_INT32 error) 1403{ 1404 *((bus_addr_t *)arg) = segs[0].ds_addr; 1405} 1406 1407 1408 1409/* 1410 * Function name: tw_osli_map_request 1411 * Description: Maps a cmd pkt and data associated with it, into 1412 * DMA'able memory. 
1413 * 1414 * Input: req -- ptr to request pkt 1415 * Output: None 1416 * Return value: 0 -- success 1417 * non-zero-- failure 1418 */ 1419TW_INT32 1420tw_osli_map_request(struct tw_osli_req_context *req) 1421{ 1422 struct twa_softc *sc = req->ctlr; 1423 TW_INT32 error = 0; 1424 1425 tw_osli_dbg_dprintf(10, sc, "entered"); 1426 1427 /* If the command involves data, map that too. */ 1428 if (req->data != NULL) { 1429 /* 1430 * It's sufficient for the data pointer to be 4-byte aligned 1431 * to work with 9000. However, if 4-byte aligned addresses 1432 * are passed to bus_dmamap_load, we can get back sg elements 1433 * that are not 512-byte multiples in size. So, we will let 1434 * only those buffers that are 512-byte aligned to pass 1435 * through, and bounce the rest, so as to make sure that we 1436 * always get back sg elements that are 512-byte multiples 1437 * in size. 1438 */ 1439 if (((vm_offset_t)req->data % sc->sg_size_factor) || 1440 (req->length % sc->sg_size_factor)) { 1441 req->flags |= TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED; 1442 /* Save original data pointer and length. */ 1443 req->real_data = req->data; 1444 req->real_length = req->length; 1445 req->length = (req->length + 1446 (sc->sg_size_factor - 1)) & 1447 ~(sc->sg_size_factor - 1); 1448 req->data = malloc(req->length, TW_OSLI_MALLOC_CLASS, 1449 M_NOWAIT); 1450 if (req->data == NULL) { 1451 tw_osli_printf(sc, "error = %d", 1452 TW_CL_SEVERITY_ERROR_STRING, 1453 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 1454 0x201E, 1455 "Failed to allocate memory " 1456 "for bounce buffer", 1457 ENOMEM); 1458 /* Restore original data pointer and length. */ 1459 req->data = req->real_data; 1460 req->length = req->real_length; 1461 return(ENOMEM); 1462 } 1463 } 1464 1465 /* 1466 * Map the data buffer into bus space and build the SG list. 1467 */ 1468 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) { 1469 /* Lock against multiple simultaneous ioctl calls. 
*/ 1470 mtx_lock_spin(sc->io_lock); 1471 error = bus_dmamap_load(sc->ioctl_tag, sc->ioctl_map, 1472 req->data, req->length, 1473 twa_map_load_data_callback, req, 1474 BUS_DMA_WAITOK); 1475 mtx_unlock_spin(sc->io_lock);
/*
 * AMCC'S 3ware driver for 9000 series storage controllers.
 *
 * Author: Vinod Kashyap
 * Modifications by: Adam Radford
 * Modifications by: Manjunath Ranganathaiah
 */


/*
 * FreeBSD specific functions not related to CAM, and other
 * miscellaneous functions.
 */


#include <dev/twa/tw_osl_includes.h>
#include <dev/twa/tw_cl_fwif.h>
#include <dev/twa/tw_cl_ioctl.h>
#include <dev/twa/tw_osl_ioctl.h>

#ifdef TW_OSL_DEBUG
/* Debug verbosity knobs for the OS Layer and the Common Layer. */
TW_INT32	TW_DEBUG_LEVEL_FOR_OSL = TW_OSL_DEBUG;
TW_INT32	TW_OSL_DEBUG_LEVEL_FOR_CL = TW_OSL_DEBUG;
#endif /* TW_OSL_DEBUG */

/* malloc(9) type under which all of this driver's allocations are made. */
static MALLOC_DEFINE(TW_OSLI_MALLOC_CLASS, "twa_commands", "twa commands");


static d_open_t		twa_open;
static d_close_t	twa_close;
static d_ioctl_t	twa_ioctl;

/* Entry points for the /dev/twaX control (ioctl) device. */
static struct cdevsw twa_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	twa_open,
	.d_close =	twa_close,
	.d_ioctl =	twa_ioctl,
	.d_name =	"twa",
};

static devclass_t	twa_devclass;


/*
 * Function name:	twa_open
 * Description:		Called when the controller is opened.
 *			Simply marks the controller as open.
 *
 * Input:		dev	-- control device corresponding to the ctlr
 *			flags	-- mode of open
 *			fmt	-- device type (character/block etc.)
 *			proc	-- current process
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
twa_open(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, struct thread *proc)
{
	struct twa_softc	*sc = (struct twa_softc *)(dev->si_drv1);

	tw_osli_dbg_dprintf(5, sc, "entered");
	/* NOTE(review): the open flag is not reference counted or locked;
	 * twa_detach() refuses to detach while sc->open is set. */
	sc->open = TW_CL_TRUE;
	return(0);
}



/*
 * Function name:	twa_close
 * Description:		Called when the controller is closed.
 *			Simply marks the controller as not open.
 *
 * Input:		dev	-- control device corresponding to the ctlr
 *			flags	-- mode of corresponding open
 *			fmt	-- device type (character/block etc.)
 *			proc	-- current process
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
twa_close(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, struct thread *proc)
{
	struct twa_softc	*sc = (struct twa_softc *)(dev->si_drv1);

	tw_osli_dbg_dprintf(5, sc, "entered");
	sc->open = TW_CL_FALSE;
	return(0);
}



/*
 * Function name:	twa_ioctl
 * Description:		Called when an ioctl is posted to the controller.
 *			Handles any OS Layer specific cmds, passes the rest
 *			on to the Common Layer.
 *
 * Input:		dev	-- control device corresponding to the ctlr
 *			cmd	-- ioctl cmd
 *			buf	-- ptr to buffer in kernel memory, which is
 *				   a copy of the input buffer in user-space
 *			flags	-- mode of corresponding open
 *			proc	-- current process
 * Output:		buf	-- ptr to buffer in kernel memory, which will
 *				   be copied to the output buffer in user-space
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
twa_ioctl(struct cdev *dev, u_long cmd, caddr_t buf, TW_INT32 flags, struct thread *proc)
{
	struct twa_softc	*sc = (struct twa_softc *)(dev->si_drv1);
	TW_INT32		error;

	tw_osli_dbg_dprintf(5, sc, "entered");

	switch (cmd) {
	case TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH:
		tw_osli_dbg_dprintf(6, sc, "ioctl: fw_passthru");
		error = tw_osli_fw_passthru(sc, (TW_INT8 *)buf);
		break;

	case TW_OSL_IOCTL_SCAN_BUS:
		/* Request CAM for a bus scan. */
		tw_osli_dbg_dprintf(6, sc, "ioctl: scan bus");
		error = tw_osli_request_bus_scan(sc);
		break;

	default:
		/* Anything the OSL doesn't know goes to the Common Layer. */
		tw_osli_dbg_dprintf(6, sc, "ioctl: 0x%lx", cmd);
		error = tw_cl_ioctl(&sc->ctlr_handle, cmd, buf);
		break;
	}
	return(error);
}



static TW_INT32	twa_probe(device_t dev);
static TW_INT32	twa_attach(device_t dev);
static TW_INT32	twa_detach(device_t dev);
static TW_INT32	twa_shutdown(device_t dev);
static TW_VOID	twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op);
static TW_VOID	twa_pci_intr(TW_VOID *arg);
static TW_VOID	twa_watchdog(TW_VOID *arg);
int twa_setup_intr(struct twa_softc *sc);
int twa_teardown_intr(struct twa_softc *sc);

static TW_INT32	tw_osli_alloc_mem(struct twa_softc *sc);
static TW_VOID	tw_osli_free_resources(struct twa_softc *sc);

static TW_VOID	twa_map_load_data_callback(TW_VOID *arg,
	bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
static TW_VOID	twa_map_load_callback(TW_VOID *arg,
	bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);


static device_method_t	twa_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		twa_probe),
	DEVMETHOD(device_attach,	twa_attach),
	DEVMETHOD(device_detach,	twa_detach),
	DEVMETHOD(device_shutdown,	twa_shutdown),

	DEVMETHOD_END
};

static driver_t	twa_pci_driver = {
	"twa",
	twa_methods,
	sizeof(struct twa_softc)
};

DRIVER_MODULE(twa, pci, twa_pci_driver, twa_devclass, 0, 0);
MODULE_DEPEND(twa, cam, 1, 1, 1);
MODULE_DEPEND(twa, pci, 1, 1, 1);


/*
 * Function name:	twa_probe
 * Description:		Called at driver load time. Claims 9000 ctlrs.
216 * 217 * Input: dev -- bus device corresponding to the ctlr 218 * Output: None 219 * Return value: <= 0 -- success 220 * > 0 -- failure 221 */ 222static TW_INT32 223twa_probe(device_t dev) 224{ 225 static TW_UINT8 first_ctlr = 1; 226 227 tw_osli_dbg_printf(3, "entered"); 228 229 if (tw_cl_ctlr_supported(pci_get_vendor(dev), pci_get_device(dev))) { 230 device_set_desc(dev, TW_OSLI_DEVICE_NAME); 231 /* Print the driver version only once. */ 232 if (first_ctlr) { 233 printf("3ware device driver for 9000 series storage " 234 "controllers, version: %s\n", 235 TW_OSL_DRIVER_VERSION_STRING); 236 first_ctlr = 0; 237 } 238 return(0); 239 } 240 return(ENXIO); 241} 242 243int twa_setup_intr(struct twa_softc *sc) 244{ 245 int error = 0; 246 247 if (!(sc->intr_handle) && (sc->irq_res)) { 248 error = bus_setup_intr(sc->bus_dev, sc->irq_res, 249 INTR_TYPE_CAM | INTR_MPSAFE, 250 NULL, twa_pci_intr, 251 sc, &sc->intr_handle); 252 } 253 return( error ); 254} 255 256 257int twa_teardown_intr(struct twa_softc *sc) 258{ 259 int error = 0; 260 261 if ((sc->intr_handle) && (sc->irq_res)) { 262 error = bus_teardown_intr(sc->bus_dev, 263 sc->irq_res, sc->intr_handle); 264 sc->intr_handle = NULL; 265 } 266 return( error ); 267} 268 269 270 271/* 272 * Function name: twa_attach 273 * Description: Allocates pci resources; updates sc; adds a node to the 274 * sysctl tree to expose the driver version; makes calls 275 * (to the Common Layer) to initialize ctlr, and to 276 * attach to CAM. 277 * 278 * Input: dev -- bus device corresponding to the ctlr 279 * Output: None 280 * Return value: 0 -- success 281 * non-zero-- failure 282 */ 283static TW_INT32 284twa_attach(device_t dev) 285{ 286 struct twa_softc *sc = device_get_softc(dev); 287 TW_UINT32 command; 288 TW_INT32 bar_num; 289 TW_INT32 bar0_offset; 290 TW_INT32 bar_size; 291 TW_INT32 error; 292 293 tw_osli_dbg_dprintf(3, sc, "entered"); 294 295 sc->ctlr_handle.osl_ctlr_ctxt = sc; 296 297 /* Initialize the softc structure. 
	 */
	sc->bus_dev = dev;
	sc->device_id = pci_get_device(dev);

	/* Initialize the mutexes right here. */
	sc->io_lock = &(sc->io_lock_handle);
	mtx_init(sc->io_lock, "tw_osl_io_lock", NULL, MTX_SPIN);
	sc->q_lock = &(sc->q_lock_handle);
	mtx_init(sc->q_lock, "tw_osl_q_lock", NULL, MTX_SPIN);
	sc->sim_lock = &(sc->sim_lock_handle);
	mtx_init(sc->sim_lock, "tw_osl_sim_lock", NULL, MTX_DEF | MTX_RECURSE);

	sysctl_ctx_init(&sc->sysctl_ctxt);
	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctxt,
		SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
		device_get_nameunit(dev), CTLFLAG_RD, 0, "");
	if (sc->sysctl_tree == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2000,
			"Cannot add sysctl tree node",
			ENXIO);
		return(ENXIO);
	}
	SYSCTL_ADD_STRING(&sc->sysctl_ctxt, SYSCTL_CHILDREN(sc->sysctl_tree),
		OID_AUTO, "driver_version", CTLFLAG_RD,
		TW_OSL_DRIVER_VERSION_STRING, 0, "TWA driver version");

	/* Make sure we are going to be able to talk to this board. */
	command = pci_read_config(dev, PCIR_COMMAND, 2);
	/* NOTE(review): this tests PCIM_CMD_PORTEN (I/O space enable), but
	 * the register window allocated below is SYS_RES_MEMORY -- confirm
	 * whether PCIM_CMD_MEMEN is the intended bit. */
	if ((command & PCIM_CMD_PORTEN) == 0) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2001,
			"Register window not available",
			ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}

	/* Force the busmaster enable bit on, in case the BIOS forgot. */
	command |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, command, 2);

	/* Allocate the PCI register window. */
	if ((error = tw_cl_get_pci_bar_info(sc->device_id, TW_CL_BAR_TYPE_MEM,
		&bar_num, &bar0_offset, &bar_size))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201F,
			"Can't get PCI BAR info",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}
	sc->reg_res_id = PCIR_BARS + bar0_offset;
	if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
		&(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
		== NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2002,
			"Can't allocate register window",
			ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Allocate and register our interrupt. */
	sc->irq_res_id = 0;
	if ((sc->irq_res = bus_alloc_resource(sc->bus_dev, SYS_RES_IRQ,
		&(sc->irq_res_id), 0, ~0, 1,
		RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2003,
			"Can't allocate interrupt",
			ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}
	if ((error = twa_setup_intr(sc))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2004,
			"Can't set up interrupt",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

	if ((error = tw_osli_alloc_mem(sc))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2005,
			"Memory allocation failure",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

	/* Initialize the Common Layer for this controller. */
	if ((error = tw_cl_init_ctlr(&sc->ctlr_handle, sc->flags, sc->device_id,
			TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS,
			sc->non_dma_mem, sc->dma_mem,
			sc->dma_mem_phys
			))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2006,
			"Failed to initialize Common Layer/controller",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

	/* Create the control device. */
	sc->ctrl_dev = make_dev(&twa_cdevsw, device_get_unit(sc->bus_dev),
		UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
		"twa%d", device_get_unit(sc->bus_dev));
	sc->ctrl_dev->si_drv1 = sc;

	if ((error = tw_osli_cam_attach(sc))) {
		tw_osli_free_resources(sc);
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2007,
			"Failed to initialize CAM",
			error);
		return(error);
	}

	/* Two callouts are alternated by twa_watchdog() (index & 1). */
	sc->watchdog_index = 0;
	callout_init(&(sc->watchdog_callout[0]), CALLOUT_MPSAFE);
	callout_init(&(sc->watchdog_callout[1]), CALLOUT_MPSAFE);
	callout_reset(&(sc->watchdog_callout[0]), 5*hz, twa_watchdog, &sc->ctlr_handle);

	return(0);
}


/*
 * Periodic watchdog: scans the request pool for commands whose deadline
 * has passed, flags a controller reset if any are found, and reschedules
 * itself (on alternating callout slots) while the driver is active.
 */
static TW_VOID
twa_watchdog(TW_VOID *arg)
{
	struct tw_cl_ctlr_handle *ctlr_handle =
		(struct tw_cl_ctlr_handle *)arg;
	struct twa_softc		*sc = ctlr_handle->osl_ctlr_ctxt;
	int				i;
	int				i_need_a_reset = 0;
	int				driver_is_active = 0;
	int				my_watchdog_was_pending = 1234;
	TW_UINT64			current_time;
	struct tw_osli_req_context	*my_req;


//==============================================================================
	current_time = (TW_UINT64) (tw_osl_get_local_time());

	for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) {
		my_req = &(sc->req_ctx_buf[i]);

		/* deadline == 0 means "no timeout tracking" for this req. */
		if ((my_req->state == TW_OSLI_REQ_STATE_BUSY) &&
			(my_req->deadline) &&
			(my_req->deadline < current_time)) {
			tw_cl_set_reset_needed(ctlr_handle);
#ifdef TW_OSL_DEBUG
			device_printf((sc)->bus_dev, "Request %d timed out! d = %llu, c = %llu\n", i, my_req->deadline, current_time);
#else  /* TW_OSL_DEBUG */
			device_printf((sc)->bus_dev, "Request %d timed out!\n", i);
#endif /* TW_OSL_DEBUG */
			break;
		}
	}
//==============================================================================

	i_need_a_reset = tw_cl_is_reset_needed(ctlr_handle);

	/* Alternate between the two callout slots. */
	i = (int) ((sc->watchdog_index++) & 1);

	driver_is_active = tw_cl_is_active(ctlr_handle);

	if (i_need_a_reset) {
#ifdef TW_OSL_DEBUG
		device_printf((sc)->bus_dev, "Watchdog rescheduled in 70 seconds\n");
#endif /* TW_OSL_DEBUG */
		/* Reschedule far out (70s) before resetting, so the reset
		 * itself is not interrupted by another watchdog pass. */
		my_watchdog_was_pending =
			callout_reset(&(sc->watchdog_callout[i]), 70*hz, twa_watchdog, &sc->ctlr_handle);
		tw_cl_reset_ctlr(ctlr_handle);
#ifdef TW_OSL_DEBUG
		device_printf((sc)->bus_dev, "Watchdog reset completed!\n");
#endif /* TW_OSL_DEBUG */
	} else if (driver_is_active) {
		my_watchdog_was_pending =
			callout_reset(&(sc->watchdog_callout[i]), 5*hz, twa_watchdog, &sc->ctlr_handle);
	}
#ifdef TW_OSL_DEBUG
	if (i_need_a_reset || my_watchdog_was_pending)
		device_printf((sc)->bus_dev, "i_need_a_reset = %d, "
			"driver_is_active = %d, my_watchdog_was_pending = %d\n",
			i_need_a_reset, driver_is_active, my_watchdog_was_pending);
#endif /* TW_OSL_DEBUG */
}


/*
 * Function name:	tw_osli_alloc_mem
 * Description:		Allocates memory needed both by CL and OSL.
 *
 * Input:		sc	-- OSL internal controller context
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
tw_osli_alloc_mem(struct twa_softc *sc)
{
	struct tw_osli_req_context	*req;
	TW_UINT32			max_sg_elements;
	TW_UINT32			non_dma_mem_size;
	TW_UINT32			dma_mem_size;
	TW_INT32			error;
	TW_INT32			i;

	tw_osli_dbg_dprintf(3, sc, "entered");

	/* Tell the Common Layer whether this platform has 64-bit bus
	 * addresses and/or 64-bit S/G lengths. */
	sc->flags |= (sizeof(bus_addr_t) == 8) ? TW_CL_64BIT_ADDRESSES : 0;
	sc->flags |= (sizeof(bus_size_t) == 8) ? TW_CL_64BIT_SG_LENGTH : 0;

	max_sg_elements = (sizeof(bus_addr_t) == 8) ?
		TW_CL_MAX_64BIT_SG_ELEMENTS : TW_CL_MAX_32BIT_SG_ELEMENTS;

	if ((error = tw_cl_get_mem_requirements(&sc->ctlr_handle, sc->flags,
			sc->device_id, TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS,
			&(sc->alignment), &(sc->sg_size_factor),
			&non_dma_mem_size, &dma_mem_size
			))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2008,
			"Can't get Common Layer's memory requirements",
			error);
		return(error);
	}

	/* NOTE(review): with M_WAITOK, malloc(9) does not return NULL, so
	 * this check is belt-and-braces -- confirm before relying on it. */
	if ((sc->non_dma_mem = malloc(non_dma_mem_size, TW_OSLI_MALLOC_CLASS,
				M_WAITOK)) == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2009,
			"Can't allocate non-dma memory",
			ENOMEM);
		return(ENOMEM);
	}

	/* Create the parent dma tag. */
	if (bus_dma_tag_create(bus_get_dma_tag(sc->bus_dev), /* parent */
				sc->alignment,		/* alignment */
				0,			/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR, 	/* highaddr */
				NULL, NULL, 		/* filter, filterarg */
				TW_CL_MAX_IO_SIZE,	/* maxsize */
				max_sg_elements,	/* nsegments */
				TW_CL_MAX_IO_SIZE,	/* maxsegsize */
				0,			/* flags */
				NULL,			/* lockfunc */
				NULL,			/* lockfuncarg */
				&sc->parent_tag		/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200A,
			"Can't allocate parent DMA tag",
			ENOMEM);
		return(ENOMEM);
	}

	/* Create a dma tag for Common Layer's DMA'able memory (dma_mem). */
	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
				sc->alignment,		/* alignment */
				0,			/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR, 	/* highaddr */
				NULL, NULL, 		/* filter, filterarg */
				dma_mem_size,		/* maxsize */
				1,			/* nsegments */
				BUS_SPACE_MAXSIZE,	/* maxsegsize */
				0,			/* flags */
				NULL,			/* lockfunc */
				NULL,			/* lockfuncarg */
				&sc->cmd_tag		/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200B,
			"Can't allocate DMA tag for Common Layer's "
			"DMA'able memory",
			ENOMEM);
		return(ENOMEM);
	}

	if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
		BUS_DMA_NOWAIT, &sc->cmd_map)) {
		/* Try a second time. */
		if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
			BUS_DMA_NOWAIT, &sc->cmd_map)) {
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x200C,
				"Can't allocate DMA'able memory for the"
				"Common Layer",
				ENOMEM);
			return(ENOMEM);
		}
	}

	/* cmd_tag has nsegments == 1, so the callback just records the
	 * single segment's bus address into dma_mem_phys. */
	bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
		dma_mem_size, twa_map_load_callback,
		&sc->dma_mem_phys, 0);

	/*
	 * Create a dma tag for data buffers; size will be the maximum
	 * possible I/O size (128kB).
	 */
	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
				sc->alignment,		/* alignment */
				0,			/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR, 	/* highaddr */
				NULL, NULL, 		/* filter, filterarg */
				TW_CL_MAX_IO_SIZE,	/* maxsize */
				max_sg_elements,	/* nsegments */
				TW_CL_MAX_IO_SIZE,	/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				twa_busdma_lock,	/* lockfunc */
				sc->io_lock,		/* lockfuncarg */
				&sc->dma_tag		/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200F,
			"Can't allocate DMA tag for data buffers",
			ENOMEM);
		return(ENOMEM);
	}

	/*
	 * Create a dma tag for ioctl data buffers; size will be the maximum
	 * possible I/O size (128kB).
	 */
	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
				sc->alignment,		/* alignment */
				0,			/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR, 	/* highaddr */
				NULL, NULL, 		/* filter, filterarg */
				TW_CL_MAX_IO_SIZE,	/* maxsize */
				max_sg_elements,	/* nsegments */
				TW_CL_MAX_IO_SIZE,	/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				twa_busdma_lock,	/* lockfunc */
				sc->io_lock,		/* lockfuncarg */
				&sc->ioctl_tag		/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2010,
			"Can't allocate DMA tag for ioctl data buffers",
			ENOMEM);
		return(ENOMEM);
	}

	/* Create just one map for all ioctl request data buffers. */
	if (bus_dmamap_create(sc->ioctl_tag, 0, &sc->ioctl_map)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2011,
			"Can't create ioctl map",
			ENOMEM);
		return(ENOMEM);
	}


	/* Initialize request queues. */
	tw_osli_req_q_init(sc, TW_OSLI_FREE_Q);
	tw_osli_req_q_init(sc, TW_OSLI_BUSY_Q);

	if ((sc->req_ctx_buf = (struct tw_osli_req_context *)
			malloc((sizeof(struct tw_osli_req_context) *
				TW_OSLI_MAX_NUM_REQUESTS),
				TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2012,
			"Failed to allocate request packets",
			ENOMEM);
		return(ENOMEM);
	}
	bzero(sc->req_ctx_buf,
		sizeof(struct tw_osli_req_context) * TW_OSLI_MAX_NUM_REQUESTS);

	for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) {
		req = &(sc->req_ctx_buf[i]);
		req->ctlr = sc;
		if (bus_dmamap_create(sc->dma_tag, 0, &req->dma_map)) {
			tw_osli_printf(sc, "request # = %d, error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2013,
				"Can't create dma map",
				i, ENOMEM);
			return(ENOMEM);
		}

		/* Initialize the ioctl wakeup/ timeout mutex */
		req->ioctl_wake_timeout_lock = &(req->ioctl_wake_timeout_lock_handle);
		mtx_init(req->ioctl_wake_timeout_lock, "tw_ioctl_wake_timeout_lock", NULL, MTX_DEF);

		/* Insert request into the free queue. */
		tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	}

	return(0);
}



/*
 * Function name:	tw_osli_free_resources
 * Description:		Performs clean-up at the time of going down.
743 * 744 * Input: sc -- ptr to OSL internal ctlr context 745 * Output: None 746 * Return value: None 747 */ 748static TW_VOID 749tw_osli_free_resources(struct twa_softc *sc) 750{ 751 struct tw_osli_req_context *req; 752 TW_INT32 error = 0; 753 754 tw_osli_dbg_dprintf(3, sc, "entered"); 755 756 /* Detach from CAM */ 757 tw_osli_cam_detach(sc); 758 759 if (sc->req_ctx_buf) 760 while ((req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q)) != 761 NULL) { 762 mtx_destroy(req->ioctl_wake_timeout_lock); 763 764 if ((error = bus_dmamap_destroy(sc->dma_tag, 765 req->dma_map))) 766 tw_osli_dbg_dprintf(1, sc, 767 "dmamap_destroy(dma) returned %d", 768 error); 769 } 770 771 if ((sc->ioctl_tag) && (sc->ioctl_map)) 772 if ((error = bus_dmamap_destroy(sc->ioctl_tag, sc->ioctl_map))) 773 tw_osli_dbg_dprintf(1, sc, 774 "dmamap_destroy(ioctl) returned %d", error); 775 776 /* Free all memory allocated so far. */ 777 if (sc->req_ctx_buf) 778 free(sc->req_ctx_buf, TW_OSLI_MALLOC_CLASS); 779 780 if (sc->non_dma_mem) 781 free(sc->non_dma_mem, TW_OSLI_MALLOC_CLASS); 782 783 if (sc->dma_mem) { 784 bus_dmamap_unload(sc->cmd_tag, sc->cmd_map); 785 bus_dmamem_free(sc->cmd_tag, sc->dma_mem, 786 sc->cmd_map); 787 } 788 if (sc->cmd_tag) 789 if ((error = bus_dma_tag_destroy(sc->cmd_tag))) 790 tw_osli_dbg_dprintf(1, sc, 791 "dma_tag_destroy(cmd) returned %d", error); 792 793 if (sc->dma_tag) 794 if ((error = bus_dma_tag_destroy(sc->dma_tag))) 795 tw_osli_dbg_dprintf(1, sc, 796 "dma_tag_destroy(dma) returned %d", error); 797 798 if (sc->ioctl_tag) 799 if ((error = bus_dma_tag_destroy(sc->ioctl_tag))) 800 tw_osli_dbg_dprintf(1, sc, 801 "dma_tag_destroy(ioctl) returned %d", error); 802 803 if (sc->parent_tag) 804 if ((error = bus_dma_tag_destroy(sc->parent_tag))) 805 tw_osli_dbg_dprintf(1, sc, 806 "dma_tag_destroy(parent) returned %d", error); 807 808 809 /* Disconnect the interrupt handler. 
*/ 810 if ((error = twa_teardown_intr(sc))) 811 tw_osli_dbg_dprintf(1, sc, 812 "teardown_intr returned %d", error); 813 814 if (sc->irq_res != NULL) 815 if ((error = bus_release_resource(sc->bus_dev, 816 SYS_RES_IRQ, sc->irq_res_id, sc->irq_res))) 817 tw_osli_dbg_dprintf(1, sc, 818 "release_resource(irq) returned %d", error); 819 820 821 /* Release the register window mapping. */ 822 if (sc->reg_res != NULL) 823 if ((error = bus_release_resource(sc->bus_dev, 824 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))) 825 tw_osli_dbg_dprintf(1, sc, 826 "release_resource(io) returned %d", error); 827 828 829 /* Destroy the control device. */ 830 if (sc->ctrl_dev != (struct cdev *)NULL) 831 destroy_dev(sc->ctrl_dev); 832 833 if ((error = sysctl_ctx_free(&sc->sysctl_ctxt))) 834 tw_osli_dbg_dprintf(1, sc, 835 "sysctl_ctx_free returned %d", error); 836 837} 838 839 840 841/* 842 * Function name: twa_detach 843 * Description: Called when the controller is being detached from 844 * the pci bus. 845 * 846 * Input: dev -- bus device corresponding to the ctlr 847 * Output: None 848 * Return value: 0 -- success 849 * non-zero-- failure 850 */ 851static TW_INT32 852twa_detach(device_t dev) 853{ 854 struct twa_softc *sc = device_get_softc(dev); 855 TW_INT32 error; 856 857 tw_osli_dbg_dprintf(3, sc, "entered"); 858 859 error = EBUSY; 860 if (sc->open) { 861 tw_osli_printf(sc, "error = %d", 862 TW_CL_SEVERITY_ERROR_STRING, 863 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 864 0x2014, 865 "Device open", 866 error); 867 goto out; 868 } 869 870 /* Shut the controller down. */ 871 if ((error = twa_shutdown(dev))) 872 goto out; 873 874 /* Free all resources associated with this controller. */ 875 tw_osli_free_resources(sc); 876 error = 0; 877 878out: 879 return(error); 880} 881 882 883 884/* 885 * Function name: twa_shutdown 886 * Description: Called at unload/shutdown time. Lets the controller 887 * know that we are going down. 
 *
 * Input:		dev	-- bus device corresponding to the ctlr
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
twa_shutdown(device_t dev)
{
	struct twa_softc	*sc = device_get_softc(dev);
	TW_INT32		error = 0;

	tw_osli_dbg_dprintf(3, sc, "entered");

	/*
	 * Disconnect interrupts.
	 * NOTE(review): this 'error' value is overwritten by the
	 * tw_cl_shutdown_ctlr() assignment below, so a failure from
	 * twa_teardown_intr() alone is never reported to the caller --
	 * confirm whether that is intentional.
	 */
	error = twa_teardown_intr(sc);

	/* Stop watchdog task. */
	callout_drain(&(sc->watchdog_callout[0]));
	callout_drain(&(sc->watchdog_callout[1]));

	/* Disconnect from the controller. */
	if ((error = tw_cl_shutdown_ctlr(&(sc->ctlr_handle), 0))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2015,
			"Failed to shutdown Common Layer/controller",
			error);
	}
	return(error);
}



/*
 * Function name:	twa_busdma_lock
 * Description:		Function to provide synchronization during busdma_swi.
 *
 * Input:		lock_arg -- lock mutex sent as argument
 *			op -- operation (lock/unlock) expected of the function
 * Output:		None
 * Return value:	None
 */
TW_VOID
twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op)
{
	struct mtx	*lock;

	lock = (struct mtx *)lock_arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock_spin(lock);
		break;

	case BUS_DMA_UNLOCK:
		mtx_unlock_spin(lock);
		break;

	default:
		/* Any other op is a caller bug; halt rather than continue. */
		panic("Unknown operation 0x%x for twa_busdma_lock!", op);
	}
}


/*
 * Function name:	twa_pci_intr
 * Description:		Interrupt handler.  Wrapper for twa_interrupt.
 *
 * Input:		arg	-- ptr to OSL internal ctlr context
 * Output:		None
 * Return value:	None
 */
static TW_VOID
twa_pci_intr(TW_VOID *arg)
{
	struct twa_softc	*sc = (struct twa_softc *)arg;

	tw_osli_dbg_dprintf(10, sc, "entered");
	/* All interrupt processing is delegated to the Common Layer. */
	tw_cl_interrupt(&(sc->ctlr_handle));
}


/*
 * Function name:	tw_osli_fw_passthru
 * Description:		Builds a fw passthru cmd pkt, and submits it to CL.
 *
 * Input:		sc	-- ptr to OSL internal ctlr context
 *			buf	-- ptr to ioctl pkt understood by CL
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_osli_fw_passthru(struct twa_softc *sc, TW_INT8 *buf)
{
	struct tw_osli_req_context		*req;
	struct tw_osli_ioctl_no_data_buf	*user_buf =
		(struct tw_osli_ioctl_no_data_buf *)buf;
	TW_TIME					end_time;
	TW_UINT32				timeout = 60;	/* seconds */
	TW_UINT32				data_buf_size_adjusted;
	struct tw_cl_req_packet			*req_pkt;
	struct tw_cl_passthru_req_packet	*pt_req;
	TW_INT32				error;

	tw_osli_dbg_dprintf(5, sc, "ioctl: passthru");

	if ((req = tw_osli_get_request(sc)) == NULL)
		return(EBUSY);

	req->req_handle.osl_req_ctxt = req;
	req->orig_req = buf;
	req->flags |= TW_OSLI_REQ_FLAGS_PASSTHRU;

	req_pkt = &(req->req_pkt);
	req_pkt->status = 0;
	req_pkt->tw_osl_callback = tw_osl_complete_passthru;
	/* Let the Common Layer retry the request on cmd queue full. */
	req_pkt->flags |= TW_CL_REQ_RETRY_ON_BUSY;

	pt_req = &(req_pkt->gen_req_pkt.pt_req);
	/*
	 * Make sure that the data buffer sent to firmware is a
	 * 512 byte multiple in size.
	 */
	data_buf_size_adjusted =
		(user_buf->driver_pkt.buffer_length +
		(sc->sg_size_factor - 1)) & ~(sc->sg_size_factor - 1);
	if ((req->length = data_buf_size_adjusted)) {
		/*
		 * NOTE(review): malloc(9) with M_WAITOK does not return
		 * NULL on FreeBSD, so this check appears purely defensive
		 * -- confirm before relying on the ENOMEM path.
		 */
		if ((req->data = malloc(data_buf_size_adjusted,
			TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
			error = ENOMEM;
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2016,
				"Could not alloc mem for "
				"fw_passthru data_buf",
				error);
			goto fw_passthru_err;
		}
		/* Copy the payload. */
		if ((error = copyin((TW_VOID *)(user_buf->pdata),
			req->data,
			user_buf->driver_pkt.buffer_length)) != 0) {
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2017,
				"Could not copyin fw_passthru data_buf",
				error);
			goto fw_passthru_err;
		}
		pt_req->sgl_entries = 1; /* will be updated during mapping */
		/*
		 * The transfer direction of a passthru cmd is not known
		 * here, so mark the buffer for both directions.
		 */
		req->flags |= (TW_OSLI_REQ_FLAGS_DATA_IN |
			TW_OSLI_REQ_FLAGS_DATA_OUT);
	} else
		pt_req->sgl_entries = 0; /* no payload */

	pt_req->cmd_pkt = (TW_VOID *)(&(user_buf->cmd_pkt));
	pt_req->cmd_pkt_length = sizeof(struct tw_cl_command_packet);

	if ((error = tw_osli_map_request(req)))
		goto fw_passthru_err;

	/*
	 * Wait for the request to complete.  The sleep can end because the
	 * completion callback woke us (SLEEPING flag cleared), a signal
	 * interrupted us, or the timeout expired (EWOULDBLOCK).
	 */
	end_time = tw_osl_get_local_time() + timeout;
	while (req->state != TW_OSLI_REQ_STATE_COMPLETE) {
		mtx_lock(req->ioctl_wake_timeout_lock);
		req->flags |= TW_OSLI_REQ_FLAGS_SLEEPING;

		error = mtx_sleep(req, req->ioctl_wake_timeout_lock, 0,
			    "twa_passthru", timeout*hz);
		mtx_unlock(req->ioctl_wake_timeout_lock);

		/*
		 * If the callback already cleared SLEEPING, the request
		 * completed; treat any mtx_sleep status as success.
		 */
		if (!(req->flags & TW_OSLI_REQ_FLAGS_SLEEPING))
			error = 0;
		req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;

		if (!error) {
			/* Completed: pick up the first error, if any. */
			if (((error = req->error_code)) ||
				((error = (req->state !=
				TW_OSLI_REQ_STATE_COMPLETE))) ||
				((error = req_pkt->status)))
				goto fw_passthru_err;
			break;
		}

		if (req_pkt->status) {
			error = req_pkt->status;
			goto fw_passthru_err;
		}

		if (error == EWOULDBLOCK) {
			/* Time out! */
			if ((!(req->error_code)) &&
			    (req->state == TW_OSLI_REQ_STATE_COMPLETE) &&
			    (!(req_pkt->status)) ) {
#ifdef TW_OSL_DEBUG
				tw_osli_printf(sc, "request = %p",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x7777,
					"FALSE Passthru timeout!",
					req);
#endif /* TW_OSL_DEBUG */
				error = 0; /* False error */
				break;
			}
			if (!(tw_cl_is_reset_needed(&(req->ctlr->ctlr_handle)))) {
#ifdef TW_OSL_DEBUG
				tw_osli_printf(sc, "request = %p",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x2018,
					"Passthru request timed out!",
					req);
#else  /* TW_OSL_DEBUG */
			device_printf((sc)->bus_dev, "Passthru request timed out!\n");
#endif /* TW_OSL_DEBUG */
				tw_cl_reset_ctlr(&(req->ctlr->ctlr_handle));
			}

			error = 0;
			end_time = tw_osl_get_local_time() + timeout;
			continue;
			/*
			 * Don't touch req after a reset.  It (and any
			 * associated data) will be
			 * unmapped by the callback.
			 */
		}

		/*
		 * Either the request got completed, or we were woken up by a
		 * signal. Calculate the new timeout, in case it was the latter.
		 */
		timeout = (end_time - tw_osl_get_local_time());
	} /* End of while loop */

	/* If there was a payload, copy it back. */
	if ((!error) && (req->length))
		if ((error = copyout(req->data, user_buf->pdata,
			user_buf->driver_pkt.buffer_length)))
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2019,
				"Could not copyout fw_passthru data_buf",
				error);

fw_passthru_err:

	if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET)
		error = EBUSY;

	/* Report the OS-level status back to the caller's ioctl packet. */
	user_buf->driver_pkt.os_status = error;
	/* Free resources. */
	if (req->data)
		free(req->data, TW_OSLI_MALLOC_CLASS);
	tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	return(error);
}



/*
 * Function name:	tw_osl_complete_passthru
 * Description:		Called to complete passthru requests.
 *
 * Input:		req_handle -- ptr to request handle
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_osl_complete_passthru(struct tw_cl_req_handle *req_handle)
{
	struct tw_osli_req_context	*req = req_handle->osl_req_ctxt;
	struct tw_cl_req_packet		*req_pkt =
		(struct tw_cl_req_packet *)(&req->req_pkt);
	struct twa_softc		*sc = req->ctlr;

	tw_osli_dbg_dprintf(5, sc, "entered");

	/* A request completing here should have been in the BUSY state. */
	if (req->state != TW_OSLI_REQ_STATE_BUSY) {
		tw_osli_printf(sc, "request = %p, status = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201B,
			"Unposted command completed!!",
			req, req->state);
	}

	/*
	 * Remove request from the busy queue.  Just mark it complete.
	 * There's no need to move it into the complete queue as we are
	 * going to be done with it right now.
	 */
	req->state = TW_OSLI_REQ_STATE_COMPLETE;
	tw_osli_req_q_remove_item(req, TW_OSLI_BUSY_Q);

	tw_osli_unmap_request(req);

	/*
	 * Don't do a wake up if there was an error even before the request
	 * was sent down to the Common Layer, and we hadn't gotten an
	 * EINPROGRESS.  The request originator will then be returned an
	 * error, and he can do the clean-up.
	 */
	if ((req->error_code) && (!(req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)))
		return;

	if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
		if (req->flags & TW_OSLI_REQ_FLAGS_SLEEPING) {
			/* Wake up the sleeping command originator. */
			tw_osli_dbg_dprintf(5, sc,
				"Waking up originator of request %p", req);
			req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;
			wakeup_one(req);
		} else {
			/*
			 * If the request completed even before mtx_sleep
			 * was called, simply return.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_MAPPED)
				return;

			/* Reset completions are expected without a sleeper. */
			if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET)
				return;

			tw_osli_printf(sc, "request = %p",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x201C,
				"Passthru callback called, "
				"and caller not sleeping",
				req);
		}
	} else {
		tw_osli_printf(sc, "request = %p",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201D,
			"Passthru callback called for non-passthru request",
			req);
	}
}



/*
 * Function name:	tw_osli_get_request
 * Description:		Gets a request pkt from the free queue.
 *
 * Input:		sc	-- ptr to OSL internal ctlr context
 * Output:		None
 * Return value:	ptr to request pkt	-- success
 *			NULL			-- failure
 */
struct tw_osli_req_context *
tw_osli_get_request(struct twa_softc *sc)
{
	struct tw_osli_req_context	*req;

	tw_osli_dbg_dprintf(4, sc, "entered");

	/* Get a free request packet. */
	req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q);

	/* Initialize some fields to their defaults. */
	if (req) {
		req->req_handle.osl_req_ctxt = NULL;
		req->req_handle.cl_req_ctxt = NULL;
		req->req_handle.is_io = 0;
		req->data = NULL;
		req->length = 0;
		req->deadline = 0;
		req->real_data = NULL;
		req->real_length = 0;
		req->state = TW_OSLI_REQ_STATE_INIT;/* req being initialized */
		req->flags = 0;
		req->error_code = 0;
		req->orig_req = NULL;

		bzero(&(req->req_pkt), sizeof(struct tw_cl_req_packet));

	}
	return(req);
}



/*
 * Function name:	twa_map_load_data_callback
 * Description:		Callback of bus_dmamap_load for the buffer associated
 *			with data.  Updates the cmd pkt (size/sgl_entries
 *			fields, as applicable) to reflect the number of sg
 *			elements.
 *
 * Input:		arg	-- ptr to OSL internal request context
 *			segs	-- ptr to a list of segment descriptors
 *			nsegments--# of segments
 *			error	-- 0 if no errors encountered before callback,
 *				   non-zero if errors were encountered
 * Output:		None
 * Return value:	None
 */
static TW_VOID
twa_map_load_data_callback(TW_VOID *arg, bus_dma_segment_t *segs,
	TW_INT32 nsegments, TW_INT32 error)
{
	struct tw_osli_req_context	*req =
		(struct tw_osli_req_context *)arg;
	struct twa_softc		*sc = req->ctlr;
	struct tw_cl_req_packet		*req_pkt = &(req->req_pkt);

	tw_osli_dbg_dprintf(10, sc, "entered");

	/* EINVAL is reported before the request ever becomes busy. */
	if (error == EINVAL) {
		req->error_code = error;
		return;
	}

	/* Mark the request as currently being processed. */
	req->state = TW_OSLI_REQ_STATE_BUSY;
	/* Move the request into the busy queue. */
	tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);

	req->flags |= TW_OSLI_REQ_FLAGS_MAPPED;

	/* Too many segments: fail via the common error path below. */
	if (error == EFBIG) {
		req->error_code = error;
		goto out;
	}

	if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
		struct tw_cl_passthru_req_packet	*pt_req;

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
			bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
				BUS_DMASYNC_PREREAD);

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
			/*
			 * If we're using an alignment buffer, and we're
			 * writing data, copy the real data out.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
				bcopy(req->real_data, req->data, req->real_length);
			bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
				BUS_DMASYNC_PREWRITE);
		}

		pt_req = &(req_pkt->gen_req_pkt.pt_req);
		pt_req->sg_list = (TW_UINT8 *)segs;
		pt_req->sgl_entries += (nsegments - 1);
		error = tw_cl_fw_passthru(&(sc->ctlr_handle), req_pkt,
			&(req->req_handle));
	} else {
		struct tw_cl_scsi_req_packet	*scsi_req;

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
			bus_dmamap_sync(sc->dma_tag, req->dma_map,
				BUS_DMASYNC_PREREAD);

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
			/*
			 * If we're using an alignment buffer, and we're
			 * writing data, copy the real data out.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
				bcopy(req->real_data, req->data, req->real_length);
			bus_dmamap_sync(sc->dma_tag, req->dma_map,
				BUS_DMASYNC_PREWRITE);
		}

		scsi_req = &(req_pkt->gen_req_pkt.scsi_req);
		scsi_req->sg_list = (TW_UINT8 *)segs;
		scsi_req->sgl_entries += (nsegments - 1);
		error = tw_cl_start_io(&(sc->ctlr_handle), req_pkt,
			&(req->req_handle));
	}

out:
	if (error) {
		req->error_code = error;
		req_pkt->tw_osl_callback(&(req->req_handle));
		/*
		 * If the caller had been returned EINPROGRESS, and he has
		 * registered a callback for handling completion, the callback
		 * will never get called because we were unable to submit the
		 * request. So, free up the request right here.
		 */
		if (req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)
			tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	}
}



/*
 * Function name:	twa_map_load_callback
 * Description:		Callback of bus_dmamap_load for the buffer associated
 *			with a cmd pkt.
 *
 * Input:		arg	-- ptr to variable to hold phys addr
 *			segs	-- ptr to a list of segment descriptors
 *			nsegments--# of segments
 *			error	-- 0 if no errors encountered before callback,
 *				   non-zero if errors were encountered
 * Output:		None
 * Return value:	None
 */
static TW_VOID
twa_map_load_callback(TW_VOID *arg, bus_dma_segment_t *segs,
	TW_INT32 nsegments, TW_INT32 error)
{
	/* Single-segment load: just report back the physical address. */
	*((bus_addr_t *)arg) = segs[0].ds_addr;
}



/*
 * Function name:	tw_osli_map_request
 * Description:		Maps a cmd pkt and data associated with it, into
 *			DMA'able memory.
1413 * 1414 * Input: req -- ptr to request pkt 1415 * Output: None 1416 * Return value: 0 -- success 1417 * non-zero-- failure 1418 */ 1419TW_INT32 1420tw_osli_map_request(struct tw_osli_req_context *req) 1421{ 1422 struct twa_softc *sc = req->ctlr; 1423 TW_INT32 error = 0; 1424 1425 tw_osli_dbg_dprintf(10, sc, "entered"); 1426 1427 /* If the command involves data, map that too. */ 1428 if (req->data != NULL) { 1429 /* 1430 * It's sufficient for the data pointer to be 4-byte aligned 1431 * to work with 9000. However, if 4-byte aligned addresses 1432 * are passed to bus_dmamap_load, we can get back sg elements 1433 * that are not 512-byte multiples in size. So, we will let 1434 * only those buffers that are 512-byte aligned to pass 1435 * through, and bounce the rest, so as to make sure that we 1436 * always get back sg elements that are 512-byte multiples 1437 * in size. 1438 */ 1439 if (((vm_offset_t)req->data % sc->sg_size_factor) || 1440 (req->length % sc->sg_size_factor)) { 1441 req->flags |= TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED; 1442 /* Save original data pointer and length. */ 1443 req->real_data = req->data; 1444 req->real_length = req->length; 1445 req->length = (req->length + 1446 (sc->sg_size_factor - 1)) & 1447 ~(sc->sg_size_factor - 1); 1448 req->data = malloc(req->length, TW_OSLI_MALLOC_CLASS, 1449 M_NOWAIT); 1450 if (req->data == NULL) { 1451 tw_osli_printf(sc, "error = %d", 1452 TW_CL_SEVERITY_ERROR_STRING, 1453 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 1454 0x201E, 1455 "Failed to allocate memory " 1456 "for bounce buffer", 1457 ENOMEM); 1458 /* Restore original data pointer and length. */ 1459 req->data = req->real_data; 1460 req->length = req->real_length; 1461 return(ENOMEM); 1462 } 1463 } 1464 1465 /* 1466 * Map the data buffer into bus space and build the SG list. 1467 */ 1468 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) { 1469 /* Lock against multiple simultaneous ioctl calls. 
*/ 1470 mtx_lock_spin(sc->io_lock); 1471 error = bus_dmamap_load(sc->ioctl_tag, sc->ioctl_map, 1472 req->data, req->length, 1473 twa_map_load_data_callback, req, 1474 BUS_DMA_WAITOK); 1475 mtx_unlock_spin(sc->io_lock);
|