45 46static d_ioctl_t ispioctl; 47static void isp_intr_enable(void *); 48static void isp_cam_async(void *, uint32_t, struct cam_path *, void *); 49static void isp_poll(struct cam_sim *); 50static timeout_t isp_watchdog; 51static void isp_kthread(void *); 52static void isp_action(struct cam_sim *, union ccb *); 53 54 55#if __FreeBSD_version < 500000 56#define ISP_CDEV_MAJOR 248 57static struct cdevsw isp_cdevsw = { 58 /* open */ nullopen, 59 /* close */ nullclose, 60 /* read */ noread, 61 /* write */ nowrite, 62 /* ioctl */ ispioctl, 63 /* poll */ nopoll, 64 /* mmap */ nommap, 65 /* strategy */ nostrategy, 66 /* name */ "isp", 67 /* maj */ ISP_CDEV_MAJOR, 68 /* dump */ nodump, 69 /* psize */ nopsize, 70 /* flags */ D_TAPE, 71}; 72#else 73static struct cdevsw isp_cdevsw = { 74 .d_version = D_VERSION, 75 .d_flags = D_NEEDGIANT, 76 .d_ioctl = ispioctl, 77 .d_name = "isp", 78}; 79#endif 80 81static ispsoftc_t *isplist = NULL; 82 83void 84isp_attach(ispsoftc_t *isp) 85{ 86 int primary, secondary; 87 struct ccb_setasync csa; 88 struct cam_devq *devq; 89 struct cam_sim *sim; 90 struct cam_path *path; 91 92 /* 93 * Establish (in case of 12X0) which bus is the primary. 94 */ 95 96 primary = 0; 97 secondary = 1; 98 99 /* 100 * Create the device queue for our SIM(s). 101 */ 102 devq = cam_simq_alloc(isp->isp_maxcmds); 103 if (devq == NULL) { 104 return; 105 } 106 107 /* 108 * Construct our SIM entry. 
109 */ 110 ISPLOCK_2_CAMLOCK(isp); 111 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, 112 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); 113 if (sim == NULL) { 114 cam_simq_free(devq); 115 CAMLOCK_2_ISPLOCK(isp); 116 return; 117 } 118 CAMLOCK_2_ISPLOCK(isp); 119 120 isp->isp_osinfo.ehook.ich_func = isp_intr_enable; 121 isp->isp_osinfo.ehook.ich_arg = isp; 122 ISPLOCK_2_CAMLOCK(isp); 123 if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) { 124 cam_sim_free(sim, TRUE); 125 CAMLOCK_2_ISPLOCK(isp); 126 isp_prt(isp, ISP_LOGERR, 127 "could not establish interrupt enable hook"); 128 return; 129 } 130 131 if (xpt_bus_register(sim, primary) != CAM_SUCCESS) { 132 cam_sim_free(sim, TRUE); 133 CAMLOCK_2_ISPLOCK(isp); 134 return; 135 } 136 137 if (xpt_create_path(&path, NULL, cam_sim_path(sim), 138 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 139 xpt_bus_deregister(cam_sim_path(sim)); 140 cam_sim_free(sim, TRUE); 141 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 142 CAMLOCK_2_ISPLOCK(isp); 143 return; 144 } 145 146 xpt_setup_ccb(&csa.ccb_h, path, 5); 147 csa.ccb_h.func_code = XPT_SASYNC_CB; 148 csa.event_enable = AC_LOST_DEVICE; 149 csa.callback = isp_cam_async; 150 csa.callback_arg = sim; 151 xpt_action((union ccb *)&csa); 152 CAMLOCK_2_ISPLOCK(isp); 153 isp->isp_sim = sim; 154 isp->isp_path = path; 155 /* 156 * Create a kernel thread for fibre channel instances. We 157 * don't have dual channel FC cards. 
158 */ 159 if (IS_FC(isp)) { 160 ISPLOCK_2_CAMLOCK(isp); 161#if __FreeBSD_version >= 500000 162 /* XXX: LOCK VIOLATION */ 163 cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv"); 164 if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc, 165 RFHIGHPID, 0, "%s: fc_thrd", 166 device_get_nameunit(isp->isp_dev))) 167#else 168 if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc, 169 "%s: fc_thrd", device_get_nameunit(isp->isp_dev))) 170#endif 171 { 172 xpt_bus_deregister(cam_sim_path(sim)); 173 cam_sim_free(sim, TRUE); 174 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 175 CAMLOCK_2_ISPLOCK(isp); 176 isp_prt(isp, ISP_LOGERR, "could not create kthread"); 177 return; 178 } 179 CAMLOCK_2_ISPLOCK(isp); 180 } 181 182 183 /* 184 * If we have a second channel, construct SIM entry for that. 185 */ 186 if (IS_DUALBUS(isp)) { 187 ISPLOCK_2_CAMLOCK(isp); 188 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, 189 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq); 190 if (sim == NULL) { 191 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 192 xpt_free_path(isp->isp_path); 193 cam_simq_free(devq); 194 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 195 return; 196 } 197 if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) { 198 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 199 xpt_free_path(isp->isp_path); 200 cam_sim_free(sim, TRUE); 201 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 202 CAMLOCK_2_ISPLOCK(isp); 203 return; 204 } 205 206 if (xpt_create_path(&path, NULL, cam_sim_path(sim), 207 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 208 xpt_bus_deregister(cam_sim_path(isp->isp_sim)); 209 xpt_free_path(isp->isp_path); 210 xpt_bus_deregister(cam_sim_path(sim)); 211 cam_sim_free(sim, TRUE); 212 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 213 CAMLOCK_2_ISPLOCK(isp); 214 return; 215 } 216 217 xpt_setup_ccb(&csa.ccb_h, path, 5); 218 csa.ccb_h.func_code = XPT_SASYNC_CB; 219 csa.event_enable = AC_LOST_DEVICE; 220 
csa.callback = isp_cam_async; 221 csa.callback_arg = sim; 222 xpt_action((union ccb *)&csa); 223 CAMLOCK_2_ISPLOCK(isp); 224 isp->isp_sim2 = sim; 225 isp->isp_path2 = path; 226 } 227 228 /* 229 * Create device nodes 230 */ 231 (void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT, 232 GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev)); 233 234 if (isp->isp_role != ISP_ROLE_NONE) { 235 isp->isp_state = ISP_RUNSTATE; 236 ENABLE_INTS(isp); 237 } 238 if (isplist == NULL) { 239 isplist = isp; 240 } else { 241 ispsoftc_t *tmp = isplist; 242 while (tmp->isp_osinfo.next) { 243 tmp = tmp->isp_osinfo.next; 244 } 245 tmp->isp_osinfo.next = isp; 246 } 247 248} 249 250static __inline void 251isp_freeze_loopdown(ispsoftc_t *isp, char *msg) 252{ 253 if (isp->isp_osinfo.simqfrozen == 0) { 254 isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg); 255 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 256 ISPLOCK_2_CAMLOCK(isp); 257 xpt_freeze_simq(isp->isp_sim, 1); 258 CAMLOCK_2_ISPLOCK(isp); 259 } else { 260 isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg); 261 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN; 262 } 263} 264 265 266#if __FreeBSD_version < 500000 267#define _DEV dev_t 268#define _IOP struct proc 269#else 270#define _IOP struct thread 271#define _DEV struct cdev * 272#endif 273 274static int 275ispioctl(_DEV dev, u_long c, caddr_t addr, int flags, _IOP *td) 276{ 277 ispsoftc_t *isp; 278 int nr, retval = ENOTTY; 279 280 isp = isplist; 281 while (isp) { 282 if (minor(dev) == device_get_unit(isp->isp_dev)) { 283 break; 284 } 285 isp = isp->isp_osinfo.next; 286 } 287 if (isp == NULL) 288 return (ENXIO); 289 290 switch (c) { 291#ifdef ISP_FW_CRASH_DUMP 292 case ISP_GET_FW_CRASH_DUMP: 293 { 294 uint16_t *ptr = FCPARAM(isp)->isp_dump_data; 295 size_t sz; 296 297 retval = 0; 298 if (IS_2200(isp)) 299 sz = QLA2200_RISC_IMAGE_DUMP_SIZE; 300 else 301 sz = QLA2300_RISC_IMAGE_DUMP_SIZE; 302 ISP_LOCK(isp); 303 if (ptr && *ptr) { 304 
void *uaddr = *((void **) addr); 305 if (copyout(ptr, uaddr, sz)) { 306 retval = EFAULT; 307 } else { 308 *ptr = 0; 309 } 310 } else { 311 retval = ENXIO; 312 } 313 ISP_UNLOCK(isp); 314 break; 315 } 316 317 case ISP_FORCE_CRASH_DUMP: 318 ISP_LOCK(isp); 319 isp_freeze_loopdown(isp, "ispioctl(ISP_FORCE_CRASH_DUMP)"); 320 isp_fw_dump(isp); 321 isp_reinit(isp); 322 ISP_UNLOCK(isp); 323 retval = 0; 324 break; 325#endif 326 case ISP_SDBLEV: 327 { 328 int olddblev = isp->isp_dblev; 329 isp->isp_dblev = *(int *)addr; 330 *(int *)addr = olddblev; 331 retval = 0; 332 break; 333 } 334 case ISP_GETROLE: 335 *(int *)addr = isp->isp_role; 336 retval = 0; 337 break; 338 case ISP_SETROLE: 339 nr = *(int *)addr; 340 if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) { 341 retval = EINVAL; 342 break; 343 } 344 *(int *)addr = isp->isp_role; 345 isp->isp_role = nr; 346 /* FALLTHROUGH */ 347 case ISP_RESETHBA: 348 ISP_LOCK(isp); 349 isp_reinit(isp); 350 ISP_UNLOCK(isp); 351 retval = 0; 352 break; 353 case ISP_RESCAN: 354 if (IS_FC(isp)) { 355 ISP_LOCK(isp); 356 if (isp_fc_runstate(isp, 5 * 1000000)) { 357 retval = EIO; 358 } else { 359 retval = 0; 360 } 361 ISP_UNLOCK(isp); 362 } 363 break; 364 case ISP_FC_LIP: 365 if (IS_FC(isp)) { 366 ISP_LOCK(isp); 367 if (isp_control(isp, ISPCTL_SEND_LIP, 0)) { 368 retval = EIO; 369 } else { 370 retval = 0; 371 } 372 ISP_UNLOCK(isp); 373 } 374 break; 375 case ISP_FC_GETDINFO: 376 { 377 struct isp_fc_device *ifc = (struct isp_fc_device *) addr; 378 struct lportdb *lp; 379 380 if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) { 381 retval = EINVAL; 382 break; 383 } 384 ISP_LOCK(isp); 385 lp = &FCPARAM(isp)->portdb[ifc->loopid]; 386 if (lp->valid) { 387 ifc->role = lp->roles; 388 ifc->loopid = lp->loopid; 389 ifc->portid = lp->portid; 390 ifc->node_wwn = lp->node_wwn; 391 ifc->port_wwn = lp->port_wwn; 392 retval = 0; 393 } else { 394 retval = ENODEV; 395 } 396 ISP_UNLOCK(isp); 397 break; 398 } 399 case ISP_GET_STATS: 400 { 401 isp_stats_t *sp = 
(isp_stats_t *) addr; 402 403 MEMZERO(sp, sizeof (*sp)); 404 sp->isp_stat_version = ISP_STATS_VERSION; 405 sp->isp_type = isp->isp_type; 406 sp->isp_revision = isp->isp_revision; 407 ISP_LOCK(isp); 408 sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt; 409 sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus; 410 sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc; 411 sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync; 412 sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt; 413 sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt; 414 sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater; 415 sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater; 416 ISP_UNLOCK(isp); 417 retval = 0; 418 break; 419 } 420 case ISP_CLR_STATS: 421 ISP_LOCK(isp); 422 isp->isp_intcnt = 0; 423 isp->isp_intbogus = 0; 424 isp->isp_intmboxc = 0; 425 isp->isp_intoasync = 0; 426 isp->isp_rsltccmplt = 0; 427 isp->isp_fphccmplt = 0; 428 isp->isp_rscchiwater = 0; 429 isp->isp_fpcchiwater = 0; 430 ISP_UNLOCK(isp); 431 retval = 0; 432 break; 433 case ISP_FC_GETHINFO: 434 { 435 struct isp_hba_device *hba = (struct isp_hba_device *) addr; 436 MEMZERO(hba, sizeof (*hba)); 437 ISP_LOCK(isp); 438 hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev); 439 hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev); 440 hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev); 441 hba->fc_speed = FCPARAM(isp)->isp_gbspeed; 442 hba->fc_scsi_supported = 1; 443 hba->fc_topology = FCPARAM(isp)->isp_topo + 1; 444 hba->fc_loopid = FCPARAM(isp)->isp_loopid; 445 hba->nvram_node_wwn = FCPARAM(isp)->isp_nodewwn; 446 hba->nvram_port_wwn = FCPARAM(isp)->isp_portwwn; 447 hba->active_node_wwn = ISP_NODEWWN(isp); 448 hba->active_port_wwn = ISP_PORTWWN(isp); 449 ISP_UNLOCK(isp); 450 retval = 0; 451 break; 452 } 453 case ISP_GET_FC_PARAM: 454 { 455 struct isp_fc_param *f = (struct isp_fc_param *) addr; 456 457 if (!IS_FC(isp)) { 458 retval = EINVAL; 459 break; 460 } 461 f->parameter = 0; 462 if (strcmp(f->param_name, "framelength") == 0) { 463 f->parameter = 
FCPARAM(isp)->isp_maxfrmlen; 464 retval = 0; 465 break; 466 } 467 if (strcmp(f->param_name, "exec_throttle") == 0) { 468 f->parameter = FCPARAM(isp)->isp_execthrottle; 469 retval = 0; 470 break; 471 } 472 if (strcmp(f->param_name, "fullduplex") == 0) { 473 if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX) 474 f->parameter = 1; 475 retval = 0; 476 break; 477 } 478 if (strcmp(f->param_name, "loopid") == 0) { 479 f->parameter = FCPARAM(isp)->isp_loopid; 480 retval = 0; 481 break; 482 } 483 retval = EINVAL; 484 break; 485 } 486 case ISP_SET_FC_PARAM: 487 { 488 struct isp_fc_param *f = (struct isp_fc_param *) addr; 489 uint32_t param = f->parameter; 490 491 if (!IS_FC(isp)) { 492 retval = EINVAL; 493 break; 494 } 495 f->parameter = 0; 496 if (strcmp(f->param_name, "framelength") == 0) { 497 if (param != 512 && param != 1024 && param != 1024) { 498 retval = EINVAL; 499 break; 500 } 501 FCPARAM(isp)->isp_maxfrmlen = param; 502 retval = 0; 503 break; 504 } 505 if (strcmp(f->param_name, "exec_throttle") == 0) { 506 if (param < 16 || param > 255) { 507 retval = EINVAL; 508 break; 509 } 510 FCPARAM(isp)->isp_execthrottle = param; 511 retval = 0; 512 break; 513 } 514 if (strcmp(f->param_name, "fullduplex") == 0) { 515 if (param != 0 && param != 1) { 516 retval = EINVAL; 517 break; 518 } 519 if (param) { 520 FCPARAM(isp)->isp_fwoptions |= 521 ICBOPT_FULL_DUPLEX; 522 } else { 523 FCPARAM(isp)->isp_fwoptions &= 524 ~ICBOPT_FULL_DUPLEX; 525 } 526 retval = 0; 527 break; 528 } 529 if (strcmp(f->param_name, "loopid") == 0) { 530 if (param < 0 || param > 125) { 531 retval = EINVAL; 532 break; 533 } 534 FCPARAM(isp)->isp_loopid = param; 535 retval = 0; 536 break; 537 } 538 retval = EINVAL; 539 break; 540 } 541 case ISP_TSK_MGMT: 542 { 543 int needmarker; 544 struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr; 545 uint16_t loopid; 546 mbreg_t mbs; 547 548 if (IS_SCSI(isp)) { 549 retval = EINVAL; 550 break; 551 } 552 553 memset(&mbs, 0, sizeof (mbs)); 554 needmarker = 
retval = 0; 555 loopid = fct->loopid; 556 if (IS_2KLOGIN(isp) == 0) { 557 loopid <<= 8; 558 } 559 switch (fct->action) { 560 case CLEAR_ACA: 561 mbs.param[0] = MBOX_CLEAR_ACA; 562 mbs.param[1] = loopid; 563 mbs.param[2] = fct->lun; 564 break; 565 case TARGET_RESET: 566 mbs.param[0] = MBOX_TARGET_RESET; 567 mbs.param[1] = loopid; 568 needmarker = 1; 569 break; 570 case LUN_RESET: 571 mbs.param[0] = MBOX_LUN_RESET; 572 mbs.param[1] = loopid; 573 mbs.param[2] = fct->lun; 574 needmarker = 1; 575 break; 576 case CLEAR_TASK_SET: 577 mbs.param[0] = MBOX_CLEAR_TASK_SET; 578 mbs.param[1] = loopid; 579 mbs.param[2] = fct->lun; 580 needmarker = 1; 581 break; 582 case ABORT_TASK_SET: 583 mbs.param[0] = MBOX_ABORT_TASK_SET; 584 mbs.param[1] = loopid; 585 mbs.param[2] = fct->lun; 586 needmarker = 1; 587 break; 588 default: 589 retval = EINVAL; 590 break; 591 } 592 if (retval == 0) { 593 ISP_LOCK(isp); 594 if (needmarker) { 595 isp->isp_sendmarker |= 1; 596 } 597 retval = isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs); 598 ISP_UNLOCK(isp); 599 if (retval) 600 retval = EIO; 601 } 602 break; 603 } 604 default: 605 break; 606 } 607 return (retval); 608} 609 610static void 611isp_intr_enable(void *arg) 612{ 613 ispsoftc_t *isp = arg; 614 if (isp->isp_role != ISP_ROLE_NONE) { 615 ENABLE_INTS(isp); 616#if 0 617 isp->isp_osinfo.intsok = 1; 618#endif 619 } 620 /* Release our hook so that the boot can continue. 
*/ 621 config_intrhook_disestablish(&isp->isp_osinfo.ehook); 622} 623 624/* 625 * Put the target mode functions here, because some are inlines 626 */ 627 628#ifdef ISP_TARGET_MODE 629 630static __inline int is_lun_enabled(ispsoftc_t *, int, lun_id_t); 631static __inline int are_any_luns_enabled(ispsoftc_t *, int); 632static __inline tstate_t *get_lun_statep(ispsoftc_t *, int, lun_id_t); 633static __inline void rls_lun_statep(ispsoftc_t *, tstate_t *); 634static __inline atio_private_data_t *isp_get_atpd(ispsoftc_t *, int); 635static cam_status 636create_lun_state(ispsoftc_t *, int, struct cam_path *, tstate_t **); 637static void destroy_lun_state(ispsoftc_t *, tstate_t *); 638static int isp_en_lun(ispsoftc_t *, union ccb *); 639static void isp_ledone(ispsoftc_t *, lun_entry_t *); 640static cam_status isp_abort_tgt_ccb(ispsoftc_t *, union ccb *); 641static timeout_t isp_refire_putback_atio; 642static void isp_complete_ctio(union ccb *); 643static void isp_target_putback_atio(union ccb *); 644static void isp_target_start_ctio(ispsoftc_t *, union ccb *); 645static int isp_handle_platform_atio(ispsoftc_t *, at_entry_t *); 646static int isp_handle_platform_atio2(ispsoftc_t *, at2_entry_t *); 647static int isp_handle_platform_ctio(ispsoftc_t *, void *); 648static int isp_handle_platform_notify_scsi(ispsoftc_t *, in_entry_t *); 649static int isp_handle_platform_notify_fc(ispsoftc_t *, in_fcentry_t *); 650 651static __inline int 652is_lun_enabled(ispsoftc_t *isp, int bus, lun_id_t lun) 653{ 654 tstate_t *tptr; 655 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)]; 656 if (tptr == NULL) { 657 return (0); 658 } 659 do { 660 if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) { 661 return (1); 662 } 663 } while ((tptr = tptr->next) != NULL); 664 return (0); 665} 666 667static __inline int 668are_any_luns_enabled(ispsoftc_t *isp, int port) 669{ 670 int lo, hi; 671 if (IS_DUALBUS(isp)) { 672 lo = (port * (LUN_HASH_SIZE >> 1)); 673 hi = lo + (LUN_HASH_SIZE >> 1); 
674 } else { 675 lo = 0; 676 hi = LUN_HASH_SIZE; 677 } 678 for (lo = 0; lo < hi; lo++) { 679 if (isp->isp_osinfo.lun_hash[lo]) { 680 return (1); 681 } 682 } 683 return (0); 684} 685 686static __inline tstate_t * 687get_lun_statep(ispsoftc_t *isp, int bus, lun_id_t lun) 688{ 689 tstate_t *tptr = NULL; 690 691 if (lun == CAM_LUN_WILDCARD) { 692 if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) { 693 tptr = &isp->isp_osinfo.tsdflt[bus]; 694 tptr->hold++; 695 return (tptr); 696 } 697 return (NULL); 698 } else { 699 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)]; 700 if (tptr == NULL) { 701 return (NULL); 702 } 703 } 704 705 do { 706 if (tptr->lun == lun && tptr->bus == bus) { 707 tptr->hold++; 708 return (tptr); 709 } 710 } while ((tptr = tptr->next) != NULL); 711 return (tptr); 712} 713 714static __inline void 715rls_lun_statep(ispsoftc_t *isp, tstate_t *tptr) 716{ 717 if (tptr->hold) 718 tptr->hold--; 719} 720 721static __inline atio_private_data_t * 722isp_get_atpd(ispsoftc_t *isp, int tag) 723{ 724 atio_private_data_t *atp; 725 for (atp = isp->isp_osinfo.atpdp; 726 atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) { 727 if (atp->tag == tag) 728 return (atp); 729 } 730 return (NULL); 731} 732 733static cam_status 734create_lun_state(ispsoftc_t *isp, int bus, 735 struct cam_path *path, tstate_t **rslt) 736{ 737 cam_status status; 738 lun_id_t lun; 739 int hfx; 740 tstate_t *tptr, *new; 741 742 lun = xpt_path_lun_id(path); 743 if (lun < 0) { 744 return (CAM_LUN_INVALID); 745 } 746 if (is_lun_enabled(isp, bus, lun)) { 747 return (CAM_LUN_ALRDY_ENA); 748 } 749 new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO); 750 if (new == NULL) { 751 return (CAM_RESRC_UNAVAIL); 752 } 753 754 status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path), 755 xpt_path_target_id(path), xpt_path_lun_id(path)); 756 if (status != CAM_REQ_CMP) { 757 free(new, M_DEVBUF); 758 return (status); 759 } 760 new->bus = bus; 761 new->lun = lun; 762 
SLIST_INIT(&new->atios); 763 SLIST_INIT(&new->inots); 764 new->hold = 1; 765 766 hfx = LUN_HASH_FUNC(isp, new->bus, new->lun); 767 tptr = isp->isp_osinfo.lun_hash[hfx]; 768 if (tptr == NULL) { 769 isp->isp_osinfo.lun_hash[hfx] = new; 770 } else { 771 while (tptr->next) 772 tptr = tptr->next; 773 tptr->next = new; 774 } 775 *rslt = new; 776 return (CAM_REQ_CMP); 777} 778 779static __inline void 780destroy_lun_state(ispsoftc_t *isp, tstate_t *tptr) 781{ 782 int hfx; 783 tstate_t *lw, *pw; 784 785 if (tptr->hold) { 786 return; 787 } 788 hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun); 789 pw = isp->isp_osinfo.lun_hash[hfx]; 790 if (pw == NULL) { 791 return; 792 } else if (pw->lun == tptr->lun && pw->bus == tptr->bus) { 793 isp->isp_osinfo.lun_hash[hfx] = pw->next; 794 } else { 795 lw = pw; 796 pw = lw->next; 797 while (pw) { 798 if (pw->lun == tptr->lun && pw->bus == tptr->bus) { 799 lw->next = pw->next; 800 break; 801 } 802 lw = pw; 803 pw = pw->next; 804 } 805 if (pw == NULL) { 806 return; 807 } 808 } 809 free(tptr, M_DEVBUF); 810} 811 812/* 813 * Enable luns. 814 */ 815static int 816isp_en_lun(ispsoftc_t *isp, union ccb *ccb) 817{ 818 struct ccb_en_lun *cel = &ccb->cel; 819 tstate_t *tptr; 820 uint32_t seq; 821 int bus, cmd, av, wildcard, tm_on; 822 lun_id_t lun; 823 target_id_t tgt; 824 825 bus = XS_CHANNEL(ccb); 826 if (bus > 1) { 827 xpt_print_path(ccb->ccb_h.path); 828 printf("illegal bus %d\n", bus); 829 ccb->ccb_h.status = CAM_PATH_INVALID; 830 return (-1); 831 } 832 tgt = ccb->ccb_h.target_id; 833 lun = ccb->ccb_h.target_lun; 834 835 isp_prt(isp, ISP_LOGTDEBUG0, 836 "isp_en_lun: %sabling lun 0x%x on channel %d", 837 cel->enable? 
"en" : "dis", lun, bus); 838 839 840 if ((lun != CAM_LUN_WILDCARD) && 841 (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) { 842 ccb->ccb_h.status = CAM_LUN_INVALID; 843 return (-1); 844 } 845 846 if (IS_SCSI(isp)) { 847 sdparam *sdp = isp->isp_param; 848 sdp += bus; 849 if (tgt != CAM_TARGET_WILDCARD && 850 tgt != sdp->isp_initiator_id) { 851 ccb->ccb_h.status = CAM_TID_INVALID; 852 return (-1); 853 } 854 } else { 855 /* 856 * There's really no point in doing this yet w/o multi-tid 857 * capability. Even then, it's problematic. 858 */ 859#if 0 860 if (tgt != CAM_TARGET_WILDCARD && 861 tgt != FCPARAM(isp)->isp_iid) { 862 ccb->ccb_h.status = CAM_TID_INVALID; 863 return (-1); 864 } 865#endif 866 /* 867 * This is as a good a place as any to check f/w capabilities. 868 */ 869 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) { 870 isp_prt(isp, ISP_LOGERR, 871 "firmware does not support target mode"); 872 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 873 return (-1); 874 } 875 /* 876 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to 877 * XXX: dorks with our already fragile enable/disable code. 878 */ 879 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) { 880 isp_prt(isp, ISP_LOGERR, 881 "firmware not SCCLUN capable"); 882 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 883 return (-1); 884 } 885 } 886 887 if (tgt == CAM_TARGET_WILDCARD) { 888 if (lun == CAM_LUN_WILDCARD) { 889 wildcard = 1; 890 } else { 891 ccb->ccb_h.status = CAM_LUN_INVALID; 892 return (-1); 893 } 894 } else { 895 wildcard = 0; 896 } 897 898 tm_on = (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) != 0; 899 900 /* 901 * Next check to see whether this is a target/lun wildcard action. 902 * 903 * If so, we know that we can accept commands for luns that haven't 904 * been enabled yet and send them upstream. Otherwise, we have to 905 * handle them locally (if we see them at all). 
906 */ 907 908 if (wildcard) { 909 tptr = &isp->isp_osinfo.tsdflt[bus]; 910 if (cel->enable) { 911 if (tm_on) { 912 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 913 return (-1); 914 } 915 ccb->ccb_h.status = 916 xpt_create_path(&tptr->owner, NULL, 917 xpt_path_path_id(ccb->ccb_h.path), 918 xpt_path_target_id(ccb->ccb_h.path), 919 xpt_path_lun_id(ccb->ccb_h.path)); 920 if (ccb->ccb_h.status != CAM_REQ_CMP) { 921 return (-1); 922 } 923 SLIST_INIT(&tptr->atios); 924 SLIST_INIT(&tptr->inots); 925 isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED; 926 } else { 927 if (tm_on == 0) { 928 ccb->ccb_h.status = CAM_REQ_CMP; 929 return (-1); 930 } 931 if (tptr->hold) { 932 ccb->ccb_h.status = CAM_SCSI_BUSY; 933 return (-1); 934 } 935 xpt_free_path(tptr->owner); 936 isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED; 937 } 938 } 939 940 /* 941 * Now check to see whether this bus needs to be 942 * enabled/disabled with respect to target mode. 943 */ 944 av = bus << 31; 945 if (cel->enable && tm_on == 0) { 946 av |= ENABLE_TARGET_FLAG; 947 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 948 if (av) { 949 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 950 if (wildcard) { 951 isp->isp_osinfo.tmflags[bus] &= 952 ~TM_WILDCARD_ENABLED; 953 xpt_free_path(tptr->owner); 954 } 955 return (-1); 956 } 957 isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED; 958 isp_prt(isp, ISP_LOGINFO, 959 "Target Mode enabled on channel %d", bus); 960 } else if (cel->enable == 0 && tm_on && wildcard) { 961 if (are_any_luns_enabled(isp, bus)) { 962 ccb->ccb_h.status = CAM_SCSI_BUSY; 963 return (-1); 964 } 965 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 966 if (av) { 967 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 968 return (-1); 969 } 970 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; 971 isp_prt(isp, ISP_LOGINFO, 972 "Target Mode disabled on channel %d", bus); 973 } 974 975 if (wildcard) { 976 ccb->ccb_h.status = CAM_REQ_CMP; 977 return (-1); 978 } 979 980 /* 981 * Find an empty slot 982 */ 983 for (seq = 0; seq 
< NLEACT; seq++) { 984 if (isp->isp_osinfo.leact[seq] == 0) { 985 break; 986 } 987 } 988 if (seq >= NLEACT) { 989 ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 990 return (-1); 991 992 } 993 isp->isp_osinfo.leact[seq] = ccb; 994 995 if (cel->enable) { 996 ccb->ccb_h.status = 997 create_lun_state(isp, bus, ccb->ccb_h.path, &tptr); 998 if (ccb->ccb_h.status != CAM_REQ_CMP) { 999 isp->isp_osinfo.leact[seq] = 0; 1000 return (-1); 1001 } 1002 } else { 1003 tptr = get_lun_statep(isp, bus, lun); 1004 if (tptr == NULL) { 1005 ccb->ccb_h.status = CAM_LUN_INVALID; 1006 return (-1); 1007 } 1008 } 1009 1010 if (cel->enable) { 1011 int c, n, ulun = lun; 1012 1013 cmd = RQSTYPE_ENABLE_LUN; 1014 c = DFLT_CMND_CNT; 1015 n = DFLT_INOT_CNT; 1016 if (IS_FC(isp) && lun != 0) { 1017 cmd = RQSTYPE_MODIFY_LUN; 1018 n = 0; 1019 /* 1020 * For SCC firmware, we only deal with setting 1021 * (enabling or modifying) lun 0. 1022 */ 1023 ulun = 0; 1024 } 1025 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) { 1026 rls_lun_statep(isp, tptr); 1027 ccb->ccb_h.status = CAM_REQ_INPROG; 1028 return (seq); 1029 } 1030 } else { 1031 int c, n, ulun = lun; 1032 1033 cmd = -RQSTYPE_MODIFY_LUN; 1034 c = DFLT_CMND_CNT; 1035 n = DFLT_INOT_CNT; 1036 if (IS_FC(isp) && lun != 0) { 1037 n = 0; 1038 /* 1039 * For SCC firmware, we only deal with setting 1040 * (enabling or modifying) lun 0. 
1041 */ 1042 ulun = 0; 1043 } 1044 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) { 1045 rls_lun_statep(isp, tptr); 1046 ccb->ccb_h.status = CAM_REQ_INPROG; 1047 return (seq); 1048 } 1049 } 1050 rls_lun_statep(isp, tptr); 1051 xpt_print_path(ccb->ccb_h.path); 1052 printf("isp_lun_cmd failed\n"); 1053 isp->isp_osinfo.leact[seq] = 0; 1054 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1055 return (-1); 1056} 1057 1058static void 1059isp_ledone(ispsoftc_t *isp, lun_entry_t *lep) 1060{ 1061 const char lfmt[] = "lun %d now %sabled for target mode on channel %d"; 1062 union ccb *ccb; 1063 uint32_t seq; 1064 tstate_t *tptr; 1065 int av; 1066 struct ccb_en_lun *cel; 1067 1068 seq = lep->le_reserved - 1; 1069 if (seq >= NLEACT) { 1070 isp_prt(isp, ISP_LOGERR, 1071 "seq out of range (%u) in isp_ledone", seq); 1072 return; 1073 } 1074 ccb = isp->isp_osinfo.leact[seq]; 1075 if (ccb == 0) { 1076 isp_prt(isp, ISP_LOGERR, 1077 "no ccb for seq %u in isp_ledone", seq); 1078 return; 1079 } 1080 cel = &ccb->cel; 1081 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), XS_LUN(ccb)); 1082 if (tptr == NULL) { 1083 xpt_print_path(ccb->ccb_h.path); 1084 printf("null tptr in isp_ledone\n"); 1085 isp->isp_osinfo.leact[seq] = 0; 1086 return; 1087 } 1088 1089 if (lep->le_status != LUN_OK) { 1090 xpt_print_path(ccb->ccb_h.path); 1091 printf("ENABLE/MODIFY LUN returned 0x%x\n", lep->le_status); 1092err: 1093 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1094 xpt_print_path(ccb->ccb_h.path); 1095 rls_lun_statep(isp, tptr); 1096 isp->isp_osinfo.leact[seq] = 0; 1097 ISPLOCK_2_CAMLOCK(isp); 1098 xpt_done(ccb); 1099 CAMLOCK_2_ISPLOCK(isp); 1100 return; 1101 } else { 1102 isp_prt(isp, ISP_LOGTDEBUG0, 1103 "isp_ledone: ENABLE/MODIFY done okay"); 1104 } 1105 1106 1107 if (cel->enable) { 1108 ccb->ccb_h.status = CAM_REQ_CMP; 1109 isp_prt(isp, ISP_LOGINFO, lfmt, 1110 XS_LUN(ccb), "en", XS_CHANNEL(ccb)); 1111 rls_lun_statep(isp, tptr); 1112 isp->isp_osinfo.leact[seq] = 0; 1113 ISPLOCK_2_CAMLOCK(isp); 1114 
xpt_done(ccb); 1115 CAMLOCK_2_ISPLOCK(isp); 1116 return; 1117 } 1118 1119 if (lep->le_header.rqs_entry_type == RQSTYPE_MODIFY_LUN) { 1120 if (isp_lun_cmd(isp, -RQSTYPE_ENABLE_LUN, XS_CHANNEL(ccb), 1121 XS_TGT(ccb), XS_LUN(ccb), 0, 0, seq+1)) { 1122 xpt_print_path(ccb->ccb_h.path); 1123 printf("isp_ledone: isp_lun_cmd failed\n"); 1124 goto err; 1125 } 1126 rls_lun_statep(isp, tptr); 1127 return; 1128 } 1129 1130 isp_prt(isp, ISP_LOGINFO, lfmt, XS_LUN(ccb), "dis", XS_CHANNEL(ccb)); 1131 rls_lun_statep(isp, tptr); 1132 destroy_lun_state(isp, tptr); 1133 ccb->ccb_h.status = CAM_REQ_CMP; 1134 isp->isp_osinfo.leact[seq] = 0; 1135 ISPLOCK_2_CAMLOCK(isp); 1136 xpt_done(ccb); 1137 CAMLOCK_2_ISPLOCK(isp); 1138 if (are_any_luns_enabled(isp, XS_CHANNEL(ccb)) == 0) { 1139 int bus = XS_CHANNEL(ccb); 1140 av = bus << 31; 1141 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av); 1142 if (av) { 1143 isp_prt(isp, ISP_LOGWARN, 1144 "disable target mode on channel %d failed", bus); 1145 } else { 1146 isp_prt(isp, ISP_LOGINFO, 1147 "Target Mode disabled on channel %d", bus); 1148 } 1149 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED; 1150 } 1151} 1152 1153 1154static cam_status 1155isp_abort_tgt_ccb(ispsoftc_t *isp, union ccb *ccb) 1156{ 1157 tstate_t *tptr; 1158 struct ccb_hdr_slist *lp; 1159 struct ccb_hdr *curelm; 1160 int found, *ctr; 1161 union ccb *accb = ccb->cab.abort_ccb; 1162 1163 isp_prt(isp, ISP_LOGTDEBUG0, "aborting ccb %p", accb); 1164 if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 1165 int badpath = 0; 1166 if (IS_FC(isp) && (accb->ccb_h.target_id != 1167 ((fcparam *) isp->isp_param)->isp_loopid)) { 1168 badpath = 1; 1169 } else if (IS_SCSI(isp) && (accb->ccb_h.target_id != 1170 ((sdparam *) isp->isp_param)->isp_initiator_id)) { 1171 badpath = 1; 1172 } 1173 if (badpath) { 1174 /* 1175 * Being restrictive about target ids is really about 1176 * making sure we're aborting for the right multi-tid 1177 * path. This doesn't really make much sense at present. 
/*
 * Build and queue a CTIO (Continue Target I/O) request for an XPT_CONT_TARGET_IO
 * CCB: either a data-movement CTIO, a status-bearing CTIO, or both combined.
 * Called with the ISP lock held.  On any failure path the CCB is completed
 * immediately via xpt_done() with an error status set.
 */
static void
isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb)
{
	void *qe;
	struct ccb_scsiio *cso = &ccb->csio;
	uint16_t *hp, save_handle;
	uint16_t nxti, optr;
	uint8_t local[QENTRY_LEN];	/* staging area; swizzled copy goes on the queue */


	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
		/* No room on the request queue: ask CAM to requeue the CCB. */
		xpt_print_path(ccb->ccb_h.path);
		printf("Request Queue Overflow in isp_target_start_ctio\n");
		XS_SETERR(ccb, CAM_REQUEUE_REQ);
		goto out;
	}
	memset(local, 0, QENTRY_LEN);

	/*
	 * We're either moving data or completing a command here.
	 */

	if (IS_FC(isp)) {
		/* Fibre Channel: build a CTIO2 entry. */
		atio_private_data_t *atp;
		ct2_entry_t *cto = (ct2_entry_t *) local;

		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
		cto->ct_header.rqs_entry_count = 1;
		if (IS_2KLOGIN(isp)) {
			/* 2K-login firmware uses the wider iid field layout. */
			((ct2e_entry_t *)cto)->ct_iid = cso->init_id;
		} else {
			cto->ct_iid = cso->init_id;
			if (!(FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN)) {
				cto->ct_lun = ccb->ccb_h.target_lun;
			}
		}

		/* Per-command adjunct tracks cumulative data movement for this tag. */
		atp = isp_get_atpd(isp, cso->tag_id);
		if (atp == NULL) {
			isp_prt(isp, ISP_LOGERR,
			    "cannot find private data adjunct for tag %x",
			    cso->tag_id);
			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
			goto out;
		}

		cto->ct_rxid = cso->tag_id;
		if (cso->dxfer_len == 0) {
			/* No data this round: Mode 1 (status/sense only). */
			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
			if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
				cto->ct_flags |= CT2_SENDSTATUS;
				cto->rsp.m1.ct_scsi_status = cso->scsi_status;
				/* Residual = what the initiator asked for minus what we moved. */
				cto->ct_resid =
				    atp->orig_datalen - atp->bytes_xfered;
				if (cto->ct_resid < 0) {
					cto->rsp.m1.ct_scsi_status |=
					    CT2_DATA_OVER;
				} else if (cto->ct_resid > 0) {
					cto->rsp.m1.ct_scsi_status |=
					    CT2_DATA_UNDER;
				}
			}
			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
				/* Piggyback sense data in the Mode 1 response area. */
				int m = min(cso->sense_len, MAXRESPLEN);
				memcpy(cto->rsp.m1.ct_resp,
				    &cso->sense_data, m);
				cto->rsp.m1.ct_senselen = m;
				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
			}
		} else {
			/* Data movement: Mode 0. */
			cto->ct_flags |= CT2_FLAG_MODE0;
			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				cto->ct_flags |= CT2_DATA_IN;
			} else {
				cto->ct_flags |= CT2_DATA_OUT;
			}
			/* Relative offset of this transfer within the whole command. */
			cto->ct_reloff = atp->bytes_xfered;
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
				cto->ct_flags |= CT2_SENDSTATUS;
				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
				cto->ct_resid =
				    atp->orig_datalen -
				    (atp->bytes_xfered + cso->dxfer_len);
				if (cto->ct_resid < 0) {
					cto->rsp.m0.ct_scsi_status |=
					    CT2_DATA_OVER;
				} else if (cto->ct_resid > 0) {
					cto->rsp.m0.ct_scsi_status |=
					    CT2_DATA_UNDER;
				}
			} else {
				/* Intermediate CTIO: remember this frame's length so the
				 * completion path can account for it. */
				atp->last_xframt = cso->dxfer_len;
			}
			/*
			 * If we're sending data and status back together,
			 * we can't also send back sense data as well.
			 */
			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
		}

		if (cto->ct_flags & CT2_SENDSTATUS) {
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "CTIO2[%x] STATUS %x origd %u curd %u resid %u",
			    cto->ct_rxid, cso->scsi_status, atp->orig_datalen,
			    cso->dxfer_len, cto->ct_resid);
			cto->ct_flags |= CT2_CCINCR;
			atp->state = ATPD_STATE_LAST_CTIO;
		} else {
			atp->state = ATPD_STATE_CTIO;
		}
		cto->ct_timeout = 10;
		hp = &cto->ct_syshandle;
	} else {
		/* Parallel SCSI: build a CTIO entry. */
		ct_entry_t *cto = (ct_entry_t *) local;

		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
		cto->ct_header.rqs_entry_count = 1;
		cto->ct_iid = cso->init_id;
		/* Channel number is encoded in bit 7 of the iid field. */
		cto->ct_iid |= XS_CHANNEL(ccb) << 7;
		cto->ct_tgt = ccb->ccb_h.target_id;
		cto->ct_lun = ccb->ccb_h.target_lun;
		cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
		if (AT_HAS_TAG(cso->tag_id)) {
			cto->ct_tag_val = (uint8_t) AT_GET_TAG(cso->tag_id);
			cto->ct_flags |= CT_TQAE;
		}
		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
			cto->ct_flags |= CT_NODISC;
		}
		if (cso->dxfer_len == 0) {
			cto->ct_flags |= CT_NO_DATA;
		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			cto->ct_flags |= CT_DATA_IN;
		} else {
			cto->ct_flags |= CT_DATA_OUT;
		}
		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
			cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR;
			cto->ct_scsi_status = cso->scsi_status;
			cto->ct_resid = cso->resid;
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
			    cto->ct_fwhandle, cso->scsi_status, cso->resid,
			    cso->tag_id);
		}
		/* Parallel SCSI CTIOs cannot autofeed sense data. */
		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
		cto->ct_timeout = 10;
		hp = &cto->ct_syshandle;
	}

	/* Register the CCB under a handle so the completion can find it. */
	if (isp_save_xs_tgt(isp, ccb, hp)) {
		xpt_print_path(ccb->ccb_h.path);
		printf("No XFLIST pointers for isp_target_start_ctio\n");
		XS_SETERR(ccb, CAM_REQUEUE_REQ);
		goto out;
	}


	/*
	 * Call the dma setup routines for this entry (and any subsequent
	 * CTIOs) if there's data to move, and then tell the f/w it's got
	 * new things to play with. As with isp_start's usage of DMA setup,
	 * any swizzling is done in the machine dependent layer. Because
	 * of this, we put the request onto the queue area first in native
	 * format.
	 */

	save_handle = *hp;	/* DMA setup may overwrite *hp; keep a copy for cleanup */

	switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) {
	case CMD_QUEUED:
		ISP_ADD_REQUEST(isp, nxti);
		ccb->ccb_h.status |= CAM_SIM_QUEUED;
		return;

	case CMD_EAGAIN:
		XS_SETERR(ccb, CAM_REQUEUE_REQ);
		break;

	default:
		break;
	}
	/* DMA setup failed: release the handle we just allocated. */
	isp_destroy_tgt_handle(isp, save_handle);

out:
	ISPLOCK_2_CAMLOCK(isp);
	xpt_done(ccb);
	CAMLOCK_2_ISPLOCK(isp);
}

/*
 * timeout(9) callback used to retry isp_target_putback_atio() after a
 * request-queue-full condition; splcam() provides the legacy interrupt
 * synchronization expected by the target putback path.
 */
static void
isp_refire_putback_atio(void *arg)
{
	int s = splcam();
	isp_target_putback_atio(arg);
	splx(s);
}

/*
 * Return an ATIO resource to the firmware by rebuilding an ATIO/ATIO2
 * request-queue entry from the CCB, then complete the associated CTIO.
 * If the request queue is full, reschedule ourselves via timeout(9)
 * (10 ticks) and try again later.
 */
static void
isp_target_putback_atio(union ccb *ccb)
{
	ispsoftc_t *isp;
	struct ccb_scsiio *cso;
	uint16_t nxti, optr;
	void *qe;

	isp = XS_ISP(ccb);

	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
		/* Queue full: retry later rather than losing the resource. */
		(void) timeout(isp_refire_putback_atio, ccb, 10);
		isp_prt(isp, ISP_LOGWARN,
		    "isp_target_putback_atio: Request Queue Overflow");
		return;
	}
	memset(qe, 0, QENTRY_LEN);
	cso = &ccb->csio;
	if (IS_FC(isp)) {
		/* Fibre Channel: rebuild an ATIO2. */
		at2_entry_t local, *at = &local;
		MEMZERO(at, sizeof (at2_entry_t));
		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
		at->at_header.rqs_entry_count = 1;
		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
			at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
		} else {
			at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
		}
		at->at_status = CT_OK;
		at->at_rxid = cso->tag_id;
		at->at_iid = cso->ccb_h.target_id;
		isp_put_atio2(isp, at, qe);
	} else {
		/* Parallel SCSI: rebuild an ATIO, re-encoding channel/tag/handle. */
		at_entry_t local, *at = &local;
		MEMZERO(at, sizeof (at_entry_t));
		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
		at->at_header.rqs_entry_count = 1;
		at->at_iid = cso->init_id;
		at->at_iid |= XS_CHANNEL(ccb) << 7;
		at->at_tgt = cso->ccb_h.target_id;
		at->at_lun = cso->ccb_h.target_lun;
		at->at_status = CT_OK;
		at->at_tag_val = AT_GET_TAG(cso->tag_id);
		at->at_handle = AT_GET_HANDLE(cso->tag_id);
		isp_put_atio(isp, at, qe);
	}
	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
	ISP_ADD_REQUEST(isp, nxti);
	isp_complete_ctio(ccb);
}

/*
 * Finish a CTIO CCB back to CAM: promote an in-progress status to
 * CAM_REQ_CMP, clear CAM_SIM_QUEUED, and hand the CCB to xpt_done().
 *
 * NOTE(review): `isp` is not declared in this function's scope; the
 * ISPLOCK_2_CAMLOCK/CAMLOCK_2_ISPLOCK macros presumably expand to nothing
 * in the default (non-ISP_SMPLOCK) build, which is the only way this
 * compiles — confirm before enabling ISP_SMPLOCK.
 */
static void
isp_complete_ctio(union ccb *ccb)
{
	ISPLOCK_2_CAMLOCK(isp);
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	xpt_done(ccb);
	CAMLOCK_2_ISPLOCK(isp);
}
I'm not sure quite yet what 1520 * to do about this for CAM. 1521 */ 1522 isp_prt(isp, ISP_LOGWARN, "PHASE ERROR"); 1523 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1524 return (0); 1525 } 1526 if ((status & ~QLTM_SVALID) != AT_CDB) { 1527 isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform", 1528 status); 1529 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1530 return (0); 1531 } 1532 1533 bus = GET_BUS_VAL(aep->at_iid); 1534 tptr = get_lun_statep(isp, bus, aep->at_lun); 1535 if (tptr == NULL) { 1536 tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD); 1537 if (tptr == NULL) { 1538 /* 1539 * Because we can't autofeed sense data back with 1540 * a command for parallel SCSI, we can't give back 1541 * a CHECK CONDITION. We'll give back a BUSY status 1542 * instead. This works out okay because the only 1543 * time we should, in fact, get this, is in the 1544 * case that somebody configured us without the 1545 * blackhole driver, so they get what they deserve. 1546 */ 1547 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1548 return (0); 1549 } 1550 iswildcard = 1; 1551 } else { 1552 iswildcard = 0; 1553 } 1554 1555 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 1556 if (atiop == NULL) { 1557 /* 1558 * Because we can't autofeed sense data back with 1559 * a command for parallel SCSI, we can't give back 1560 * a CHECK CONDITION. We'll give back a QUEUE FULL status 1561 * instead. This works out okay because the only time we 1562 * should, in fact, get this, is in the case that we've 1563 * run out of ATIOS. 
1564 */ 1565 xpt_print_path(tptr->owner); 1566 isp_prt(isp, ISP_LOGWARN, 1567 "no ATIOS for lun %d from initiator %d on channel %d", 1568 aep->at_lun, GET_IID_VAL(aep->at_iid), bus); 1569 if (aep->at_flags & AT_TQAE) 1570 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); 1571 else 1572 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1573 rls_lun_statep(isp, tptr); 1574 return (0); 1575 } 1576 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 1577 tptr->atio_count--; 1578 isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d", 1579 aep->at_lun, tptr->atio_count); 1580 if (iswildcard) { 1581 atiop->ccb_h.target_id = aep->at_tgt; 1582 atiop->ccb_h.target_lun = aep->at_lun; 1583 } 1584 if (aep->at_flags & AT_NODISC) { 1585 atiop->ccb_h.flags = CAM_DIS_DISCONNECT; 1586 } else { 1587 atiop->ccb_h.flags = 0; 1588 } 1589 1590 if (status & QLTM_SVALID) { 1591 size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data)); 1592 atiop->sense_len = amt; 1593 MEMCPY(&atiop->sense_data, aep->at_sense, amt); 1594 } else { 1595 atiop->sense_len = 0; 1596 } 1597 1598 atiop->init_id = GET_IID_VAL(aep->at_iid); 1599 atiop->cdb_len = aep->at_cdblen; 1600 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen); 1601 atiop->ccb_h.status = CAM_CDB_RECVD; 1602 /* 1603 * Construct a tag 'id' based upon tag value (which may be 0..255) 1604 * and the handle (which we have to preserve). 1605 */ 1606 AT_MAKE_TAGID(atiop->tag_id, device_get_unit(isp->isp_dev), aep); 1607 if (aep->at_flags & AT_TQAE) { 1608 atiop->tag_action = aep->at_tag_type; 1609 atiop->ccb_h.status |= CAM_TAG_ACTION_VALID; 1610 } 1611 xpt_done((union ccb*)atiop); 1612 isp_prt(isp, ISP_LOGTDEBUG0, 1613 "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s", 1614 aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid), 1615 GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff, 1616 aep->at_tag_type, (aep->at_flags & AT_NODISC)? 
1617 "nondisc" : "disconnecting"); 1618 rls_lun_statep(isp, tptr); 1619 return (0); 1620} 1621 1622static int 1623isp_handle_platform_atio2(ispsoftc_t *isp, at2_entry_t *aep) 1624{ 1625 lun_id_t lun; 1626 tstate_t *tptr; 1627 struct ccb_accept_tio *atiop; 1628 atio_private_data_t *atp; 1629 1630 /* 1631 * The firmware status (except for the QLTM_SVALID bit) 1632 * indicates why this ATIO was sent to us. 1633 * 1634 * If QLTM_SVALID is set, the firware has recommended Sense Data. 1635 */ 1636 if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) { 1637 isp_prt(isp, ISP_LOGWARN, 1638 "bogus atio (0x%x) leaked to platform", aep->at_status); 1639 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0); 1640 return (0); 1641 } 1642 1643 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) { 1644 lun = aep->at_scclun; 1645 } else { 1646 lun = aep->at_lun; 1647 } 1648 tptr = get_lun_statep(isp, 0, lun); 1649 if (tptr == NULL) { 1650 isp_prt(isp, ISP_LOGTDEBUG0, 1651 "[0x%x] no state pointer for lun %d", aep->at_rxid, lun); 1652 tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD); 1653 if (tptr == NULL) { 1654 isp_endcmd(isp, aep, 1655 SCSI_STATUS_CHECK_COND | ECMD_SVALID | 1656 (0x5 << 12) | (0x25 << 16), 0); 1657 return (0); 1658 } 1659 } 1660 1661 atp = isp_get_atpd(isp, 0); 1662 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios); 1663 if (atiop == NULL || atp == NULL) { 1664 1665 /* 1666 * Because we can't autofeed sense data back with 1667 * a command for parallel SCSI, we can't give back 1668 * a CHECK CONDITION. We'll give back a QUEUE FULL status 1669 * instead. This works out okay because the only time we 1670 * should, in fact, get this, is in the case that we've 1671 * run out of ATIOS. 1672 */ 1673 xpt_print_path(tptr->owner); 1674 isp_prt(isp, ISP_LOGWARN, 1675 "no %s for lun %d from initiator %d", 1676 (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" : 1677 ((atp == NULL)? 
"ATPs" : "ATIO2s"), lun, aep->at_iid); 1678 rls_lun_statep(isp, tptr); 1679 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0); 1680 return (0); 1681 } 1682 atp->state = ATPD_STATE_ATIO; 1683 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle); 1684 tptr->atio_count--; 1685 isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d", 1686 lun, tptr->atio_count); 1687 1688 if (tptr == &isp->isp_osinfo.tsdflt[0]) { 1689 atiop->ccb_h.target_id = 1690 ((fcparam *)isp->isp_param)->isp_loopid; 1691 atiop->ccb_h.target_lun = lun; 1692 } 1693 /* 1694 * We don't get 'suggested' sense data as we do with SCSI cards. 1695 */ 1696 atiop->sense_len = 0; 1697 1698 atiop->init_id = aep->at_iid; 1699 atiop->cdb_len = ATIO2_CDBLEN; 1700 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN); 1701 atiop->ccb_h.status = CAM_CDB_RECVD; 1702 atiop->tag_id = aep->at_rxid; 1703 switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) { 1704 case ATIO2_TC_ATTR_SIMPLEQ: 1705 atiop->tag_action = MSG_SIMPLE_Q_TAG; 1706 break; 1707 case ATIO2_TC_ATTR_HEADOFQ: 1708 atiop->tag_action = MSG_HEAD_OF_Q_TAG; 1709 break; 1710 case ATIO2_TC_ATTR_ORDERED: 1711 atiop->tag_action = MSG_ORDERED_Q_TAG; 1712 break; 1713 case ATIO2_TC_ATTR_ACAQ: /* ?? 
/*
 * Handle completion of a CTIO/CTIO2 that the firmware has finished with:
 * look up the CCB by firmware handle, update residual/sense accounting,
 * and either put the ATIO resource back (on error) or complete the CCB.
 * Returns 0 in all cases.
 */
static int
isp_handle_platform_ctio(ispsoftc_t *isp, void *arg)
{
	union ccb *ccb;
	int sentstatus, ok, notify_cam, resid = 0;
	uint16_t tval;

	/*
	 * CTIO and CTIO2 are close enough....
	 */

	/* ct_syshandle sits at the same offset in both entry layouts. */
	ccb = isp_find_xs_tgt(isp, ((ct_entry_t *)arg)->ct_syshandle);
	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
	isp_destroy_tgt_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);

	if (IS_FC(isp)) {
		ct2_entry_t *ct = arg;
		atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid);
		if (atp == NULL) {
			isp_prt(isp, ISP_LOGERR,
			    "cannot find adjunct for %x after I/O",
			    ct->ct_rxid);
			return (0);
		}
		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
			ccb->ccb_h.status |= CAM_SENT_SENSE;
		}
		notify_cam = ct->ct_header.rqs_seqno & 0x1;
		if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
			/* Account the bytes actually moved by this CTIO. */
			resid = ct->ct_resid;
			atp->bytes_xfered += (atp->last_xframt - resid);
			atp->last_xframt = 0;
		}
		if (sentstatus || !ok) {
			atp->tag = 0;
		}
		isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN,
		    "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
		    resid, sentstatus? "FIN" : "MID");
		tval = ct->ct_rxid;

		/* XXX: should really come after isp_complete_ctio */
		atp->state = ATPD_STATE_PDON;
	} else {
		ct_entry_t *ct = arg;
		sentstatus = ct->ct_flags & CT_SENDSTATUS;
		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
		/*
		 * We *ought* to be able to get back to the original ATIO
		 * here, but for some reason this gets lost. It's just as
		 * well because it's squirrelled away as part of periph
		 * private data.
		 *
		 * We can live without it as long as we continue to use
		 * the auto-replenish feature for CTIOs.
		 */
		notify_cam = ct->ct_header.rqs_seqno & 0x1;
		if (ct->ct_status & QLTM_SVALID) {
			/* Firmware-supplied sense data lives at a fixed
			 * offset within the CTIO entry. */
			char *sp = (char *)ct;
			sp += CTIO_SENSE_OFFSET;
			ccb->csio.sense_len =
			    min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
			MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}
		if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
			resid = ct->ct_resid;
		}
		isp_prt(isp, ISP_LOGTDEBUG0,
		    "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
		    ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun,
		    ct->ct_status, ct->ct_flags, resid,
		    sentstatus? "FIN" : "MID");
		tval = ct->ct_fwhandle;
	}
	ccb->csio.resid += resid;

	/*
	 * We're here either because intermediate data transfers are done
	 * and/or the final status CTIO (which may have joined with a
	 * Data Transfer) is done.
	 *
	 * In any case, for this platform, the upper layers figure out
	 * what to do next, so all we do here is collect status and
	 * pass information along. Any DMA handles have already been
	 * freed.
	 */
	if (notify_cam == 0) {
		isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO[0x%x] done", tval);
		return (0);
	}

	isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done",
	    (sentstatus)? " FINAL " : "MIDTERM ", tval);

	if (!ok) {
		/* Error: hand the ATIO back to the firmware first. */
		isp_target_putback_atio(ccb);
	} else {
		isp_complete_ctio(ccb);

	}
	return (0);
}

/*
 * Immediate-notify handler for parallel SCSI.  Currently a stub.
 */
static int
isp_handle_platform_notify_scsi(ispsoftc_t *isp, in_entry_t *inp)
{
	return (0);	/* XXXX */
}

/*
 * Immediate-notify handler for Fibre Channel: log port state changes and,
 * for ABORT TASK, forward an MSG_ABORT_TAG immediate-notify CCB to CAM
 * (when a free INOT CCB is available for the affected lun).
 */
static int
isp_handle_platform_notify_fc(ispsoftc_t *isp, in_fcentry_t *inp)
{

	switch (inp->in_status) {
	case IN_PORT_LOGOUT:
		isp_prt(isp, ISP_LOGWARN, "port logout of iid %d",
		   inp->in_iid);
		break;
	case IN_PORT_CHANGED:
		isp_prt(isp, ISP_LOGWARN, "port changed for iid %d",
		   inp->in_iid);
		break;
	case IN_GLOBAL_LOGO:
		isp_prt(isp, ISP_LOGINFO, "all ports logged out");
		break;
	case IN_ABORT_TASK:
	{
		/* in_seqid carries the RX_ID of the task being aborted. */
		atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid);
		struct ccb_immed_notify *inot = NULL;

		if (atp) {
			tstate_t *tptr = get_lun_statep(isp, 0, atp->lun);
			if (tptr) {
				inot = (struct ccb_immed_notify *)
				    SLIST_FIRST(&tptr->inots);
				if (inot) {
					tptr->inot_count--;
					SLIST_REMOVE_HEAD(&tptr->inots,
					    sim_links.sle);
					isp_prt(isp, ISP_LOGTDEBUG0,
					    "Take FREE INOT count now %d",
					    tptr->inot_count);
				}
			}
			isp_prt(isp, ISP_LOGWARN,
			   "abort task RX_ID %x IID %d state %d",
			   inp->in_seqid, inp->in_iid, atp->state);
		} else {
			isp_prt(isp, ISP_LOGWARN,
			   "abort task RX_ID %x from iid %d, state unknown",
			   inp->in_seqid, inp->in_iid);
		}
		if (inot) {
			inot->initiator_id = inp->in_iid;
			inot->sense_len = 0;
			/* Encode the aborted RX_ID in the message arguments. */
			inot->message_args[0] = MSG_ABORT_TAG;
			inot->message_args[1] = inp->in_seqid & 0xff;
			inot->message_args[2] = (inp->in_seqid >> 8) & 0xff;
			inot->ccb_h.status = CAM_MESSAGE_RECV;
			xpt_done((union ccb *)inot);
		}
		break;
	}
	default:
		break;
	}
	return (0);
}
#endif

/*
 * CAM async callback.  On AC_LOST_DEVICE for parallel SCSI, push the
 * device's negotiation parameters back toward the NVRAM/default values
 * so a replacement device renegotiates from a sane starting point.
 */
static void
isp_cam_async(void *cbarg, uint32_t code, struct cam_path *path, void *arg)
{
	struct cam_sim *sim;
	ispsoftc_t *isp;

	sim = (struct cam_sim *)cbarg;
	isp = (ispsoftc_t *) cam_sim_softc(sim);
	switch (code) {
	case AC_LOST_DEVICE:
		if (IS_SCSI(isp)) {
			uint16_t oflags, nflags;
			sdparam *sdp = isp->isp_param;
			int tgt;

			tgt = xpt_path_target_id(path);
			if (tgt >= 0) {
				sdp += cam_sim_bus(sim);
				ISP_LOCK(isp);
				nflags = sdp->isp_devparam[tgt].nvrm_flags;
#ifndef	ISP_TARGET_MODE
				nflags &= DPARM_SAFE_DFLT;
				if (isp->isp_loaded_fw) {
					nflags |= DPARM_NARROW | DPARM_ASYNC;
				}
#else
				nflags = DPARM_DEFAULT;
#endif
				/* Temporarily swap in the reset goal flags,
				 * push the update, then restore the goals. */
				oflags = sdp->isp_devparam[tgt].goal_flags;
				sdp->isp_devparam[tgt].goal_flags = nflags;
				sdp->isp_devparam[tgt].dev_update = 1;
				isp->isp_update |= (1 << cam_sim_bus(sim));
				(void) isp_control(isp,
				    ISPCTL_UPDATE_PARAMS, NULL);
				sdp->isp_devparam[tgt].goal_flags = oflags;
				ISP_UNLOCK(isp);
			}
		}
		break;
	default:
		isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
		break;
	}
}

/*
 * CAM SIM poll entry point: service any pending interrupt condition
 * by hand (used when interrupts are unavailable, e.g. at dump time).
 */
static void
isp_poll(struct cam_sim *sim)
{
	ispsoftc_t *isp = cam_sim_softc(sim);
	uint16_t isr, sema, mbox;

	ISP_LOCK(isp);
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
		isp_intr(isp, isr, sema, mbox);
	}
	ISP_UNLOCK(isp);
}
/*
 * Per-command watchdog (timeout(9) callback).  Decides whether a command
 * that has exceeded its timeout is really dead: polls the interrupt
 * handler once, and if the command is still outstanding either gives it
 * one more grace period (sending a SYNC_ALL marker) or aborts it, frees
 * its DMA resources, and completes it with CAM_CMD_TIMEOUT.
 */
static void
isp_watchdog(void *arg)
{
	XS_T *xs = arg;
	ispsoftc_t *isp = XS_ISP(xs);
	uint32_t handle;
	int iok;

	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting it's handle and
	 * and seeing whether it's still alive.
	 */
	ISP_LOCK(isp);
	/* Disable "interrupts ok" while we poll so completions come
	 * through this context; restored on exit. */
	iok = isp->isp_osinfo.intsok;
	isp->isp_osinfo.intsok = 0;
	handle = isp_find_handle(isp, xs);
	if (handle) {
		uint16_t isr, sema, mbox;

		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog found done cmd (handle 0x%x)", handle);
			ISP_UNLOCK(isp);
			return;
		}

		if (XS_CMD_WDOG_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "recursive watchdog (handle 0x%x)", handle);
			ISP_UNLOCK(isp);
			return;
		}

		XS_CMD_S_WDOG(xs);
		/* Give the chip one last chance to complete it. */
		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
			isp_intr(isp, isr, sema, mbox);
		}
		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG2,
			    "watchdog cleanup for handle 0x%x", handle);
			xpt_done((union ccb *) xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

			/*
			 * After this point, the command is really dead.
			 */
			if (XS_XFRLEN(xs)) {
				ISP_DMAFREE(isp, xs, handle);
			}
			isp_destroy_handle(isp, handle);
			xpt_print_path(xs->ccb_h.path);
			isp_prt(isp, ISP_LOGWARN,
			    "watchdog timeout for handle 0x%x", handle);
			XS_SETERR(xs, CAM_CMD_TIMEOUT);
			XS_CMD_C_WDOG(xs);
			isp_done(xs);
		} else {
			/* First expiry: grant a grace period, re-arm the
			 * watchdog, and push a SYNC_ALL marker so the f/w
			 * flushes any completions for this channel. */
			uint16_t nxti, optr;
			ispreq_t local, *mp= &local, *qe;

			XS_CMD_C_WDOG(xs);
			xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
			if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
				ISP_UNLOCK(isp);
				return;
			}
			XS_CMD_S_GRACE(xs);
			MEMZERO((void *) mp, sizeof (*mp));
			mp->req_header.rqs_entry_count = 1;
			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
			mp->req_modifier = SYNC_ALL;
			mp->req_target = XS_CHANNEL(xs) << 7;
			isp_put_request(isp, mp, qe);
			ISP_ADD_REQUEST(isp, nxti);
		}
	} else {
		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
	}
	isp->isp_osinfo.intsok = iok;
	ISP_UNLOCK(isp);
}

/*
 * Fibre Channel worker kthread: loops forever, driving isp_fc_runstate()
 * until the loop/fabric is usable, then releases any loop-down SIMQ
 * freeze and sleeps until woken (e.g. by a CMD_RQLATER in isp_action).
 */
static void
isp_kthread(void *arg)
{
	ispsoftc_t *isp = arg;


#if __FreeBSD_version < 500000
	int s;

	s = splcam();
	isp->isp_osinfo.intsok = 1;
#else
#ifdef	ISP_SMPLOCK
	mtx_lock(&isp->isp_lock);
#else
	mtx_lock(&Giant);
#endif
#endif
	/*
	 * The first loop is for our usage where we have yet to have
	 * gotten good fibre channel state.
	 */
	for (;;) {
		int wasfrozen;

		isp_prt(isp, ISP_LOGDEBUG0, "kthread: checking FC state");
		while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
			isp_prt(isp, ISP_LOGDEBUG0, "kthread: FC state ungood");
			if (FCPARAM(isp)->isp_fwstate != FW_READY ||
			    FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) {
				/* Bail out of the retry loop once we've seen
				 * loop at least once and are mature. */
				if (FCPARAM(isp)->loop_seen_once == 0 ||
				    isp->isp_osinfo.ktmature == 0) {
					break;
				}
			}
#ifdef	ISP_SMPLOCK
			msleep(isp_kthread, &isp->isp_lock,
			    PRIBIO, "isp_fcthrd", hz);
#else
			(void) tsleep(isp_kthread, PRIBIO, "isp_fcthrd", hz);
#endif
		}

		/*
		 * Even if we didn't get good loop state we may be
		 * unfreezing the SIMQ so that we can kill off
		 * commands (if we've never seen loop before, for example).
		 */
		isp->isp_osinfo.ktmature = 1;
		wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
			isp_prt(isp, ISP_LOGDEBUG0, "kthread: releasing simq");
			ISPLOCK_2_CAMLOCK(isp);
			xpt_release_simq(isp->isp_sim, 1);
			CAMLOCK_2_ISPLOCK(isp);
		}
		isp_prt(isp, ISP_LOGDEBUG0, "kthread: waiting until called");
#if __FreeBSD_version < 500000
		tsleep(&isp->isp_osinfo.kproc, PRIBIO, "isp_fc_worker", 0);
#else
#ifdef	ISP_SMPLOCK
		cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock);
#else
		(void) tsleep(&isp->isp_osinfo.kthread_cv, PRIBIO, "fc_cv", 0);
#endif
#endif
	}
}
ccb->ccb_h.func_code == XPT_SCSI_IO) { 2144 CAMLOCK_2_ISPLOCK(isp); 2145 isp_init(isp); 2146 if (isp->isp_state != ISP_INITSTATE) { 2147 ISP_UNLOCK(isp); 2148 /* 2149 * Lie. Say it was a selection timeout. 2150 */ 2151 ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN; 2152 xpt_freeze_devq(ccb->ccb_h.path, 1); 2153 xpt_done(ccb); 2154 return; 2155 } 2156 isp->isp_state = ISP_RUNSTATE; 2157 ISPLOCK_2_CAMLOCK(isp); 2158 } 2159 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code); 2160 2161 2162 switch (ccb->ccb_h.func_code) { 2163 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 2164 /* 2165 * Do a couple of preliminary checks... 2166 */ 2167 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 2168 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 2169 ccb->ccb_h.status = CAM_REQ_INVALID; 2170 xpt_done(ccb); 2171 break; 2172 } 2173 } 2174#ifdef DIAGNOSTIC 2175 if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) { 2176 ccb->ccb_h.status = CAM_PATH_INVALID; 2177 } else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) { 2178 ccb->ccb_h.status = CAM_PATH_INVALID; 2179 } 2180 if (ccb->ccb_h.status == CAM_PATH_INVALID) { 2181 isp_prt(isp, ISP_LOGERR, 2182 "invalid tgt/lun (%d.%d) in XPT_SCSI_IO", 2183 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 2184 xpt_done(ccb); 2185 break; 2186 } 2187#endif 2188 ((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK; 2189 CAMLOCK_2_ISPLOCK(isp); 2190 error = isp_start((XS_T *) ccb); 2191 switch (error) { 2192 case CMD_QUEUED: 2193 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2194 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 2195 uint64_t ticks = (uint64_t) hz; 2196 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) 2197 ticks = 60 * 1000 * ticks; 2198 else 2199 ticks = ccb->ccb_h.timeout * hz; 2200 ticks = ((ticks + 999) / 1000) + hz + hz; 2201 if (ticks >= 0x80000000) { 2202 isp_prt(isp, ISP_LOGERR, 2203 "timeout overflow"); 2204 ticks = 0x7fffffff; 2205 } 2206 ccb->ccb_h.timeout_ch = timeout(isp_watchdog, 2207 
(caddr_t)ccb, (int)ticks); 2208 } else { 2209 callout_handle_init(&ccb->ccb_h.timeout_ch); 2210 } 2211 ISPLOCK_2_CAMLOCK(isp); 2212 break; 2213 case CMD_RQLATER: 2214 /* 2215 * This can only happen for Fibre Channel 2216 */ 2217 KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only")); 2218 if (FCPARAM(isp)->loop_seen_once == 0 && 2219 isp->isp_osinfo.ktmature) { 2220 ISPLOCK_2_CAMLOCK(isp); 2221 XS_SETERR(ccb, CAM_SEL_TIMEOUT); 2222 xpt_done(ccb); 2223 break; 2224 } 2225#if __FreeBSD_version < 500000 2226 wakeup(&isp->isp_osinfo.kproc); 2227#else 2228#ifdef ISP_SMPLOCK 2229 cv_signal(&isp->isp_osinfo.kthread_cv); 2230#else 2231 wakeup(&isp->isp_osinfo.kthread_cv); 2232#endif 2233#endif 2234 isp_freeze_loopdown(isp, "isp_action(RQLATER)"); 2235 XS_SETERR(ccb, CAM_REQUEUE_REQ); 2236 ISPLOCK_2_CAMLOCK(isp); 2237 xpt_done(ccb); 2238 break; 2239 case CMD_EAGAIN: 2240 XS_SETERR(ccb, CAM_REQUEUE_REQ); 2241 ISPLOCK_2_CAMLOCK(isp); 2242 xpt_done(ccb); 2243 break; 2244 case CMD_COMPLETE: 2245 isp_done((struct ccb_scsiio *) ccb); 2246 ISPLOCK_2_CAMLOCK(isp); 2247 break; 2248 default: 2249 isp_prt(isp, ISP_LOGERR, 2250 "What's this? 
0x%x at %d in file %s", 2251 error, __LINE__, __FILE__); 2252 XS_SETERR(ccb, CAM_REQ_CMP_ERR); 2253 xpt_done(ccb); 2254 ISPLOCK_2_CAMLOCK(isp); 2255 } 2256 break; 2257 2258#ifdef ISP_TARGET_MODE 2259 case XPT_EN_LUN: /* Enable LUN as a target */ 2260 { 2261 int seq, iok, i; 2262 CAMLOCK_2_ISPLOCK(isp); 2263 iok = isp->isp_osinfo.intsok; 2264 isp->isp_osinfo.intsok = 0; 2265 seq = isp_en_lun(isp, ccb); 2266 if (seq < 0) { 2267 isp->isp_osinfo.intsok = iok; 2268 ISPLOCK_2_CAMLOCK(isp); 2269 xpt_done(ccb); 2270 break; 2271 } 2272 for (i = 0; isp->isp_osinfo.leact[seq] && i < 30 * 1000; i++) { 2273 uint16_t isr, sema, mbox; 2274 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) { 2275 isp_intr(isp, isr, sema, mbox); 2276 } 2277 DELAY(1000); 2278 } 2279 isp->isp_osinfo.intsok = iok; 2280 ISPLOCK_2_CAMLOCK(isp); 2281 break; 2282 } 2283 case XPT_NOTIFY_ACK: /* recycle notify ack */ 2284 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */ 2285 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 2286 { 2287 tstate_t *tptr = 2288 get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun); 2289 if (tptr == NULL) { 2290 ccb->ccb_h.status = CAM_LUN_INVALID; 2291 xpt_done(ccb); 2292 break; 2293 } 2294 ccb->ccb_h.sim_priv.entries[0].field = 0; 2295 ccb->ccb_h.sim_priv.entries[1].ptr = isp; 2296 ccb->ccb_h.flags = 0; 2297 2298 CAMLOCK_2_ISPLOCK(isp); 2299 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 2300 /* 2301 * Note that the command itself may not be done- 2302 * it may not even have had the first CTIO sent. 
2303 */ 2304 tptr->atio_count++; 2305 isp_prt(isp, ISP_LOGTDEBUG0, 2306 "Put FREE ATIO, lun %d, count now %d", 2307 ccb->ccb_h.target_lun, tptr->atio_count); 2308 SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, 2309 sim_links.sle); 2310 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 2311 tptr->inot_count++; 2312 isp_prt(isp, ISP_LOGTDEBUG0, 2313 "Put FREE INOT, lun %d, count now %d", 2314 ccb->ccb_h.target_lun, tptr->inot_count); 2315 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, 2316 sim_links.sle); 2317 } else { 2318 isp_prt(isp, ISP_LOGWARN, "Got Notify ACK");; 2319 } 2320 rls_lun_statep(isp, tptr); 2321 ccb->ccb_h.status = CAM_REQ_INPROG; 2322 ISPLOCK_2_CAMLOCK(isp); 2323 break; 2324 } 2325 case XPT_CONT_TARGET_IO: 2326 { 2327 CAMLOCK_2_ISPLOCK(isp); 2328 isp_target_start_ctio(isp, ccb); 2329 ISPLOCK_2_CAMLOCK(isp); 2330 break; 2331 } 2332#endif 2333 case XPT_RESET_DEV: /* BDR the specified SCSI device */ 2334 2335 bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)); 2336 tgt = ccb->ccb_h.target_id; 2337 tgt |= (bus << 16); 2338 2339 CAMLOCK_2_ISPLOCK(isp); 2340 error = isp_control(isp, ISPCTL_RESET_DEV, &tgt); 2341 ISPLOCK_2_CAMLOCK(isp); 2342 if (error) { 2343 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2344 } else { 2345 ccb->ccb_h.status = CAM_REQ_CMP; 2346 } 2347 xpt_done(ccb); 2348 break; 2349 case XPT_ABORT: /* Abort the specified CCB */ 2350 { 2351 union ccb *accb = ccb->cab.abort_ccb; 2352 CAMLOCK_2_ISPLOCK(isp); 2353 switch (accb->ccb_h.func_code) { 2354#ifdef ISP_TARGET_MODE 2355 case XPT_ACCEPT_TARGET_IO: 2356 case XPT_IMMED_NOTIFY: 2357 ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb); 2358 break; 2359 case XPT_CONT_TARGET_IO: 2360 isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet"); 2361 ccb->ccb_h.status = CAM_UA_ABORT; 2362 break; 2363#endif 2364 case XPT_SCSI_IO: 2365 error = isp_control(isp, ISPCTL_ABORT_CMD, ccb); 2366 if (error) { 2367 ccb->ccb_h.status = CAM_UA_ABORT; 2368 } else { 2369 ccb->ccb_h.status = CAM_REQ_CMP; 2370 } 2371 break; 2372 
default: 2373 ccb->ccb_h.status = CAM_REQ_INVALID; 2374 break; 2375 } 2376 ISPLOCK_2_CAMLOCK(isp); 2377 xpt_done(ccb); 2378 break; 2379 } 2380#ifdef CAM_NEW_TRAN_CODE 2381#define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) 2382#else 2383#define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS) 2384#endif 2385 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 2386 cts = &ccb->cts; 2387 if (!IS_CURRENT_SETTINGS(cts)) { 2388 ccb->ccb_h.status = CAM_REQ_INVALID; 2389 xpt_done(ccb); 2390 break; 2391 } 2392 tgt = cts->ccb_h.target_id; 2393 CAMLOCK_2_ISPLOCK(isp); 2394 if (IS_SCSI(isp)) { 2395#ifndef CAM_NEW_TRAN_CODE 2396 sdparam *sdp = isp->isp_param; 2397 uint16_t *dptr; 2398 2399 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2400 2401 sdp += bus; 2402 /* 2403 * We always update (internally) from goal_flags 2404 * so any request to change settings just gets 2405 * vectored to that location. 2406 */ 2407 dptr = &sdp->isp_devparam[tgt].goal_flags; 2408 2409 /* 2410 * Note that these operations affect the 2411 * the goal flags (goal_flags)- not 2412 * the current state flags. Then we mark 2413 * things so that the next operation to 2414 * this HBA will cause the update to occur. 2415 */ 2416 if (cts->valid & CCB_TRANS_DISC_VALID) { 2417 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) { 2418 *dptr |= DPARM_DISC; 2419 } else { 2420 *dptr &= ~DPARM_DISC; 2421 } 2422 } 2423 if (cts->valid & CCB_TRANS_TQ_VALID) { 2424 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) { 2425 *dptr |= DPARM_TQING; 2426 } else { 2427 *dptr &= ~DPARM_TQING; 2428 } 2429 } 2430 if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) { 2431 switch (cts->bus_width) { 2432 case MSG_EXT_WDTR_BUS_16_BIT: 2433 *dptr |= DPARM_WIDE; 2434 break; 2435 default: 2436 *dptr &= ~DPARM_WIDE; 2437 } 2438 } 2439 /* 2440 * Any SYNC RATE of nonzero and SYNC_OFFSET 2441 * of nonzero will cause us to go to the 2442 * selected (from NVRAM) maximum value for 2443 * this device. 
At a later point, we'll 2444 * allow finer control. 2445 */ 2446 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) && 2447 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) && 2448 (cts->sync_offset > 0)) { 2449 *dptr |= DPARM_SYNC; 2450 } else { 2451 *dptr &= ~DPARM_SYNC; 2452 } 2453 *dptr |= DPARM_SAFE_DFLT; 2454#else 2455 struct ccb_trans_settings_scsi *scsi = 2456 &cts->proto_specific.scsi; 2457 struct ccb_trans_settings_spi *spi = 2458 &cts->xport_specific.spi; 2459 sdparam *sdp = isp->isp_param; 2460 uint16_t *dptr; 2461 2462 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2463 sdp += bus; 2464 /* 2465 * We always update (internally) from goal_flags 2466 * so any request to change settings just gets 2467 * vectored to that location. 2468 */ 2469 dptr = &sdp->isp_devparam[tgt].goal_flags; 2470 2471 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 2472 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) 2473 *dptr |= DPARM_DISC; 2474 else 2475 *dptr &= ~DPARM_DISC; 2476 } 2477 2478 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 2479 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) 2480 *dptr |= DPARM_TQING; 2481 else 2482 *dptr &= ~DPARM_TQING; 2483 } 2484 2485 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 2486 if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) 2487 *dptr |= DPARM_WIDE; 2488 else 2489 *dptr &= ~DPARM_WIDE; 2490 } 2491 2492 /* 2493 * XXX: FIX ME 2494 */ 2495 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) && 2496 (spi->valid & CTS_SPI_VALID_SYNC_RATE) && 2497 (spi->sync_period && spi->sync_offset)) { 2498 *dptr |= DPARM_SYNC; 2499 /* 2500 * XXX: CHECK FOR LEGALITY 2501 */ 2502 sdp->isp_devparam[tgt].goal_period = 2503 spi->sync_period; 2504 sdp->isp_devparam[tgt].goal_offset = 2505 spi->sync_offset; 2506 } else { 2507 *dptr &= ~DPARM_SYNC; 2508 } 2509#endif 2510 isp_prt(isp, ISP_LOGDEBUG0, 2511 "SET bus %d targ %d to flags %x off %x per %x", 2512 bus, tgt, sdp->isp_devparam[tgt].goal_flags, 2513 sdp->isp_devparam[tgt].goal_offset, 2514 
sdp->isp_devparam[tgt].goal_period); 2515 sdp->isp_devparam[tgt].dev_update = 1; 2516 isp->isp_update |= (1 << bus); 2517 } 2518 ISPLOCK_2_CAMLOCK(isp); 2519 ccb->ccb_h.status = CAM_REQ_CMP; 2520 xpt_done(ccb); 2521 break; 2522 case XPT_GET_TRAN_SETTINGS: 2523 cts = &ccb->cts; 2524 tgt = cts->ccb_h.target_id; 2525 CAMLOCK_2_ISPLOCK(isp); 2526 if (IS_FC(isp)) { 2527#ifndef CAM_NEW_TRAN_CODE 2528 /* 2529 * a lot of normal SCSI things don't make sense. 2530 */ 2531 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 2532 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2533 /* 2534 * How do you measure the width of a high 2535 * speed serial bus? Well, in bytes. 2536 * 2537 * Offset and period make no sense, though, so we set 2538 * (above) a 'base' transfer speed to be gigabit. 2539 */ 2540 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2541#else 2542 fcparam *fcp = isp->isp_param; 2543 struct ccb_trans_settings_fc *fc = 2544 &cts->xport_specific.fc; 2545 2546 cts->protocol = PROTO_SCSI; 2547 cts->protocol_version = SCSI_REV_2; 2548 cts->transport = XPORT_FC; 2549 cts->transport_version = 0; 2550 2551 fc->valid = CTS_FC_VALID_SPEED; 2552 if (fcp->isp_gbspeed == 2) 2553 fc->bitrate = 200000; 2554 else 2555 fc->bitrate = 100000; 2556 if (tgt > 0 && tgt < MAX_FC_TARG) { 2557 struct lportdb *lp = &fcp->portdb[tgt]; 2558 fc->wwnn = lp->node_wwn; 2559 fc->wwpn = lp->port_wwn; 2560 fc->port = lp->portid; 2561 fc->valid |= CTS_FC_VALID_WWNN | 2562 CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; 2563 } 2564#endif 2565 } else { 2566#ifdef CAM_NEW_TRAN_CODE 2567 struct ccb_trans_settings_scsi *scsi = 2568 &cts->proto_specific.scsi; 2569 struct ccb_trans_settings_spi *spi = 2570 &cts->xport_specific.spi; 2571#endif 2572 sdparam *sdp = isp->isp_param; 2573 int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path)); 2574 uint16_t dval, pval, oval; 2575 2576 sdp += bus; 2577 2578 if (IS_CURRENT_SETTINGS(cts)) { 2579 sdp->isp_devparam[tgt].dev_refresh = 1; 2580 isp->isp_update |= (1 << bus); 
2581 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, 2582 NULL); 2583 dval = sdp->isp_devparam[tgt].actv_flags; 2584 oval = sdp->isp_devparam[tgt].actv_offset; 2585 pval = sdp->isp_devparam[tgt].actv_period; 2586 } else { 2587 dval = sdp->isp_devparam[tgt].nvrm_flags; 2588 oval = sdp->isp_devparam[tgt].nvrm_offset; 2589 pval = sdp->isp_devparam[tgt].nvrm_period; 2590 } 2591 2592#ifndef CAM_NEW_TRAN_CODE 2593 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 2594 2595 if (dval & DPARM_DISC) { 2596 cts->flags |= CCB_TRANS_DISC_ENB; 2597 } 2598 if (dval & DPARM_TQING) { 2599 cts->flags |= CCB_TRANS_TAG_ENB; 2600 } 2601 if (dval & DPARM_WIDE) { 2602 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2603 } else { 2604 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2605 } 2606 cts->valid = CCB_TRANS_BUS_WIDTH_VALID | 2607 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2608 2609 if ((dval & DPARM_SYNC) && oval != 0) { 2610 cts->sync_period = pval; 2611 cts->sync_offset = oval; 2612 cts->valid |= 2613 CCB_TRANS_SYNC_RATE_VALID | 2614 CCB_TRANS_SYNC_OFFSET_VALID; 2615 } 2616#else 2617 cts->protocol = PROTO_SCSI; 2618 cts->protocol_version = SCSI_REV_2; 2619 cts->transport = XPORT_SPI; 2620 cts->transport_version = 2; 2621 2622 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; 2623 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; 2624 if (dval & DPARM_DISC) { 2625 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 2626 } 2627 if (dval & DPARM_TQING) { 2628 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 2629 } 2630 if ((dval & DPARM_SYNC) && oval && pval) { 2631 spi->sync_offset = oval; 2632 spi->sync_period = pval; 2633 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 2634 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 2635 } 2636 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 2637 if (dval & DPARM_WIDE) { 2638 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2639 } else { 2640 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2641 } 2642 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 2643 scsi->valid = CTS_SCSI_VALID_TQ; 2644 spi->valid |= CTS_SPI_VALID_DISC; 2645 } 
else { 2646 scsi->valid = 0; 2647 } 2648#endif 2649 isp_prt(isp, ISP_LOGDEBUG0, 2650 "GET %s bus %d targ %d to flags %x off %x per %x", 2651 IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM", 2652 bus, tgt, dval, oval, pval); 2653 } 2654 ISPLOCK_2_CAMLOCK(isp); 2655 ccb->ccb_h.status = CAM_REQ_CMP; 2656 xpt_done(ccb); 2657 break; 2658 2659 case XPT_CALC_GEOMETRY: 2660#if __FreeBSD_version < 500000 2661 { 2662 struct ccb_calc_geometry *ccg; 2663 u_int32_t secs_per_cylinder; 2664 u_int32_t size_mb; 2665 2666 ccg = &ccb->ccg; 2667 if (ccg->block_size == 0) { 2668 ccb->ccb_h.status = CAM_REQ_INVALID; 2669 xpt_done(ccb); 2670 break; 2671 } 2672 size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size); 2673 if (size_mb > 1024) { 2674 ccg->heads = 255; 2675 ccg->secs_per_track = 63; 2676 } else { 2677 ccg->heads = 64; 2678 ccg->secs_per_track = 32; 2679 } 2680 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 2681 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 2682 ccb->ccb_h.status = CAM_REQ_CMP; 2683 xpt_done(ccb); 2684 break; 2685 } 2686#else 2687 { 2688 cam_calc_geometry(&ccb->ccg, /*extended*/1); 2689 xpt_done(ccb); 2690 break; 2691 } 2692#endif 2693 case XPT_RESET_BUS: /* Reset the specified bus */ 2694 bus = cam_sim_bus(sim); 2695 CAMLOCK_2_ISPLOCK(isp); 2696 error = isp_control(isp, ISPCTL_RESET_BUS, &bus); 2697 ISPLOCK_2_CAMLOCK(isp); 2698 if (error) 2699 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2700 else { 2701 if (cam_sim_bus(sim) && isp->isp_path2 != NULL) 2702 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 2703 else if (isp->isp_path != NULL) 2704 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 2705 ccb->ccb_h.status = CAM_REQ_CMP; 2706 } 2707 xpt_done(ccb); 2708 break; 2709 2710 case XPT_TERM_IO: /* Terminate the I/O process */ 2711 ccb->ccb_h.status = CAM_REQ_INVALID; 2712 xpt_done(ccb); 2713 break; 2714 2715 case XPT_PATH_INQ: /* Path routing inquiry */ 2716 { 2717 struct ccb_pathinq *cpi = &ccb->cpi; 2718 2719 cpi->version_num = 1; 2720#ifdef 
ISP_TARGET_MODE 2721 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 2722#else 2723 cpi->target_sprt = 0; 2724#endif 2725 cpi->hba_eng_cnt = 0; 2726 cpi->max_target = ISP_MAX_TARGETS(isp) - 1; 2727 cpi->max_lun = ISP_MAX_LUNS(isp) - 1; 2728 cpi->bus_id = cam_sim_bus(sim); 2729 if (IS_FC(isp)) { 2730 cpi->hba_misc = PIM_NOBUSRESET; 2731 /* 2732 * Because our loop ID can shift from time to time, 2733 * make our initiator ID out of range of our bus. 2734 */ 2735 cpi->initiator_id = cpi->max_target + 1; 2736 2737 /* 2738 * Set base transfer capabilities for Fibre Channel. 2739 * Technically not correct because we don't know 2740 * what media we're running on top of- but we'll 2741 * look good if we always say 100MB/s. 2742 */ 2743 if (FCPARAM(isp)->isp_gbspeed == 2) 2744 cpi->base_transfer_speed = 200000; 2745 else 2746 cpi->base_transfer_speed = 100000; 2747 cpi->hba_inquiry = PI_TAG_ABLE; 2748#ifdef CAM_NEW_TRAN_CODE 2749 cpi->transport = XPORT_FC; 2750 cpi->transport_version = 0; /* WHAT'S THIS FOR? */ 2751#endif 2752 } else { 2753 sdparam *sdp = isp->isp_param; 2754 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path)); 2755 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 2756 cpi->hba_misc = 0; 2757 cpi->initiator_id = sdp->isp_initiator_id; 2758 cpi->base_transfer_speed = 3300; 2759#ifdef CAM_NEW_TRAN_CODE 2760 cpi->transport = XPORT_SPI; 2761 cpi->transport_version = 2; /* WHAT'S THIS FOR? 
*/ 2762#endif 2763 } 2764#ifdef CAM_NEW_TRAN_CODE 2765 cpi->protocol = PROTO_SCSI; 2766 cpi->protocol_version = SCSI_REV_2; 2767#endif 2768 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 2769 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN); 2770 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 2771 cpi->unit_number = cam_sim_unit(sim); 2772 cpi->ccb_h.status = CAM_REQ_CMP; 2773 xpt_done(ccb); 2774 break; 2775 } 2776 default: 2777 ccb->ccb_h.status = CAM_REQ_INVALID; 2778 xpt_done(ccb); 2779 break; 2780 } 2781} 2782 2783#define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB) 2784void 2785isp_done(struct ccb_scsiio *sccb) 2786{ 2787 ispsoftc_t *isp = XS_ISP(sccb); 2788 2789 if (XS_NOERR(sccb)) 2790 XS_SETERR(sccb, CAM_REQ_CMP); 2791 2792 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && 2793 (sccb->scsi_status != SCSI_STATUS_OK)) { 2794 sccb->ccb_h.status &= ~CAM_STATUS_MASK; 2795 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && 2796 (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) { 2797 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; 2798 } else { 2799 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 2800 } 2801 } 2802 2803 sccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2804 if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 2805 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { 2806 sccb->ccb_h.status |= CAM_DEV_QFRZN; 2807 xpt_freeze_devq(sccb->ccb_h.path, 1); 2808 isp_prt(isp, ISP_LOGDEBUG0, 2809 "freeze devq %d.%d cam sts %x scsi sts %x", 2810 sccb->ccb_h.target_id, sccb->ccb_h.target_lun, 2811 sccb->ccb_h.status, sccb->scsi_status); 2812 } 2813 } 2814 2815 if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) && 2816 (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 2817 xpt_print_path(sccb->ccb_h.path); 2818 isp_prt(isp, ISP_LOGINFO, 2819 "cam completion status 0x%x", sccb->ccb_h.status); 2820 } 2821 2822 XS_CMD_S_DONE(sccb); 2823 if (XS_CMD_WDOG_P(sccb) == 0) { 2824 untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch); 2825 if 
(XS_CMD_GRACE_P(sccb)) { 2826 isp_prt(isp, ISP_LOGDEBUG2, 2827 "finished command on borrowed time"); 2828 } 2829 XS_CMD_S_CLEAR(sccb); 2830 ISPLOCK_2_CAMLOCK(isp); 2831 xpt_done((union ccb *) sccb); 2832 CAMLOCK_2_ISPLOCK(isp); 2833 } 2834} 2835 2836int 2837isp_async(ispsoftc_t *isp, ispasync_t cmd, void *arg) 2838{ 2839 int bus, rv = 0; 2840 switch (cmd) { 2841 case ISPASYNC_NEW_TGT_PARAMS: 2842 { 2843#ifdef CAM_NEW_TRAN_CODE 2844 struct ccb_trans_settings_scsi *scsi; 2845 struct ccb_trans_settings_spi *spi; 2846#endif 2847 int flags, tgt; 2848 sdparam *sdp = isp->isp_param; 2849 struct ccb_trans_settings cts; 2850 struct cam_path *tmppath; 2851 2852 memset(&cts, 0, sizeof (struct ccb_trans_settings)); 2853 2854 tgt = *((int *)arg); 2855 bus = (tgt >> 16) & 0xffff; 2856 tgt &= 0xffff; 2857 sdp += bus; 2858 ISPLOCK_2_CAMLOCK(isp); 2859 if (xpt_create_path(&tmppath, NULL, 2860 cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim), 2861 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2862 CAMLOCK_2_ISPLOCK(isp); 2863 isp_prt(isp, ISP_LOGWARN, 2864 "isp_async cannot make temp path for %d.%d", 2865 tgt, bus); 2866 rv = -1; 2867 break; 2868 } 2869 CAMLOCK_2_ISPLOCK(isp); 2870 flags = sdp->isp_devparam[tgt].actv_flags; 2871#ifdef CAM_NEW_TRAN_CODE 2872 cts.type = CTS_TYPE_CURRENT_SETTINGS; 2873 cts.protocol = PROTO_SCSI; 2874 cts.transport = XPORT_SPI; 2875 2876 scsi = &cts.proto_specific.scsi; 2877 spi = &cts.xport_specific.spi; 2878 2879 if (flags & DPARM_TQING) { 2880 scsi->valid |= CTS_SCSI_VALID_TQ; 2881 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 2882 spi->flags |= CTS_SPI_FLAGS_TAG_ENB; 2883 } 2884 2885 if (flags & DPARM_DISC) { 2886 spi->valid |= CTS_SPI_VALID_DISC; 2887 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 2888 } 2889 spi->flags |= CTS_SPI_VALID_BUS_WIDTH; 2890 if (flags & DPARM_WIDE) { 2891 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 2892 } else { 2893 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 2894 } 2895 if (flags & DPARM_SYNC) { 2896 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 2897 
spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 2898 spi->sync_period = sdp->isp_devparam[tgt].actv_period; 2899 spi->sync_offset = sdp->isp_devparam[tgt].actv_offset; 2900 } 2901#else 2902 cts.flags = CCB_TRANS_CURRENT_SETTINGS; 2903 cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 2904 if (flags & DPARM_DISC) { 2905 cts.flags |= CCB_TRANS_DISC_ENB; 2906 } 2907 if (flags & DPARM_TQING) { 2908 cts.flags |= CCB_TRANS_TAG_ENB; 2909 } 2910 cts.valid |= CCB_TRANS_BUS_WIDTH_VALID; 2911 cts.bus_width = (flags & DPARM_WIDE)? 2912 MSG_EXT_WDTR_BUS_8_BIT : MSG_EXT_WDTR_BUS_16_BIT; 2913 cts.sync_period = sdp->isp_devparam[tgt].actv_period; 2914 cts.sync_offset = sdp->isp_devparam[tgt].actv_offset; 2915 if (flags & DPARM_SYNC) { 2916 cts.valid |= 2917 CCB_TRANS_SYNC_RATE_VALID | 2918 CCB_TRANS_SYNC_OFFSET_VALID; 2919 } 2920#endif 2921 isp_prt(isp, ISP_LOGDEBUG2, 2922 "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x", 2923 bus, tgt, sdp->isp_devparam[tgt].actv_period, 2924 sdp->isp_devparam[tgt].actv_offset, flags); 2925 xpt_setup_ccb(&cts.ccb_h, tmppath, 1); 2926 ISPLOCK_2_CAMLOCK(isp); 2927 xpt_async(AC_TRANSFER_NEG, tmppath, &cts); 2928 xpt_free_path(tmppath); 2929 CAMLOCK_2_ISPLOCK(isp); 2930 break; 2931 } 2932 case ISPASYNC_BUS_RESET: 2933 bus = *((int *)arg); 2934 isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected", 2935 bus); 2936 if (bus > 0 && isp->isp_path2) { 2937 ISPLOCK_2_CAMLOCK(isp); 2938 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL); 2939 CAMLOCK_2_ISPLOCK(isp); 2940 } else if (isp->isp_path) { 2941 ISPLOCK_2_CAMLOCK(isp); 2942 xpt_async(AC_BUS_RESET, isp->isp_path, NULL); 2943 CAMLOCK_2_ISPLOCK(isp); 2944 } 2945 break; 2946 case ISPASYNC_LIP: 2947 if (isp->isp_path) { 2948 isp_freeze_loopdown(isp, "ISPASYNC_LIP"); 2949 } 2950 isp_prt(isp, ISP_LOGINFO, "LIP Received"); 2951 break; 2952 case ISPASYNC_LOOP_RESET: 2953 if (isp->isp_path) { 2954 isp_freeze_loopdown(isp, "ISPASYNC_LOOP_RESET"); 2955 } 2956 isp_prt(isp, ISP_LOGINFO, "Loop Reset 
Received"); 2957 break; 2958 case ISPASYNC_LOOP_DOWN: 2959 if (isp->isp_path) { 2960 isp_freeze_loopdown(isp, "ISPASYNC_LOOP_DOWN"); 2961 } 2962 isp_prt(isp, ISP_LOGINFO, "Loop DOWN"); 2963 break; 2964 case ISPASYNC_LOOP_UP: 2965 /* 2966 * Now we just note that Loop has come up. We don't 2967 * actually do anything because we're waiting for a 2968 * Change Notify before activating the FC cleanup 2969 * thread to look at the state of the loop again. 2970 */ 2971 isp_prt(isp, ISP_LOGINFO, "Loop UP"); 2972 break; 2973 case ISPASYNC_PROMENADE: 2974 { 2975 const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x " 2976 "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x"; 2977 static const char *roles[4] = { 2978 "(none)", "Target", "Initiator", "Target/Initiator" 2979 }; 2980 fcparam *fcp = isp->isp_param; 2981 int tgt = *((int *) arg); 2982#if __FreeBSD_version >= 500000 2983 int is_tgt_mask = (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT); 2984 struct cam_path *tmppath; 2985#endif 2986 struct lportdb *lp = &fcp->portdb[tgt]; 2987 2988 isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid, 2989 roles[lp->roles & 0x3], 2990 (lp->valid)? "Arrived" : "Departed", 2991 (uint32_t) (lp->port_wwn >> 32), 2992 (uint32_t) (lp->port_wwn & 0xffffffffLL), 2993 (uint32_t) (lp->node_wwn >> 32), 2994 (uint32_t) (lp->node_wwn & 0xffffffffLL)); 2995 2996 ISPLOCK_2_CAMLOCK(isp); 2997#if __FreeBSD_version >= 500000 2998 if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim), 2999 (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 3000 CAMLOCK_2_ISPLOCK(isp); 3001 break; 3002 } 3003 /* 3004 * Policy: only announce targets. 
3005 */ 3006 if (lp->roles & is_tgt_mask) { 3007 if (lp->valid) { 3008 xpt_async(AC_FOUND_DEVICE, tmppath, NULL); 3009 } else { 3010 xpt_async(AC_LOST_DEVICE, tmppath, NULL); 3011 } 3012 } 3013 xpt_free_path(tmppath); 3014#endif 3015 CAMLOCK_2_ISPLOCK(isp); 3016 break; 3017 } 3018 case ISPASYNC_CHANGE_NOTIFY: 3019 if (arg == ISPASYNC_CHANGE_PDB) { 3020 isp_prt(isp, ISP_LOGINFO, 3021 "Port Database Changed"); 3022 } else if (arg == ISPASYNC_CHANGE_SNS) { 3023 isp_prt(isp, ISP_LOGINFO, 3024 "Name Server Database Changed"); 3025 } 3026#if __FreeBSD_version < 500000 3027 wakeup(&isp->isp_osinfo.kproc); 3028#else 3029#ifdef ISP_SMPLOCK 3030 cv_signal(&isp->isp_osinfo.kthread_cv); 3031#else 3032 wakeup(&isp->isp_osinfo.kthread_cv); 3033#endif 3034#endif 3035 break; 3036 case ISPASYNC_FABRIC_DEV: 3037 { 3038 int target, base, lim; 3039 fcparam *fcp = isp->isp_param; 3040 struct lportdb *lp = NULL; 3041 struct lportdb *clp = (struct lportdb *) arg; 3042 char *pt; 3043 3044 switch (clp->port_type) { 3045 case 1: 3046 pt = " N_Port"; 3047 break; 3048 case 2: 3049 pt = " NL_Port"; 3050 break; 3051 case 3: 3052 pt = "F/NL_Port"; 3053 break; 3054 case 0x7f: 3055 pt = " Nx_Port"; 3056 break; 3057 case 0x81: 3058 pt = " F_port"; 3059 break; 3060 case 0x82: 3061 pt = " FL_Port"; 3062 break; 3063 case 0x84: 3064 pt = " E_port"; 3065 break; 3066 default: 3067 pt = " "; 3068 break; 3069 } 3070 3071 isp_prt(isp, ISP_LOGINFO, 3072 "%s Fabric Device @ PortID 0x%x", pt, clp->portid); 3073 3074 /* 3075 * If we don't have an initiator role we bail. 3076 * 3077 * We just use ISPASYNC_FABRIC_DEV for announcement purposes. 3078 */ 3079 3080 if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) { 3081 break; 3082 } 3083 3084 /* 3085 * Is this entry for us? If so, we bail. 3086 */ 3087 3088 if (fcp->isp_portid == clp->portid) { 3089 break; 3090 } 3091 3092 /* 3093 * Else, the default policy is to find room for it in 3094 * our local port database. 
Later, when we execute 3095 * the call to isp_pdb_sync either this newly arrived 3096 * or already logged in device will be (re)announced. 3097 */ 3098 3099 if (fcp->isp_topo == TOPO_FL_PORT) 3100 base = FC_SNS_ID+1; 3101 else 3102 base = 0; 3103 3104 if (fcp->isp_topo == TOPO_N_PORT) 3105 lim = 1; 3106 else 3107 lim = MAX_FC_TARG; 3108 3109 /* 3110 * Is it already in our list? 3111 */ 3112 for (target = base; target < lim; target++) { 3113 if (target >= FL_PORT_ID && target <= FC_SNS_ID) { 3114 continue; 3115 } 3116 lp = &fcp->portdb[target]; 3117 if (lp->port_wwn == clp->port_wwn && 3118 lp->node_wwn == clp->node_wwn) { 3119 lp->fabric_dev = 1; 3120 break; 3121 } 3122 } 3123 if (target < lim) { 3124 break; 3125 } 3126 for (target = base; target < lim; target++) { 3127 if (target >= FL_PORT_ID && target <= FC_SNS_ID) { 3128 continue; 3129 } 3130 lp = &fcp->portdb[target]; 3131 if (lp->port_wwn == 0) { 3132 break; 3133 } 3134 } 3135 if (target == lim) { 3136 isp_prt(isp, ISP_LOGWARN, 3137 "out of space for fabric devices"); 3138 break; 3139 } 3140 lp->port_type = clp->port_type; 3141 lp->fc4_type = clp->fc4_type; 3142 lp->node_wwn = clp->node_wwn; 3143 lp->port_wwn = clp->port_wwn; 3144 lp->portid = clp->portid; 3145 lp->fabric_dev = 1; 3146 break; 3147 } 3148#ifdef ISP_TARGET_MODE 3149 case ISPASYNC_TARGET_NOTIFY: 3150 { 3151 tmd_notify_t *nt = arg; 3152 isp_prt(isp, ISP_LOGALL, 3153 "target notify code 0x%x", nt->nt_ncode); 3154 break; 3155 } 3156 case ISPASYNC_TARGET_ACTION: 3157 switch (((isphdr_t *)arg)->rqs_entry_type) { 3158 default: 3159 isp_prt(isp, ISP_LOGWARN, 3160 "event 0x%x for unhandled target action", 3161 ((isphdr_t *)arg)->rqs_entry_type); 3162 break; 3163 case RQSTYPE_NOTIFY: 3164 if (IS_SCSI(isp)) { 3165 rv = isp_handle_platform_notify_scsi(isp, 3166 (in_entry_t *) arg); 3167 } else { 3168 rv = isp_handle_platform_notify_fc(isp, 3169 (in_fcentry_t *) arg); 3170 } 3171 break; 3172 case RQSTYPE_ATIO: 3173 rv = isp_handle_platform_atio(isp, 
(at_entry_t *) arg); 3174 break; 3175 case RQSTYPE_ATIO2: 3176 rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg); 3177 break; 3178 case RQSTYPE_CTIO3: 3179 case RQSTYPE_CTIO2: 3180 case RQSTYPE_CTIO: 3181 rv = isp_handle_platform_ctio(isp, arg); 3182 break; 3183 case RQSTYPE_ENABLE_LUN: 3184 case RQSTYPE_MODIFY_LUN: 3185 isp_ledone(isp, (lun_entry_t *) arg); 3186 break; 3187 } 3188 break; 3189#endif 3190 case ISPASYNC_FW_CRASH: 3191 { 3192 uint16_t mbox1, mbox6; 3193 mbox1 = ISP_READ(isp, OUTMAILBOX1); 3194 if (IS_DUALBUS(isp)) { 3195 mbox6 = ISP_READ(isp, OUTMAILBOX6); 3196 } else { 3197 mbox6 = 0; 3198 } 3199 isp_prt(isp, ISP_LOGERR, 3200 "Internal Firmware Error on bus %d @ RISC Address 0x%x", 3201 mbox6, mbox1); 3202#ifdef ISP_FW_CRASH_DUMP 3203 /* 3204 * XXX: really need a thread to do this right. 3205 */ 3206 if (IS_FC(isp)) { 3207 FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT; 3208 FCPARAM(isp)->isp_loopstate = LOOP_NIL; 3209 isp_freeze_loopdown(isp, "f/w crash"); 3210 isp_fw_dump(isp); 3211 } 3212 isp_reinit(isp); 3213 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL); 3214#endif 3215 break; 3216 } 3217 case ISPASYNC_UNHANDLED_RESPONSE: 3218 break; 3219 default: 3220 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd); 3221 break; 3222 } 3223 return (rv); 3224} 3225 3226 3227/* 3228 * Locks are held before coming here. 3229 */ 3230void 3231isp_uninit(ispsoftc_t *isp) 3232{ 3233 ISP_WRITE(isp, HCCR, HCCR_CMD_RESET); 3234 DISABLE_INTS(isp); 3235} 3236 3237void 3238isp_prt(ispsoftc_t *isp, int level, const char *fmt, ...) 3239{ 3240 va_list ap; 3241 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) { 3242 return; 3243 } 3244 printf("%s: ", device_get_nameunit(isp->isp_dev)); 3245 va_start(ap, fmt); 3246 vprintf(fmt, ap); 3247 va_end(ap); 3248 printf("\n"); 3249}
|