/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
	SLIST_ENTRY(async_node)	links;
	u_int32_t	event_enable;	/* Async Event enables */
	void		(*callback)(void *arg, u_int32_t code,
				    struct cam_path *path, void *args);
	void		*callback_arg;
};

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif

/* number of high powered commands that can go through right now */
static int num_highpower = CAM_MAX_HIGHPOWER;

/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
	cam_pinfo pinfo;
	struct	  cam_ed *device;
};

/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on the bus.
 */
struct cam_ed {
	TAILQ_ENTRY(cam_ed) links;
	struct	cam_ed_qinfo alloc_ccb_entry;
	struct	cam_ed_qinfo send_ccb_entry;
	struct	cam_et	 *target;
	lun_id_t	 lun_id;
	struct	camq drvq;		/*
					 * Queue of type drivers wanting to do
					 * work on this device.
					 */
	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
	struct	async_list asyncs;	/* Async callback info for this B/T/L */
	struct	periph_list periphs;	/* All attached devices */
	u_int	generation;		/* Generation number */
	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
	struct	xpt_quirk_entry *quirk;	/* Oddities about this device */
					/* Storage for the inquiry data */
#ifdef CAM_NEW_TRAN_CODE
	cam_proto	 protocol;
	u_int		 protocol_version;
	cam_xport	 transport;
	u_int		 transport_version;
#endif /* CAM_NEW_TRAN_CODE */
	struct	scsi_inquiry_data inq_data;
	u_int8_t	 inq_flags;	/*
					 * Current settings for inquiry flags.
					 * This allows us to override settings
					 * like disconnection and tagged
					 * queuing for a device.
					 */
	u_int8_t	 queue_flags;	/* Queue flags from the control page */
	u_int8_t	 serial_num_len;
	u_int8_t	*serial_num;
	u_int32_t	 qfrozen_cnt;
	u_int32_t	 flags;
#define CAM_DEV_UNCONFIGURED		0x01
#define CAM_DEV_REL_TIMEOUT_PENDING	0x02
#define CAM_DEV_REL_ON_COMPLETE		0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
#define CAM_DEV_TAG_AFTER_COUNT		0x20
#define CAM_DEV_INQUIRY_DATA_VALID	0x40
	u_int32_t	 tag_delay_count;
#define	CAM_TAG_DELAY_COUNT		5
	u_int32_t	 tag_saved_openings;
	u_int32_t	 refcount;
	struct		 callout_handle c_handle;
};
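/*
 * Illustrative sketch (not part of this file): a peripheral driver
 * typically lands on one of the async lists above by issuing an
 * XPT_SASYNC_CB CCB.  The callback signature mirrors the async_node
 * definition; "mydriver_async" and "softc" are hypothetical names.
 *
 *	static void
 *	mydriver_async(void *arg, u_int32_t code, struct cam_path *path,
 *		       void *args)
 *	{
 *		(inspect "code", e.g. AC_FOUND_DEVICE or AC_LOST_DEVICE)
 *	}
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, path, 5);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = mydriver_async;
 *	csa.callback_arg = softc;
 *	xpt_action((union ccb *)&csa);
 */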
/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
	TAILQ_HEAD(, cam_ed) ed_entries;
	TAILQ_ENTRY(cam_et) links;
	struct	cam_eb	*bus;
	target_id_t	target_id;
	u_int32_t	refcount;
	u_int		generation;
	struct		timeval last_reset;
};

/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
	TAILQ_HEAD(, cam_et) et_entries;
	TAILQ_ENTRY(cam_eb)  links;
	path_id_t	     path_id;
	struct cam_sim	     *sim;
	struct timeval	     last_reset;
	u_int32_t	     flags;
#define	CAM_EB_RUNQ_SCHEDULED	0x01
	u_int32_t	     refcount;
	u_int		     generation;
};

struct cam_path {
	struct cam_periph *periph;
	struct cam_eb	  *bus;
	struct cam_et	  *target;
	struct cam_ed	  *device;
};

struct xpt_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define	CAM_QUIRK_NOLUNS	0x01
#define	CAM_QUIRK_NOSERIAL	0x02
#define	CAM_QUIRK_HILUNS	0x04
#define	CAM_QUIRK_NOHILUNS	0x08
	u_int mintags;
	u_int maxtags;
};
#define	CAM_SCSI2_MAXLUN	8
/*
 * If we're not quirked to search <= the first 8 luns
 * and we are either quirked to search above lun 8,
 * or we're > SCSI-2, we can look for luns above lun 8.
 */
#define	CAN_SRCH_HI(dv)					\
  (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0)	\
  && ((dv->quirk->quirks & CAM_QUIRK_HILUNS)		\
  || SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2))

typedef enum {
	XPT_FLAG_OPEN	= 0x01
} xpt_flags;

struct xpt_softc {
	xpt_flags	flags;
	u_int32_t	generation;
};

static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";
static const char microp[] = "MICROP";

static struct xpt_quirk_entry xpt_quirk_table[] =
{
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Unfortunately, the Quantum Atlas III has the same
		 * problem as the Atlas II drives above.
		 * Reported by: "Johan Granlund" <johan@granlund.nu>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM39100TD-SW N1B0
		 *
		 * It's possible that Quantum will fix the problem in later
		 * firmware revisions.  If that happens, the quirk entry
		 * will need to be made specific to the firmware revisions
		 * with the problem.
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * 18 Gig Atlas III, same problem as the 9G version.
		 * Reported by: Andre Albsmeier
		 *		<andre.albsmeier@mchp.siemens.de>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM318000TD-S N491
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * Broken tagged queuing drive
		 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
		 *         and: Martin Renters <martin@tdc.on.ca>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	/*
	 * The Seagate Medalist Pro drives have very poor write
	 * performance with anything more than 2 tags.
	 *
	 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
	 * Drive:  <SEAGATE ST36530N 1444>
	 *
	 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
	 * Drive:  <SEAGATE ST34520W 1281>
	 *
	 * No one has actually reported that the 9G version
	 * (ST39140*) of the Medalist Pro has the same problem, but
	 * we're assuming that it does because the 4G and 6.5G
	 * versions of the drive are broken.
	 */
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  Write performance
		 * steadily drops off with more and more concurrent
		 * transactions.  Best sequential write performance with
		 * tagged queueing turned off and write caching turned on.
		 *
		 * PR:  kern/10398
		 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
		 * Drive:  DCAS-34330 w/ "S65A" firmware.
		 *
		 * The drive with the problem had the "S65A" firmware
		 * revision, and has also been reported (by Stephen J.
		 * Roznowski <sjr@home.net>) for a drive with the "S61A"
		 * firmware revision.
		 *
		 * Although no one has reported problems with the 2 gig
		 * version of the DCAS drive, the assumption is that it
		 * has the same problems as the 4 gig version.  Therefore
		 * this quirk entry disables tagged queueing for all
		 * DCAS drives.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Broken tagged queuing drive.
		 * Submitted by:
		 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
		 * in PR kern/9535
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Doesn't handle queue full condition correctly,
		 * so we need to limit maxtags to what the device
		 * can handle instead of determining this automatically.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
		/*quirks*/0, /*mintags*/2, /*maxtags*/32
	},
	{
		/* Really only one LUN */
		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* I can't believe we need a quirk for DPT volumes. */
		{ T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
		/*mintags*/0, /*maxtags*/255
	},
	{
		/*
		 * Many Sony CDROM drives don't like multi-LUN probing.
		 */
		{ T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * This drive doesn't like multiple LUN probing.
		 * Submitted by:  Parag Patel <parag@cgt.com>
		 */
		{ T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		{ T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * The 8200 doesn't like multi-lun probing, and probably
		 * doesn't like serial number requests either.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"EXB-8200*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Let's try the same as above, but for a drive that says
		 * it's an IPL-6860 but is actually an EXB 8200.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"IPL-6860*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * These Hitachi drives don't like multi-lun probing.
		 * The PR submitter has a DK319H, but says that the Linux
		 * kernel has a similar work-around for the DK312 and DK314,
		 * so all DK31* drives are quirked here.
		 * PR:            misc/18793
		 * Submitted by:  Paul Haddad <paul@pth.com>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		/*
		 * The Hitachi CJ series with J8A8 firmware apparently has
		 * problems with tagged commands.
		 * PR: 23536
		 * Reported by: amagai@nue.org
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * These are the large storage arrays.
		 * Submitted by:  William Carrel <william.carrel@infospace.com>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
		CAM_QUIRK_HILUNS, 2, 1024
	},
	{
		/*
		 * This old revision of the TDC3600 is also SCSI-1, and
		 * hangs upon serial number probing.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
			" TDC 3600", "U07:"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Maxtor Personal Storage 3000XT (Firewire)
		 * hangs upon serial number probing.
		 */
		{
			T_DIRECT, SIP_MEDIA_FIXED, "Maxtor",
			"1394 storage", "*"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
			"CP150", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
			"96X2*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* TeraSolutions special settings for TRC-22 RAID */
		{ T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
		/*quirks*/0, /*mintags*/55, /*maxtags*/255
	},
	{
		/* Veritas Storage Appliance */
		{ T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
		CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
	},
	{
		/*
		 * Would respond to all LUNs.  Device type and removable
		 * flag are jumper-selectable.
		 */
		{ T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
		  "Tahiti 1", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* EasyRAID E5A aka. areca ARC-6010 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" },
		CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		/* Default tagged queuing parameters for all devices */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0, /*mintags*/2, /*maxtags*/255
	},
};

static const int xpt_quirk_table_size =
	sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);

typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef	int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
static cam_isrq_t cam_bioq;
static struct mtx cam_bioq_lock;

/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
static SLIST_HEAD(,ccb_hdr) ccb_freeq;
static u_int xpt_max_ccbs;	/*
				 * Maximum size of ccb pool.  Modified as
				 * devices are added/removed or have their
				 * opening counts changed.
				 */
static u_int xpt_ccb_count;	/* Current count of allocated ccbs */

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
	probe_periph_init, "probe",
	TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);
PERIPHDRIVER_DECLARE(probe, probe_driver);


static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct cdevsw xpt_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_open =	xptopen,
	.d_close =	xptclose,
	.d_ioctl =	xptioctl,
	.d_name =	"xpt",
};

static struct intr_config_hook *xpt_config_hook;

/* Registered busses */
static TAILQ_HEAD(,cam_eb) xpt_busses;
static u_int bus_generation;

/* Storage for debugging datastructures */
#ifdef CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif

/* Pointers to software interrupt handlers */
static void *cambio_ih;

#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif
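/*
 * Rough sketch of how the quirk table above is consulted (hedged; the
 * exact lookup lives in xpt_find_quirk(), declared below): a device's
 * inquiry data is pattern-matched against each entry and the first hit
 * wins, with the catch-all entry at the end guaranteeing a match.
 * Approximately:
 *
 *	quirk = (struct xpt_quirk_entry *)cam_quirkmatch(
 *	    (caddr_t)&device->inq_data, (caddr_t)xpt_quirk_table,
 *	    xpt_quirk_table_size, sizeof(*xpt_quirk_table),
 *	    scsi_inquiry_match);
 *	device->quirk = quirk;
 */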
/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
	"cam",
	cam_module_event_handler,
	NULL
};

static void	xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);


static cam_status	xpt_compile_path(struct cam_path *new_path,
					 struct cam_periph *perph,
					 path_id_t path_id,
					 target_id_t target_id,
					 lun_id_t lun_id);

static void		xpt_release_path(struct cam_path *path);

static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static void		xpt_dev_async(u_int32_t async_code,
				      struct cam_eb *bus,
				      struct cam_et *target,
				      struct cam_ed *device,
				      void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				  u_int32_t new_priority);
static void	 xpt_run_dev_allocq(struct cam_eb *bus);
static void	 xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static timeout_t xpt_release_simq_timeout;
static void	 xpt_release_bus(struct cam_eb *bus);
static void	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
					 int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
		 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
				  lun_id_t lun_id);
static void	 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
				    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void	 xpt_scan_lun(struct cam_periph *periph,
			      struct cam_path *path, cam_flags flags,
			      union ccb *ccb);
static void	 xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t	xptconfigbuscountfunc;
static xpt_busfunc_t	xptconfigfunc;
static void	 xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static void	 camisr(void *);
#if 0
static void	 xptstart(struct cam_periph *periph, union ccb *work_ccb);
static void	 xptasync(struct cam_periph *periph,
			  u_int32_t code, cam_path *path);
#endif
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static int		xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
#ifdef notusedyet
static int		xpt_for_all_targets(xpt_targetfunc_t *tr_func,
					    void *arg);
#endif
static int		xpt_for_all_devices(xpt_devicefunc_t *tr_func,
					    void *arg);
#ifdef notusedyet
static int		xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
					    void *arg);
#endif
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static cam_status	proberegister(struct cam_periph *periph,
				      void *arg);
static void	 probeschedule(struct cam_periph *probe_periph);
static void	 probestart(struct cam_periph *periph, union ccb *start_ccb);
static void	 proberequestdefaultnegotiation(struct cam_periph *periph);
static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
static void	 probecleanup(struct cam_periph *periph);
static void	 xpt_find_quirk(struct cam_ed *device);
#ifdef CAM_NEW_TRAN_CODE
static void	 xpt_devise_transport(struct cam_path *path);
#endif /* CAM_NEW_TRAN_CODE */
static void	 xpt_set_transfer_settings(struct ccb_trans_settings *cts,
					   struct cam_ed *device,
					   int async_update);
static void	 xpt_toggle_tags(struct cam_path *path);
static void	 xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
					    struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
					   struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);

static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	if (dev->ccbq.devq_openings > 0) {
		if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
			cam_ccbq_resize(&dev->ccbq,
					dev->ccbq.dev_openings
					+ dev->ccbq.dev_active);
			dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
		}
		/*
		 * The priority of a device waiting for CCB resources
		 * is that of the highest priority peripheral driver
		 * enqueued.
		 */
		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
					  &dev->alloc_ccb_entry.pinfo,
					  CAMQ_GET_HEAD(&dev->drvq)->priority);
	} else {
		retval = 0;
	}

	return (retval);
}

static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	if (dev->ccbq.dev_openings > 0) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&bus->sim->devq->send_queue,
				     &dev->send_ccb_entry.pinfo,
				     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
	} else {
		retval = 0;
	}
	return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
	/*
	 * Have work to do.
	 * Have space to do more work.
	 * Allowed to do work.
	 */
	return ((devq->alloc_queue.qfrozen_cnt == 0)
	     && (devq->alloc_queue.entries > 0)
	     && (devq->alloc_openings > 0));
}

static void
xpt_periph_init()
{
	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
probe_periph_init()
{
}


static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* Caller will release the CCB */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * Only allow read-write access.
	 */
	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((flags & O_NONBLOCK) != 0) {
		printf("xpt%d: can't do nonblocking access\n", unit);
		return(ENODEV);
	}

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptopen: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves open */
	xsoftc.flags |= XPT_FLAG_OPEN;

	return(0);
}
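/*
 * Illustrative user-space sketch (not part of this file): these cdev
 * methods back /dev/xpt0, which tools in the spirit of camcontrol drive
 * through the ioctls handled in xptioctl() below.  Roughly, with error
 * handling omitted:
 *
 *	union ccb ccb;
 *	int fd;
 *
 *	fd = open("/dev/xpt0", O_RDWR);
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.func_code = XPT_PATH_INQ;
 *	ccb.ccb_h.path_id = 0;
 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
 *	if (ioctl(fd, CAMIOCOMMAND, &ccb) == 0)
 *		printf("SIM vendor: %s\n", ccb.cpi.sim_vid);
 */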
static int
xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptclose: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves closed */
	xsoftc.flags &= ~XPT_FLAG_OPEN;

	return(0);
}

static int
xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int unit, error;

	error = 0;
	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptioctl: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	switch(cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver.  XPT_PATH_INQ is an exception to this, as stated
	 * in the CAM spec.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;

		inccb = (union ccb *)addr;

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
			 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
				error = EINVAL;
				break;
			}
			/* FALLTHROUGH */
		case XPT_PATH_INQ:
		case XPT_ENG_INQ:
		case XPT_SCAN_LUN:

			ccb = xpt_alloc_ccb();

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			ccb->ccb_h.cbfcnp = xptdone;
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			ccb.ccb_h.cbfcnp = xptdone;
			xpt_action(&ccb);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;

		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = ENOTSUP;
			break;
		}
		break;
	}
	/*
	 * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as
	 * input, with the peripheral driver name and unit name filled in.
	 * The other fields don't really matter as input.  The passthrough
	 * driver name ("pass"), and unit number are passed back in the ccb.
	 * The current device generation number, the index into the device
	 * peripheral driver list, and the status are also passed back.  Note
	 * that since we do everything in one pass, unlike the XPT_GDEVLIST
	 * ccb, we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
	 * (or rather should be) impossible for the device peripheral driver
	 * list to change since we look at the whole thing in one pass, and
	 * we do it with splcam protection.
	 */
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char *name;
		u_int unit;
		u_int cur_generation;
		int base_periph_found;
		int splbreaknum;
		int s;

		ccb = (union ccb *)addr;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		/*
		 * Every 100 devices, we want to drop our spl protection to
		 * give the software interrupt handler a chance to run.
		 * Most systems won't run into this check, but this should
		 * avoid starvation in the software interrupt handler in
		 * large systems.
		 */
		splbreaknum = 100;

		ccb = (union ccb *)addr;

		base_periph_found = 0;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		s = splcam();
ptstartover:
		cur_generation = xsoftc.generation;

		/* first find our driver in the list of drivers */
		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;

		if (*p_drv == NULL) {
			splx(s);
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
		     periph = TAILQ_NEXT(periph, unit_links)) {

			if (periph->unit_number == unit) {
				break;
			} else if (--splbreaknum == 0) {
				splx(s);
				s = splcam();
				splbreaknum = 100;
				if (cur_generation != xsoftc.generation)
					goto ptstartover;
			}
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = 1;
			device = periph->path->device;
			for (i = 0, periph = SLIST_FIRST(&device->periphs);
			     periph != NULL;
			     periph = SLIST_NEXT(periph, periph_links), i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (SLIST_NEXT(periph, periph_links))
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found == 1) {
				printf("xptioctl: pass driver is not in the "
				       "kernel\n");
				printf("xptioctl: put \"device pass0\" in "
				       "your kernel config file\n");
			}
		}
		splx(s);
		break;
	}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}

static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
	if (what == MOD_LOAD) {
		xpt_init(NULL);
	} else if (what == MOD_UNLOAD) {
		return EBUSY;
	} else {
		return EOPNOTSUPP;
	}

	return 0;
}
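/*
 * Illustrative sketch (hypothetical SIM, not from this file): a host
 * bus adapter driver enters the transport layer through the same calls
 * xpt_init() uses below for the xpt bus itself.  Roughly:
 *
 *	struct cam_devq *devq;
 *	struct cam_sim *sim;
 *
 *	devq = cam_simq_alloc(MAX_TRANSACTIONS);
 *	sim = cam_sim_alloc(mydriver_action, mydriver_poll, "mydriver",
 *			    softc, unit, 1, MAX_TAGGED, devq);
 *	if (xpt_bus_register(sim, 0) != CAM_SUCCESS)
 *		(error recovery)
 *
 * "mydriver_action", "mydriver_poll", "softc", and the MAX_* constants
 * are placeholders for the SIM driver's own entry points and limits.
 */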
/* Functions accessed by the peripheral drivers */
static void
xpt_init(dummy)
	void *dummy;
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq *devq;
	cam_status status;

	TAILQ_INIT(&xpt_busses);
	TAILQ_INIT(&cam_bioq);
	SLIST_INIT(&ccb_freeq);
	STAILQ_INIT(&highpowerq);

	mtx_init(&cam_bioq_lock, "CAM BIOQ lock", NULL, MTX_DEF);

	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe busses and
	 * perform other XPT functions.
	 */
	devq = cam_simq_alloc(16);
	xpt_sim = cam_sim_alloc(xptaction,
				xptpoll,
				"xpt",
				/*softc*/NULL,
				/*unit*/0,
				/*max_dev_transactions*/0,
				/*max_tagged_dev_transactions*/0,
				devq);
	xpt_max_ccbs = 16;

	xpt_bus_register(xpt_sim, /*bus #*/0);

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		printf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return;
	}

	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, NULL);
	xpt_free_path(path);

	xpt_sim->softc = xpt_periph;

	/*
	 * Register a callback for when interrupts are enabled.
	 */
	xpt_config_hook =
	    (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
					      M_TEMP, M_NOWAIT | M_ZERO);
	if (xpt_config_hook == NULL) {
		printf("xpt_init: Cannot malloc config hook "
		       "- failing attach\n");
		return;
	}

	xpt_config_hook->ich_func = xpt_config;
	if (config_intrhook_establish(xpt_config_hook) != 0) {
		free (xpt_config_hook, M_TEMP);
		printf("xpt_init: config_intrhook_establish failed "
		       "- failing attach\n");
	}

	/* Install our software interrupt handlers */
	swi_add(NULL, "cambio", camisr, &cam_bioq, SWI_CAMBIO, 0, &cambio_ih);
}

static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	if (periph == NULL) {
		printf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	periph->softc = NULL;

	xpt_periph = periph;

	return(CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t	 status;
	struct periph_list *periph_head;

	GIANT_REQUIRED;

	device = periph->path->device;

	periph_head = &device->periphs;

	status = CAM_REQ_CMP;

	if (device != NULL) {
		int s;

		/*
		 * Make room for this peripheral
		 * so it will fit in the queue
		 * when it's scheduled to run
		 */
		s = splsoftcam();
		status = camq_resize(&device->drvq,
				     device->drvq.array_size + 1);

		device->generation++;

		SLIST_INSERT_HEAD(periph_head, periph, periph_links);

		splx(s);
	}

	xsoftc.generation++;

	return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
	struct cam_ed *device;

	GIANT_REQUIRED;

	device = periph->path->device;

	if (device != NULL) {
		int s;
		struct periph_list *periph_head;

		periph_head = &device->periphs;

		/* Release the slot for this peripheral */
		s = splsoftcam();
		camq_resize(&device->drvq, device->drvq.array_size - 1);

		device->generation++;

		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);

		splx(s);
	}

	xsoftc.generation++;

}
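/*
 * Illustrative sketch (hypothetical peripheral, not from this file):
 * peripheral drivers do not usually call xpt_add_periph() directly.
 * They go through cam_periph_alloc(), as xpt_init() does above for the
 * "xpt" periph, and the periph layer adds the instance to the device's
 * periph list.  Roughly:
 *
 *	status = cam_periph_alloc(mydriver_register, mydriver_oninval,
 *				  mydriver_cleanup, mydriver_start,
 *				  "mydriver", CAM_PERIPH_BIO, path,
 *				  mydriver_async, AC_FOUND_DEVICE, softc);
 *
 * The mydriver_* entry points and "softc" are placeholders.
 */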
#ifdef CAM_NEW_TRAN_CODE

void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	struct	ccb_pathinq cpi;
	struct	ccb_trans_settings cts;
	struct	cam_path *path;
	u_int	speed;
	u_int	freq;
	u_int	mb;
	int	s;

	GIANT_REQUIRED;

	path = periph->path;
	/*
	 * To ensure that this is printed in one piece,
	 * mask out CAM interrupts.
	 */
	s = splsoftcam();
	printf("%s%d at %s%d bus %d target %d lun %d\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->target->target_id,
	       path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	scsi_print_inquiry(&path->device->inq_data);
	if (bootverbose && path->device->serial_num_len > 0) {
		/* Don't wrap the screen - print only the first 60 chars */
		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);

	/* Ask the SIM for its base transfer speed */
	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	speed = cpi.base_transfer_speed;
	freq = 0;
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
		struct	ccb_trans_settings_spi *spi;

		spi = &cts.xport_specific.spi;
		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
		  && spi->sync_offset != 0) {
			freq = scsi_calc_syncsrate(spi->sync_period);
			speed = freq;
		}

		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
			speed *= (0x01 << spi->bus_width);
	}

	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
		struct	ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_SPEED) {
			speed = fc->bitrate;
		}
	}

	mb = speed / 1000;
	if (mb > 0)
		printf("%s%d: %d.%03dMB/s transfers",
		       periph->periph_name, periph->unit_number,
		       mb, speed % 1000);
	else
		printf("%s%d: %dKB/s transfers", periph->periph_name,
		       periph->unit_number, speed);
	/* Report additional information about SPI connections */
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
		struct	ccb_trans_settings_spi *spi;

		spi = &cts.xport_specific.spi;
		if (freq != 0) {
			printf(" (%d.%03dMHz%s, offset %d", freq / 1000,
			       freq % 1000,
			       (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
			     ? " DT" : "",
			       spi->sync_offset);
		}
		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
		 && spi->bus_width > 0) {
			if (freq != 0) {
				printf(", ");
			} else {
				printf(" (");
			}
			printf("%dbit)", 8 * (0x01 << spi->bus_width));
		} else if (freq != 0) {
			printf(")");
		}
	}
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
		struct ccb_trans_settings_fc *fc;

		fc = &cts.xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_WWNN)
			printf(" WWNN 0x%llx", (long long) fc->wwnn);
		if (fc->valid & CTS_FC_VALID_WWPN)
			printf(" WWPN 0x%llx", (long long) fc->wwpn);
		if (fc->valid & CTS_FC_VALID_PORT)
			printf(" PortID 0x%x", fc->port);
	}

	if (path->device->inq_flags & SID_CmdQue
	 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		printf("\n%s%d: Tagged Queueing Enabled",
		       periph->periph_name, periph->unit_number);
	}
	printf("\n");

	/*
	 * We only want to print the caller's announce string if they've
	 * passed one in.
	 */
	if (announce_string != NULL)
		printf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
	splx(s);
}
#else /* CAM_NEW_TRAN_CODE */
void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	int s;
	u_int mb;
	struct cam_path *path;
	struct ccb_trans_settings cts;

	GIANT_REQUIRED;

	path = periph->path;
	/*
	 * To ensure that this is printed in one piece,
	 * mask out CAM interrupts.
	 */
	s = splsoftcam();
	printf("%s%d at %s%d bus %d target %d lun %d\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->target->target_id,
	       path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	scsi_print_inquiry(&path->device->inq_data);
	if ((bootverbose)
	 && (path->device->serial_num_len > 0)) {
		/* Don't wrap the screen - print only the first 60 chars */
		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.flags = CCB_TRANS_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);
	if (cts.ccb_h.status == CAM_REQ_CMP) {
		u_int speed;
		u_int freq;

		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		  && cts.sync_offset != 0) {
			freq = scsi_calc_syncsrate(cts.sync_period);
			speed = freq;
		} else {
			struct ccb_pathinq cpi;

			/* Ask the SIM for its base transfer speed */
			xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
			cpi.ccb_h.func_code = XPT_PATH_INQ;
			xpt_action((union ccb *)&cpi);

			speed = cpi.base_transfer_speed;
			freq = 0;
		}
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
			speed *= (0x01 << cts.bus_width);
		mb = speed / 1000;
		if (mb > 0)
			printf("%s%d: %d.%03dMB/s transfers",
			       periph->periph_name, periph->unit_number,
			       mb, speed % 1000);
		else
			printf("%s%d: %dKB/s transfers", periph->periph_name,
			       periph->unit_number, speed);
		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		 && cts.sync_offset != 0) {
			printf(" (%d.%03dMHz, offset %d", freq / 1000,
			       freq % 1000, cts.sync_offset);
		}
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
		 && cts.bus_width > 0) {
			if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			 && cts.sync_offset != 0) {
				printf(", ");
			} else {
				printf(" (");
			}
			printf("%dbit)", 8 * (0x01 << cts.bus_width));
		} else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			&& cts.sync_offset != 0) {
			printf(")");
		}

		if (path->device->inq_flags & SID_CmdQue
		 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
			printf(", Tagged Queueing Enabled");
		}

		printf("\n");
	} else if (path->device->inq_flags & SID_CmdQue
		|| path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		printf("%s%d: Tagged Queueing Enabled\n",
		       periph->periph_name, periph->unit_number);
	}

	/*
	 * We only want to print the caller's announce string if they've
	 * passed one in.
	 */
	if (announce_string != NULL)
		printf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
	splx(s);
}

#endif /* CAM_NEW_TRAN_CODE */

static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		/*
		 * If they want to match any bus node, we give them any
		 * bus node.
		 */
		if (cur_pattern->flags == BUS_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == BUS_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != bus->path_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
		 && (cur_pattern->bus_id != bus->sim->bus_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != bus->sim->unit_number))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this bus.  So tell the caller to copy the
		 * data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a non-bus matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a non-bus
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen anything other than bus matching patterns.  So
	 * tell the caller to stop descending the tree -- the user doesn't
	 * want to match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}
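/*
 * Illustrative sketch (not from this file): a caller selecting a bus by
 * SIM name would fill in a pattern like the following before handing it
 * to the XPT_DEV_MATCH machinery; xptbusmatch() above then returns
 * DM_RET_COPY | DM_RET_STOP for each matching bus.  "ahc" is just an
 * example SIM name.
 *
 *	struct dev_match_pattern pat;
 *
 *	pat.type = DEV_MATCH_BUS;
 *	pat.pattern.bus_pattern.flags = BUS_MATCH_NAME;
 *	strncpy(pat.pattern.bus_pattern.dev_name, "ahc", DEV_IDLEN);
 */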
static dev_match_ret
xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_ed *device)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (device == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this device matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct device_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a device node, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_DEVICE) {
			if ((patterns[i].type == DEV_MATCH_PERIPH)
			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.device_pattern;

		/*
		 * If they want to match any device node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == DEV_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;


			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == DEV_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != device->target->bus->path_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
		 && (cur_pattern->target_id != device->target->target_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
		 && (cur_pattern->target_lun != device->lun_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
		 && (cam_quirkmatch((caddr_t)&device->inq_data,
				    (caddr_t)&cur_pattern->inq_pat,
				    1, sizeof(cur_pattern->inq_pat),
				    scsi_static_inquiry_match) == NULL))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this device.  So tell the caller to copy
		 * the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a peripheral matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a peripheral
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen any peripheral matching patterns.  So tell the
	 * caller to stop descending the tree -- the user doesn't want to
	 * match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}
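/*
 * Illustrative sketch (not from this file): matching devices by inquiry
 * data uses the same scsi_inquiry_pattern shape as the quirk table near
 * the top of this file.  For example, to match every Quantum direct
 * access device, a caller could fill in roughly:
 *
 *	struct dev_match_pattern pat;
 *	struct scsi_inquiry_pattern inq_pat =
 *	    { T_DIRECT, SIP_MEDIA_FIXED, "QUANTUM", "*", "*" };
 *
 *	pat.type = DEV_MATCH_DEVICE;
 *	pat.pattern.device_pattern.flags = DEV_MATCH_INQUIRY;
 *	pat.pattern.device_pattern.inq_pat = inq_pat;
 */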
1992 */ 1993 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) 1994 retval |= DM_RET_STOP; 1995 1996 return(retval); 1997} 1998 1999/* 2000 * Match a single peripheral against any number of match patterns. 2001 */ 2002static dev_match_ret 2003xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns, 2004 struct cam_periph *periph) 2005{ 2006 dev_match_ret retval; 2007 int i; 2008 2009 /* 2010 * If we aren't given something to match against, that's an error. 2011 */ 2012 if (periph == NULL) 2013 return(DM_RET_ERROR); 2014 2015 /* 2016 * If there are no match entries, then this peripheral matches no 2017 * matter what. 2018 */ 2019 if ((patterns == NULL) || (num_patterns == 0)) 2020 return(DM_RET_STOP | DM_RET_COPY); 2021 2022 /* 2023 * There aren't any nodes below a peripheral node, so there's no 2024 * reason to descend the tree any further. 2025 */ 2026 retval = DM_RET_STOP; 2027 2028 for (i = 0; i < num_patterns; i++) { 2029 struct periph_match_pattern *cur_pattern; 2030 2031 /* 2032 * If the pattern in question isn't for a peripheral, we 2033 * aren't interested. 2034 */ 2035 if (patterns[i].type != DEV_MATCH_PERIPH) 2036 continue; 2037 2038 cur_pattern = &patterns[i].pattern.periph_pattern; 2039 2040 /* 2041 * If they want to match on anything, then we will do so. 2042 */ 2043 if (cur_pattern->flags == PERIPH_MATCH_ANY) { 2044 /* set the copy flag */ 2045 retval |= DM_RET_COPY; 2046 2047 /* 2048 * We've already set the return action to stop, 2049 * since there are no nodes below peripherals in 2050 * the tree. 2051 */ 2052 return(retval); 2053 } 2054 2055 /* 2056 * Not sure why someone would do this... 2057 */ 2058 if (cur_pattern->flags == PERIPH_MATCH_NONE) 2059 continue; 2060 2061 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0) 2062 && (cur_pattern->path_id != periph->path->bus->path_id)) 2063 continue; 2064 2065 /* 2066 * For the target and lun id's, we have to make sure the 2067 * target and lun pointers aren't NULL. The xpt peripheral 2068 * has a wildcard target and device. 2069 */ 2070 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0) 2071 && ((periph->path->target == NULL) 2072 ||(cur_pattern->target_id != periph->path->target->target_id))) 2073 continue; 2074 2075 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0) 2076 && ((periph->path->device == NULL) 2077 || (cur_pattern->target_lun != periph->path->device->lun_id))) 2078 continue; 2079 2080 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0) 2081 && (cur_pattern->unit_number != periph->unit_number)) 2082 continue; 2083 2084 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0) 2085 && (strncmp(cur_pattern->periph_name, periph->periph_name, 2086 DEV_IDLEN) != 0)) 2087 continue; 2088 2089 /* 2090 * If we get to this point, the user definitely wants 2091 * information on this peripheral. So tell the caller to 2092 * copy the data out. 2093 */ 2094 retval |= DM_RET_COPY; 2095 2096 /* 2097 * The return action has already been set to stop, since 2098 * peripherals don't have any nodes below them in the EDT. 2099 */ 2100 return(retval); 2101 } 2102 2103 /* 2104 * If we get to this point, the peripheral that was passed in 2105 * doesn't match any of the patterns. 2106 */ 2107 return(retval); 2108} 2109 2110static int 2111xptedtbusfunc(struct cam_eb *bus, void *arg) 2112{ 2113 struct ccb_dev_match *cdm; 2114 dev_match_ret retval; 2115 2116 cdm = (struct ccb_dev_match *)arg; 2117 2118 /* 2119 * If our position is for something deeper in the tree, that means 2120 * that we've already seen this node. 
So, we keep going down. 2121 */ 2122 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2123 && (cdm->pos.cookie.bus == bus) 2124 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2125 && (cdm->pos.cookie.target != NULL)) 2126 retval = DM_RET_DESCEND; 2127 else 2128 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus); 2129 2130 /* 2131 * If we got an error, bail out of the search. 2132 */ 2133 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 2134 cdm->status = CAM_DEV_MATCH_ERROR; 2135 return(0); 2136 } 2137 2138 /* 2139 * If the copy flag is set, copy this bus out. 2140 */ 2141 if (retval & DM_RET_COPY) { 2142 int spaceleft, j; 2143 2144 spaceleft = cdm->match_buf_len - (cdm->num_matches * 2145 sizeof(struct dev_match_result)); 2146 2147 /* 2148 * If we don't have enough space to put in another 2149 * match result, save our position and tell the 2150 * user there are more devices to check. 2151 */ 2152 if (spaceleft < sizeof(struct dev_match_result)) { 2153 bzero(&cdm->pos, sizeof(cdm->pos)); 2154 cdm->pos.position_type = 2155 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS; 2156 2157 cdm->pos.cookie.bus = bus; 2158 cdm->pos.generations[CAM_BUS_GENERATION]= 2159 bus_generation; 2160 cdm->status = CAM_DEV_MATCH_MORE; 2161 return(0); 2162 } 2163 j = cdm->num_matches; 2164 cdm->num_matches++; 2165 cdm->matches[j].type = DEV_MATCH_BUS; 2166 cdm->matches[j].result.bus_result.path_id = bus->path_id; 2167 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id; 2168 cdm->matches[j].result.bus_result.unit_number = 2169 bus->sim->unit_number; 2170 strncpy(cdm->matches[j].result.bus_result.dev_name, 2171 bus->sim->sim_name, DEV_IDLEN); 2172 } 2173 2174 /* 2175 * If the user is only interested in busses, there's no 2176 * reason to descend to the next level in the tree. 2177 */ 2178 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) 2179 return(1); 2180 2181 /* 2182 * If there is a target generation recorded, check it to 2183 * make sure the target list hasn't changed. 2184 */ 2185 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2186 && (bus == cdm->pos.cookie.bus) 2187 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2188 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0) 2189 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 2190 bus->generation)) { 2191 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 2192 return(0); 2193 } 2194 2195 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2196 && (cdm->pos.cookie.bus == bus) 2197 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2198 && (cdm->pos.cookie.target != NULL)) 2199 return(xpttargettraverse(bus, 2200 (struct cam_et *)cdm->pos.cookie.target, 2201 xptedttargetfunc, arg)); 2202 else 2203 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg)); 2204} 2205 2206static int 2207xptedttargetfunc(struct cam_et *target, void *arg) 2208{ 2209 struct ccb_dev_match *cdm; 2210 2211 cdm = (struct ccb_dev_match *)arg; 2212 2213 /* 2214 * If there is a device list generation recorded, check it to 2215 * make sure the device list hasn't changed. 
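 *
 * Sketch of the race this catches (illustrative): if a prior pass
 * saved a resume position under this target and a device has since
 * been added or removed, target->generation no longer equals the
 * saved generations[CAM_DEV_GENERATION], and the caller gets
 * CAM_DEV_MATCH_LIST_CHANGED so it can restart the match from
 * scratch.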
2216 */ 2217 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2218 && (cdm->pos.cookie.bus == target->bus) 2219 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2220 && (cdm->pos.cookie.target == target) 2221 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 2222 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0) 2223 && (cdm->pos.generations[CAM_DEV_GENERATION] != 2224 target->generation)) { 2225 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 2226 return(0); 2227 } 2228 2229 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2230 && (cdm->pos.cookie.bus == target->bus) 2231 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2232 && (cdm->pos.cookie.target == target) 2233 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 2234 && (cdm->pos.cookie.device != NULL)) 2235 return(xptdevicetraverse(target, 2236 (struct cam_ed *)cdm->pos.cookie.device, 2237 xptedtdevicefunc, arg)); 2238 else 2239 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg)); 2240} 2241 2242static int 2243xptedtdevicefunc(struct cam_ed *device, void *arg) 2244{ 2245 2246 struct ccb_dev_match *cdm; 2247 dev_match_ret retval; 2248 2249 cdm = (struct ccb_dev_match *)arg; 2250 2251 /* 2252 * If our position is for something deeper in the tree, that means 2253 * that we've already seen this node. So, we keep going down. 2254 */ 2255 if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE) 2256 && (cdm->pos.cookie.device == device) 2257 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2258 && (cdm->pos.cookie.periph != NULL)) 2259 retval = DM_RET_DESCEND; 2260 else 2261 retval = xptdevicematch(cdm->patterns, cdm->num_patterns, 2262 device); 2263 2264 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 2265 cdm->status = CAM_DEV_MATCH_ERROR; 2266 return(0); 2267 } 2268 2269 /* 2270 * If the copy flag is set, copy this device out. 2271 */ 2272 if (retval & DM_RET_COPY) { 2273 int spaceleft, j; 2274 2275 spaceleft = cdm->match_buf_len - (cdm->num_matches * 2276 sizeof(struct dev_match_result)); 2277 2278 /* 2279 * If we don't have enough space to put in another 2280 * match result, save our position and tell the 2281 * user there are more devices to check. 
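 *
 * Illustrative arithmetic: spaceleft is match_buf_len minus
 * num_matches * sizeof(struct dev_match_result), so a buffer sized
 * for exactly N results takes the resume path below on the (N+1)th
 * match instead of overrunning the caller's buffer.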
2282 */ 2283 if (spaceleft < sizeof(struct dev_match_result)) { 2284 bzero(&cdm->pos, sizeof(cdm->pos)); 2285 cdm->pos.position_type = 2286 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 2287 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE; 2288 2289 cdm->pos.cookie.bus = device->target->bus; 2290 cdm->pos.generations[CAM_BUS_GENERATION]= 2291 bus_generation; 2292 cdm->pos.cookie.target = device->target; 2293 cdm->pos.generations[CAM_TARGET_GENERATION] = 2294 device->target->bus->generation; 2295 cdm->pos.cookie.device = device; 2296 cdm->pos.generations[CAM_DEV_GENERATION] = 2297 device->target->generation; 2298 cdm->status = CAM_DEV_MATCH_MORE; 2299 return(0); 2300 } 2301 j = cdm->num_matches; 2302 cdm->num_matches++; 2303 cdm->matches[j].type = DEV_MATCH_DEVICE; 2304 cdm->matches[j].result.device_result.path_id = 2305 device->target->bus->path_id; 2306 cdm->matches[j].result.device_result.target_id = 2307 device->target->target_id; 2308 cdm->matches[j].result.device_result.target_lun = 2309 device->lun_id; 2310 bcopy(&device->inq_data, 2311 &cdm->matches[j].result.device_result.inq_data, 2312 sizeof(struct scsi_inquiry_data)); 2313 2314 /* Let the user know whether this device is unconfigured */ 2315 if (device->flags & CAM_DEV_UNCONFIGURED) 2316 cdm->matches[j].result.device_result.flags = 2317 DEV_RESULT_UNCONFIGURED; 2318 else 2319 cdm->matches[j].result.device_result.flags = 2320 DEV_RESULT_NOFLAG; 2321 } 2322 2323 /* 2324 * If the user isn't interested in peripherals, don't descend 2325 * the tree any further. 2326 */ 2327 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) 2328 return(1); 2329 2330 /* 2331 * If there is a peripheral list generation recorded, make sure 2332 * it hasn't changed. 2333 */ 2334 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2335 && (device->target->bus == cdm->pos.cookie.bus) 2336 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2337 && (device->target == cdm->pos.cookie.target) 2338 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 2339 && (device == cdm->pos.cookie.device) 2340 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2341 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0) 2342 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 2343 device->generation)){ 2344 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 2345 return(0); 2346 } 2347 2348 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2349 && (cdm->pos.cookie.bus == device->target->bus) 2350 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2351 && (cdm->pos.cookie.target == device->target) 2352 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 2353 && (cdm->pos.cookie.device == device) 2354 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2355 && (cdm->pos.cookie.periph != NULL)) 2356 return(xptperiphtraverse(device, 2357 (struct cam_periph *)cdm->pos.cookie.periph, 2358 xptedtperiphfunc, arg)); 2359 else 2360 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg)); 2361} 2362 2363static int 2364xptedtperiphfunc(struct cam_periph *periph, void *arg) 2365{ 2366 struct ccb_dev_match *cdm; 2367 dev_match_ret retval; 2368 2369 cdm = (struct ccb_dev_match *)arg; 2370 2371 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 2372 2373 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 2374 cdm->status = CAM_DEV_MATCH_ERROR; 2375 return(0); 2376 } 2377 2378 /* 2379 * If the copy flag is set, copy this peripheral out. 
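 * No descend/stop decision is needed at this level; peripherals are
 * leaves of the EDT, so xptperiphmatch always returns with the stop
 * action already set.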
2380 */ 2381 if (retval & DM_RET_COPY) { 2382 int spaceleft, j; 2383 2384 spaceleft = cdm->match_buf_len - (cdm->num_matches * 2385 sizeof(struct dev_match_result)); 2386 2387 /* 2388 * If we don't have enough space to put in another 2389 * match result, save our position and tell the 2390 * user there are more devices to check. 2391 */ 2392 if (spaceleft < sizeof(struct dev_match_result)) { 2393 bzero(&cdm->pos, sizeof(cdm->pos)); 2394 cdm->pos.position_type = 2395 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 2396 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE | 2397 CAM_DEV_POS_PERIPH; 2398 2399 cdm->pos.cookie.bus = periph->path->bus; 2400 cdm->pos.generations[CAM_BUS_GENERATION]= 2401 bus_generation; 2402 cdm->pos.cookie.target = periph->path->target; 2403 cdm->pos.generations[CAM_TARGET_GENERATION] = 2404 periph->path->bus->generation; 2405 cdm->pos.cookie.device = periph->path->device; 2406 cdm->pos.generations[CAM_DEV_GENERATION] = 2407 periph->path->target->generation; 2408 cdm->pos.cookie.periph = periph; 2409 cdm->pos.generations[CAM_PERIPH_GENERATION] = 2410 periph->path->device->generation; 2411 cdm->status = CAM_DEV_MATCH_MORE; 2412 return(0); 2413 } 2414 2415 j = cdm->num_matches; 2416 cdm->num_matches++; 2417 cdm->matches[j].type = DEV_MATCH_PERIPH; 2418 cdm->matches[j].result.periph_result.path_id = 2419 periph->path->bus->path_id; 2420 cdm->matches[j].result.periph_result.target_id = 2421 periph->path->target->target_id; 2422 cdm->matches[j].result.periph_result.target_lun = 2423 periph->path->device->lun_id; 2424 cdm->matches[j].result.periph_result.unit_number = 2425 periph->unit_number; 2426 strncpy(cdm->matches[j].result.periph_result.periph_name, 2427 periph->periph_name, DEV_IDLEN); 2428 } 2429 2430 return(1); 2431} 2432 2433static int 2434xptedtmatch(struct ccb_dev_match *cdm) 2435{ 2436 int ret; 2437 2438 cdm->num_matches = 0; 2439 2440 /* 2441 * Check the bus list generation. If it has changed, the user 2442 * needs to reset everything and start over. 2443 */ 2444 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2445 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0) 2446 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) { 2447 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 2448 return(0); 2449 } 2450 2451 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2452 && (cdm->pos.cookie.bus != NULL)) 2453 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus, 2454 xptedtbusfunc, cdm); 2455 else 2456 ret = xptbustraverse(NULL, xptedtbusfunc, cdm); 2457 2458 /* 2459 * If we get back 0, that means that we had to stop before fully 2460 * traversing the EDT. It also means that one of the subroutines 2461 * has set the status field to the proper value. If we get back 1, 2462 * we've fully traversed the EDT and copied out any matching entries. 
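 *
 * Hedged usage sketch (hypothetical consumer, not part of this
 * file): the expected driving loop reissues the ccb until the status
 * stops being CAM_DEV_MATCH_MORE, relying on the saved cdm->pos to
 * resume:
 *
 *	do {
 *		(submit the XPT_DEV_MATCH ccb)
 *		(consume cdm->num_matches entries from cdm->matches)
 *	} while (cdm->status == CAM_DEV_MATCH_MORE);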
2463 */ 2464 if (ret == 1) 2465 cdm->status = CAM_DEV_MATCH_LAST; 2466 2467 return(ret); 2468} 2469 2470static int 2471xptplistpdrvfunc(struct periph_driver **pdrv, void *arg) 2472{ 2473 struct ccb_dev_match *cdm; 2474 2475 cdm = (struct ccb_dev_match *)arg; 2476 2477 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2478 && (cdm->pos.cookie.pdrv == pdrv) 2479 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2480 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0) 2481 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 2482 (*pdrv)->generation)) { 2483 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 2484 return(0); 2485 } 2486 2487 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2488 && (cdm->pos.cookie.pdrv == pdrv) 2489 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2490 && (cdm->pos.cookie.periph != NULL)) 2491 return(xptpdperiphtraverse(pdrv, 2492 (struct cam_periph *)cdm->pos.cookie.periph, 2493 xptplistperiphfunc, arg)); 2494 else 2495 return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg)); 2496} 2497 2498static int 2499xptplistperiphfunc(struct cam_periph *periph, void *arg) 2500{ 2501 struct ccb_dev_match *cdm; 2502 dev_match_ret retval; 2503 2504 cdm = (struct ccb_dev_match *)arg; 2505 2506 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 2507 2508 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 2509 cdm->status = CAM_DEV_MATCH_ERROR; 2510 return(0); 2511 } 2512 2513 /* 2514 * If the copy flag is set, copy this peripheral out. 2515 */ 2516 if (retval & DM_RET_COPY) { 2517 int spaceleft, j; 2518 2519 spaceleft = cdm->match_buf_len - (cdm->num_matches * 2520 sizeof(struct dev_match_result)); 2521 2522 /* 2523 * If we don't have enough space to put in another 2524 * match result, save our position and tell the 2525 * user there are more devices to check. 2526 */ 2527 if (spaceleft < sizeof(struct dev_match_result)) { 2528 struct periph_driver **pdrv; 2529 2530 pdrv = NULL; 2531 bzero(&cdm->pos, sizeof(cdm->pos)); 2532 cdm->pos.position_type = 2533 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR | 2534 CAM_DEV_POS_PERIPH; 2535 2536 /* 2537 * This may look a bit non-sensical, but it is 2538 * actually quite logical. There are very few 2539 * peripheral drivers, and bloating every peripheral 2540 * structure with a pointer back to its parent 2541 * peripheral driver linker set entry would cost 2542 * more in the long run than doing this quick lookup. 2543 */ 2544 for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) { 2545 if (strcmp((*pdrv)->driver_name, 2546 periph->periph_name) == 0) 2547 break; 2548 } 2549 2550 if (*pdrv == NULL) { 2551 cdm->status = CAM_DEV_MATCH_ERROR; 2552 return(0); 2553 } 2554 2555 cdm->pos.cookie.pdrv = pdrv; 2556 /* 2557 * The periph generation slot does double duty, as 2558 * does the periph pointer slot. They are used for 2559 * both edt and pdrv lookups and positioning. 2560 */ 2561 cdm->pos.cookie.periph = periph; 2562 cdm->pos.generations[CAM_PERIPH_GENERATION] = 2563 (*pdrv)->generation; 2564 cdm->status = CAM_DEV_MATCH_MORE; 2565 return(0); 2566 } 2567 2568 j = cdm->num_matches; 2569 cdm->num_matches++; 2570 cdm->matches[j].type = DEV_MATCH_PERIPH; 2571 cdm->matches[j].result.periph_result.path_id = 2572 periph->path->bus->path_id; 2573 2574 /* 2575 * The transport layer peripheral doesn't have a target or 2576 * lun. 
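 * Its path may carry NULL target and device pointers (the wildcard
 * case noted in xptperiphmatch above), which is why both are checked
 * below and reported as -1 when absent.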
2577 */ 2578 if (periph->path->target) 2579 cdm->matches[j].result.periph_result.target_id = 2580 periph->path->target->target_id; 2581 else 2582 cdm->matches[j].result.periph_result.target_id = -1; 2583 2584 if (periph->path->device) 2585 cdm->matches[j].result.periph_result.target_lun = 2586 periph->path->device->lun_id; 2587 else 2588 cdm->matches[j].result.periph_result.target_lun = -1; 2589 2590 cdm->matches[j].result.periph_result.unit_number = 2591 periph->unit_number; 2592 strncpy(cdm->matches[j].result.periph_result.periph_name, 2593 periph->periph_name, DEV_IDLEN); 2594 } 2595 2596 return(1); 2597} 2598 2599static int 2600xptperiphlistmatch(struct ccb_dev_match *cdm) 2601{ 2602 int ret; 2603 2604 cdm->num_matches = 0; 2605 2606 /* 2607 * At this point in the edt traversal function, we check the bus 2608 * list generation to make sure that no busses have been added or 2609 * removed since the user last sent a XPT_DEV_MATCH ccb through. 2610 * For the peripheral driver list traversal function, however, we 2611 * don't have to worry about new peripheral driver types coming or 2612 * going; they're in a linker set, and therefore can't change 2613 * without a recompile. 2614 */ 2615 2616 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2617 && (cdm->pos.cookie.pdrv != NULL)) 2618 ret = xptpdrvtraverse( 2619 (struct periph_driver **)cdm->pos.cookie.pdrv, 2620 xptplistpdrvfunc, cdm); 2621 else 2622 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm); 2623 2624 /* 2625 * If we get back 0, that means that we had to stop before fully 2626 * traversing the peripheral driver tree. It also means that one of 2627 * the subroutines has set the status field to the proper value. If 2628 * we get back 1, we've fully traversed the EDT and copied out any 2629 * matching entries. 2630 */ 2631 if (ret == 1) 2632 cdm->status = CAM_DEV_MATCH_LAST; 2633 2634 return(ret); 2635} 2636 2637static int 2638xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg) 2639{ 2640 struct cam_eb *bus, *next_bus; 2641 int retval; 2642 2643 retval = 1; 2644 2645 for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses)); 2646 bus != NULL; 2647 bus = next_bus) { 2648 next_bus = TAILQ_NEXT(bus, links); 2649 2650 retval = tr_func(bus, arg); 2651 if (retval == 0) 2652 return(retval); 2653 } 2654 2655 return(retval); 2656} 2657 2658static int 2659xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, 2660 xpt_targetfunc_t *tr_func, void *arg) 2661{ 2662 struct cam_et *target, *next_target; 2663 int retval; 2664 2665 retval = 1; 2666 for (target = (start_target ? start_target : 2667 TAILQ_FIRST(&bus->et_entries)); 2668 target != NULL; target = next_target) { 2669 2670 next_target = TAILQ_NEXT(target, links); 2671 2672 retval = tr_func(target, arg); 2673 2674 if (retval == 0) 2675 return(retval); 2676 } 2677 2678 return(retval); 2679} 2680 2681static int 2682xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, 2683 xpt_devicefunc_t *tr_func, void *arg) 2684{ 2685 struct cam_ed *device, *next_device; 2686 int retval; 2687 2688 retval = 1; 2689 for (device = (start_device ? 
start_device : 2690 TAILQ_FIRST(&target->ed_entries)); 2691 device != NULL; 2692 device = next_device) { 2693 2694 next_device = TAILQ_NEXT(device, links); 2695 2696 retval = tr_func(device, arg); 2697 2698 if (retval == 0) 2699 return(retval); 2700 } 2701 2702 return(retval); 2703} 2704 2705static int 2706xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, 2707 xpt_periphfunc_t *tr_func, void *arg) 2708{ 2709 struct cam_periph *periph, *next_periph; 2710 int retval; 2711 2712 retval = 1; 2713 2714 for (periph = (start_periph ? start_periph : 2715 SLIST_FIRST(&device->periphs)); 2716 periph != NULL; 2717 periph = next_periph) { 2718 2719 next_periph = SLIST_NEXT(periph, periph_links); 2720 2721 retval = tr_func(periph, arg); 2722 if (retval == 0) 2723 return(retval); 2724 } 2725 2726 return(retval); 2727} 2728 2729static int 2730xptpdrvtraverse(struct periph_driver **start_pdrv, 2731 xpt_pdrvfunc_t *tr_func, void *arg) 2732{ 2733 struct periph_driver **pdrv; 2734 int retval; 2735 2736 retval = 1; 2737 2738 /* 2739 * We don't traverse the peripheral driver list like we do the 2740 * other lists, because it is a linker set, and therefore cannot be 2741 * changed during runtime. If the peripheral driver list is ever 2742 * re-done to be something other than a linker set (i.e. it can 2743 * change while the system is running), the list traversal should 2744 * be modified to work like the other traversal functions. 2745 */ 2746 for (pdrv = (start_pdrv ? start_pdrv : periph_drivers); 2747 *pdrv != NULL; pdrv++) { 2748 retval = tr_func(pdrv, arg); 2749 2750 if (retval == 0) 2751 return(retval); 2752 } 2753 2754 return(retval); 2755} 2756 2757static int 2758xptpdperiphtraverse(struct periph_driver **pdrv, 2759 struct cam_periph *start_periph, 2760 xpt_periphfunc_t *tr_func, void *arg) 2761{ 2762 struct cam_periph *periph, *next_periph; 2763 int retval; 2764 2765 retval = 1; 2766 2767 for (periph = (start_periph ? 
start_periph : 2768 TAILQ_FIRST(&(*pdrv)->units)); periph != NULL; 2769 periph = next_periph) { 2770 2771 next_periph = TAILQ_NEXT(periph, unit_links); 2772 2773 retval = tr_func(periph, arg); 2774 if (retval == 0) 2775 return(retval); 2776 } 2777 return(retval); 2778} 2779 2780static int 2781xptdefbusfunc(struct cam_eb *bus, void *arg) 2782{ 2783 struct xpt_traverse_config *tr_config; 2784 2785 tr_config = (struct xpt_traverse_config *)arg; 2786 2787 if (tr_config->depth == XPT_DEPTH_BUS) { 2788 xpt_busfunc_t *tr_func; 2789 2790 tr_func = (xpt_busfunc_t *)tr_config->tr_func; 2791 2792 return(tr_func(bus, tr_config->tr_arg)); 2793 } else 2794 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg)); 2795} 2796 2797static int 2798xptdeftargetfunc(struct cam_et *target, void *arg) 2799{ 2800 struct xpt_traverse_config *tr_config; 2801 2802 tr_config = (struct xpt_traverse_config *)arg; 2803 2804 if (tr_config->depth == XPT_DEPTH_TARGET) { 2805 xpt_targetfunc_t *tr_func; 2806 2807 tr_func = (xpt_targetfunc_t *)tr_config->tr_func; 2808 2809 return(tr_func(target, tr_config->tr_arg)); 2810 } else 2811 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg)); 2812} 2813 2814static int 2815xptdefdevicefunc(struct cam_ed *device, void *arg) 2816{ 2817 struct xpt_traverse_config *tr_config; 2818 2819 tr_config = (struct xpt_traverse_config *)arg; 2820 2821 if (tr_config->depth == XPT_DEPTH_DEVICE) { 2822 xpt_devicefunc_t *tr_func; 2823 2824 tr_func = (xpt_devicefunc_t *)tr_config->tr_func; 2825 2826 return(tr_func(device, tr_config->tr_arg)); 2827 } else 2828 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg)); 2829} 2830 2831static int 2832xptdefperiphfunc(struct cam_periph *periph, void *arg) 2833{ 2834 struct xpt_traverse_config *tr_config; 2835 xpt_periphfunc_t *tr_func; 2836 2837 tr_config = (struct xpt_traverse_config *)arg; 2838 2839 tr_func = (xpt_periphfunc_t *)tr_config->tr_func; 2840 2841 /* 2842 * Unlike the other default functions, we don't check for depth 2843 * here. The peripheral driver level is the last level in the EDT, 2844 * so if we're here, we should execute the function in question. 2845 */ 2846 return(tr_func(periph, tr_config->tr_arg)); 2847} 2848 2849/* 2850 * Execute the given function for every bus in the EDT. 2851 */ 2852static int 2853xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg) 2854{ 2855 struct xpt_traverse_config tr_config; 2856 2857 tr_config.depth = XPT_DEPTH_BUS; 2858 tr_config.tr_func = tr_func; 2859 tr_config.tr_arg = arg; 2860 2861 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2862} 2863 2864#ifdef notusedyet 2865/* 2866 * Execute the given function for every target in the EDT. 2867 */ 2868static int 2869xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg) 2870{ 2871 struct xpt_traverse_config tr_config; 2872 2873 tr_config.depth = XPT_DEPTH_TARGET; 2874 tr_config.tr_func = tr_func; 2875 tr_config.tr_arg = arg; 2876 2877 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2878} 2879#endif /* notusedyet */ 2880 2881/* 2882 * Execute the given function for every device in the EDT. 2883 */ 2884static int 2885xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg) 2886{ 2887 struct xpt_traverse_config tr_config; 2888 2889 tr_config.depth = XPT_DEPTH_DEVICE; 2890 tr_config.tr_func = tr_func; 2891 tr_config.tr_arg = arg; 2892 2893 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2894} 2895 2896#ifdef notusedyet 2897/* 2898 * Execute the given function for every peripheral in the EDT. 
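 *
 * Illustrative use (hypothetical callback; per the traversal
 * functions above, returning 0 stops the walk and nonzero
 * continues it):
 *
 *	static int
 *	count_periphs(struct cam_periph *periph, void *arg)
 *	{
 *		(*(u_int *)arg)++;
 *		return (1);
 *	}
 *
 *	u_int count = 0;
 *	xpt_for_all_periphs(count_periphs, &count);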
2899 */ 2900static int 2901xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg) 2902{ 2903 struct xpt_traverse_config tr_config; 2904 2905 tr_config.depth = XPT_DEPTH_PERIPH; 2906 tr_config.tr_func = tr_func; 2907 tr_config.tr_arg = arg; 2908 2909 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2910} 2911#endif /* notusedyet */ 2912 2913static int 2914xptsetasyncfunc(struct cam_ed *device, void *arg) 2915{ 2916 struct cam_path path; 2917 struct ccb_getdev cgd; 2918 struct async_node *cur_entry; 2919 2920 cur_entry = (struct async_node *)arg; 2921 2922 /* 2923 * Don't report unconfigured devices (Wildcard devs, 2924 * devices only for target mode, device instances 2925 * that have been invalidated but are waiting for 2926 * their last reference count to be released). 2927 */ 2928 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0) 2929 return (1); 2930 2931 xpt_compile_path(&path, 2932 NULL, 2933 device->target->bus->path_id, 2934 device->target->target_id, 2935 device->lun_id); 2936 xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1); 2937 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 2938 xpt_action((union ccb *)&cgd); 2939 cur_entry->callback(cur_entry->callback_arg, 2940 AC_FOUND_DEVICE, 2941 &path, &cgd); 2942 xpt_release_path(&path); 2943 2944 return(1); 2945} 2946 2947static int 2948xptsetasyncbusfunc(struct cam_eb *bus, void *arg) 2949{ 2950 struct cam_path path; 2951 struct ccb_pathinq cpi; 2952 struct async_node *cur_entry; 2953 2954 cur_entry = (struct async_node *)arg; 2955 2956 xpt_compile_path(&path, /*periph*/NULL, 2957 bus->sim->path_id, 2958 CAM_TARGET_WILDCARD, 2959 CAM_LUN_WILDCARD); 2960 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1); 2961 cpi.ccb_h.func_code = XPT_PATH_INQ; 2962 xpt_action((union ccb *)&cpi); 2963 cur_entry->callback(cur_entry->callback_arg, 2964 AC_PATH_REGISTERED, 2965 &path, &cpi); 2966 xpt_release_path(&path); 2967 2968 return(1); 2969} 2970 2971void 2972xpt_action(union ccb *start_ccb) 2973{ 2974 int iopl; 2975 2976 GIANT_REQUIRED; 2977 2978 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n")); 2979 2980 start_ccb->ccb_h.status = CAM_REQ_INPROG; 2981 2982 iopl = splsoftcam(); 2983 switch (start_ccb->ccb_h.func_code) { 2984 case XPT_SCSI_IO: 2985 { 2986#ifdef CAM_NEW_TRAN_CODE 2987 struct cam_ed *device; 2988#endif /* CAM_NEW_TRAN_CODE */ 2989#ifdef CAMDEBUG 2990 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1]; 2991 struct cam_path *path; 2992 2993 path = start_ccb->ccb_h.path; 2994#endif 2995 2996 /* 2997 * For the sake of compatibility with SCSI-1 2998 * devices that may not understand the identify 2999 * message, we include lun information in the 3000 * second byte of all commands. SCSI-1 specifies 3001 * that luns are a 3 bit value and reserves only 3 3002 * bits for lun information in the CDB. Later 3003 * revisions of the SCSI spec allow for more than 8 3004 * luns, but have deprecated lun information in the 3005 * CDB. So, if the lun won't fit, we must omit. 3006 * 3007 * Also be aware that during initial probing for devices, 3008 * the inquiry information is unknown but initialized to 0. 3009 * This means that this code will be exercised while probing 3010 * devices with an ANSI revision greater than 2. 
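 *
 * Worked example (illustrative): for target_lun 2 on such a device,
 * byte 1 of the CDB gets 2 << 5 = 0x40 OR'd into its top three bits,
 * leaving the low five bits of that byte untouched.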
3011 */ 3012#ifdef CAM_NEW_TRAN_CODE 3013 device = start_ccb->ccb_h.path->device; 3014 if (device->protocol_version <= SCSI_REV_2 3015#else /* CAM_NEW_TRAN_CODE */ 3016 if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2 3017#endif /* CAM_NEW_TRAN_CODE */ 3018 && start_ccb->ccb_h.target_lun < 8 3019 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) { 3020 3021 start_ccb->csio.cdb_io.cdb_bytes[1] |= 3022 start_ccb->ccb_h.target_lun << 5; 3023 } 3024 start_ccb->csio.scsi_status = SCSI_STATUS_OK; 3025 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n", 3026 scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0], 3027 &path->device->inq_data), 3028 scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes, 3029 cdb_str, sizeof(cdb_str)))); 3030 } 3031 /* FALLTHROUGH */ 3032 case XPT_TARGET_IO: 3033 case XPT_CONT_TARGET_IO: 3034 start_ccb->csio.sense_resid = 0; 3035 start_ccb->csio.resid = 0; 3036 /* FALLTHROUGH */ 3037 case XPT_RESET_DEV: 3038 case XPT_ENG_EXEC: 3039 { 3040 struct cam_path *path; 3041 int s; 3042 int runq; 3043 3044 path = start_ccb->ccb_h.path; 3045 s = splsoftcam(); 3046 3047 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb); 3048 if (path->device->qfrozen_cnt == 0) 3049 runq = xpt_schedule_dev_sendq(path->bus, path->device); 3050 else 3051 runq = 0; 3052 splx(s); 3053 if (runq != 0) 3054 xpt_run_dev_sendq(path->bus); 3055 break; 3056 } 3057 case XPT_SET_TRAN_SETTINGS: 3058 { 3059 xpt_set_transfer_settings(&start_ccb->cts, 3060 start_ccb->ccb_h.path->device, 3061 /*async_update*/FALSE); 3062 break; 3063 } 3064 case XPT_CALC_GEOMETRY: 3065 { 3066 struct cam_sim *sim; 3067 3068 /* Filter out garbage */ 3069 if (start_ccb->ccg.block_size == 0 3070 || start_ccb->ccg.volume_size == 0) { 3071 start_ccb->ccg.cylinders = 0; 3072 start_ccb->ccg.heads = 0; 3073 start_ccb->ccg.secs_per_track = 0; 3074 start_ccb->ccb_h.status = CAM_REQ_CMP; 3075 break; 3076 } 3077#ifdef PC98 3078 /* 3079 * In a PC-98 system, geometry translation depends on 3080 * the "real" device geometry obtained from mode page 4. 3081 * SCSI geometry translation is performed in the 3082 * initialization routine of the SCSI BIOS and the result 3083 * stored in host memory. If the translation is available 3084 * in host memory, use it. If not, rely on the default 3085 * translation the device driver performs. 3086 */ 3087 if (scsi_da_bios_params(&start_ccb->ccg) != 0) { 3088 start_ccb->ccb_h.status = CAM_REQ_CMP; 3089 break; 3090 } 3091#endif 3092 sim = start_ccb->ccb_h.path->bus->sim; 3093 (*(sim->sim_action))(sim, start_ccb); 3094 break; 3095 } 3096 case XPT_ABORT: 3097 { 3098 union ccb* abort_ccb; 3099 int s; 3100 3101 abort_ccb = start_ccb->cab.abort_ccb; 3102 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) { 3103 3104 if (abort_ccb->ccb_h.pinfo.index >= 0) { 3105 struct cam_ccbq *ccbq; 3106 3107 ccbq = &abort_ccb->ccb_h.path->device->ccbq; 3108 cam_ccbq_remove_ccb(ccbq, abort_ccb); 3109 abort_ccb->ccb_h.status = 3110 CAM_REQ_ABORTED|CAM_DEV_QFRZN; 3111 xpt_freeze_devq(abort_ccb->ccb_h.path, 1); 3112 s = splcam(); 3113 xpt_done(abort_ccb); 3114 splx(s); 3115 start_ccb->ccb_h.status = CAM_REQ_CMP; 3116 break; 3117 } 3118 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX 3119 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) { 3120 /* 3121 * We've caught this ccb en route to 3122 * the SIM. Flag it for abort and the 3123 * SIM will do so just before starting 3124 * real work on the CCB. 
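 * As in the queued case above, CAM_DEV_QFRZN is
 * set alongside CAM_REQ_ABORTED because the device
 * queue is frozen here; the owner of the aborted
 * ccb is expected to release that freeze.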
3125 */ 3126 abort_ccb->ccb_h.status = 3127 CAM_REQ_ABORTED|CAM_DEV_QFRZN; 3128 xpt_freeze_devq(abort_ccb->ccb_h.path, 1); 3129 start_ccb->ccb_h.status = CAM_REQ_CMP; 3130 break; 3131 } 3132 } 3133 if (XPT_FC_IS_QUEUED(abort_ccb) 3134 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) { 3135 /* 3136 * It's already completed but waiting 3137 * for our SWI to get to it. 3138 */ 3139 start_ccb->ccb_h.status = CAM_UA_ABORT; 3140 break; 3141 } 3142 /* 3143 * If we weren't able to take care of the abort request 3144 * in the XPT, pass the request down to the SIM for processing. 3145 */ 3146 } 3147 /* FALLTHROUGH */ 3148 case XPT_ACCEPT_TARGET_IO: 3149 case XPT_EN_LUN: 3150 case XPT_IMMED_NOTIFY: 3151 case XPT_NOTIFY_ACK: 3152 case XPT_GET_TRAN_SETTINGS: 3153 case XPT_RESET_BUS: 3154 { 3155 struct cam_sim *sim; 3156 3157 sim = start_ccb->ccb_h.path->bus->sim; 3158 (*(sim->sim_action))(sim, start_ccb); 3159 break; 3160 } 3161 case XPT_PATH_INQ: 3162 { 3163 struct cam_sim *sim; 3164 3165 sim = start_ccb->ccb_h.path->bus->sim; 3166 (*(sim->sim_action))(sim, start_ccb); 3167 break; 3168 } 3169 case XPT_PATH_STATS: 3170 start_ccb->cpis.last_reset = 3171 start_ccb->ccb_h.path->bus->last_reset; 3172 start_ccb->ccb_h.status = CAM_REQ_CMP; 3173 break; 3174 case XPT_GDEV_TYPE: 3175 { 3176 struct cam_ed *dev; 3177 int s; 3178 3179 dev = start_ccb->ccb_h.path->device; 3180 s = splcam(); 3181 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 3182 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 3183 } else { 3184 struct ccb_getdev *cgd; 3185 struct cam_eb *bus; 3186 struct cam_et *tar; 3187 3188 cgd = &start_ccb->cgd; 3189 bus = cgd->ccb_h.path->bus; 3190 tar = cgd->ccb_h.path->target; 3191 cgd->inq_data = dev->inq_data; 3192 cgd->ccb_h.status = CAM_REQ_CMP; 3193 cgd->serial_num_len = dev->serial_num_len; 3194 if ((dev->serial_num_len > 0) 3195 && (dev->serial_num != NULL)) 3196 bcopy(dev->serial_num, cgd->serial_num, 3197 dev->serial_num_len); 3198 } 3199 splx(s); 3200 break; 3201 } 3202 case XPT_GDEV_STATS: 3203 { 3204 struct cam_ed *dev; 3205 int s; 3206 3207 dev = start_ccb->ccb_h.path->device; 3208 s = splcam(); 3209 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 3210 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 3211 } else { 3212 struct ccb_getdevstats *cgds; 3213 struct cam_eb *bus; 3214 struct cam_et *tar; 3215 3216 cgds = &start_ccb->cgds; 3217 bus = cgds->ccb_h.path->bus; 3218 tar = cgds->ccb_h.path->target; 3219 cgds->dev_openings = dev->ccbq.dev_openings; 3220 cgds->dev_active = dev->ccbq.dev_active; 3221 cgds->devq_openings = dev->ccbq.devq_openings; 3222 cgds->devq_queued = dev->ccbq.queue.entries; 3223 cgds->held = dev->ccbq.held; 3224 cgds->last_reset = tar->last_reset; 3225 cgds->maxtags = dev->quirk->maxtags; 3226 cgds->mintags = dev->quirk->mintags; 3227 if (timevalcmp(&tar->last_reset, &bus->last_reset, <)) 3228 cgds->last_reset = bus->last_reset; 3229 cgds->ccb_h.status = CAM_REQ_CMP; 3230 } 3231 splx(s); 3232 break; 3233 } 3234 case XPT_GDEVLIST: 3235 { 3236 struct cam_periph *nperiph; 3237 struct periph_list *periph_head; 3238 struct ccb_getdevlist *cgdl; 3239 u_int i; 3240 int s; 3241 struct cam_ed *device; 3242 int found; 3243 3244 3245 found = 0; 3246 3247 /* 3248 * Don't want anyone mucking with our data. 3249 */ 3250 s = splcam(); 3251 device = start_ccb->ccb_h.path->device; 3252 periph_head = &device->periphs; 3253 cgdl = &start_ccb->cgdl; 3254 3255 /* 3256 * Check and see if the list has changed since the user 3257 * last requested a list member. 
If so, tell them that the 3258 * list has changed, and therefore they need to start over 3259 * from the beginning. 3260 */ 3261 if ((cgdl->index != 0) && 3262 (cgdl->generation != device->generation)) { 3263 cgdl->status = CAM_GDEVLIST_LIST_CHANGED; 3264 splx(s); 3265 break; 3266 } 3267 3268 /* 3269 * Traverse the list of peripherals and attempt to find 3270 * the requested peripheral. 3271 */ 3272 for (nperiph = SLIST_FIRST(periph_head), i = 0; 3273 (nperiph != NULL) && (i <= cgdl->index); 3274 nperiph = SLIST_NEXT(nperiph, periph_links), i++) { 3275 if (i == cgdl->index) { 3276 strncpy(cgdl->periph_name, 3277 nperiph->periph_name, 3278 DEV_IDLEN); 3279 cgdl->unit_number = nperiph->unit_number; 3280 found = 1; 3281 } 3282 } 3283 if (found == 0) { 3284 cgdl->status = CAM_GDEVLIST_ERROR; 3285 splx(s); 3286 break; 3287 } 3288 3289 if (nperiph == NULL) 3290 cgdl->status = CAM_GDEVLIST_LAST_DEVICE; 3291 else 3292 cgdl->status = CAM_GDEVLIST_MORE_DEVS; 3293 3294 cgdl->index++; 3295 cgdl->generation = device->generation; 3296 3297 splx(s); 3298 cgdl->ccb_h.status = CAM_REQ_CMP; 3299 break; 3300 } 3301 case XPT_DEV_MATCH: 3302 { 3303 int s; 3304 dev_pos_type position_type; 3305 struct ccb_dev_match *cdm; 3306 3307 cdm = &start_ccb->cdm; 3308 3309 /* 3310 * Prevent EDT changes while we traverse it. 3311 */ 3312 s = splcam(); 3313 /* 3314 * There are two ways of getting at information in the EDT. 3315 * The first way is via the primary EDT tree. It starts 3316 * with a list of busses, then a list of targets on a bus, 3317 * then devices/luns on a target, and then peripherals on a 3318 * device/lun. The "other" way is by the peripheral driver 3319 * lists. The peripheral driver lists are organized by 3320 * peripheral driver. (obviously) So it makes sense to 3321 * use the peripheral driver list if the user is looking 3322 * for something like "da1", or all "da" devices. If the 3323 * user is looking for something on a particular bus/target 3324 * or lun, it's generally better to go through the EDT tree. 3325 */ 3326 3327 if (cdm->pos.position_type != CAM_DEV_POS_NONE) 3328 position_type = cdm->pos.position_type; 3329 else { 3330 u_int i; 3331 3332 position_type = CAM_DEV_POS_NONE; 3333 3334 for (i = 0; i < cdm->num_patterns; i++) { 3335 if ((cdm->patterns[i].type == DEV_MATCH_BUS) 3336 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){ 3337 position_type = CAM_DEV_POS_EDT; 3338 break; 3339 } 3340 } 3341 3342 if (cdm->num_patterns == 0) 3343 position_type = CAM_DEV_POS_EDT; 3344 else if (position_type == CAM_DEV_POS_NONE) 3345 position_type = CAM_DEV_POS_PDRV; 3346 } 3347 3348 switch(position_type & CAM_DEV_POS_TYPEMASK) { 3349 case CAM_DEV_POS_EDT: 3350 xptedtmatch(cdm); 3351 break; 3352 case CAM_DEV_POS_PDRV: 3353 xptperiphlistmatch(cdm); 3354 break; 3355 default: 3356 cdm->status = CAM_DEV_MATCH_ERROR; 3357 break; 3358 } 3359 3360 splx(s); 3361 3362 if (cdm->status == CAM_DEV_MATCH_ERROR) 3363 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR; 3364 else 3365 start_ccb->ccb_h.status = CAM_REQ_CMP; 3366 3367 break; 3368 } 3369 case XPT_SASYNC_CB: 3370 { 3371 struct ccb_setasync *csa; 3372 struct async_node *cur_entry; 3373 struct async_list *async_head; 3374 u_int32_t added; 3375 int s; 3376 3377 csa = &start_ccb->csa; 3378 added = csa->event_enable; 3379 async_head = &csa->ccb_h.path->device->asyncs; 3380 3381 /* 3382 * If there is already an entry for us, simply 3383 * update it. 
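 * An "entry for us" is one whose (callback,
 * callback_arg) pair matches the request; the walk
 * below uses that pair as the registration identity,
 * so reissuing XPT_SASYNC_CB updates the existing
 * node rather than adding a duplicate.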
3384 */ 3385 s = splcam(); 3386 cur_entry = SLIST_FIRST(async_head); 3387 while (cur_entry != NULL) { 3388 if ((cur_entry->callback_arg == csa->callback_arg) 3389 && (cur_entry->callback == csa->callback)) 3390 break; 3391 cur_entry = SLIST_NEXT(cur_entry, links); 3392 } 3393 3394 if (cur_entry != NULL) { 3395 /* 3396 * If the request has no flags set, 3397 * remove the entry. 3398 */ 3399 added &= ~cur_entry->event_enable; 3400 if (csa->event_enable == 0) { 3401 SLIST_REMOVE(async_head, cur_entry, 3402 async_node, links); 3403 csa->ccb_h.path->device->refcount--;
dev_allocq_is_runnable(struct cam_devq *devq) 858 859static __inline int 860xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev) 861{ 862 int retval; 863 864 if (dev->ccbq.devq_openings > 0) { 865 if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) { 866 cam_ccbq_resize(&dev->ccbq, 867 dev->ccbq.dev_openings 868 + dev->ccbq.dev_active); 869 dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED; 870 } 871 /* 872 * The priority of a device waiting for CCB resources 873 * is that of the highest priority peripheral driver 874 * enqueued. 875 */ 876 retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue, 877 &dev->alloc_ccb_entry.pinfo, 878 CAMQ_GET_HEAD(&dev->drvq)->priority); 879 } else { 880 retval = 0; 881 } 882 883 return (retval); 884} 885 886static __inline int 887xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev) 888{ 889 int retval; 890 891 if (dev->ccbq.dev_openings > 0) { 892 /* 893 * The priority of a device waiting for controller 894 * resources is that of the highest priority CCB 895 * enqueued. 896 */ 897 retval = 898 xpt_schedule_dev(&bus->sim->devq->send_queue, 899 &dev->send_ccb_entry.pinfo, 900 CAMQ_GET_HEAD(&dev->ccbq.queue)->priority); 901 } else { 902 retval = 0; 903 } 904 return (retval); 905} 906 907static __inline int 908periph_is_queued(struct cam_periph *periph) 909{ 910 return (periph->pinfo.index != CAM_UNQUEUED_INDEX); 911} 912 913static __inline int 914device_is_alloc_queued(struct cam_ed *device) 915{ 916 return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX); 917} 918 919static __inline int 920device_is_send_queued(struct cam_ed *device) 921{ 922 return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX); 923} 924 925static __inline int 926dev_allocq_is_runnable(struct cam_devq *devq) 927{ 928 /* 929 * Have work to do. 930 * Have space to do more work. 931 * Allowed to do work. 932 */ 933 return ((devq->alloc_queue.qfrozen_cnt == 0) 934 && (devq->alloc_queue.entries > 0) 935 && (devq->alloc_openings > 0)); 936} 937 938static void 939xpt_periph_init() 940{ 941 make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0"); 942} 943 944static void 945probe_periph_init() 946{ 947} 948 949 950static void 951xptdone(struct cam_periph *periph, union ccb *done_ccb) 952{ 953 /* Caller will release the CCB */ 954 wakeup(&done_ccb->ccb_h.cbfcnp); 955} 956 957static int 958xptopen(struct cdev *dev, int flags, int fmt, struct thread *td) 959{ 960 int unit; 961 962 unit = minor(dev) & 0xff; 963 964 /* 965 * Only allow read-write access. 966 */ 967 if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0)) 968 return(EPERM); 969 970 /* 971 * We don't allow nonblocking access. 972 */ 973 if ((flags & O_NONBLOCK) != 0) { 974 printf("xpt%d: can't do nonblocking access\n", unit); 975 return(ENODEV); 976 } 977 978 /* 979 * We only have one transport layer right now. If someone accesses 980 * us via something other than minor number 0, point out their 981 * mistake. 982 */ 983 if (unit != 0) { 984 printf("xptopen: got invalid xpt unit %d\n", unit); 985 return(ENXIO); 986 } 987 988 /* Mark ourselves open */ 989 xsoftc.flags |= XPT_FLAG_OPEN; 990 991 return(0); 992} 993 994static int 995xptclose(struct cdev *dev, int flag, int fmt, struct thread *td) 996{ 997 int unit; 998 999 unit = minor(dev) & 0xff; 1000 1001 /* 1002 * We only have one transport layer right now. If someone accesses 1003 * us via something other than minor number 0, point out their 1004 * mistake.
1005 */ 1006 if (unit != 0) { 1007 printf("xptclose: got invalid xpt unit %d\n", unit); 1008 return(ENXIO); 1009 } 1010 1011 /* Mark ourselves closed */ 1012 xsoftc.flags &= ~XPT_FLAG_OPEN; 1013 1014 return(0); 1015} 1016 1017static int 1018xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) 1019{ 1020 int unit, error; 1021 1022 error = 0; 1023 unit = minor(dev) & 0xff; 1024 1025 /* 1026 * We only have one transport layer right now. If someone accesses 1027 * us via something other than minor number 0, point out their 1028 * mistake. 1029 */ 1030 if (unit != 0) { 1031 printf("xptioctl: got invalid xpt unit %d\n", unit); 1032 return(ENXIO); 1033 } 1034 1035 switch(cmd) { 1036 /* 1037 * For the transport layer CAMIOCOMMAND ioctl, we really only want 1038 * to accept CCB types that don't quite make sense to send through a 1039 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated 1040 * in the CAM spec. 1041 */ 1042 case CAMIOCOMMAND: { 1043 union ccb *ccb; 1044 union ccb *inccb; 1045 1046 inccb = (union ccb *)addr; 1047 1048 switch(inccb->ccb_h.func_code) { 1049 case XPT_SCAN_BUS: 1050 case XPT_RESET_BUS: 1051 if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD) 1052 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) { 1053 error = EINVAL; 1054 break; 1055 } 1056 /* FALLTHROUGH */ 1057 case XPT_PATH_INQ: 1058 case XPT_ENG_INQ: 1059 case XPT_SCAN_LUN: 1060 1061 ccb = xpt_alloc_ccb(); 1062 1063 /* 1064 * Create a path using the bus, target, and lun the 1065 * user passed in. 1066 */ 1067 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, 1068 inccb->ccb_h.path_id, 1069 inccb->ccb_h.target_id, 1070 inccb->ccb_h.target_lun) != 1071 CAM_REQ_CMP){ 1072 error = EINVAL; 1073 xpt_free_ccb(ccb); 1074 break; 1075 } 1076 /* Ensure all of our fields are correct */ 1077 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 1078 inccb->ccb_h.pinfo.priority); 1079 xpt_merge_ccb(ccb, inccb); 1080 ccb->ccb_h.cbfcnp = xptdone; 1081 cam_periph_runccb(ccb, NULL, 0, 0, NULL); 1082 bcopy(ccb, inccb, sizeof(union ccb)); 1083 xpt_free_path(ccb->ccb_h.path); 1084 xpt_free_ccb(ccb); 1085 break; 1086 1087 case XPT_DEBUG: { 1088 union ccb ccb; 1089 1090 /* 1091 * This is an immediate CCB, so it's okay to 1092 * allocate it on the stack. 1093 */ 1094 1095 /* 1096 * Create a path using the bus, target, and lun the 1097 * user passed in. 1098 */ 1099 if (xpt_create_path(&ccb.ccb_h.path, xpt_periph, 1100 inccb->ccb_h.path_id, 1101 inccb->ccb_h.target_id, 1102 inccb->ccb_h.target_lun) != 1103 CAM_REQ_CMP){ 1104 error = EINVAL; 1105 break; 1106 } 1107 /* Ensure all of our fields are correct */ 1108 xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path, 1109 inccb->ccb_h.pinfo.priority); 1110 xpt_merge_ccb(&ccb, inccb); 1111 ccb.ccb_h.cbfcnp = xptdone; 1112 xpt_action(&ccb); 1113 bcopy(&ccb, inccb, sizeof(union ccb)); 1114 xpt_free_path(ccb.ccb_h.path); 1115 break; 1116 1117 } 1118 case XPT_DEV_MATCH: { 1119 struct cam_periph_map_info mapinfo; 1120 struct cam_path *old_path; 1121 1122 /* 1123 * We can't deal with physical addresses for this 1124 * type of transaction. 1125 */ 1126 if (inccb->ccb_h.flags & CAM_DATA_PHYS) { 1127 error = EINVAL; 1128 break; 1129 } 1130 1131 /* 1132 * Save this in case the caller had it set to 1133 * something in particular. 1134 */ 1135 old_path = inccb->ccb_h.path; 1136 1137 /* 1138 * We really don't need a path for the matching 1139 * code. The path is needed because of the 1140 * debugging statements in xpt_action(). They 1141 * assume that the CCB has a valid path.
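 * To keep them happy, we borrow the xpt peripheral's own path for
 * the duration of the call and restore the caller's pointer below
 * once we are done.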
1142 */ 1143 inccb->ccb_h.path = xpt_periph->path; 1144 1145 bzero(&mapinfo, sizeof(mapinfo)); 1146 1147 /* 1148 * Map the pattern and match buffers into kernel 1149 * virtual address space. 1150 */ 1151 error = cam_periph_mapmem(inccb, &mapinfo); 1152 1153 if (error) { 1154 inccb->ccb_h.path = old_path; 1155 break; 1156 } 1157 1158 /* 1159 * This is an immediate CCB, we can send it on directly. 1160 */ 1161 xpt_action(inccb); 1162 1163 /* 1164 * Map the buffers back into user space. 1165 */ 1166 cam_periph_unmapmem(inccb, &mapinfo); 1167 1168 inccb->ccb_h.path = old_path; 1169 1170 error = 0; 1171 break; 1172 } 1173 default: 1174 error = ENOTSUP; 1175 break; 1176 } 1177 break; 1178 } 1179 /* 1180 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input, 1181 * with the peripheral driver name and unit number filled in. The other 1182 * fields don't really matter as input. The passthrough driver name 1183 * ("pass") and unit number are passed back in the ccb. The current 1184 * device generation number, the index into the device peripheral 1185 * driver list, and the status are also passed back. Note that 1186 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb, 1187 * we never return a status of CAM_GDEVLIST_LIST_CHANGED. It is 1188 * (or rather should be) impossible for the device peripheral driver 1189 * list to change since we look at the whole thing in one pass, and 1190 * we do it with splcam protection. 1191 * 1192 */ 1193 case CAMGETPASSTHRU: { 1194 union ccb *ccb; 1195 struct cam_periph *periph; 1196 struct periph_driver **p_drv; 1197 char *name; 1198 u_int unit; 1199 u_int cur_generation; 1200 int base_periph_found; 1201 int splbreaknum; 1202 int s; 1203 1204 ccb = (union ccb *)addr; 1205 unit = ccb->cgdl.unit_number; 1206 name = ccb->cgdl.periph_name; 1207 /* 1208 * Every 100 devices, we want to drop our spl protection to 1209 * give the software interrupt handler a chance to run. 1210 * Most systems won't run into this check, but this should 1211 * avoid starvation in the software interrupt handler in 1212 * large systems. 1213 */ 1214 splbreaknum = 100; 1215 1216 ccb = (union ccb *)addr; 1217 1218 base_periph_found = 0; 1219 1220 /* 1221 * Sanity check -- make sure we don't get a null peripheral 1222 * driver name. 1223 */ 1224 if (*ccb->cgdl.periph_name == '\0') { 1225 error = EINVAL; 1226 break; 1227 } 1228 1229 /* Keep the list from changing while we traverse it */ 1230 s = splcam(); 1231ptstartover: 1232 cur_generation = xsoftc.generation; 1233 1234 /* first find our driver in the list of drivers */ 1235 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) 1236 if (strcmp((*p_drv)->driver_name, name) == 0) 1237 break; 1238 1239 if (*p_drv == NULL) { 1240 splx(s); 1241 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1242 ccb->cgdl.status = CAM_GDEVLIST_ERROR; 1243 *ccb->cgdl.periph_name = '\0'; 1244 ccb->cgdl.unit_number = 0; 1245 error = ENOENT; 1246 break; 1247 } 1248 1249 /* 1250 * Run through every peripheral instance of this driver 1251 * and check to see whether it matches the unit passed 1252 * in by the user. If it does, get out of the loops and 1253 * find the passthrough driver associated with that 1254 * peripheral driver.
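 * For example, a caller asking about ("da", 4) walks the da driver's
 * unit list until it finds da4 and then, below, scans the peripheral
 * list of da4's device for an instance named "pass".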
1255 */ 1256 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL; 1257 periph = TAILQ_NEXT(periph, unit_links)) { 1258 1259 if (periph->unit_number == unit) { 1260 break; 1261 } else if (--splbreaknum == 0) { 1262 splx(s); 1263 s = splcam(); 1264 splbreaknum = 100; 1265 if (cur_generation != xsoftc.generation) 1266 goto ptstartover; 1267 } 1268 } 1269 /* 1270 * If we found the peripheral driver that the user passed 1271 * in, go through all of the peripheral drivers for that 1272 * particular device and look for a passthrough driver. 1273 */ 1274 if (periph != NULL) { 1275 struct cam_ed *device; 1276 int i; 1277 1278 base_periph_found = 1; 1279 device = periph->path->device; 1280 for (i = 0, periph = SLIST_FIRST(&device->periphs); 1281 periph != NULL; 1282 periph = SLIST_NEXT(periph, periph_links), i++) { 1283 /* 1284 * Check to see whether we have a 1285 * passthrough device or not. 1286 */ 1287 if (strcmp(periph->periph_name, "pass") == 0) { 1288 /* 1289 * Fill in the getdevlist fields. 1290 */ 1291 strcpy(ccb->cgdl.periph_name, 1292 periph->periph_name); 1293 ccb->cgdl.unit_number = 1294 periph->unit_number; 1295 if (SLIST_NEXT(periph, periph_links)) 1296 ccb->cgdl.status = 1297 CAM_GDEVLIST_MORE_DEVS; 1298 else 1299 ccb->cgdl.status = 1300 CAM_GDEVLIST_LAST_DEVICE; 1301 ccb->cgdl.generation = 1302 device->generation; 1303 ccb->cgdl.index = i; 1304 /* 1305 * Fill in some CCB header fields 1306 * that the user may want. 1307 */ 1308 ccb->ccb_h.path_id = 1309 periph->path->bus->path_id; 1310 ccb->ccb_h.target_id = 1311 periph->path->target->target_id; 1312 ccb->ccb_h.target_lun = 1313 periph->path->device->lun_id; 1314 ccb->ccb_h.status = CAM_REQ_CMP; 1315 break; 1316 } 1317 } 1318 } 1319 1320 /* 1321 * If the periph is null here, one of two things has 1322 * happened. The first possibility is that we couldn't 1323 * find the unit number of the particular peripheral driver 1324 * that the user is asking about. e.g. the user asks for 1325 * the passthrough driver for "da11". We find the list of 1326 * "da" peripherals all right, but there is no unit 11. 1327 * The other possibility is that we went through the list 1328 * of peripheral drivers attached to the device structure, 1329 * but didn't find one with the name "pass". Either way, 1330 * we return ENOENT, since we couldn't find something. 1331 */ 1332 if (periph == NULL) { 1333 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1334 ccb->cgdl.status = CAM_GDEVLIST_ERROR; 1335 *ccb->cgdl.periph_name = '\0'; 1336 ccb->cgdl.unit_number = 0; 1337 error = ENOENT; 1338 /* 1339 * It is unfortunate that this is even necessary, 1340 * but there are many, many clueless users out there. 1341 * If this is true, the user is looking for the 1342 * passthrough driver, but doesn't have one in his 1343 * kernel. 
1344 */ 1345 if (base_periph_found == 1) { 1346 printf("xptioctl: pass driver is not in the " 1347 "kernel\n"); 1348 printf("xptioctl: put \"device pass0\" in " 1349 "your kernel config file\n"); 1350 } 1351 } 1352 splx(s); 1353 break; 1354 } 1355 default: 1356 error = ENOTTY; 1357 break; 1358 } 1359 1360 return(error); 1361} 1362 1363static int 1364cam_module_event_handler(module_t mod, int what, void *arg) 1365{ 1366 if (what == MOD_LOAD) { 1367 xpt_init(NULL); 1368 } else if (what == MOD_UNLOAD) { 1369 return EBUSY; 1370 } else { 1371 return EOPNOTSUPP; 1372 } 1373 1374 return 0; 1375} 1376 1377/* Functions accessed by the peripheral drivers */ 1378static void 1379xpt_init(dummy) 1380 void *dummy; 1381{ 1382 struct cam_sim *xpt_sim; 1383 struct cam_path *path; 1384 struct cam_devq *devq; 1385 cam_status status; 1386 1387 TAILQ_INIT(&xpt_busses); 1388 TAILQ_INIT(&cam_bioq); 1389 SLIST_INIT(&ccb_freeq); 1390 STAILQ_INIT(&highpowerq); 1391 1392 mtx_init(&cam_bioq_lock, "CAM BIOQ lock", NULL, MTX_DEF); 1393 1394 /* 1395 * The xpt layer is, itself, the equivalent of a SIM. 1396 * Allow 16 ccbs in the ccb pool for it. This should 1397 * give decent parallelism when we probe busses and 1398 * perform other XPT functions. 1399 */ 1400 devq = cam_simq_alloc(16); 1401 xpt_sim = cam_sim_alloc(xptaction, 1402 xptpoll, 1403 "xpt", 1404 /*softc*/NULL, 1405 /*unit*/0, 1406 /*max_dev_transactions*/0, 1407 /*max_tagged_dev_transactions*/0, 1408 devq); 1409 xpt_max_ccbs = 16; 1410 1411 xpt_bus_register(xpt_sim, /*bus #*/0); 1412 1413 /* 1414 * Looking at the XPT from the SIM layer, the XPT is 1415 * the equivalent of a peripheral driver. Allocate 1416 * a peripheral driver entry for us. 1417 */ 1418 if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID, 1419 CAM_TARGET_WILDCARD, 1420 CAM_LUN_WILDCARD)) != CAM_REQ_CMP) { 1421 printf("xpt_init: xpt_create_path failed with status %#x," 1422 " failing attach\n", status); 1423 return; 1424 } 1425 1426 cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO, 1427 path, NULL, 0, NULL); 1428 xpt_free_path(path); 1429 1430 xpt_sim->softc = xpt_periph; 1431 1432 /* 1433 * Register a callback for when interrupts are enabled.
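 * Once interrupts are available, xpt_config() runs and kicks off the
 * initial probe of all registered busses.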
1434 */ 1435 xpt_config_hook = 1436 (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook), 1437 M_TEMP, M_NOWAIT | M_ZERO); 1438 if (xpt_config_hook == NULL) { 1439 printf("xpt_init: Cannot malloc config hook " 1440 "- failing attach\n"); 1441 return; 1442 } 1443 1444 xpt_config_hook->ich_func = xpt_config; 1445 if (config_intrhook_establish(xpt_config_hook) != 0) { 1446 free (xpt_config_hook, M_TEMP); 1447 printf("xpt_init: config_intrhook_establish failed " 1448 "- failing attach\n"); 1449 } 1450 1451 /* Install our software interrupt handlers */ 1452 swi_add(NULL, "cambio", camisr, &cam_bioq, SWI_CAMBIO, 0, &cambio_ih); 1453} 1454 1455static cam_status 1456xptregister(struct cam_periph *periph, void *arg) 1457{ 1458 if (periph == NULL) { 1459 printf("xptregister: periph was NULL!!\n"); 1460 return(CAM_REQ_CMP_ERR); 1461 } 1462 1463 periph->softc = NULL; 1464 1465 xpt_periph = periph; 1466 1467 return(CAM_REQ_CMP); 1468} 1469 1470int32_t 1471xpt_add_periph(struct cam_periph *periph) 1472{ 1473 struct cam_ed *device; 1474 int32_t status; 1475 struct periph_list *periph_head; 1476 1477 GIANT_REQUIRED; 1478 1479 device = periph->path->device; 1480 1481 periph_head = &device->periphs; 1482 1483 status = CAM_REQ_CMP; 1484 1485 if (device != NULL) { 1486 int s; 1487 1488 /* 1489 * Make room for this peripheral 1490 * so it will fit in the queue 1491 * when it's scheduled to run 1492 */ 1493 s = splsoftcam(); 1494 status = camq_resize(&device->drvq, 1495 device->drvq.array_size + 1); 1496 1497 device->generation++; 1498 1499 SLIST_INSERT_HEAD(periph_head, periph, periph_links); 1500 1501 splx(s); 1502 } 1503 1504 xsoftc.generation++; 1505 1506 return (status); 1507} 1508 1509void 1510xpt_remove_periph(struct cam_periph *periph) 1511{ 1512 struct cam_ed *device; 1513 1514 GIANT_REQUIRED; 1515 1516 device = periph->path->device; 1517 1518 if (device != NULL) { 1519 int s; 1520 struct periph_list *periph_head; 1521 1522 periph_head = &device->periphs; 1523 1524 /* Release the slot for this peripheral */ 1525 s = splsoftcam(); 1526 camq_resize(&device->drvq, device->drvq.array_size - 1); 1527 1528 device->generation++; 1529 1530 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links); 1531 1532 splx(s); 1533 } 1534 1535 xsoftc.generation++; 1536 1537} 1538 1539#ifdef CAM_NEW_TRAN_CODE 1540 1541void 1542xpt_announce_periph(struct cam_periph *periph, char *announce_string) 1543{ 1544 struct ccb_pathinq cpi; 1545 struct ccb_trans_settings cts; 1546 struct cam_path *path; 1547 u_int speed; 1548 u_int freq; 1549 u_int mb; 1550 int s; 1551 1552 GIANT_REQUIRED; 1553 1554 path = periph->path; 1555 /* 1556 * To ensure that this is printed in one piece, 1557 * mask out CAM interrupts. 
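 * With illustrative values, the resulting announcement looks like:
 *
 *	da0 at ahc0 bus 0 target 0 lun 0
 *	da0: <QUANTUM XP39100W LXY4> Fixed Direct Access SCSI-2 device
 *	da0: 40.000MB/s transfers (20.000MHz, offset 15, 16bit)
 *	da0: Tagged Queueing Enabled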
1558 */ 1559 s = splsoftcam(); 1560 printf("%s%d at %s%d bus %d target %d lun %d\n", 1561 periph->periph_name, periph->unit_number, 1562 path->bus->sim->sim_name, 1563 path->bus->sim->unit_number, 1564 path->bus->sim->bus_id, 1565 path->target->target_id, 1566 path->device->lun_id); 1567 printf("%s%d: ", periph->periph_name, periph->unit_number); 1568 scsi_print_inquiry(&path->device->inq_data); 1569 if (bootverbose && path->device->serial_num_len > 0) { 1570 /* Don't wrap the screen - print only the first 60 chars */ 1571 printf("%s%d: Serial Number %.60s\n", periph->periph_name, 1572 periph->unit_number, path->device->serial_num); 1573 } 1574 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1); 1575 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; 1576 cts.type = CTS_TYPE_CURRENT_SETTINGS; 1577 xpt_action((union ccb*)&cts); 1578 1579 /* Ask the SIM for its base transfer speed */ 1580 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1); 1581 cpi.ccb_h.func_code = XPT_PATH_INQ; 1582 xpt_action((union ccb *)&cpi); 1583 1584 speed = cpi.base_transfer_speed; 1585 freq = 0; 1586 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) { 1587 struct ccb_trans_settings_spi *spi; 1588 1589 spi = &cts.xport_specific.spi; 1590 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0 1591 && spi->sync_offset != 0) { 1592 freq = scsi_calc_syncsrate(spi->sync_period); 1593 speed = freq; 1594 } 1595 1596 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) 1597 speed *= (0x01 << spi->bus_width); 1598 } 1599 1600 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) { 1601 struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc; 1602 if (fc->valid & CTS_FC_VALID_SPEED) { 1603 speed = fc->bitrate; 1604 } 1605 } 1606 1607 mb = speed / 1000; 1608 if (mb > 0) 1609 printf("%s%d: %d.%03dMB/s transfers", 1610 periph->periph_name, periph->unit_number, 1611 mb, speed % 1000); 1612 else 1613 printf("%s%d: %dKB/s transfers", periph->periph_name, 1614 periph->unit_number, speed); 1615 /* Report additional information about SPI connections */ 1616 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) { 1617 struct ccb_trans_settings_spi *spi; 1618 1619 spi = &cts.xport_specific.spi; 1620 if (freq != 0) { 1621 printf(" (%d.%03dMHz%s, offset %d", freq / 1000, 1622 freq % 1000, 1623 (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0 1624 ? " DT" : "", 1625 spi->sync_offset); 1626 } 1627 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0 1628 && spi->bus_width > 0) { 1629 if (freq != 0) { 1630 printf(", "); 1631 } else { 1632 printf(" ("); 1633 } 1634 printf("%dbit)", 8 * (0x01 << spi->bus_width)); 1635 } else if (freq != 0) { 1636 printf(")"); 1637 } 1638 } 1639 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) { 1640 struct ccb_trans_settings_fc *fc; 1641 1642 fc = &cts.xport_specific.fc; 1643 if (fc->valid & CTS_FC_VALID_WWNN) 1644 printf(" WWNN 0x%llx", (long long) fc->wwnn); 1645 if (fc->valid & CTS_FC_VALID_WWPN) 1646 printf(" WWPN 0x%llx", (long long) fc->wwpn); 1647 if (fc->valid & CTS_FC_VALID_PORT) 1648 printf(" PortID 0x%x", fc->port); 1649 } 1650 1651 if (path->device->inq_flags & SID_CmdQue 1652 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) { 1653 printf("\n%s%d: Tagged Queueing Enabled", 1654 periph->periph_name, periph->unit_number); 1655 } 1656 printf("\n"); 1657 1658 /* 1659 * We only want to print the caller's announce string if they've 1660 * passed one in.. 
1661 */ 1662 if (announce_string != NULL) 1663 printf("%s%d: %s\n", periph->periph_name, 1664 periph->unit_number, announce_string); 1665 splx(s); 1666} 1667#else /* CAM_NEW_TRAN_CODE */ 1668void 1669xpt_announce_periph(struct cam_periph *periph, char *announce_string) 1670{ 1671 int s; 1672 u_int mb; 1673 struct cam_path *path; 1674 struct ccb_trans_settings cts; 1675 1676 GIANT_REQUIRED; 1677 1678 path = periph->path; 1679 /* 1680 * To ensure that this is printed in one piece, 1681 * mask out CAM interrupts. 1682 */ 1683 s = splsoftcam(); 1684 printf("%s%d at %s%d bus %d target %d lun %d\n", 1685 periph->periph_name, periph->unit_number, 1686 path->bus->sim->sim_name, 1687 path->bus->sim->unit_number, 1688 path->bus->sim->bus_id, 1689 path->target->target_id, 1690 path->device->lun_id); 1691 printf("%s%d: ", periph->periph_name, periph->unit_number); 1692 scsi_print_inquiry(&path->device->inq_data); 1693 if ((bootverbose) 1694 && (path->device->serial_num_len > 0)) { 1695 /* Don't wrap the screen - print only the first 60 chars */ 1696 printf("%s%d: Serial Number %.60s\n", periph->periph_name, 1697 periph->unit_number, path->device->serial_num); 1698 } 1699 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1); 1700 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; 1701 cts.flags = CCB_TRANS_CURRENT_SETTINGS; 1702 xpt_action((union ccb*)&cts); 1703 if (cts.ccb_h.status == CAM_REQ_CMP) { 1704 u_int speed; 1705 u_int freq; 1706 1707 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0 1708 && cts.sync_offset != 0) { 1709 freq = scsi_calc_syncsrate(cts.sync_period); 1710 speed = freq; 1711 } else { 1712 struct ccb_pathinq cpi; 1713 1714 /* Ask the SIM for its base transfer speed */ 1715 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1); 1716 cpi.ccb_h.func_code = XPT_PATH_INQ; 1717 xpt_action((union ccb *)&cpi); 1718 1719 speed = cpi.base_transfer_speed; 1720 freq = 0; 1721 } 1722 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) 1723 speed *= (0x01 << cts.bus_width); 1724 mb = speed / 1000; 1725 if (mb > 0) 1726 printf("%s%d: %d.%03dMB/s transfers", 1727 periph->periph_name, periph->unit_number, 1728 mb, speed % 1000); 1729 else 1730 printf("%s%d: %dKB/s transfers", periph->periph_name, 1731 periph->unit_number, speed); 1732 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0 1733 && cts.sync_offset != 0) { 1734 printf(" (%d.%03dMHz, offset %d", freq / 1000, 1735 freq % 1000, cts.sync_offset); 1736 } 1737 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0 1738 && cts.bus_width > 0) { 1739 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0 1740 && cts.sync_offset != 0) { 1741 printf(", "); 1742 } else { 1743 printf(" ("); 1744 } 1745 printf("%dbit)", 8 * (0x01 << cts.bus_width)); 1746 } else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0 1747 && cts.sync_offset != 0) { 1748 printf(")"); 1749 } 1750 1751 if (path->device->inq_flags & SID_CmdQue 1752 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) { 1753 printf(", Tagged Queueing Enabled"); 1754 } 1755 1756 printf("\n"); 1757 } else if (path->device->inq_flags & SID_CmdQue 1758 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) { 1759 printf("%s%d: Tagged Queueing Enabled\n", 1760 periph->periph_name, periph->unit_number); 1761 } 1762 1763 /* 1764 * We only want to print the caller's announce string if they've 1765 * passed one in.. 
1766 */ 1767 if (announce_string != NULL) 1768 printf("%s%d: %s\n", periph->periph_name, 1769 periph->unit_number, announce_string); 1770 splx(s); 1771} 1772 1773#endif /* CAM_NEW_TRAN_CODE */ 1774 1775static dev_match_ret 1776xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns, 1777 struct cam_eb *bus) 1778{ 1779 dev_match_ret retval; 1780 int i; 1781 1782 retval = DM_RET_NONE; 1783 1784 /* 1785 * If we aren't given something to match against, that's an error. 1786 */ 1787 if (bus == NULL) 1788 return(DM_RET_ERROR); 1789 1790 /* 1791 * If there are no match entries, then this bus matches no 1792 * matter what. 1793 */ 1794 if ((patterns == NULL) || (num_patterns == 0)) 1795 return(DM_RET_DESCEND | DM_RET_COPY); 1796 1797 for (i = 0; i < num_patterns; i++) { 1798 struct bus_match_pattern *cur_pattern; 1799 1800 /* 1801 * If the pattern in question isn't for a bus node, we 1802 * aren't interested. However, we do indicate to the 1803 * calling routine that we should continue descending the 1804 * tree, since the user wants to match against lower-level 1805 * EDT elements. 1806 */ 1807 if (patterns[i].type != DEV_MATCH_BUS) { 1808 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) 1809 retval |= DM_RET_DESCEND; 1810 continue; 1811 } 1812 1813 cur_pattern = &patterns[i].pattern.bus_pattern; 1814 1815 /* 1816 * If they want to match any bus node, we give them any 1817 * bus node. 1818 */ 1819 if (cur_pattern->flags == BUS_MATCH_ANY) { 1820 /* set the copy flag */ 1821 retval |= DM_RET_COPY; 1822 1823 /* 1824 * If we've already decided on an action, go ahead 1825 * and return. 1826 */ 1827 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE) 1828 return(retval); 1829 } 1830 1831 /* 1832 * Not sure why someone would do this... 1833 */ 1834 if (cur_pattern->flags == BUS_MATCH_NONE) 1835 continue; 1836 1837 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0) 1838 && (cur_pattern->path_id != bus->path_id)) 1839 continue; 1840 1841 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0) 1842 && (cur_pattern->bus_id != bus->sim->bus_id)) 1843 continue; 1844 1845 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0) 1846 && (cur_pattern->unit_number != bus->sim->unit_number)) 1847 continue; 1848 1849 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0) 1850 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name, 1851 DEV_IDLEN) != 0)) 1852 continue; 1853 1854 /* 1855 * If we get to this point, the user definitely wants 1856 * information on this bus. So tell the caller to copy the 1857 * data out. 1858 */ 1859 retval |= DM_RET_COPY; 1860 1861 /* 1862 * If the return action has been set to descend, then we 1863 * know that we've already seen a non-bus matching 1864 * expression, therefore we need to further descend the tree. 1865 * This won't change by continuing around the loop, so we 1866 * go ahead and return. If we haven't seen a non-bus 1867 * matching expression, we keep going around the loop until 1868 * we exhaust the matching expressions. We'll set the stop 1869 * flag once we fall out of the loop. 1870 */ 1871 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) 1872 return(retval); 1873 } 1874 1875 /* 1876 * If the return action hasn't been set to descend yet, that means 1877 * we haven't seen anything other than bus matching patterns. So 1878 * tell the caller to stop descending the tree -- the user doesn't 1879 * want to match against lower level tree elements.
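 * For example, a pattern list holding a single BUS_MATCH_ANY bus
 * pattern yields DM_RET_COPY | DM_RET_STOP for every bus it is
 * compared against, while the same pattern mixed with device or
 * peripheral patterns yields DM_RET_COPY | DM_RET_DESCEND, so the
 * traversal continues down into the matching bus.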
1880 */ 1881 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) 1882 retval |= DM_RET_STOP; 1883 1884 return(retval); 1885} 1886 1887static dev_match_ret 1888xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns, 1889 struct cam_ed *device) 1890{ 1891 dev_match_ret retval; 1892 int i; 1893 1894 retval = DM_RET_NONE; 1895 1896 /* 1897 * If we aren't given something to match against, that's an error. 1898 */ 1899 if (device == NULL) 1900 return(DM_RET_ERROR); 1901 1902 /* 1903 * If there are no match entries, then this device matches no 1904 * matter what. 1905 */ 1906 if ((patterns == NULL) || (num_patterns == 0)) 1907 return(DM_RET_DESCEND | DM_RET_COPY); 1908 1909 for (i = 0; i < num_patterns; i++) { 1910 struct device_match_pattern *cur_pattern; 1911 1912 /* 1913 * If the pattern in question isn't for a device node, we 1914 * aren't interested. 1915 */ 1916 if (patterns[i].type != DEV_MATCH_DEVICE) { 1917 if ((patterns[i].type == DEV_MATCH_PERIPH) 1918 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)) 1919 retval |= DM_RET_DESCEND; 1920 continue; 1921 } 1922 1923 cur_pattern = &patterns[i].pattern.device_pattern; 1924 1925 /* 1926 * If they want to match any device node, we give them any 1927 * device node. 1928 */ 1929 if (cur_pattern->flags == DEV_MATCH_ANY) { 1930 /* set the copy flag */ 1931 retval |= DM_RET_COPY; 1932 1933 1934 /* 1935 * If we've already decided on an action, go ahead 1936 * and return. 1937 */ 1938 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE) 1939 return(retval); 1940 } 1941 1942 /* 1943 * Not sure why someone would do this... 1944 */ 1945 if (cur_pattern->flags == DEV_MATCH_NONE) 1946 continue; 1947 1948 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0) 1949 && (cur_pattern->path_id != device->target->bus->path_id)) 1950 continue; 1951 1952 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0) 1953 && (cur_pattern->target_id != device->target->target_id)) 1954 continue; 1955 1956 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0) 1957 && (cur_pattern->target_lun != device->lun_id)) 1958 continue; 1959 1960 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0) 1961 && (cam_quirkmatch((caddr_t)&device->inq_data, 1962 (caddr_t)&cur_pattern->inq_pat, 1963 1, sizeof(cur_pattern->inq_pat), 1964 scsi_static_inquiry_match) == NULL)) 1965 continue; 1966 1967 /* 1968 * If we get to this point, the user definitely wants 1969 * information on this device. So tell the caller to copy 1970 * the data out. 1971 */ 1972 retval |= DM_RET_COPY; 1973 1974 /* 1975 * If the return action has been set to descend, then we 1976 * know that we've already seen a peripheral matching 1977 * expression, therefore we need to further descend the tree. 1978 * This won't change by continuing around the loop, so we 1979 * go ahead and return. If we haven't seen a peripheral 1980 * matching expression, we keep going around the loop until 1981 * we exhaust the matching expressions. We'll set the stop 1982 * flag once we fall out of the loop. 1983 */ 1984 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) 1985 return(retval); 1986 } 1987 1988 /* 1989 * If the return action hasn't been set to descend yet, that means 1990 * we haven't seen any peripheral matching patterns. So tell the 1991 * caller to stop descending the tree -- the user doesn't want to 1992 * match against lower level tree elements. 
1993 */ 1994 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) 1995 retval |= DM_RET_STOP; 1996 1997 return(retval); 1998} 1999 2000/* 2001 * Match a single peripheral against any number of match patterns. 2002 */ 2003static dev_match_ret 2004xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns, 2005 struct cam_periph *periph) 2006{ 2007 dev_match_ret retval; 2008 int i; 2009 2010 /* 2011 * If we aren't given something to match against, that's an error. 2012 */ 2013 if (periph == NULL) 2014 return(DM_RET_ERROR); 2015 2016 /* 2017 * If there are no match entries, then this peripheral matches no 2018 * matter what. 2019 */ 2020 if ((patterns == NULL) || (num_patterns == 0)) 2021 return(DM_RET_STOP | DM_RET_COPY); 2022 2023 /* 2024 * There aren't any nodes below a peripheral node, so there's no 2025 * reason to descend the tree any further. 2026 */ 2027 retval = DM_RET_STOP; 2028 2029 for (i = 0; i < num_patterns; i++) { 2030 struct periph_match_pattern *cur_pattern; 2031 2032 /* 2033 * If the pattern in question isn't for a peripheral, we 2034 * aren't interested. 2035 */ 2036 if (patterns[i].type != DEV_MATCH_PERIPH) 2037 continue; 2038 2039 cur_pattern = &patterns[i].pattern.periph_pattern; 2040 2041 /* 2042 * If they want to match on anything, then we will do so. 2043 */ 2044 if (cur_pattern->flags == PERIPH_MATCH_ANY) { 2045 /* set the copy flag */ 2046 retval |= DM_RET_COPY; 2047 2048 /* 2049 * We've already set the return action to stop, 2050 * since there are no nodes below peripherals in 2051 * the tree. 2052 */ 2053 return(retval); 2054 } 2055 2056 /* 2057 * Not sure why someone would do this... 2058 */ 2059 if (cur_pattern->flags == PERIPH_MATCH_NONE) 2060 continue; 2061 2062 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0) 2063 && (cur_pattern->path_id != periph->path->bus->path_id)) 2064 continue; 2065 2066 /* 2067 * For the target and lun id's, we have to make sure the 2068 * target and lun pointers aren't NULL. The xpt peripheral 2069 * has a wildcard target and device. 2070 */ 2071 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0) 2072 && ((periph->path->target == NULL) 2073 ||(cur_pattern->target_id != periph->path->target->target_id))) 2074 continue; 2075 2076 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0) 2077 && ((periph->path->device == NULL) 2078 || (cur_pattern->target_lun != periph->path->device->lun_id))) 2079 continue; 2080 2081 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0) 2082 && (cur_pattern->unit_number != periph->unit_number)) 2083 continue; 2084 2085 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0) 2086 && (strncmp(cur_pattern->periph_name, periph->periph_name, 2087 DEV_IDLEN) != 0)) 2088 continue; 2089 2090 /* 2091 * If we get to this point, the user definitely wants 2092 * information on this peripheral. So tell the caller to 2093 * copy the data out. 2094 */ 2095 retval |= DM_RET_COPY; 2096 2097 /* 2098 * The return action has already been set to stop, since 2099 * peripherals don't have any nodes below them in the EDT. 2100 */ 2101 return(retval); 2102 } 2103 2104 /* 2105 * If we get to this point, the peripheral that was passed in 2106 * doesn't match any of the patterns. 2107 */ 2108 return(retval); 2109} 2110 2111static int 2112xptedtbusfunc(struct cam_eb *bus, void *arg) 2113{ 2114 struct ccb_dev_match *cdm; 2115 dev_match_ret retval; 2116 2117 cdm = (struct ccb_dev_match *)arg; 2118 2119 /* 2120 * If our position is for something deeper in the tree, that means 2121 * that we've already seen this node. 
So, we keep going down. 2122 */ 2123 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2124 && (cdm->pos.cookie.bus == bus) 2125 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2126 && (cdm->pos.cookie.target != NULL)) 2127 retval = DM_RET_DESCEND; 2128 else 2129 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus); 2130 2131 /* 2132 * If we got an error, bail out of the search. 2133 */ 2134 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 2135 cdm->status = CAM_DEV_MATCH_ERROR; 2136 return(0); 2137 } 2138 2139 /* 2140 * If the copy flag is set, copy this bus out. 2141 */ 2142 if (retval & DM_RET_COPY) { 2143 int spaceleft, j; 2144 2145 spaceleft = cdm->match_buf_len - (cdm->num_matches * 2146 sizeof(struct dev_match_result)); 2147 2148 /* 2149 * If we don't have enough space to put in another 2150 * match result, save our position and tell the 2151 * user there are more devices to check. 2152 */ 2153 if (spaceleft < sizeof(struct dev_match_result)) { 2154 bzero(&cdm->pos, sizeof(cdm->pos)); 2155 cdm->pos.position_type = 2156 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS; 2157 2158 cdm->pos.cookie.bus = bus; 2159 cdm->pos.generations[CAM_BUS_GENERATION]= 2160 bus_generation; 2161 cdm->status = CAM_DEV_MATCH_MORE; 2162 return(0); 2163 } 2164 j = cdm->num_matches; 2165 cdm->num_matches++; 2166 cdm->matches[j].type = DEV_MATCH_BUS; 2167 cdm->matches[j].result.bus_result.path_id = bus->path_id; 2168 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id; 2169 cdm->matches[j].result.bus_result.unit_number = 2170 bus->sim->unit_number; 2171 strncpy(cdm->matches[j].result.bus_result.dev_name, 2172 bus->sim->sim_name, DEV_IDLEN); 2173 } 2174 2175 /* 2176 * If the user is only interested in busses, there's no 2177 * reason to descend to the next level in the tree. 2178 */ 2179 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) 2180 return(1); 2181 2182 /* 2183 * If there is a target generation recorded, check it to 2184 * make sure the target list hasn't changed. 2185 */ 2186 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2187 && (bus == cdm->pos.cookie.bus) 2188 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2189 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0) 2190 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 2191 bus->generation)) { 2192 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 2193 return(0); 2194 } 2195 2196 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2197 && (cdm->pos.cookie.bus == bus) 2198 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2199 && (cdm->pos.cookie.target != NULL)) 2200 return(xpttargettraverse(bus, 2201 (struct cam_et *)cdm->pos.cookie.target, 2202 xptedttargetfunc, arg)); 2203 else 2204 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg)); 2205} 2206 2207static int 2208xptedttargetfunc(struct cam_et *target, void *arg) 2209{ 2210 struct ccb_dev_match *cdm; 2211 2212 cdm = (struct ccb_dev_match *)arg; 2213 2214 /* 2215 * If there is a device list generation recorded, check it to 2216 * make sure the device list hasn't changed. 
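 * If it has changed, we report CAM_DEV_MATCH_LIST_CHANGED so the
 * caller knows to start the scan over from the top of the EDT.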
2217 */ 2218 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2219 && (cdm->pos.cookie.bus == target->bus) 2220 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2221 && (cdm->pos.cookie.target == target) 2222 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 2223 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0) 2224 && (cdm->pos.generations[CAM_DEV_GENERATION] != 2225 target->generation)) { 2226 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 2227 return(0); 2228 } 2229 2230 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2231 && (cdm->pos.cookie.bus == target->bus) 2232 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2233 && (cdm->pos.cookie.target == target) 2234 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 2235 && (cdm->pos.cookie.device != NULL)) 2236 return(xptdevicetraverse(target, 2237 (struct cam_ed *)cdm->pos.cookie.device, 2238 xptedtdevicefunc, arg)); 2239 else 2240 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg)); 2241} 2242 2243static int 2244xptedtdevicefunc(struct cam_ed *device, void *arg) 2245{ 2246 2247 struct ccb_dev_match *cdm; 2248 dev_match_ret retval; 2249 2250 cdm = (struct ccb_dev_match *)arg; 2251 2252 /* 2253 * If our position is for something deeper in the tree, that means 2254 * that we've already seen this node. So, we keep going down. 2255 */ 2256 if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE) 2257 && (cdm->pos.cookie.device == device) 2258 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2259 && (cdm->pos.cookie.periph != NULL)) 2260 retval = DM_RET_DESCEND; 2261 else 2262 retval = xptdevicematch(cdm->patterns, cdm->num_patterns, 2263 device); 2264 2265 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 2266 cdm->status = CAM_DEV_MATCH_ERROR; 2267 return(0); 2268 } 2269 2270 /* 2271 * If the copy flag is set, copy this device out. 2272 */ 2273 if (retval & DM_RET_COPY) { 2274 int spaceleft, j; 2275 2276 spaceleft = cdm->match_buf_len - (cdm->num_matches * 2277 sizeof(struct dev_match_result)); 2278 2279 /* 2280 * If we don't have enough space to put in another 2281 * match result, save our position and tell the 2282 * user there are more devices to check. 
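 * A caller that sees CAM_DEV_MATCH_MORE can resubmit the
 * XPT_DEV_MATCH ccb with cdm->pos left untouched to resume the
 * traversal at this device.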
2283 */ 2284 if (spaceleft < sizeof(struct dev_match_result)) { 2285 bzero(&cdm->pos, sizeof(cdm->pos)); 2286 cdm->pos.position_type = 2287 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 2288 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE; 2289 2290 cdm->pos.cookie.bus = device->target->bus; 2291 cdm->pos.generations[CAM_BUS_GENERATION]= 2292 bus_generation; 2293 cdm->pos.cookie.target = device->target; 2294 cdm->pos.generations[CAM_TARGET_GENERATION] = 2295 device->target->bus->generation; 2296 cdm->pos.cookie.device = device; 2297 cdm->pos.generations[CAM_DEV_GENERATION] = 2298 device->target->generation; 2299 cdm->status = CAM_DEV_MATCH_MORE; 2300 return(0); 2301 } 2302 j = cdm->num_matches; 2303 cdm->num_matches++; 2304 cdm->matches[j].type = DEV_MATCH_DEVICE; 2305 cdm->matches[j].result.device_result.path_id = 2306 device->target->bus->path_id; 2307 cdm->matches[j].result.device_result.target_id = 2308 device->target->target_id; 2309 cdm->matches[j].result.device_result.target_lun = 2310 device->lun_id; 2311 bcopy(&device->inq_data, 2312 &cdm->matches[j].result.device_result.inq_data, 2313 sizeof(struct scsi_inquiry_data)); 2314 2315 /* Let the user know whether this device is unconfigured */ 2316 if (device->flags & CAM_DEV_UNCONFIGURED) 2317 cdm->matches[j].result.device_result.flags = 2318 DEV_RESULT_UNCONFIGURED; 2319 else 2320 cdm->matches[j].result.device_result.flags = 2321 DEV_RESULT_NOFLAG; 2322 } 2323 2324 /* 2325 * If the user isn't interested in peripherals, don't descend 2326 * the tree any further. 2327 */ 2328 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) 2329 return(1); 2330 2331 /* 2332 * If there is a peripheral list generation recorded, make sure 2333 * it hasn't changed. 2334 */ 2335 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2336 && (device->target->bus == cdm->pos.cookie.bus) 2337 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2338 && (device->target == cdm->pos.cookie.target) 2339 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 2340 && (device == cdm->pos.cookie.device) 2341 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2342 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0) 2343 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 2344 device->generation)){ 2345 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 2346 return(0); 2347 } 2348 2349 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2350 && (cdm->pos.cookie.bus == device->target->bus) 2351 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2352 && (cdm->pos.cookie.target == device->target) 2353 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 2354 && (cdm->pos.cookie.device == device) 2355 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2356 && (cdm->pos.cookie.periph != NULL)) 2357 return(xptperiphtraverse(device, 2358 (struct cam_periph *)cdm->pos.cookie.periph, 2359 xptedtperiphfunc, arg)); 2360 else 2361 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg)); 2362} 2363 2364static int 2365xptedtperiphfunc(struct cam_periph *periph, void *arg) 2366{ 2367 struct ccb_dev_match *cdm; 2368 dev_match_ret retval; 2369 2370 cdm = (struct ccb_dev_match *)arg; 2371 2372 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 2373 2374 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 2375 cdm->status = CAM_DEV_MATCH_ERROR; 2376 return(0); 2377 } 2378 2379 /* 2380 * If the copy flag is set, copy this peripheral out. 
2381 */ 2382 if (retval & DM_RET_COPY) { 2383 int spaceleft, j; 2384 2385 spaceleft = cdm->match_buf_len - (cdm->num_matches * 2386 sizeof(struct dev_match_result)); 2387 2388 /* 2389 * If we don't have enough space to put in another 2390 * match result, save our position and tell the 2391 * user there are more devices to check. 2392 */ 2393 if (spaceleft < sizeof(struct dev_match_result)) { 2394 bzero(&cdm->pos, sizeof(cdm->pos)); 2395 cdm->pos.position_type = 2396 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 2397 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE | 2398 CAM_DEV_POS_PERIPH; 2399 2400 cdm->pos.cookie.bus = periph->path->bus; 2401 cdm->pos.generations[CAM_BUS_GENERATION]= 2402 bus_generation; 2403 cdm->pos.cookie.target = periph->path->target; 2404 cdm->pos.generations[CAM_TARGET_GENERATION] = 2405 periph->path->bus->generation; 2406 cdm->pos.cookie.device = periph->path->device; 2407 cdm->pos.generations[CAM_DEV_GENERATION] = 2408 periph->path->target->generation; 2409 cdm->pos.cookie.periph = periph; 2410 cdm->pos.generations[CAM_PERIPH_GENERATION] = 2411 periph->path->device->generation; 2412 cdm->status = CAM_DEV_MATCH_MORE; 2413 return(0); 2414 } 2415 2416 j = cdm->num_matches; 2417 cdm->num_matches++; 2418 cdm->matches[j].type = DEV_MATCH_PERIPH; 2419 cdm->matches[j].result.periph_result.path_id = 2420 periph->path->bus->path_id; 2421 cdm->matches[j].result.periph_result.target_id = 2422 periph->path->target->target_id; 2423 cdm->matches[j].result.periph_result.target_lun = 2424 periph->path->device->lun_id; 2425 cdm->matches[j].result.periph_result.unit_number = 2426 periph->unit_number; 2427 strncpy(cdm->matches[j].result.periph_result.periph_name, 2428 periph->periph_name, DEV_IDLEN); 2429 } 2430 2431 return(1); 2432} 2433 2434static int 2435xptedtmatch(struct ccb_dev_match *cdm) 2436{ 2437 int ret; 2438 2439 cdm->num_matches = 0; 2440 2441 /* 2442 * Check the bus list generation. If it has changed, the user 2443 * needs to reset everything and start over. 2444 */ 2445 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2446 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0) 2447 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) { 2448 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 2449 return(0); 2450 } 2451 2452 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2453 && (cdm->pos.cookie.bus != NULL)) 2454 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus, 2455 xptedtbusfunc, cdm); 2456 else 2457 ret = xptbustraverse(NULL, xptedtbusfunc, cdm); 2458 2459 /* 2460 * If we get back 0, that means that we had to stop before fully 2461 * traversing the EDT. It also means that one of the subroutines 2462 * has set the status field to the proper value. If we get back 1, 2463 * we've fully traversed the EDT and copied out any matching entries. 
2464 */ 2465 if (ret == 1) 2466 cdm->status = CAM_DEV_MATCH_LAST; 2467 2468 return(ret); 2469} 2470 2471static int 2472xptplistpdrvfunc(struct periph_driver **pdrv, void *arg) 2473{ 2474 struct ccb_dev_match *cdm; 2475 2476 cdm = (struct ccb_dev_match *)arg; 2477 2478 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2479 && (cdm->pos.cookie.pdrv == pdrv) 2480 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2481 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0) 2482 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 2483 (*pdrv)->generation)) { 2484 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 2485 return(0); 2486 } 2487 2488 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2489 && (cdm->pos.cookie.pdrv == pdrv) 2490 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2491 && (cdm->pos.cookie.periph != NULL)) 2492 return(xptpdperiphtraverse(pdrv, 2493 (struct cam_periph *)cdm->pos.cookie.periph, 2494 xptplistperiphfunc, arg)); 2495 else 2496 return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg)); 2497} 2498 2499static int 2500xptplistperiphfunc(struct cam_periph *periph, void *arg) 2501{ 2502 struct ccb_dev_match *cdm; 2503 dev_match_ret retval; 2504 2505 cdm = (struct ccb_dev_match *)arg; 2506 2507 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 2508 2509 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 2510 cdm->status = CAM_DEV_MATCH_ERROR; 2511 return(0); 2512 } 2513 2514 /* 2515 * If the copy flag is set, copy this peripheral out. 2516 */ 2517 if (retval & DM_RET_COPY) { 2518 int spaceleft, j; 2519 2520 spaceleft = cdm->match_buf_len - (cdm->num_matches * 2521 sizeof(struct dev_match_result)); 2522 2523 /* 2524 * If we don't have enough space to put in another 2525 * match result, save our position and tell the 2526 * user there are more devices to check. 2527 */ 2528 if (spaceleft < sizeof(struct dev_match_result)) { 2529 struct periph_driver **pdrv; 2530 2531 pdrv = NULL; 2532 bzero(&cdm->pos, sizeof(cdm->pos)); 2533 cdm->pos.position_type = 2534 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR | 2535 CAM_DEV_POS_PERIPH; 2536 2537 /* 2538 * This may look a bit non-sensical, but it is 2539 * actually quite logical. There are very few 2540 * peripheral drivers, and bloating every peripheral 2541 * structure with a pointer back to its parent 2542 * peripheral driver linker set entry would cost 2543 * more in the long run than doing this quick lookup. 2544 */ 2545 for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) { 2546 if (strcmp((*pdrv)->driver_name, 2547 periph->periph_name) == 0) 2548 break; 2549 } 2550 2551 if (*pdrv == NULL) { 2552 cdm->status = CAM_DEV_MATCH_ERROR; 2553 return(0); 2554 } 2555 2556 cdm->pos.cookie.pdrv = pdrv; 2557 /* 2558 * The periph generation slot does double duty, as 2559 * does the periph pointer slot. They are used for 2560 * both edt and pdrv lookups and positioning. 2561 */ 2562 cdm->pos.cookie.periph = periph; 2563 cdm->pos.generations[CAM_PERIPH_GENERATION] = 2564 (*pdrv)->generation; 2565 cdm->status = CAM_DEV_MATCH_MORE; 2566 return(0); 2567 } 2568 2569 j = cdm->num_matches; 2570 cdm->num_matches++; 2571 cdm->matches[j].type = DEV_MATCH_PERIPH; 2572 cdm->matches[j].result.periph_result.path_id = 2573 periph->path->bus->path_id; 2574 2575 /* 2576 * The transport layer peripheral doesn't have a target or 2577 * lun. 
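 * Report a wildcard value of -1 for whichever of the two is missing.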
2578 */ 2579 if (periph->path->target) 2580 cdm->matches[j].result.periph_result.target_id = 2581 periph->path->target->target_id; 2582 else 2583 cdm->matches[j].result.periph_result.target_id = -1; 2584 2585 if (periph->path->device) 2586 cdm->matches[j].result.periph_result.target_lun = 2587 periph->path->device->lun_id; 2588 else 2589 cdm->matches[j].result.periph_result.target_lun = -1; 2590 2591 cdm->matches[j].result.periph_result.unit_number = 2592 periph->unit_number; 2593 strncpy(cdm->matches[j].result.periph_result.periph_name, 2594 periph->periph_name, DEV_IDLEN); 2595 } 2596 2597 return(1); 2598} 2599 2600static int 2601xptperiphlistmatch(struct ccb_dev_match *cdm) 2602{ 2603 int ret; 2604 2605 cdm->num_matches = 0; 2606 2607 /* 2608 * At this point in the EDT traversal function, we check the bus 2609 * list generation to make sure that no busses have been added or 2610 * removed since the user last sent an XPT_DEV_MATCH ccb through. 2611 * For the peripheral driver list traversal function, however, we 2612 * don't have to worry about new peripheral driver types coming or 2613 * going; they're in a linker set, and therefore can't change 2614 * without a recompile. 2615 */ 2616 2617 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2618 && (cdm->pos.cookie.pdrv != NULL)) 2619 ret = xptpdrvtraverse( 2620 (struct periph_driver **)cdm->pos.cookie.pdrv, 2621 xptplistpdrvfunc, cdm); 2622 else 2623 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm); 2624 2625 /* 2626 * If we get back 0, that means that we had to stop before fully 2627 * traversing the peripheral driver tree. It also means that one of 2628 * the subroutines has set the status field to the proper value. If 2629 * we get back 1, we've fully traversed the peripheral driver list 2630 * and copied out any matching entries. 2631 */ 2632 if (ret == 1) 2633 cdm->status = CAM_DEV_MATCH_LAST; 2634 2635 return(ret); 2636} 2637 2638static int 2639xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg) 2640{ 2641 struct cam_eb *bus, *next_bus; 2642 int retval; 2643 2644 retval = 1; 2645 2646 for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses)); 2647 bus != NULL; 2648 bus = next_bus) { 2649 next_bus = TAILQ_NEXT(bus, links); 2650 2651 retval = tr_func(bus, arg); 2652 if (retval == 0) 2653 return(retval); 2654 } 2655 2656 return(retval); 2657} 2658 2659static int 2660xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, 2661 xpt_targetfunc_t *tr_func, void *arg) 2662{ 2663 struct cam_et *target, *next_target; 2664 int retval; 2665 2666 retval = 1; 2667 for (target = (start_target ? start_target : 2668 TAILQ_FIRST(&bus->et_entries)); 2669 target != NULL; target = next_target) { 2670 2671 next_target = TAILQ_NEXT(target, links); 2672 2673 retval = tr_func(target, arg); 2674 2675 if (retval == 0) 2676 return(retval); 2677 } 2678 2679 return(retval); 2680} 2681 2682static int 2683xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, 2684 xpt_devicefunc_t *tr_func, void *arg) 2685{ 2686 struct cam_ed *device, *next_device; 2687 int retval; 2688 2689 retval = 1; 2690 for (device = (start_device ?
start_device : 2691 TAILQ_FIRST(&target->ed_entries)); 2692 device != NULL; 2693 device = next_device) { 2694 2695 next_device = TAILQ_NEXT(device, links); 2696 2697 retval = tr_func(device, arg); 2698 2699 if (retval == 0) 2700 return(retval); 2701 } 2702 2703 return(retval); 2704} 2705 2706static int 2707xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, 2708 xpt_periphfunc_t *tr_func, void *arg) 2709{ 2710 struct cam_periph *periph, *next_periph; 2711 int retval; 2712 2713 retval = 1; 2714 2715 for (periph = (start_periph ? start_periph : 2716 SLIST_FIRST(&device->periphs)); 2717 periph != NULL; 2718 periph = next_periph) { 2719 2720 next_periph = SLIST_NEXT(periph, periph_links); 2721 2722 retval = tr_func(periph, arg); 2723 if (retval == 0) 2724 return(retval); 2725 } 2726 2727 return(retval); 2728} 2729 2730static int 2731xptpdrvtraverse(struct periph_driver **start_pdrv, 2732 xpt_pdrvfunc_t *tr_func, void *arg) 2733{ 2734 struct periph_driver **pdrv; 2735 int retval; 2736 2737 retval = 1; 2738 2739 /* 2740 * We don't traverse the peripheral driver list like we do the 2741 * other lists, because it is a linker set, and therefore cannot be 2742 * changed during runtime. If the peripheral driver list is ever 2743 * re-done to be something other than a linker set (i.e. it can 2744 * change while the system is running), the list traversal should 2745 * be modified to work like the other traversal functions. 2746 */ 2747 for (pdrv = (start_pdrv ? start_pdrv : periph_drivers); 2748 *pdrv != NULL; pdrv++) { 2749 retval = tr_func(pdrv, arg); 2750 2751 if (retval == 0) 2752 return(retval); 2753 } 2754 2755 return(retval); 2756} 2757 2758static int 2759xptpdperiphtraverse(struct periph_driver **pdrv, 2760 struct cam_periph *start_periph, 2761 xpt_periphfunc_t *tr_func, void *arg) 2762{ 2763 struct cam_periph *periph, *next_periph; 2764 int retval; 2765 2766 retval = 1; 2767 2768 for (periph = (start_periph ? 
start_periph : 2769 TAILQ_FIRST(&(*pdrv)->units)); periph != NULL; 2770 periph = next_periph) { 2771 2772 next_periph = TAILQ_NEXT(periph, unit_links); 2773 2774 retval = tr_func(periph, arg); 2775 if (retval == 0) 2776 return(retval); 2777 } 2778 return(retval); 2779} 2780 2781static int 2782xptdefbusfunc(struct cam_eb *bus, void *arg) 2783{ 2784 struct xpt_traverse_config *tr_config; 2785 2786 tr_config = (struct xpt_traverse_config *)arg; 2787 2788 if (tr_config->depth == XPT_DEPTH_BUS) { 2789 xpt_busfunc_t *tr_func; 2790 2791 tr_func = (xpt_busfunc_t *)tr_config->tr_func; 2792 2793 return(tr_func(bus, tr_config->tr_arg)); 2794 } else 2795 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg)); 2796} 2797 2798static int 2799xptdeftargetfunc(struct cam_et *target, void *arg) 2800{ 2801 struct xpt_traverse_config *tr_config; 2802 2803 tr_config = (struct xpt_traverse_config *)arg; 2804 2805 if (tr_config->depth == XPT_DEPTH_TARGET) { 2806 xpt_targetfunc_t *tr_func; 2807 2808 tr_func = (xpt_targetfunc_t *)tr_config->tr_func; 2809 2810 return(tr_func(target, tr_config->tr_arg)); 2811 } else 2812 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg)); 2813} 2814 2815static int 2816xptdefdevicefunc(struct cam_ed *device, void *arg) 2817{ 2818 struct xpt_traverse_config *tr_config; 2819 2820 tr_config = (struct xpt_traverse_config *)arg; 2821 2822 if (tr_config->depth == XPT_DEPTH_DEVICE) { 2823 xpt_devicefunc_t *tr_func; 2824 2825 tr_func = (xpt_devicefunc_t *)tr_config->tr_func; 2826 2827 return(tr_func(device, tr_config->tr_arg)); 2828 } else 2829 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg)); 2830} 2831 2832static int 2833xptdefperiphfunc(struct cam_periph *periph, void *arg) 2834{ 2835 struct xpt_traverse_config *tr_config; 2836 xpt_periphfunc_t *tr_func; 2837 2838 tr_config = (struct xpt_traverse_config *)arg; 2839 2840 tr_func = (xpt_periphfunc_t *)tr_config->tr_func; 2841 2842 /* 2843 * Unlike the other default functions, we don't check for depth 2844 * here. The peripheral driver level is the last level in the EDT, 2845 * so if we're here, we should execute the function in question. 2846 */ 2847 return(tr_func(periph, tr_config->tr_arg)); 2848} 2849 2850/* 2851 * Execute the given function for every bus in the EDT. 2852 */ 2853static int 2854xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg) 2855{ 2856 struct xpt_traverse_config tr_config; 2857 2858 tr_config.depth = XPT_DEPTH_BUS; 2859 tr_config.tr_func = tr_func; 2860 tr_config.tr_arg = arg; 2861 2862 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2863} 2864 2865#ifdef notusedyet 2866/* 2867 * Execute the given function for every target in the EDT. 2868 */ 2869static int 2870xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg) 2871{ 2872 struct xpt_traverse_config tr_config; 2873 2874 tr_config.depth = XPT_DEPTH_TARGET; 2875 tr_config.tr_func = tr_func; 2876 tr_config.tr_arg = arg; 2877 2878 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2879} 2880#endif /* notusedyet */ 2881 2882/* 2883 * Execute the given function for every device in the EDT. 2884 */ 2885static int 2886xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg) 2887{ 2888 struct xpt_traverse_config tr_config; 2889 2890 tr_config.depth = XPT_DEPTH_DEVICE; 2891 tr_config.tr_func = tr_func; 2892 tr_config.tr_arg = arg; 2893 2894 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2895} 2896 2897#ifdef notusedyet 2898/* 2899 * Execute the given function for every peripheral in the EDT. 
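 *
 * These xpt_for_all_* wrappers share one contract with the traversal
 * functions above: the supplied callback returns 1 to continue the
 * walk and 0 to stop it early.  A minimal caller sketch for
 * xpt_for_all_devices(), where xptcountdevicefunc is a hypothetical
 * helper used only in this example:
 *
 *	static int
 *	xptcountdevicefunc(struct cam_ed *device, void *arg)
 *	{
 *		(*(int *)arg)++;	// count this device
 *		return (1);		// keep traversing
 *	}
 *
 *	int count = 0;
 *	xpt_for_all_devices(xptcountdevicefunc, &count);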
2900 */ 2901static int 2902xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg) 2903{ 2904 struct xpt_traverse_config tr_config; 2905 2906 tr_config.depth = XPT_DEPTH_PERIPH; 2907 tr_config.tr_func = tr_func; 2908 tr_config.tr_arg = arg; 2909 2910 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2911} 2912#endif /* notusedyet */ 2913 2914static int 2915xptsetasyncfunc(struct cam_ed *device, void *arg) 2916{ 2917 struct cam_path path; 2918 struct ccb_getdev cgd; 2919 struct async_node *cur_entry; 2920 2921 cur_entry = (struct async_node *)arg; 2922 2923 /* 2924 * Don't report unconfigured devices (Wildcard devs, 2925 * devices only for target mode, device instances 2926 * that have been invalidated but are waiting for 2927 * their last reference count to be released). 2928 */ 2929 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0) 2930 return (1); 2931 2932 xpt_compile_path(&path, 2933 NULL, 2934 device->target->bus->path_id, 2935 device->target->target_id, 2936 device->lun_id); 2937 xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1); 2938 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 2939 xpt_action((union ccb *)&cgd); 2940 cur_entry->callback(cur_entry->callback_arg, 2941 AC_FOUND_DEVICE, 2942 &path, &cgd); 2943 xpt_release_path(&path); 2944 2945 return(1); 2946} 2947 2948static int 2949xptsetasyncbusfunc(struct cam_eb *bus, void *arg) 2950{ 2951 struct cam_path path; 2952 struct ccb_pathinq cpi; 2953 struct async_node *cur_entry; 2954 2955 cur_entry = (struct async_node *)arg; 2956 2957 xpt_compile_path(&path, /*periph*/NULL, 2958 bus->sim->path_id, 2959 CAM_TARGET_WILDCARD, 2960 CAM_LUN_WILDCARD); 2961 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1); 2962 cpi.ccb_h.func_code = XPT_PATH_INQ; 2963 xpt_action((union ccb *)&cpi); 2964 cur_entry->callback(cur_entry->callback_arg, 2965 AC_PATH_REGISTERED, 2966 &path, &cpi); 2967 xpt_release_path(&path); 2968 2969 return(1); 2970} 2971 2972void 2973xpt_action(union ccb *start_ccb) 2974{ 2975 int iopl; 2976 2977 GIANT_REQUIRED; 2978 2979 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n")); 2980 2981 start_ccb->ccb_h.status = CAM_REQ_INPROG; 2982 2983 iopl = splsoftcam(); 2984 switch (start_ccb->ccb_h.func_code) { 2985 case XPT_SCSI_IO: 2986 { 2987#ifdef CAM_NEW_TRAN_CODE 2988 struct cam_ed *device; 2989#endif /* CAM_NEW_TRAN_CODE */ 2990#ifdef CAMDEBUG 2991 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1]; 2992 struct cam_path *path; 2993 2994 path = start_ccb->ccb_h.path; 2995#endif 2996 2997 /* 2998 * For the sake of compatibility with SCSI-1 2999 * devices that may not understand the identify 3000 * message, we include lun information in the 3001 * second byte of all commands. SCSI-1 specifies 3002 * that luns are a 3 bit value and reserves only 3 3003 * bits for lun information in the CDB. Later 3004 * revisions of the SCSI spec allow for more than 8 3005 * luns, but have deprecated lun information in the 3006 * CDB. So, if the lun won't fit, we must omit. 3007 * 3008 * Also be aware that during initial probing for devices, 3009 * the inquiry information is unknown but initialized to 0. 3010 * This means that this code will be exercised while probing 3011 * devices with an ANSI revision greater than 2. 
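 *
 * Worked example, for illustration: with target_lun = 2 the code
 * below ORs (2 << 5) = 0x40 into cdb_bytes[1], so a READ(6) whose
 * first two CDB bytes were 0x08 0x00 goes out as 0x08 0x40.  A lun
 * of 8 or larger does not fit in those three bits and is left out
 * of the CDB entirely.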
3012 */ 3013#ifdef CAM_NEW_TRAN_CODE 3014 device = start_ccb->ccb_h.path->device; 3015 if (device->protocol_version <= SCSI_REV_2 3016#else /* CAM_NEW_TRAN_CODE */ 3017 if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2 3018#endif /* CAM_NEW_TRAN_CODE */ 3019 && start_ccb->ccb_h.target_lun < 8 3020 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) { 3021 3022 start_ccb->csio.cdb_io.cdb_bytes[1] |= 3023 start_ccb->ccb_h.target_lun << 5; 3024 } 3025 start_ccb->csio.scsi_status = SCSI_STATUS_OK; 3026 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n", 3027 scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0], 3028 &path->device->inq_data), 3029 scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes, 3030 cdb_str, sizeof(cdb_str)))); 3031 } 3032 /* FALLTHROUGH */ 3033 case XPT_TARGET_IO: 3034 case XPT_CONT_TARGET_IO: 3035 start_ccb->csio.sense_resid = 0; 3036 start_ccb->csio.resid = 0; 3037 /* FALLTHROUGH */ 3038 case XPT_RESET_DEV: 3039 case XPT_ENG_EXEC: 3040 { 3041 struct cam_path *path; 3042 int s; 3043 int runq; 3044 3045 path = start_ccb->ccb_h.path; 3046 s = splsoftcam(); 3047 3048 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb); 3049 if (path->device->qfrozen_cnt == 0) 3050 runq = xpt_schedule_dev_sendq(path->bus, path->device); 3051 else 3052 runq = 0; 3053 splx(s); 3054 if (runq != 0) 3055 xpt_run_dev_sendq(path->bus); 3056 break; 3057 } 3058 case XPT_SET_TRAN_SETTINGS: 3059 { 3060 xpt_set_transfer_settings(&start_ccb->cts, 3061 start_ccb->ccb_h.path->device, 3062 /*async_update*/FALSE); 3063 break; 3064 } 3065 case XPT_CALC_GEOMETRY: 3066 { 3067 struct cam_sim *sim; 3068 3069 /* Filter out garbage */ 3070 if (start_ccb->ccg.block_size == 0 3071 || start_ccb->ccg.volume_size == 0) { 3072 start_ccb->ccg.cylinders = 0; 3073 start_ccb->ccg.heads = 0; 3074 start_ccb->ccg.secs_per_track = 0; 3075 start_ccb->ccb_h.status = CAM_REQ_CMP; 3076 break; 3077 } 3078#ifdef PC98 3079 /* 3080 * In a PC-98 system, geometry translation depens on 3081 * the "real" device geometry obtained from mode page 4. 3082 * SCSI geometry translation is performed in the 3083 * initialization routine of the SCSI BIOS and the result 3084 * stored in host memory. If the translation is available 3085 * in host memory, use it. If not, rely on the default 3086 * translation the device driver performs. 3087 */ 3088 if (scsi_da_bios_params(&start_ccb->ccg) != 0) { 3089 start_ccb->ccb_h.status = CAM_REQ_CMP; 3090 break; 3091 } 3092#endif 3093 sim = start_ccb->ccb_h.path->bus->sim; 3094 (*(sim->sim_action))(sim, start_ccb); 3095 break; 3096 } 3097 case XPT_ABORT: 3098 { 3099 union ccb* abort_ccb; 3100 int s; 3101 3102 abort_ccb = start_ccb->cab.abort_ccb; 3103 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) { 3104 3105 if (abort_ccb->ccb_h.pinfo.index >= 0) { 3106 struct cam_ccbq *ccbq; 3107 3108 ccbq = &abort_ccb->ccb_h.path->device->ccbq; 3109 cam_ccbq_remove_ccb(ccbq, abort_ccb); 3110 abort_ccb->ccb_h.status = 3111 CAM_REQ_ABORTED|CAM_DEV_QFRZN; 3112 xpt_freeze_devq(abort_ccb->ccb_h.path, 1); 3113 s = splcam(); 3114 xpt_done(abort_ccb); 3115 splx(s); 3116 start_ccb->ccb_h.status = CAM_REQ_CMP; 3117 break; 3118 } 3119 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX 3120 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) { 3121 /* 3122 * We've caught this ccb en route to 3123 * the SIM. Flag it for abort and the 3124 * SIM will do so just before starting 3125 * real work on the CCB. 
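 *
 * Caller-side sketch, for illustration only: an abort request reaches
 * this case as an XPT_ABORT ccb whose cab.abort_ccb points at the ccb
 * to be killed.  victim_ccb is a placeholder, and struct ccb_abort is
 * inferred from the cab member used here:
 *
 *	struct ccb_abort cab;
 *
 *	xpt_setup_ccb(&cab.ccb_h, victim_ccb->ccb_h.path, 1); // priority 1
 *	cab.ccb_h.func_code = XPT_ABORT;
 *	cab.abort_ccb = victim_ccb;
 *	xpt_action((union ccb *)&cab);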
3126 */ 3127 abort_ccb->ccb_h.status = 3128 CAM_REQ_ABORTED|CAM_DEV_QFRZN; 3129 xpt_freeze_devq(abort_ccb->ccb_h.path, 1); 3130 start_ccb->ccb_h.status = CAM_REQ_CMP; 3131 break; 3132 } 3133 } 3134 if (XPT_FC_IS_QUEUED(abort_ccb) 3135 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) { 3136 /* 3137 * It's already completed but waiting 3138 * for our SWI to get to it. 3139 */ 3140 start_ccb->ccb_h.status = CAM_UA_ABORT; 3141 break; 3142 } 3143 /* 3144 * If we weren't able to take care of the abort request 3145 * in the XPT, pass the request down to the SIM for processing. 3146 */ 3147 } 3148 /* FALLTHROUGH */ 3149 case XPT_ACCEPT_TARGET_IO: 3150 case XPT_EN_LUN: 3151 case XPT_IMMED_NOTIFY: 3152 case XPT_NOTIFY_ACK: 3153 case XPT_GET_TRAN_SETTINGS: 3154 case XPT_RESET_BUS: 3155 { 3156 struct cam_sim *sim; 3157 3158 sim = start_ccb->ccb_h.path->bus->sim; 3159 (*(sim->sim_action))(sim, start_ccb); 3160 break; 3161 } 3162 case XPT_PATH_INQ: 3163 { 3164 struct cam_sim *sim; 3165 3166 sim = start_ccb->ccb_h.path->bus->sim; 3167 (*(sim->sim_action))(sim, start_ccb); 3168 break; 3169 } 3170 case XPT_PATH_STATS: 3171 start_ccb->cpis.last_reset = 3172 start_ccb->ccb_h.path->bus->last_reset; 3173 start_ccb->ccb_h.status = CAM_REQ_CMP; 3174 break; 3175 case XPT_GDEV_TYPE: 3176 { 3177 struct cam_ed *dev; 3178 int s; 3179 3180 dev = start_ccb->ccb_h.path->device; 3181 s = splcam(); 3182 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 3183 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 3184 } else { 3185 struct ccb_getdev *cgd; 3186 struct cam_eb *bus; 3187 struct cam_et *tar; 3188 3189 cgd = &start_ccb->cgd; 3190 bus = cgd->ccb_h.path->bus; 3191 tar = cgd->ccb_h.path->target; 3192 cgd->inq_data = dev->inq_data; 3193 cgd->ccb_h.status = CAM_REQ_CMP; 3194 cgd->serial_num_len = dev->serial_num_len; 3195 if ((dev->serial_num_len > 0) 3196 && (dev->serial_num != NULL)) 3197 bcopy(dev->serial_num, cgd->serial_num, 3198 dev->serial_num_len); 3199 } 3200 splx(s); 3201 break; 3202 } 3203 case XPT_GDEV_STATS: 3204 { 3205 struct cam_ed *dev; 3206 int s; 3207 3208 dev = start_ccb->ccb_h.path->device; 3209 s = splcam(); 3210 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 3211 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 3212 } else { 3213 struct ccb_getdevstats *cgds; 3214 struct cam_eb *bus; 3215 struct cam_et *tar; 3216 3217 cgds = &start_ccb->cgds; 3218 bus = cgds->ccb_h.path->bus; 3219 tar = cgds->ccb_h.path->target; 3220 cgds->dev_openings = dev->ccbq.dev_openings; 3221 cgds->dev_active = dev->ccbq.dev_active; 3222 cgds->devq_openings = dev->ccbq.devq_openings; 3223 cgds->devq_queued = dev->ccbq.queue.entries; 3224 cgds->held = dev->ccbq.held; 3225 cgds->last_reset = tar->last_reset; 3226 cgds->maxtags = dev->quirk->maxtags; 3227 cgds->mintags = dev->quirk->mintags; 3228 if (timevalcmp(&tar->last_reset, &bus->last_reset, <)) 3229 cgds->last_reset = bus->last_reset; 3230 cgds->ccb_h.status = CAM_REQ_CMP; 3231 } 3232 splx(s); 3233 break; 3234 } 3235 case XPT_GDEVLIST: 3236 { 3237 struct cam_periph *nperiph; 3238 struct periph_list *periph_head; 3239 struct ccb_getdevlist *cgdl; 3240 u_int i; 3241 int s; 3242 struct cam_ed *device; 3243 int found; 3244 3245 3246 found = 0; 3247 3248 /* 3249 * Don't want anyone mucking with our data. 3250 */ 3251 s = splcam(); 3252 device = start_ccb->ccb_h.path->device; 3253 periph_head = &device->periphs; 3254 cgdl = &start_ccb->cgdl; 3255 3256 /* 3257 * Check and see if the list has changed since the user 3258 * last requested a list member. 
If so, tell them that the 3259 * list has changed, and therefore they need to start over 3260 * from the beginning. 3261 */ 3262 if ((cgdl->index != 0) && 3263 (cgdl->generation != device->generation)) { 3264 cgdl->status = CAM_GDEVLIST_LIST_CHANGED; 3265 splx(s); 3266 break; 3267 } 3268 3269 /* 3270 * Traverse the list of peripherals and attempt to find 3271 * the requested peripheral. 3272 */ 3273 for (nperiph = SLIST_FIRST(periph_head), i = 0; 3274 (nperiph != NULL) && (i <= cgdl->index); 3275 nperiph = SLIST_NEXT(nperiph, periph_links), i++) { 3276 if (i == cgdl->index) { 3277 strncpy(cgdl->periph_name, 3278 nperiph->periph_name, 3279 DEV_IDLEN); 3280 cgdl->unit_number = nperiph->unit_number; 3281 found = 1; 3282 } 3283 } 3284 if (found == 0) { 3285 cgdl->status = CAM_GDEVLIST_ERROR; 3286 splx(s); 3287 break; 3288 } 3289 3290 if (nperiph == NULL) 3291 cgdl->status = CAM_GDEVLIST_LAST_DEVICE; 3292 else 3293 cgdl->status = CAM_GDEVLIST_MORE_DEVS; 3294 3295 cgdl->index++; 3296 cgdl->generation = device->generation; 3297 3298 splx(s); 3299 cgdl->ccb_h.status = CAM_REQ_CMP; 3300 break; 3301 } 3302 case XPT_DEV_MATCH: 3303 { 3304 int s; 3305 dev_pos_type position_type; 3306 struct ccb_dev_match *cdm; 3307 3308 cdm = &start_ccb->cdm; 3309 3310 /* 3311 * Prevent EDT changes while we traverse it. 3312 */ 3313 s = splcam(); 3314 /* 3315 * There are two ways of getting at information in the EDT. 3316 * The first way is via the primary EDT tree. It starts 3317 * with a list of busses, then a list of targets on a bus, 3318 * then devices/luns on a target, and then peripherals on a 3319 * device/lun. The "other" way is by the peripheral driver 3320 * lists. The peripheral driver lists are organized by 3321 * peripheral driver. (obviously) So it makes sense to 3322 * use the peripheral driver list if the user is looking 3323 * for something like "da1", or all "da" devices. If the 3324 * user is looking for something on a particular bus/target 3325 * or lun, it's generally better to go through the EDT tree. 3326 */ 3327 3328 if (cdm->pos.position_type != CAM_DEV_POS_NONE) 3329 position_type = cdm->pos.position_type; 3330 else { 3331 u_int i; 3332 3333 position_type = CAM_DEV_POS_NONE; 3334 3335 for (i = 0; i < cdm->num_patterns; i++) { 3336 if ((cdm->patterns[i].type == DEV_MATCH_BUS) 3337 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){ 3338 position_type = CAM_DEV_POS_EDT; 3339 break; 3340 } 3341 } 3342 3343 if (cdm->num_patterns == 0) 3344 position_type = CAM_DEV_POS_EDT; 3345 else if (position_type == CAM_DEV_POS_NONE) 3346 position_type = CAM_DEV_POS_PDRV; 3347 } 3348 3349 switch(position_type & CAM_DEV_POS_TYPEMASK) { 3350 case CAM_DEV_POS_EDT: 3351 xptedtmatch(cdm); 3352 break; 3353 case CAM_DEV_POS_PDRV: 3354 xptperiphlistmatch(cdm); 3355 break; 3356 default: 3357 cdm->status = CAM_DEV_MATCH_ERROR; 3358 break; 3359 } 3360 3361 splx(s); 3362 3363 if (cdm->status == CAM_DEV_MATCH_ERROR) 3364 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR; 3365 else 3366 start_ccb->ccb_h.status = CAM_REQ_CMP; 3367 3368 break; 3369 } 3370 case XPT_SASYNC_CB: 3371 { 3372 struct ccb_setasync *csa; 3373 struct async_node *cur_entry; 3374 struct async_list *async_head; 3375 u_int32_t added; 3376 int s; 3377 3378 csa = &start_ccb->csa; 3379 added = csa->event_enable; 3380 async_head = &csa->ccb_h.path->device->asyncs; 3381 3382 /* 3383 * If there is already an entry for us, simply 3384 * update it. 
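 *
 * Registration sketch, for illustration; mycallback and mysoftc are
 * placeholders: a peripheral driver asks for async notifications by
 * filling in a ccb_setasync and pushing it through xpt_action():
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, path, 1);	// priority 1
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = mycallback;
 *	csa.callback_arg = mysoftc;
 *	xpt_action((union ccb *)&csa);
 *
 * The callback is invoked as (callback_arg, event code, path, event
 * argument), matching the cur_entry->callback() calls earlier in this
 * file.  Re-issuing the ccb with event_enable == 0 removes the entry,
 * as the code below shows.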
3385 */ 3386 s = splcam(); 3387 cur_entry = SLIST_FIRST(async_head); 3388 while (cur_entry != NULL) { 3389 if ((cur_entry->callback_arg == csa->callback_arg) 3390 && (cur_entry->callback == csa->callback)) 3391 break; 3392 cur_entry = SLIST_NEXT(cur_entry, links); 3393 } 3394 3395 if (cur_entry != NULL) { 3396 /* 3397 * If the request has no flags set, 3398 * remove the entry. 3399 */ 3400 added &= ~cur_entry->event_enable; 3401 if (csa->event_enable == 0) { 3402 SLIST_REMOVE(async_head, cur_entry, 3403 async_node, links); 3404 csa->ccb_h.path->device->refcount--;
5113 xpt_release_target(bus, target); 5114 } else 5115 splx(s); 5116} 5117 5118static u_int32_t 5119xpt_dev_ccbq_resize(struct cam_path *path, int newopenings) 5120{ 5121 int s; 5122 int diff; 5123 int result; 5124 struct cam_ed *dev; 5125 5126 dev = path->device; 5127 s = splsoftcam(); 5128 5129 diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings); 5130 result = cam_ccbq_resize(&dev->ccbq, newopenings); 5131 if (result == CAM_REQ_CMP && (diff < 0)) { 5132 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED; 5133 } 5134 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 5135 || (dev->inq_flags & SID_CmdQue) != 0) 5136 dev->tag_saved_openings = newopenings; 5137 /* Adjust the global limit */ 5138 xpt_max_ccbs += diff; 5139 splx(s); 5140 return (result); 5141} 5142 5143static struct cam_eb * 5144xpt_find_bus(path_id_t path_id) 5145{ 5146 struct cam_eb *bus; 5147 5148 for (bus = TAILQ_FIRST(&xpt_busses); 5149 bus != NULL; 5150 bus = TAILQ_NEXT(bus, links)) { 5151 if (bus->path_id == path_id) { 5152 bus->refcount++; 5153 break; 5154 } 5155 } 5156 return (bus); 5157} 5158 5159static struct cam_et * 5160xpt_find_target(struct cam_eb *bus, target_id_t target_id) 5161{ 5162 struct cam_et *target; 5163 5164 for (target = TAILQ_FIRST(&bus->et_entries); 5165 target != NULL; 5166 target = TAILQ_NEXT(target, links)) { 5167 if (target->target_id == target_id) { 5168 target->refcount++; 5169 break; 5170 } 5171 } 5172 return (target); 5173} 5174 5175static struct cam_ed * 5176xpt_find_device(struct cam_et *target, lun_id_t lun_id) 5177{ 5178 struct cam_ed *device; 5179 5180 for (device = TAILQ_FIRST(&target->ed_entries); 5181 device != NULL; 5182 device = TAILQ_NEXT(device, links)) { 5183 if (device->lun_id == lun_id) { 5184 device->refcount++; 5185 break; 5186 } 5187 } 5188 return (device); 5189} 5190 5191typedef struct { 5192 union ccb *request_ccb; 5193 struct ccb_pathinq *cpi; 5194 int pending_count; 5195} xpt_scan_bus_info; 5196 5197/* 5198 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb. 5199 * As the scan progresses, xpt_scan_bus is used as the 5200 * callback on completion function. 5201 */ 5202static void 5203xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb) 5204{ 5205 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, 5206 ("xpt_scan_bus\n")); 5207 switch (request_ccb->ccb_h.func_code) { 5208 case XPT_SCAN_BUS: 5209 { 5210 xpt_scan_bus_info *scan_info; 5211 union ccb *work_ccb; 5212 struct cam_path *path; 5213 u_int i; 5214 u_int max_target; 5215 u_int initiator_id; 5216 5217 /* Find out the characteristics of the bus */ 5218 work_ccb = xpt_alloc_ccb(); 5219 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path, 5220 request_ccb->ccb_h.pinfo.priority); 5221 work_ccb->ccb_h.func_code = XPT_PATH_INQ; 5222 xpt_action(work_ccb); 5223 if (work_ccb->ccb_h.status != CAM_REQ_CMP) { 5224 request_ccb->ccb_h.status = work_ccb->ccb_h.status; 5225 xpt_free_ccb(work_ccb); 5226 xpt_done(request_ccb); 5227 return; 5228 } 5229 5230 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) { 5231 /* 5232 * Can't scan the bus on an adapter that 5233 * cannot perform the initiator role. 
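 *
 * Caller sketch, for illustration only; mydonefn and the priority
 * value are assumptions, and the rescan entry points themselves are
 * outside this fragment: a bus scan is started by handing
 * xpt_action() an XPT_SCAN_BUS ccb aimed at the bus in question:
 *
 *	union ccb *ccb = xpt_alloc_ccb();
 *
 *	xpt_setup_ccb(&ccb->ccb_h, path, 5);
 *	ccb->ccb_h.func_code = XPT_SCAN_BUS;
 *	ccb->ccb_h.cbfcnp = mydonefn;
 *	ccb->crcn.flags = 0;
 *	xpt_action(ccb);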
5234 */ 5235 request_ccb->ccb_h.status = CAM_REQ_CMP; 5236 xpt_free_ccb(work_ccb); 5237 xpt_done(request_ccb); 5238 return; 5239 } 5240 5241 /* Save some state for use while we probe for devices */ 5242 scan_info = (xpt_scan_bus_info *) 5243 malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_WAITOK); 5244 scan_info->request_ccb = request_ccb; 5245 scan_info->cpi = &work_ccb->cpi; 5246 5247 /* Cache on our stack so we can work asynchronously */ 5248 max_target = scan_info->cpi->max_target; 5249 initiator_id = scan_info->cpi->initiator_id; 5250 5251 /* 5252 * Don't count the initiator if the 5253 * initiator is addressable. 5254 */ 5255 scan_info->pending_count = max_target + 1; 5256 if (initiator_id <= max_target) 5257 scan_info->pending_count--; 5258 5259 for (i = 0; i <= max_target; i++) { 5260 cam_status status; 5261 if (i == initiator_id) 5262 continue; 5263 5264 status = xpt_create_path(&path, xpt_periph, 5265 request_ccb->ccb_h.path_id, 5266 i, 0); 5267 if (status != CAM_REQ_CMP) { 5268 printf("xpt_scan_bus: xpt_create_path failed" 5269 " with status %#x, bus scan halted\n", 5270 status); 5271 break; 5272 } 5273 work_ccb = xpt_alloc_ccb(); 5274 xpt_setup_ccb(&work_ccb->ccb_h, path, 5275 request_ccb->ccb_h.pinfo.priority); 5276 work_ccb->ccb_h.func_code = XPT_SCAN_LUN; 5277 work_ccb->ccb_h.cbfcnp = xpt_scan_bus; 5278 work_ccb->ccb_h.ppriv_ptr0 = scan_info; 5279 work_ccb->crcn.flags = request_ccb->crcn.flags; 5280 xpt_action(work_ccb); 5281 } 5282 break; 5283 } 5284 case XPT_SCAN_LUN: 5285 { 5286 xpt_scan_bus_info *scan_info; 5287 path_id_t path_id; 5288 target_id_t target_id; 5289 lun_id_t lun_id; 5290 5291 /* Reuse the same CCB to query if a device was really found */ 5292 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0; 5293 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path, 5294 request_ccb->ccb_h.pinfo.priority); 5295 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE; 5296 5297 path_id = request_ccb->ccb_h.path_id; 5298 target_id = request_ccb->ccb_h.target_id; 5299 lun_id = request_ccb->ccb_h.target_lun; 5300 xpt_action(request_ccb); 5301 5302 if (request_ccb->ccb_h.status != CAM_REQ_CMP) { 5303 struct cam_ed *device; 5304 struct cam_et *target; 5305 int s, phl; 5306 5307 /* 5308 * If we already probed lun 0 successfully, or 5309 * we have additional configured luns on this 5310 * target that might have "gone away", go onto 5311 * the next lun. 5312 */ 5313 target = request_ccb->ccb_h.path->target; 5314 /* 5315 * We may touch devices that we don't 5316 * hold references too, so ensure they 5317 * don't disappear out from under us. 5318 * The target above is referenced by the 5319 * path in the request ccb. 
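 *
 * Related note, an inference from this fragment: the lookup helpers
 * xpt_find_bus(), xpt_find_target() and xpt_find_device() above bump
 * the refcount of the node they return, so a direct user of them is
 * expected to pair each successful lookup with the matching release,
 * e.g.
 *
 *	target = xpt_find_target(bus, target_id);
 *	if (target != NULL) {
 *		// ... use the target ...
 *		xpt_release_target(bus, target);
 *	}
 *
 * xpt_release_target() appears near the top of this fragment; the
 * pairing shown here is an assumption based on that.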
5320 */ 5321 phl = 0; 5322 s = splcam(); 5323 device = TAILQ_FIRST(&target->ed_entries); 5324 if (device != NULL) { 5325 phl = CAN_SRCH_HI(device); 5326 if (device->lun_id == 0) 5327 device = TAILQ_NEXT(device, links); 5328 } 5329 splx(s); 5330 if ((lun_id != 0) || (device != NULL)) { 5331 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl) 5332 lun_id++; 5333 } 5334 } else { 5335 struct cam_ed *device; 5336 5337 device = request_ccb->ccb_h.path->device; 5338 5339 if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) { 5340 /* Try the next lun */ 5341 if (lun_id < (CAM_SCSI2_MAXLUN-1) 5342 || CAN_SRCH_HI(device)) 5343 lun_id++; 5344 } 5345 } 5346 5347 xpt_free_path(request_ccb->ccb_h.path); 5348 5349 /* Check Bounds */ 5350 if ((lun_id == request_ccb->ccb_h.target_lun) 5351 || lun_id > scan_info->cpi->max_lun) { 5352 /* We're done */ 5353 5354 xpt_free_ccb(request_ccb); 5355 scan_info->pending_count--; 5356 if (scan_info->pending_count == 0) { 5357 xpt_free_ccb((union ccb *)scan_info->cpi); 5358 request_ccb = scan_info->request_ccb; 5359 free(scan_info, M_TEMP); 5360 request_ccb->ccb_h.status = CAM_REQ_CMP; 5361 xpt_done(request_ccb); 5362 } 5363 } else { 5364 /* Try the next device */ 5365 struct cam_path *path; 5366 cam_status status; 5367 5368 status = xpt_create_path(&path, xpt_periph, 5369 path_id, target_id, lun_id); 5370 if (status != CAM_REQ_CMP) { 5371 printf("xpt_scan_bus: xpt_create_path failed " 5372 "with status %#x, halting LUN scan\n", 5373 status); 5374 xpt_free_ccb(request_ccb); 5375 scan_info->pending_count--; 5376 if (scan_info->pending_count == 0) { 5377 xpt_free_ccb( 5378 (union ccb *)scan_info->cpi); 5379 request_ccb = scan_info->request_ccb; 5380 free(scan_info, M_TEMP); 5381 request_ccb->ccb_h.status = CAM_REQ_CMP; 5382 xpt_done(request_ccb); 5383 } 5384 break; 5385 } 5386 xpt_setup_ccb(&request_ccb->ccb_h, path, 5387 request_ccb->ccb_h.pinfo.priority); 5388 request_ccb->ccb_h.func_code = XPT_SCAN_LUN; 5389 request_ccb->ccb_h.cbfcnp = xpt_scan_bus; 5390 request_ccb->ccb_h.ppriv_ptr0 = scan_info; 5391 request_ccb->crcn.flags = 5392 scan_info->request_ccb->crcn.flags; 5393 xpt_action(request_ccb); 5394 } 5395 break; 5396 } 5397 default: 5398 break; 5399 } 5400} 5401 5402typedef enum { 5403 PROBE_TUR, 5404 PROBE_INQUIRY, 5405 PROBE_FULL_INQUIRY, 5406 PROBE_MODE_SENSE, 5407 PROBE_SERIAL_NUM, 5408 PROBE_TUR_FOR_NEGOTIATION 5409} probe_action; 5410 5411typedef enum { 5412 PROBE_INQUIRY_CKSUM = 0x01, 5413 PROBE_SERIAL_CKSUM = 0x02, 5414 PROBE_NO_ANNOUNCE = 0x04 5415} probe_flags; 5416 5417typedef struct { 5418 TAILQ_HEAD(, ccb_hdr) request_ccbs; 5419 probe_action action; 5420 union ccb saved_ccb; 5421 probe_flags flags; 5422 MD5_CTX context; 5423 u_int8_t digest[16]; 5424} probe_softc; 5425 5426static void 5427xpt_scan_lun(struct cam_periph *periph, struct cam_path *path, 5428 cam_flags flags, union ccb *request_ccb) 5429{ 5430 struct ccb_pathinq cpi; 5431 cam_status status; 5432 struct cam_path *new_path; 5433 struct cam_periph *old_periph; 5434 int s; 5435 5436 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, 5437 ("xpt_scan_lun\n")); 5438 5439 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1); 5440 cpi.ccb_h.func_code = XPT_PATH_INQ; 5441 xpt_action((union ccb *)&cpi); 5442 5443 if (cpi.ccb_h.status != CAM_REQ_CMP) { 5444 if (request_ccb != NULL) { 5445 request_ccb->ccb_h.status = cpi.ccb_h.status; 5446 xpt_done(request_ccb); 5447 } 5448 return; 5449 } 5450 5451 if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) { 5452 /* 5453 * Can't scan the bus on an adapter that 5454 * cannot perform 
the initiator role. 5455 */ 5456 if (request_ccb != NULL) { 5457 request_ccb->ccb_h.status = CAM_REQ_CMP; 5458 xpt_done(request_ccb); 5459 } 5460 return; 5461 } 5462 5463 if (request_ccb == NULL) { 5464 request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT); 5465 if (request_ccb == NULL) { 5466 xpt_print_path(path); 5467 printf("xpt_scan_lun: can't allocate CCB, can't " 5468 "continue\n"); 5469 return; 5470 } 5471 new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT); 5472 if (new_path == NULL) { 5473 xpt_print_path(path); 5474 printf("xpt_scan_lun: can't allocate path, can't " 5475 "continue\n"); 5476 free(request_ccb, M_TEMP); 5477 return; 5478 } 5479 status = xpt_compile_path(new_path, xpt_periph, 5480 path->bus->path_id, 5481 path->target->target_id, 5482 path->device->lun_id); 5483 5484 if (status != CAM_REQ_CMP) { 5485 xpt_print_path(path); 5486 printf("xpt_scan_lun: can't compile path, can't " 5487 "continue\n"); 5488 free(request_ccb, M_TEMP); 5489 free(new_path, M_TEMP); 5490 return; 5491 } 5492 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1); 5493 request_ccb->ccb_h.cbfcnp = xptscandone; 5494 request_ccb->ccb_h.func_code = XPT_SCAN_LUN; 5495 request_ccb->crcn.flags = flags; 5496 } 5497 5498 s = splsoftcam(); 5499 if ((old_periph = cam_periph_find(path, "probe")) != NULL) { 5500 probe_softc *softc; 5501 5502 softc = (probe_softc *)old_periph->softc; 5503 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, 5504 periph_links.tqe); 5505 } else { 5506 status = cam_periph_alloc(proberegister, NULL, probecleanup, 5507 probestart, "probe", 5508 CAM_PERIPH_BIO, 5509 request_ccb->ccb_h.path, NULL, 0, 5510 request_ccb); 5511 5512 if (status != CAM_REQ_CMP) { 5513 xpt_print_path(path); 5514 printf("xpt_scan_lun: cam_alloc_periph returned an " 5515 "error, can't continue probe\n"); 5516 request_ccb->ccb_h.status = status; 5517 xpt_done(request_ccb); 5518 } 5519 } 5520 splx(s); 5521} 5522 5523static void 5524xptscandone(struct cam_periph *periph, union ccb *done_ccb) 5525{ 5526 xpt_release_path(done_ccb->ccb_h.path); 5527 free(done_ccb->ccb_h.path, M_TEMP); 5528 free(done_ccb, M_TEMP); 5529} 5530 5531static cam_status 5532proberegister(struct cam_periph *periph, void *arg) 5533{ 5534 union ccb *request_ccb; /* CCB representing the probe request */ 5535 probe_softc *softc; 5536 5537 request_ccb = (union ccb *)arg; 5538 if (periph == NULL) { 5539 printf("proberegister: periph was NULL!!\n"); 5540 return(CAM_REQ_CMP_ERR); 5541 } 5542 5543 if (request_ccb == NULL) { 5544 printf("proberegister: no probe CCB, " 5545 "can't register device\n"); 5546 return(CAM_REQ_CMP_ERR); 5547 } 5548 5549 softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT); 5550 5551 if (softc == NULL) { 5552 printf("proberegister: Unable to probe new device. " 5553 "Unable to allocate softc\n"); 5554 return(CAM_REQ_CMP_ERR); 5555 } 5556 TAILQ_INIT(&softc->request_ccbs); 5557 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, 5558 periph_links.tqe); 5559 softc->flags = 0; 5560 periph->softc = softc; 5561 cam_periph_acquire(periph); 5562 /* 5563 * Ensure we've waited at least a bus settle 5564 * delay before attempting to probe the device. 5565 * For HBAs that don't do bus resets, this won't make a difference. 
5566 */ 5567 cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset, 5568 scsi_delay); 5569 probeschedule(periph); 5570 return(CAM_REQ_CMP); 5571} 5572 5573static void 5574probeschedule(struct cam_periph *periph) 5575{ 5576 struct ccb_pathinq cpi; 5577 union ccb *ccb; 5578 probe_softc *softc; 5579 5580 softc = (probe_softc *)periph->softc; 5581 ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs); 5582 5583 xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1); 5584 cpi.ccb_h.func_code = XPT_PATH_INQ; 5585 xpt_action((union ccb *)&cpi); 5586 5587 /* 5588 * If a device has gone away and another device, or the same one, 5589 * is back in the same place, it should have a unit attention 5590 * condition pending. It will not report the unit attention in 5591 * response to an inquiry, which may leave invalid transfer 5592 * negotiations in effect. The TUR will reveal the unit attention 5593 * condition. Only send the TUR for lun 0, since some devices 5594 * will get confused by commands other than inquiry to non-existent 5595 * luns. If you think a device has gone away start your scan from 5596 * lun 0. This will insure that any bogus transfer settings are 5597 * invalidated. 5598 * 5599 * If we haven't seen the device before and the controller supports 5600 * some kind of transfer negotiation, negotiate with the first 5601 * sent command if no bus reset was performed at startup. This 5602 * ensures that the device is not confused by transfer negotiation 5603 * settings left over by loader or BIOS action. 5604 */ 5605 if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0) 5606 && (ccb->ccb_h.target_lun == 0)) { 5607 softc->action = PROBE_TUR; 5608 } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0 5609 && (cpi.hba_misc & PIM_NOBUSRESET) != 0) { 5610 proberequestdefaultnegotiation(periph); 5611 softc->action = PROBE_INQUIRY; 5612 } else { 5613 softc->action = PROBE_INQUIRY; 5614 } 5615 5616 if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE) 5617 softc->flags |= PROBE_NO_ANNOUNCE; 5618 else 5619 softc->flags &= ~PROBE_NO_ANNOUNCE; 5620 5621 xpt_schedule(periph, ccb->ccb_h.pinfo.priority); 5622} 5623 5624static void 5625probestart(struct cam_periph *periph, union ccb *start_ccb) 5626{ 5627 /* Probe the device that our peripheral driver points to */ 5628 struct ccb_scsiio *csio; 5629 probe_softc *softc; 5630 5631 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n")); 5632 5633 softc = (probe_softc *)periph->softc; 5634 csio = &start_ccb->csio; 5635 5636 switch (softc->action) { 5637 case PROBE_TUR: 5638 case PROBE_TUR_FOR_NEGOTIATION: 5639 { 5640 scsi_test_unit_ready(csio, 5641 /*retries*/4, 5642 probedone, 5643 MSG_SIMPLE_Q_TAG, 5644 SSD_FULL_SIZE, 5645 /*timeout*/60000); 5646 break; 5647 } 5648 case PROBE_INQUIRY: 5649 case PROBE_FULL_INQUIRY: 5650 { 5651 u_int inquiry_len; 5652 struct scsi_inquiry_data *inq_buf; 5653 5654 inq_buf = &periph->path->device->inq_data; 5655 /* 5656 * If the device is currently configured, we calculate an 5657 * MD5 checksum of the inquiry data, and if the serial number 5658 * length is greater than 0, add the serial number data 5659 * into the checksum as well. Once the inquiry and the 5660 * serial number check finish, we attempt to figure out 5661 * whether we still have the same device. 
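 *
 * Sketch of the later comparison, for illustration; new_digest and
 * context are placeholders, and the real code lives in probedone()'s
 * PROBE_SERIAL_NUM handling, which this fragment does not show in
 * full.  Once the fresh inquiry and serial number have been fetched,
 * the digest can be recomputed the same way and compared:
 *
 *	MD5Init(&context);
 *	MD5Update(&context, (unsigned char *)inq_buf,
 *		  sizeof(struct scsi_inquiry_data));
 *	MD5Final(new_digest, &context);
 *	changed = bcmp(softc->digest, new_digest,
 *		       sizeof(softc->digest)) != 0;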
5662 */ 5663 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) { 5664 5665 MD5Init(&softc->context); 5666 MD5Update(&softc->context, (unsigned char *)inq_buf, 5667 sizeof(struct scsi_inquiry_data)); 5668 softc->flags |= PROBE_INQUIRY_CKSUM; 5669 if (periph->path->device->serial_num_len > 0) { 5670 MD5Update(&softc->context, 5671 periph->path->device->serial_num, 5672 periph->path->device->serial_num_len); 5673 softc->flags |= PROBE_SERIAL_CKSUM; 5674 } 5675 MD5Final(softc->digest, &softc->context); 5676 } 5677 5678 if (softc->action == PROBE_INQUIRY) 5679 inquiry_len = SHORT_INQUIRY_LENGTH; 5680 else 5681 inquiry_len = inq_buf->additional_length 5682 + offsetof(struct scsi_inquiry_data, 5683 additional_length) + 1; 5684 5685 /* 5686 * Some parallel SCSI devices fail to send an 5687 * ignore wide residue message when dealing with 5688 * odd length inquiry requests. Round up to be 5689 * safe. 5690 */ 5691 inquiry_len = roundup2(inquiry_len, 2); 5692 5693 scsi_inquiry(csio, 5694 /*retries*/4, 5695 probedone, 5696 MSG_SIMPLE_Q_TAG, 5697 (u_int8_t *)inq_buf, 5698 inquiry_len, 5699 /*evpd*/FALSE, 5700 /*page_code*/0, 5701 SSD_MIN_SIZE, 5702 /*timeout*/60 * 1000); 5703 break; 5704 } 5705 case PROBE_MODE_SENSE: 5706 { 5707 void *mode_buf; 5708 int mode_buf_len; 5709 5710 mode_buf_len = sizeof(struct scsi_mode_header_6) 5711 + sizeof(struct scsi_mode_blk_desc) 5712 + sizeof(struct scsi_control_page); 5713 mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT); 5714 if (mode_buf != NULL) { 5715 scsi_mode_sense(csio, 5716 /*retries*/4, 5717 probedone, 5718 MSG_SIMPLE_Q_TAG, 5719 /*dbd*/FALSE, 5720 SMS_PAGE_CTRL_CURRENT, 5721 SMS_CONTROL_MODE_PAGE, 5722 mode_buf, 5723 mode_buf_len, 5724 SSD_FULL_SIZE, 5725 /*timeout*/60000); 5726 break; 5727 } 5728 xpt_print_path(periph->path); 5729 printf("Unable to mode sense control page - malloc failure\n"); 5730 softc->action = PROBE_SERIAL_NUM; 5731 } 5732 /* FALLTHROUGH */ 5733 case PROBE_SERIAL_NUM: 5734 { 5735 struct scsi_vpd_unit_serial_number *serial_buf; 5736 struct cam_ed* device; 5737 5738 serial_buf = NULL; 5739 device = periph->path->device; 5740 device->serial_num = NULL; 5741 device->serial_num_len = 0; 5742 5743 if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0) 5744 serial_buf = (struct scsi_vpd_unit_serial_number *) 5745 malloc(sizeof(*serial_buf), M_TEMP, 5746 M_NOWAIT | M_ZERO); 5747 5748 if (serial_buf != NULL) { 5749 scsi_inquiry(csio, 5750 /*retries*/4, 5751 probedone, 5752 MSG_SIMPLE_Q_TAG, 5753 (u_int8_t *)serial_buf, 5754 sizeof(*serial_buf), 5755 /*evpd*/TRUE, 5756 SVPD_UNIT_SERIAL_NUMBER, 5757 SSD_MIN_SIZE, 5758 /*timeout*/60 * 1000); 5759 break; 5760 } 5761 /* 5762 * We'll have to do without, let our probedone 5763 * routine finish up for us. 
5764 */ 5765 start_ccb->csio.data_ptr = NULL; 5766 probedone(periph, start_ccb); 5767 return; 5768 } 5769 } 5770 xpt_action(start_ccb); 5771} 5772 5773static void 5774proberequestdefaultnegotiation(struct cam_periph *periph) 5775{ 5776 struct ccb_trans_settings cts; 5777 5778 xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1); 5779 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; 5780#ifdef CAM_NEW_TRAN_CODE 5781 cts.type = CTS_TYPE_USER_SETTINGS; 5782#else /* CAM_NEW_TRAN_CODE */ 5783 cts.flags = CCB_TRANS_USER_SETTINGS; 5784#endif /* CAM_NEW_TRAN_CODE */ 5785 xpt_action((union ccb *)&cts); 5786 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; 5787#ifdef CAM_NEW_TRAN_CODE 5788 cts.type = CTS_TYPE_CURRENT_SETTINGS; 5789#else /* CAM_NEW_TRAN_CODE */ 5790 cts.flags &= ~CCB_TRANS_USER_SETTINGS; 5791 cts.flags |= CCB_TRANS_CURRENT_SETTINGS; 5792#endif /* CAM_NEW_TRAN_CODE */ 5793 xpt_action((union ccb *)&cts); 5794} 5795 5796static void 5797probedone(struct cam_periph *periph, union ccb *done_ccb) 5798{ 5799 probe_softc *softc; 5800 struct cam_path *path; 5801 u_int32_t priority; 5802 5803 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n")); 5804 5805 softc = (probe_softc *)periph->softc; 5806 path = done_ccb->ccb_h.path; 5807 priority = done_ccb->ccb_h.pinfo.priority; 5808 5809 switch (softc->action) { 5810 case PROBE_TUR: 5811 { 5812 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 5813 5814 if (cam_periph_error(done_ccb, 0, 5815 SF_NO_PRINT, NULL) == ERESTART) 5816 return; 5817 else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 5818 /* Don't wedge the queue */ 5819 xpt_release_devq(done_ccb->ccb_h.path, 5820 /*count*/1, 5821 /*run_queue*/TRUE); 5822 } 5823 softc->action = PROBE_INQUIRY; 5824 xpt_release_ccb(done_ccb); 5825 xpt_schedule(periph, priority); 5826 return; 5827 } 5828 case PROBE_INQUIRY: 5829 case PROBE_FULL_INQUIRY: 5830 { 5831 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 5832 struct scsi_inquiry_data *inq_buf; 5833 u_int8_t periph_qual; 5834 5835 path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID; 5836 inq_buf = &path->device->inq_data; 5837 5838 periph_qual = SID_QUAL(inq_buf); 5839 5840 switch(periph_qual) { 5841 case SID_QUAL_LU_CONNECTED: 5842 { 5843 u_int8_t len; 5844 5845 /* 5846 * We conservatively request only 5847 * SHORT_INQUIRY_LEN bytes of inquiry 5848 * information during our first try 5849 * at sending an INQUIRY. If the device 5850 * has more information to give, 5851 * perform a second request specifying 5852 * the amount of information the device 5853 * is willing to give. 5854 */ 5855 len = inq_buf->additional_length 5856 + offsetof(struct scsi_inquiry_data, 5857 additional_length) + 1; 5858 if (softc->action == PROBE_INQUIRY 5859 && len > SHORT_INQUIRY_LENGTH) { 5860 softc->action = PROBE_FULL_INQUIRY; 5861 xpt_release_ccb(done_ccb); 5862 xpt_schedule(periph, priority); 5863 return; 5864 } 5865 5866 xpt_find_quirk(path->device); 5867 5868#ifdef CAM_NEW_TRAN_CODE 5869 xpt_devise_transport(path); 5870#endif /* CAM_NEW_TRAN_CODE */ 5871 if ((inq_buf->flags & SID_CmdQue) != 0) 5872 softc->action = PROBE_MODE_SENSE; 5873 else 5874 softc->action = PROBE_SERIAL_NUM; 5875 5876 path->device->flags &= ~CAM_DEV_UNCONFIGURED; 5877 5878 xpt_release_ccb(done_ccb); 5879 xpt_schedule(periph, priority); 5880 return; 5881 } 5882 default: 5883 break; 5884 } 5885 } else if (cam_periph_error(done_ccb, 0, 5886 done_ccb->ccb_h.target_lun > 0 5887 ? 
SF_RETRY_UA|SF_QUIET_IR 5888 : SF_RETRY_UA, 5889 &softc->saved_ccb) == ERESTART) { 5890 return; 5891 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5892 /* Don't wedge the queue */ 5893 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, 5894 /*run_queue*/TRUE); 5895 } 5896 /* 5897 * If we get to this point, we got an error status back 5898 * from the inquiry and the error status doesn't require 5899 * automatically retrying the command. Therefore, the 5900 * inquiry failed. If we had inquiry information before 5901 * for this device, but this latest inquiry command failed, 5902 * the device has probably gone away. If this device isn't 5903 * already marked unconfigured, notify the peripheral 5904 * drivers that this device is no more. 5905 */ 5906 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) 5907 /* Send the async notification. */ 5908 xpt_async(AC_LOST_DEVICE, path, NULL); 5909 5910 xpt_release_ccb(done_ccb); 5911 break; 5912 } 5913 case PROBE_MODE_SENSE: 5914 { 5915 struct ccb_scsiio *csio; 5916 struct scsi_mode_header_6 *mode_hdr; 5917 5918 csio = &done_ccb->csio; 5919 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr; 5920 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 5921 struct scsi_control_page *page; 5922 u_int8_t *offset; 5923 5924 offset = ((u_int8_t *)&mode_hdr[1]) 5925 + mode_hdr->blk_desc_len; 5926 page = (struct scsi_control_page *)offset; 5927 path->device->queue_flags = page->queue_flags; 5928 } else if (cam_periph_error(done_ccb, 0, 5929 SF_RETRY_UA|SF_NO_PRINT, 5930 &softc->saved_ccb) == ERESTART) { 5931 return; 5932 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5933 /* Don't wedge the queue */ 5934 xpt_release_devq(done_ccb->ccb_h.path, 5935 /*count*/1, /*run_queue*/TRUE); 5936 } 5937 xpt_release_ccb(done_ccb); 5938 free(mode_hdr, M_TEMP); 5939 softc->action = PROBE_SERIAL_NUM; 5940 xpt_schedule(periph, priority); 5941 return; 5942 } 5943 case PROBE_SERIAL_NUM: 5944 { 5945 struct ccb_scsiio *csio; 5946 struct scsi_vpd_unit_serial_number *serial_buf; 5947 u_int32_t priority; 5948 int changed; 5949 int have_serialnum; 5950 5951 changed = 1; 5952 have_serialnum = 0; 5953 csio = &done_ccb->csio; 5954 priority = done_ccb->ccb_h.pinfo.priority; 5955 serial_buf = 5956 (struct scsi_vpd_unit_serial_number *)csio->data_ptr; 5957 5958 /* Clean up from previous instance of this device */ 5959 if (path->device->serial_num != NULL) {
| 5125 xpt_release_target(bus, target); 5126 } else 5127 splx(s); 5128} 5129 5130static u_int32_t 5131xpt_dev_ccbq_resize(struct cam_path *path, int newopenings) 5132{ 5133 int s; 5134 int diff; 5135 int result; 5136 struct cam_ed *dev; 5137 5138 dev = path->device; 5139 s = splsoftcam(); 5140 5141 diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings); 5142 result = cam_ccbq_resize(&dev->ccbq, newopenings); 5143 if (result == CAM_REQ_CMP && (diff < 0)) { 5144 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED; 5145 } 5146 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 5147 || (dev->inq_flags & SID_CmdQue) != 0) 5148 dev->tag_saved_openings = newopenings; 5149 /* Adjust the global limit */ 5150 xpt_max_ccbs += diff; 5151 splx(s); 5152 return (result); 5153} 5154 5155static struct cam_eb * 5156xpt_find_bus(path_id_t path_id) 5157{ 5158 struct cam_eb *bus; 5159 5160 for (bus = TAILQ_FIRST(&xpt_busses); 5161 bus != NULL; 5162 bus = TAILQ_NEXT(bus, links)) { 5163 if (bus->path_id == path_id) { 5164 bus->refcount++; 5165 break; 5166 } 5167 } 5168 return (bus); 5169} 5170 5171static struct cam_et * 5172xpt_find_target(struct cam_eb *bus, target_id_t target_id) 5173{ 5174 struct cam_et *target; 5175 5176 for (target = TAILQ_FIRST(&bus->et_entries); 5177 target != NULL; 5178 target = TAILQ_NEXT(target, links)) { 5179 if (target->target_id == target_id) { 5180 target->refcount++; 5181 break; 5182 } 5183 } 5184 return (target); 5185} 5186 5187static struct cam_ed * 5188xpt_find_device(struct cam_et *target, lun_id_t lun_id) 5189{ 5190 struct cam_ed *device; 5191 5192 for (device = TAILQ_FIRST(&target->ed_entries); 5193 device != NULL; 5194 device = TAILQ_NEXT(device, links)) { 5195 if (device->lun_id == lun_id) { 5196 device->refcount++; 5197 break; 5198 } 5199 } 5200 return (device); 5201} 5202 5203typedef struct { 5204 union ccb *request_ccb; 5205 struct ccb_pathinq *cpi; 5206 int pending_count; 5207} xpt_scan_bus_info; 5208 5209/* 5210 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb. 5211 * As the scan progresses, xpt_scan_bus is used as the 5212 * callback on completion function. 5213 */ 5214static void 5215xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb) 5216{ 5217 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, 5218 ("xpt_scan_bus\n")); 5219 switch (request_ccb->ccb_h.func_code) { 5220 case XPT_SCAN_BUS: 5221 { 5222 xpt_scan_bus_info *scan_info; 5223 union ccb *work_ccb; 5224 struct cam_path *path; 5225 u_int i; 5226 u_int max_target; 5227 u_int initiator_id; 5228 5229 /* Find out the characteristics of the bus */ 5230 work_ccb = xpt_alloc_ccb(); 5231 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path, 5232 request_ccb->ccb_h.pinfo.priority); 5233 work_ccb->ccb_h.func_code = XPT_PATH_INQ; 5234 xpt_action(work_ccb); 5235 if (work_ccb->ccb_h.status != CAM_REQ_CMP) { 5236 request_ccb->ccb_h.status = work_ccb->ccb_h.status; 5237 xpt_free_ccb(work_ccb); 5238 xpt_done(request_ccb); 5239 return; 5240 } 5241 5242 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) { 5243 /* 5244 * Can't scan the bus on an adapter that 5245 * cannot perform the initiator role. 
5246 */ 5247 request_ccb->ccb_h.status = CAM_REQ_CMP; 5248 xpt_free_ccb(work_ccb); 5249 xpt_done(request_ccb); 5250 return; 5251 } 5252 5253 /* Save some state for use while we probe for devices */ 5254 scan_info = (xpt_scan_bus_info *) 5255 malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_WAITOK); 5256 scan_info->request_ccb = request_ccb; 5257 scan_info->cpi = &work_ccb->cpi; 5258 5259 /* Cache on our stack so we can work asynchronously */ 5260 max_target = scan_info->cpi->max_target; 5261 initiator_id = scan_info->cpi->initiator_id; 5262 5263 /* 5264 * Don't count the initiator if the 5265 * initiator is addressable. 5266 */ 5267 scan_info->pending_count = max_target + 1; 5268 if (initiator_id <= max_target) 5269 scan_info->pending_count--; 5270 5271 for (i = 0; i <= max_target; i++) { 5272 cam_status status; 5273 if (i == initiator_id) 5274 continue; 5275 5276 status = xpt_create_path(&path, xpt_periph, 5277 request_ccb->ccb_h.path_id, 5278 i, 0); 5279 if (status != CAM_REQ_CMP) { 5280 printf("xpt_scan_bus: xpt_create_path failed" 5281 " with status %#x, bus scan halted\n", 5282 status); 5283 break; 5284 } 5285 work_ccb = xpt_alloc_ccb(); 5286 xpt_setup_ccb(&work_ccb->ccb_h, path, 5287 request_ccb->ccb_h.pinfo.priority); 5288 work_ccb->ccb_h.func_code = XPT_SCAN_LUN; 5289 work_ccb->ccb_h.cbfcnp = xpt_scan_bus; 5290 work_ccb->ccb_h.ppriv_ptr0 = scan_info; 5291 work_ccb->crcn.flags = request_ccb->crcn.flags; 5292 xpt_action(work_ccb); 5293 } 5294 break; 5295 } 5296 case XPT_SCAN_LUN: 5297 { 5298 xpt_scan_bus_info *scan_info; 5299 path_id_t path_id; 5300 target_id_t target_id; 5301 lun_id_t lun_id; 5302 5303 /* Reuse the same CCB to query if a device was really found */ 5304 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0; 5305 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path, 5306 request_ccb->ccb_h.pinfo.priority); 5307 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE; 5308 5309 path_id = request_ccb->ccb_h.path_id; 5310 target_id = request_ccb->ccb_h.target_id; 5311 lun_id = request_ccb->ccb_h.target_lun; 5312 xpt_action(request_ccb); 5313 5314 if (request_ccb->ccb_h.status != CAM_REQ_CMP) { 5315 struct cam_ed *device; 5316 struct cam_et *target; 5317 int s, phl; 5318 5319 /* 5320 * If we already probed lun 0 successfully, or 5321 * we have additional configured luns on this 5322 * target that might have "gone away", go onto 5323 * the next lun. 5324 */ 5325 target = request_ccb->ccb_h.path->target; 5326 /* 5327 * We may touch devices that we don't 5328 * hold references too, so ensure they 5329 * don't disappear out from under us. 5330 * The target above is referenced by the 5331 * path in the request ccb. 
5332 */ 5333 phl = 0; 5334 s = splcam(); 5335 device = TAILQ_FIRST(&target->ed_entries); 5336 if (device != NULL) { 5337 phl = CAN_SRCH_HI(device); 5338 if (device->lun_id == 0) 5339 device = TAILQ_NEXT(device, links); 5340 } 5341 splx(s); 5342 if ((lun_id != 0) || (device != NULL)) { 5343 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl) 5344 lun_id++; 5345 } 5346 } else { 5347 struct cam_ed *device; 5348 5349 device = request_ccb->ccb_h.path->device; 5350 5351 if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) { 5352 /* Try the next lun */ 5353 if (lun_id < (CAM_SCSI2_MAXLUN-1) 5354 || CAN_SRCH_HI(device)) 5355 lun_id++; 5356 } 5357 } 5358 5359 xpt_free_path(request_ccb->ccb_h.path); 5360 5361 /* Check Bounds */ 5362 if ((lun_id == request_ccb->ccb_h.target_lun) 5363 || lun_id > scan_info->cpi->max_lun) { 5364 /* We're done */ 5365 5366 xpt_free_ccb(request_ccb); 5367 scan_info->pending_count--; 5368 if (scan_info->pending_count == 0) { 5369 xpt_free_ccb((union ccb *)scan_info->cpi); 5370 request_ccb = scan_info->request_ccb; 5371 free(scan_info, M_TEMP); 5372 request_ccb->ccb_h.status = CAM_REQ_CMP; 5373 xpt_done(request_ccb); 5374 } 5375 } else { 5376 /* Try the next device */ 5377 struct cam_path *path; 5378 cam_status status; 5379 5380 status = xpt_create_path(&path, xpt_periph, 5381 path_id, target_id, lun_id); 5382 if (status != CAM_REQ_CMP) { 5383 printf("xpt_scan_bus: xpt_create_path failed " 5384 "with status %#x, halting LUN scan\n", 5385 status); 5386 xpt_free_ccb(request_ccb); 5387 scan_info->pending_count--; 5388 if (scan_info->pending_count == 0) { 5389 xpt_free_ccb( 5390 (union ccb *)scan_info->cpi); 5391 request_ccb = scan_info->request_ccb; 5392 free(scan_info, M_TEMP); 5393 request_ccb->ccb_h.status = CAM_REQ_CMP; 5394 xpt_done(request_ccb); 5395 } 5396 break; 5397 } 5398 xpt_setup_ccb(&request_ccb->ccb_h, path, 5399 request_ccb->ccb_h.pinfo.priority); 5400 request_ccb->ccb_h.func_code = XPT_SCAN_LUN; 5401 request_ccb->ccb_h.cbfcnp = xpt_scan_bus; 5402 request_ccb->ccb_h.ppriv_ptr0 = scan_info; 5403 request_ccb->crcn.flags = 5404 scan_info->request_ccb->crcn.flags; 5405 xpt_action(request_ccb); 5406 } 5407 break; 5408 } 5409 default: 5410 break; 5411 } 5412} 5413 5414typedef enum { 5415 PROBE_TUR, 5416 PROBE_INQUIRY, 5417 PROBE_FULL_INQUIRY, 5418 PROBE_MODE_SENSE, 5419 PROBE_SERIAL_NUM, 5420 PROBE_TUR_FOR_NEGOTIATION 5421} probe_action; 5422 5423typedef enum { 5424 PROBE_INQUIRY_CKSUM = 0x01, 5425 PROBE_SERIAL_CKSUM = 0x02, 5426 PROBE_NO_ANNOUNCE = 0x04 5427} probe_flags; 5428 5429typedef struct { 5430 TAILQ_HEAD(, ccb_hdr) request_ccbs; 5431 probe_action action; 5432 union ccb saved_ccb; 5433 probe_flags flags; 5434 MD5_CTX context; 5435 u_int8_t digest[16]; 5436} probe_softc; 5437 5438static void 5439xpt_scan_lun(struct cam_periph *periph, struct cam_path *path, 5440 cam_flags flags, union ccb *request_ccb) 5441{ 5442 struct ccb_pathinq cpi; 5443 cam_status status; 5444 struct cam_path *new_path; 5445 struct cam_periph *old_periph; 5446 int s; 5447 5448 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, 5449 ("xpt_scan_lun\n")); 5450 5451 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1); 5452 cpi.ccb_h.func_code = XPT_PATH_INQ; 5453 xpt_action((union ccb *)&cpi); 5454 5455 if (cpi.ccb_h.status != CAM_REQ_CMP) { 5456 if (request_ccb != NULL) { 5457 request_ccb->ccb_h.status = cpi.ccb_h.status; 5458 xpt_done(request_ccb); 5459 } 5460 return; 5461 } 5462 5463 if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) { 5464 /* 5465 * Can't scan the bus on an adapter that 5466 * cannot perform 
the initiator role. 5467 */ 5468 if (request_ccb != NULL) { 5469 request_ccb->ccb_h.status = CAM_REQ_CMP; 5470 xpt_done(request_ccb); 5471 } 5472 return; 5473 } 5474 5475 if (request_ccb == NULL) { 5476 request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT); 5477 if (request_ccb == NULL) { 5478 xpt_print_path(path); 5479 printf("xpt_scan_lun: can't allocate CCB, can't " 5480 "continue\n"); 5481 return; 5482 } 5483 new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT); 5484 if (new_path == NULL) { 5485 xpt_print_path(path); 5486 printf("xpt_scan_lun: can't allocate path, can't " 5487 "continue\n"); 5488 free(request_ccb, M_TEMP); 5489 return; 5490 } 5491 status = xpt_compile_path(new_path, xpt_periph, 5492 path->bus->path_id, 5493 path->target->target_id, 5494 path->device->lun_id); 5495 5496 if (status != CAM_REQ_CMP) { 5497 xpt_print_path(path); 5498 printf("xpt_scan_lun: can't compile path, can't " 5499 "continue\n"); 5500 free(request_ccb, M_TEMP); 5501 free(new_path, M_TEMP); 5502 return; 5503 } 5504 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1); 5505 request_ccb->ccb_h.cbfcnp = xptscandone; 5506 request_ccb->ccb_h.func_code = XPT_SCAN_LUN; 5507 request_ccb->crcn.flags = flags; 5508 } 5509 5510 s = splsoftcam(); 5511 if ((old_periph = cam_periph_find(path, "probe")) != NULL) { 5512 probe_softc *softc; 5513 5514 softc = (probe_softc *)old_periph->softc; 5515 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, 5516 periph_links.tqe); 5517 } else { 5518 status = cam_periph_alloc(proberegister, NULL, probecleanup, 5519 probestart, "probe", 5520 CAM_PERIPH_BIO, 5521 request_ccb->ccb_h.path, NULL, 0, 5522 request_ccb); 5523 5524 if (status != CAM_REQ_CMP) { 5525 xpt_print_path(path); 5526 printf("xpt_scan_lun: cam_alloc_periph returned an " 5527 "error, can't continue probe\n"); 5528 request_ccb->ccb_h.status = status; 5529 xpt_done(request_ccb); 5530 } 5531 } 5532 splx(s); 5533} 5534 5535static void 5536xptscandone(struct cam_periph *periph, union ccb *done_ccb) 5537{ 5538 xpt_release_path(done_ccb->ccb_h.path); 5539 free(done_ccb->ccb_h.path, M_TEMP); 5540 free(done_ccb, M_TEMP); 5541} 5542 5543static cam_status 5544proberegister(struct cam_periph *periph, void *arg) 5545{ 5546 union ccb *request_ccb; /* CCB representing the probe request */ 5547 probe_softc *softc; 5548 5549 request_ccb = (union ccb *)arg; 5550 if (periph == NULL) { 5551 printf("proberegister: periph was NULL!!\n"); 5552 return(CAM_REQ_CMP_ERR); 5553 } 5554 5555 if (request_ccb == NULL) { 5556 printf("proberegister: no probe CCB, " 5557 "can't register device\n"); 5558 return(CAM_REQ_CMP_ERR); 5559 } 5560 5561 softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT); 5562 5563 if (softc == NULL) { 5564 printf("proberegister: Unable to probe new device. " 5565 "Unable to allocate softc\n"); 5566 return(CAM_REQ_CMP_ERR); 5567 } 5568 TAILQ_INIT(&softc->request_ccbs); 5569 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, 5570 periph_links.tqe); 5571 softc->flags = 0; 5572 periph->softc = softc; 5573 cam_periph_acquire(periph); 5574 /* 5575 * Ensure we've waited at least a bus settle 5576 * delay before attempting to probe the device. 5577 * For HBAs that don't do bus resets, this won't make a difference. 
static void
xptscandone(struct cam_periph *periph, union ccb *done_ccb)
{
	xpt_release_path(done_ccb->ccb_h.path);
	free(done_ccb->ccb_h.path, M_TEMP);
	free(done_ccb, M_TEMP);
}

static cam_status
proberegister(struct cam_periph *periph, void *arg)
{
	union ccb *request_ccb;	/* CCB representing the probe request */
	probe_softc *softc;

	request_ccb = (union ccb *)arg;
	if (periph == NULL) {
		printf("proberegister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (request_ccb == NULL) {
		printf("proberegister: no probe CCB, "
		       "can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT);

	if (softc == NULL) {
		printf("proberegister: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}
	TAILQ_INIT(&softc->request_ccbs);
	TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
			  periph_links.tqe);
	softc->flags = 0;
	periph->softc = softc;
	cam_periph_acquire(periph);
	/*
	 * Ensure we've waited at least a bus settle delay before
	 * attempting to probe the device.  For HBAs that don't do bus
	 * resets, this won't make a difference.
	 */
	cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
				      scsi_delay);
	probeschedule(periph);
	return(CAM_REQ_CMP);
}

static void
probeschedule(struct cam_periph *periph)
{
	struct ccb_pathinq cpi;
	union ccb *ccb;
	probe_softc *softc;

	softc = (probe_softc *)periph->softc;
	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);

	xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	/*
	 * If a device has gone away and another device, or the same one,
	 * is back in the same place, it should have a unit attention
	 * condition pending.  It will not report the unit attention in
	 * response to an inquiry, which may leave invalid transfer
	 * negotiations in effect.  The TUR will reveal the unit attention
	 * condition.  Only send the TUR for lun 0, since some devices
	 * will get confused by commands other than inquiry to non-existent
	 * luns.  If you think a device has gone away, start your scan from
	 * lun 0.  This will ensure that any bogus transfer settings are
	 * invalidated.
	 *
	 * If we haven't seen the device before and the controller supports
	 * some kind of transfer negotiation, negotiate with the first
	 * sent command if no bus reset was performed at startup.  This
	 * ensures that the device is not confused by transfer negotiation
	 * settings left over by the loader or BIOS.
	 */
	if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
	 && (ccb->ccb_h.target_lun == 0)) {
		softc->action = PROBE_TUR;
	} else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
	      && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
		proberequestdefaultnegotiation(periph);
		softc->action = PROBE_INQUIRY;
	} else {
		softc->action = PROBE_INQUIRY;
	}

	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
		softc->flags |= PROBE_NO_ANNOUNCE;
	else
		softc->flags &= ~PROBE_NO_ANNOUNCE;

	xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
}
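/*
 * probestart() is the periph start routine: it translates
 * softc->action into the matching SCSI command and hands the CCB to
 * xpt_action().  For sizing the full inquiry below, note that the
 * total length of a device's inquiry data is its additional_length
 * field plus the five bytes up to and including that field; a device
 * reporting additional_length == 91, for example, yields
 * 91 + 4 + 1 == 96 bytes.  The only case that can skip xpt_action()
 * is PROBE_SERIAL_NUM, which completes directly through probedone()
 * when its buffer allocation fails.
 */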
static void
probestart(struct cam_periph *periph, union ccb *start_ccb)
{
	/* Probe the device that our peripheral driver points to */
	struct ccb_scsiio *csio;
	probe_softc *softc;

	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));

	softc = (probe_softc *)periph->softc;
	csio = &start_ccb->csio;

	switch (softc->action) {
	case PROBE_TUR:
	case PROBE_TUR_FOR_NEGOTIATION:
	{
		scsi_test_unit_ready(csio,
				     /*retries*/4,
				     probedone,
				     MSG_SIMPLE_Q_TAG,
				     SSD_FULL_SIZE,
				     /*timeout*/60000);
		break;
	}
	case PROBE_INQUIRY:
	case PROBE_FULL_INQUIRY:
	{
		u_int inquiry_len;
		struct scsi_inquiry_data *inq_buf;

		inq_buf = &periph->path->device->inq_data;
		/*
		 * If the device is currently configured, we calculate an
		 * MD5 checksum of the inquiry data, and if the serial number
		 * length is greater than 0, add the serial number data
		 * into the checksum as well.  Once the inquiry and the
		 * serial number check finish, we attempt to figure out
		 * whether we still have the same device.
		 */
		if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
			MD5Init(&softc->context);
			MD5Update(&softc->context, (unsigned char *)inq_buf,
				  sizeof(struct scsi_inquiry_data));
			softc->flags |= PROBE_INQUIRY_CKSUM;
			if (periph->path->device->serial_num_len > 0) {
				MD5Update(&softc->context,
					  periph->path->device->serial_num,
					  periph->path->device->serial_num_len);
				softc->flags |= PROBE_SERIAL_CKSUM;
			}
			MD5Final(softc->digest, &softc->context);
		}

		if (softc->action == PROBE_INQUIRY)
			inquiry_len = SHORT_INQUIRY_LENGTH;
		else
			inquiry_len = inq_buf->additional_length
				    + offsetof(struct scsi_inquiry_data,
					       additional_length) + 1;

		/*
		 * Some parallel SCSI devices fail to send an
		 * ignore wide residue message when dealing with
		 * odd length inquiry requests.  Round up to be
		 * safe.
		 */
		inquiry_len = roundup2(inquiry_len, 2);

		scsi_inquiry(csio,
			     /*retries*/4,
			     probedone,
			     MSG_SIMPLE_Q_TAG,
			     (u_int8_t *)inq_buf,
			     inquiry_len,
			     /*evpd*/FALSE,
			     /*page_code*/0,
			     SSD_MIN_SIZE,
			     /*timeout*/60 * 1000);
		break;
	}
	case PROBE_MODE_SENSE:
	{
		void *mode_buf;
		int mode_buf_len;

		mode_buf_len = sizeof(struct scsi_mode_header_6)
			     + sizeof(struct scsi_mode_blk_desc)
			     + sizeof(struct scsi_control_page);
		mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT);
		if (mode_buf != NULL) {
			scsi_mode_sense(csio,
					/*retries*/4,
					probedone,
					MSG_SIMPLE_Q_TAG,
					/*dbd*/FALSE,
					SMS_PAGE_CTRL_CURRENT,
					SMS_CONTROL_MODE_PAGE,
					mode_buf,
					mode_buf_len,
					SSD_FULL_SIZE,
					/*timeout*/60000);
			break;
		}
		xpt_print_path(periph->path);
		printf("Unable to mode sense control page - malloc failure\n");
		softc->action = PROBE_SERIAL_NUM;
	}
	/* FALLTHROUGH */
	case PROBE_SERIAL_NUM:
	{
		struct scsi_vpd_unit_serial_number *serial_buf;
		struct cam_ed *device;

		serial_buf = NULL;
		device = periph->path->device;
		device->serial_num = NULL;
		device->serial_num_len = 0;

		if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0)
			serial_buf = (struct scsi_vpd_unit_serial_number *)
				malloc(sizeof(*serial_buf), M_TEMP,
				       M_NOWAIT | M_ZERO);

		if (serial_buf != NULL) {
			scsi_inquiry(csio,
				     /*retries*/4,
				     probedone,
				     MSG_SIMPLE_Q_TAG,
				     (u_int8_t *)serial_buf,
				     sizeof(*serial_buf),
				     /*evpd*/TRUE,
				     SVPD_UNIT_SERIAL_NUMBER,
				     SSD_MIN_SIZE,
				     /*timeout*/60 * 1000);
			break;
		}
		/*
		 * We'll have to do without, let our probedone
		 * routine finish up for us.
		 */
		start_ccb->csio.data_ptr = NULL;
		probedone(periph, start_ccb);
		return;
	}
	}
	xpt_action(start_ccb);
}
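/*
 * Force the first command sent to the device to renegotiate transfer
 * parameters from a known baseline: read back the user (default)
 * transfer settings and install them as the current settings.
 */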
static void
proberequestdefaultnegotiation(struct cam_periph *periph)
{
	struct ccb_trans_settings cts;

	xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
#ifdef CAM_NEW_TRAN_CODE
	cts.type = CTS_TYPE_USER_SETTINGS;
#else /* CAM_NEW_TRAN_CODE */
	cts.flags = CCB_TRANS_USER_SETTINGS;
#endif /* CAM_NEW_TRAN_CODE */
	xpt_action((union ccb *)&cts);
	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
#ifdef CAM_NEW_TRAN_CODE
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
#else /* CAM_NEW_TRAN_CODE */
	cts.flags &= ~CCB_TRANS_USER_SETTINGS;
	cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
#endif /* CAM_NEW_TRAN_CODE */
	xpt_action((union ccb *)&cts);
}

static void
probedone(struct cam_periph *periph, union ccb *done_ccb)
{
	probe_softc *softc;
	struct cam_path *path;
	u_int32_t priority;

	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));

	softc = (probe_softc *)periph->softc;
	path = done_ccb->ccb_h.path;
	priority = done_ccb->ccb_h.pinfo.priority;

	switch (softc->action) {
	case PROBE_TUR:
	{
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {

			if (cam_periph_error(done_ccb, 0,
					     SF_NO_PRINT, NULL) == ERESTART)
				return;
			else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				/* Don't wedge the queue */
				xpt_release_devq(done_ccb->ccb_h.path,
						 /*count*/1,
						 /*run_queue*/TRUE);
		}
		softc->action = PROBE_INQUIRY;
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		return;
	}
	case PROBE_INQUIRY:
	case PROBE_FULL_INQUIRY:
	{
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			struct scsi_inquiry_data *inq_buf;
			u_int8_t periph_qual;

			path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
			inq_buf = &path->device->inq_data;

			periph_qual = SID_QUAL(inq_buf);

			switch (periph_qual) {
			case SID_QUAL_LU_CONNECTED:
			{
				u_int8_t len;

				/*
				 * We conservatively request only
				 * SHORT_INQUIRY_LENGTH bytes of inquiry
				 * information during our first try
				 * at sending an INQUIRY.  If the device
				 * has more information to give,
				 * perform a second request specifying
				 * the amount of information the device
				 * is willing to give.
				 */
				len = inq_buf->additional_length
				    + offsetof(struct scsi_inquiry_data,
					       additional_length) + 1;
				if (softc->action == PROBE_INQUIRY
				 && len > SHORT_INQUIRY_LENGTH) {
					softc->action = PROBE_FULL_INQUIRY;
					xpt_release_ccb(done_ccb);
					xpt_schedule(periph, priority);
					return;
				}

				xpt_find_quirk(path->device);

#ifdef CAM_NEW_TRAN_CODE
				xpt_devise_transport(path);
#endif /* CAM_NEW_TRAN_CODE */
				if ((inq_buf->flags & SID_CmdQue) != 0)
					softc->action = PROBE_MODE_SENSE;
				else
					softc->action = PROBE_SERIAL_NUM;

				path->device->flags &= ~CAM_DEV_UNCONFIGURED;

				xpt_release_ccb(done_ccb);
				xpt_schedule(periph, priority);
				return;
			}
			default:
				break;
			}
		} else if (cam_periph_error(done_ccb, 0,
					    done_ccb->ccb_h.target_lun > 0
					    ? SF_RETRY_UA|SF_QUIET_IR
					    : SF_RETRY_UA,
					    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}
		/*
		 * If we get to this point, we got an error status back
		 * from the inquiry and the error status doesn't require
		 * automatically retrying the command.  Therefore, the
		 * inquiry failed.  If we had inquiry information before
		 * for this device, but this latest inquiry command failed,
		 * the device has probably gone away.  If this device isn't
		 * already marked unconfigured, notify the peripheral
		 * drivers that this device is no more.
		 */
		if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
			/* Send the async notification. */
			xpt_async(AC_LOST_DEVICE, path, NULL);

		xpt_release_ccb(done_ccb);
		break;
	}
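	/*
	 * The mode sense issued by probestart() fetched the current
	 * values of the control mode page; the only field we keep is
	 * queue_flags, which records whether the page disables tagged
	 * queuing (SCP_QUEUE_DQUE) for later transfer-setting checks.
	 */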
	case PROBE_MODE_SENSE:
	{
		struct ccb_scsiio *csio;
		struct scsi_mode_header_6 *mode_hdr;

		csio = &done_ccb->csio;
		mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			struct scsi_control_page *page;
			u_int8_t *offset;

			offset = ((u_int8_t *)&mode_hdr[1])
			    + mode_hdr->blk_desc_len;
			page = (struct scsi_control_page *)offset;
			path->device->queue_flags = page->queue_flags;
		} else if (cam_periph_error(done_ccb, 0,
					    SF_RETRY_UA|SF_NO_PRINT,
					    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path,
					 /*count*/1, /*run_queue*/TRUE);
		}
		xpt_release_ccb(done_ccb);
		free(mode_hdr, M_TEMP);
		softc->action = PROBE_SERIAL_NUM;
		xpt_schedule(periph, priority);
		return;
	}
	case PROBE_SERIAL_NUM:
	{
		struct ccb_scsiio *csio;
		struct scsi_vpd_unit_serial_number *serial_buf;
		u_int32_t priority;
		int changed;
		int have_serialnum;

		changed = 1;
		have_serialnum = 0;
		csio = &done_ccb->csio;
		priority = done_ccb->ccb_h.pinfo.priority;
		serial_buf =
		    (struct scsi_vpd_unit_serial_number *)csio->data_ptr;

		/* Clean up from previous instance of this device */
		if (path->device->serial_num != NULL) {