cam_xpt.c revision 45963
1/* 2 * Implementation of the Common Access Method Transport (XPT) layer. 3 * 4 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs. 5 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions, and the following disclaimer, 13 * without modification, immediately at the beginning of the file. 14 * 2. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 
 *
 * $Id: cam_xpt.c,v 1.53 1999/04/21 07:26:24 peter Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
#include <sys/devicestat.h>
#include <sys/interrupt.h>

#ifdef PC98
#include <pc98/pc98/pc98_machdep.h>	/* geometry translation */
#endif

#include <machine/clock.h>
#include <machine/ipl.h>

#include <cam/cam.h>
#include <cam/cam_conf.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>
#include "opt_cam.h"
#include "opt_scsi.h"

/* Datastructures internal to the xpt layer */

/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
	SLIST_ENTRY(async_node)	links;
	u_int32_t	event_enable;	/* Async Event enables */
	void		(*callback)(void *arg, u_int32_t code,
				    struct cam_path *path, void *args);
	void		*callback_arg;
};

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
/* Queue of CCBs waiting for a "high power" opening (see CAM_MAX_HIGHPOWER) */
static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif

/*
 * This is the number of seconds we wait for devices to settle after a SCSI
 * bus reset.
 */
#ifndef SCSI_DELAY
#define SCSI_DELAY 2000
#endif
/*
 * If someone sets this to 0, we assume that they want the minimum
 * allowable bus settle delay.  All devices need _some_ sort of bus settle
 * delay, so we'll set it to a minimum value of 100ms.
 */
#if (SCSI_DELAY == 0)
#undef SCSI_DELAY
#define SCSI_DELAY 100
#endif

/*
 * Make sure the user isn't using seconds instead of milliseconds.
 */
#if (SCSI_DELAY < 100)
#error "SCSI_DELAY is in milliseconds, not seconds!  Please use a larger value"
#endif

/* number of high powered commands that can go through right now */
static int num_highpower = CAM_MAX_HIGHPOWER;

/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
	cam_pinfo pinfo;
	struct	  cam_ed *device;
};

/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on the bus.
 */
struct cam_ed {
	TAILQ_ENTRY(cam_ed) links;
	struct	cam_ed_qinfo alloc_ccb_entry;
	struct	cam_ed_qinfo send_ccb_entry;
	struct	cam_et	 *target;
	lun_id_t	 lun_id;
	struct	camq drvq;		/*
					 * Queue of type drivers wanting to do
					 * work on this device.
					 */
	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
	struct	async_list asyncs;	/* Async callback info for this B/T/L */
	struct	periph_list periphs;	/* All attached devices */
	u_int	generation;		/* Generation number */
	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
	struct	xpt_quirk_entry *quirk;	/* Oddities about this device */
					/* Storage for the inquiry data */
	struct	scsi_inquiry_data inq_data;
	u_int8_t	 inq_flags;	/*
					 * Current settings for inquiry flags.
					 * This allows us to override settings
					 * like disconnection and tagged
					 * queuing for a device.
					 */
	u_int8_t	 queue_flags;	/* Queue flags from the control page */
	u_int8_t	 *serial_num;
	u_int8_t	 serial_num_len;
	u_int32_t	 qfrozen_cnt;
	u_int32_t	 flags;
#define CAM_DEV_UNCONFIGURED	 	0x01
#define CAM_DEV_REL_TIMEOUT_PENDING	0x02
#define CAM_DEV_REL_ON_COMPLETE		0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
#define CAM_DEV_TAG_AFTER_COUNT		0x20
	u_int32_t	 tag_delay_count;
#define	CAM_TAG_DELAY_COUNT		5
	u_int32_t	 refcount;
	struct		 callout_handle c_handle;
};

/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
	TAILQ_HEAD(, cam_ed) ed_entries;
	TAILQ_ENTRY(cam_et) links;
	struct	cam_eb	*bus;
	target_id_t	target_id;
	u_int32_t	refcount;
	u_int		generation;
};

/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
	TAILQ_HEAD(, cam_et) et_entries;
	TAILQ_ENTRY(cam_eb)  links;
	path_id_t	     path_id;
	struct cam_sim	     *sim;
	u_int32_t	     flags;
#define	CAM_EB_RUNQ_SCHEDULED	0x01
	u_int32_t	     refcount;
	u_int		     generation;
};

struct cam_path {
	struct cam_periph *periph;
	struct cam_eb	  *bus;
	struct cam_et	  *target;
	struct cam_ed	  *device;
};

struct xpt_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define	CAM_QUIRK_NOLUNS	0x01
#define	CAM_QUIRK_NOSERIAL	0x02
	u_int8_t mintags;
	u_int8_t maxtags;
};

typedef enum {
	XPT_FLAG_OPEN		= 0x01
} xpt_flags;

struct xpt_softc {
	xpt_flags	flags;
	u_int32_t	generation;
#ifdef DEVFS
	void		*xpt_devfs_token;
	void		*ctl_devfs_token;
#endif
};

/* Shared vendor-id strings for the quirk table below */
static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";

static struct xpt_quirk_entry xpt_quirk_table[] =
{
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "MICROP", "3391*", "x43h" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Unfortunately, the Quantum Atlas III has the same
		 * problem as the Atlas II drives above.
		 * Reported by: "Johan Granlund" <johan@granlund.nu>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM39100TD-SW N1B0
		 *
		 * It's possible that Quantum will fix the problem in later
		 * firmware revisions.  If that happens, the quirk entry
		 * will need to be made specific to the firmware revisions
		 * with the problem.
		 *
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * 18 Gig Atlas III, same problem as the 9G version.
		 * Reported by: Andre Albsmeier
		 *		<andre.albsmeier@mchp.siemens.de>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM318000TD-S N491
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * Broken tagged queuing drive
		 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
		 *         and: Martin Renters <martin@tdc.on.ca>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
		/*
		 * The Seagate Medalist Pro drives have very poor write
		 * performance with anything more than 2 tags.
		 *
		 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
		 * Drive:  <SEAGATE ST36530N 1444>
		 *
		 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
		 * Drive:  <SEAGATE ST34520W 1281>
		 *
		 * No one has actually reported that the 9G version
		 * (ST39140*) of the Medalist Pro has the same problem, but
		 * we're assuming that it does because the 4G and 6.5G
		 * versions of the drive are broken.
		 */
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  Write performance
		 * steadily drops off with more and more concurrent
		 * transactions.  Best sequential write performance with
		 * tagged queueing turned off and write caching turned on.
		 *
		 * PR:  kern/10398
		 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
		 * Drive:  DCAS-34330 w/ "S65A" firmware.
		 *
		 * The drive with the problem had the "S65A" firmware
		 * revision, and has also been reported (by Stephen J.
		 * Roznowski <sjr@home.net>) for a drive with the "S61A"
		 * firmware revision.
		 *
		 * Although no one has reported problems with the 2 gig
		 * version of the DCAS drive, the assumption is that it
		 * has the same problems as the 4 gig version.  Therefore
		 * this quirk entry disables tagged queueing for all
		 * DCAS drives.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Broken tagged queuing drive.
		 * Submitted by:
		 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
		 * in PR kern/9535
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Doesn't handle queue full condition correctly,
		 * so we need to limit maxtags to what the device
		 * can handle instead of determining this automatically.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
		/*quirks*/0, /*mintags*/2, /*maxtags*/32
	},
	{
		/* Really only one LUN */
		{
			T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* I can't believe we need a quirk for DPT volumes. */
		{
			T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE,
			"DPT", "*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
		/*mintags*/0, /*maxtags*/255
	},
	{
		/*
		 * Many Sony CDROM drives don't like multi-LUN probing.
		 */
		{
			T_CDROM, SIP_MEDIA_REMOVABLE, sony,
			"CD-ROM CDU*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * This drive doesn't like multiple LUN probing.
		 * Submitted by:  Parag Patel <parag@cgt.com>
		 */
		{
			T_WORM, SIP_MEDIA_REMOVABLE, sony,
			"CD-R   CDU9*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * The 8200 doesn't like multi-lun probing, and probably
		 * don't like serial number requests either.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"EXB-8200*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * This old revision of the TDC3600 is also SCSI-1, and
		 * hangs upon serial number probing.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
			" TDC 3600", "U07:"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Default tagged queuing parameters for all devices */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0, /*mintags*/2, /*maxtags*/255
	},
};

typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

/* Per-level EDT traversal callback types */
typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
static cam_isrq_t cam_bioq;
static cam_isrq_t cam_netq;

/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
static SLIST_HEAD(,ccb_hdr) ccb_freeq;
static u_int xpt_max_ccbs;	/*
				 * Maximum size of ccb pool.  Modified as
				 * devices are added/removed or have their
				 * opening counts changed.
				 */
static u_int xpt_ccb_count;	/* Current count of allocated ccbs */

static struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
	probe_periph_init, "probe",
	TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

DATA_SET(periphdriver_set, xpt_driver);
DATA_SET(periphdriver_set, probe_driver);

#define XPT_CDEV_MAJOR 104

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct cdevsw xpt_cdevsw =
{
	/*d_open*/	xptopen,
	/*d_close*/	xptclose,
	/*d_read*/	noread,
	/*d_write*/	nowrite,
	/*d_ioctl*/	xptioctl,
	/*d_stop*/	nostop,
	/*d_reset*/	noreset,
	/*d_devtotty*/	nodevtotty,
	/*d_poll*/	NULL,
	/*d_mmap*/	nommap,
	/*d_strategy*/	nostrategy,
	/*d_name*/	"xpt",
	/*d_spare*/	NULL,
	/*d_maj*/	-1,
	/*d_dump*/	nodump,
	/*d_psize*/	nopsize,
	/*d_flags*/	0,
	/*d_maxio*/	0,
	/*b_maj*/	-1
};

static struct intr_config_hook *xpt_config_hook;

/* Registered busses */
static TAILQ_HEAD(,cam_eb) xpt_busses;
static u_int bus_generation;

/* Storage for debugging datastructures */
#ifdef CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
#endif

#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */

/* Our boot-time initialization hook */
static void	xpt_init(void *);
SYSINIT(cam, SI_SUB_CONFIGURE, SI_ORDER_SECOND, xpt_init, NULL);

static cam_status	xpt_compile_path(struct cam_path *new_path,
					 struct cam_periph *perph,
					 path_id_t path_id,
					 target_id_t target_id,
					 lun_id_t lun_id);

static void	 xpt_release_path(struct cam_path *path);

static void	 xpt_async_bcast(struct async_list *async_head,
				 u_int32_t async_code,
				 struct cam_path *path,
				 void *async_arg);
static int	 xptnextfreebus(path_id_t startbus);
static int	 xptpathid(const char *sim_name, int sim_unit, int sim_bus,
			   path_id_t *nextpath);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				  u_int32_t new_priority);
static void	 xpt_run_dev_allocq(struct cam_eb *bus);
static void	 xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static timeout_t xpt_release_simq_timeout;
static void	 xpt_release_bus(struct cam_eb *bus);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
		 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
				  lun_id_t lun_id);
static void	 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
				    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void	 xpt_scan_lun(struct cam_periph *periph,
			      struct cam_path *path, cam_flags flags,
			      union ccb *ccb);
static void	 xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t	xptconfigbuscountfunc;
static xpt_busfunc_t	xptconfigfunc;
static void	 xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static swihand_t swi_camnet;
static swihand_t swi_cambio;
static void	 camisr(cam_isrq_t *queue);
#if 0
static void	 xptstart(struct cam_periph *periph, union ccb *work_ccb);
static void	 xptasync(struct cam_periph *periph,
			  u_int32_t code, cam_path *path);
#endif
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       int num_patterns, struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static int		xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
#ifdef notusedyet
static int		xpt_for_all_targets(xpt_targetfunc_t *tr_func,
					    void *arg);
#endif
static int		xpt_for_all_devices(xpt_devicefunc_t *tr_func,
					    void *arg);
#ifdef notusedyet
static int		xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
					    void *arg);
#endif
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static cam_status	proberegister(struct cam_periph *periph,
				      void *arg);
static void	 probeschedule(struct cam_periph *probe_periph);
static void	 probestart(struct cam_periph *periph, union ccb *start_ccb);
static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
static void	 probecleanup(struct cam_periph *periph);
static void	 xpt_find_quirk(struct cam_ed *device);
static void	 xpt_set_transfer_settings(struct ccb_trans_settings *cts,
					   struct cam_ed *device,
					   int async_update);
static void	 xpt_toggle_tags(struct cam_path
*path); 734static void xpt_start_tags(struct cam_path *path); 735static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus, 736 struct cam_ed *dev); 737static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus, 738 struct cam_ed *dev); 739static __inline int periph_is_queued(struct cam_periph *periph); 740static __inline int device_is_alloc_queued(struct cam_ed *device); 741static __inline int device_is_send_queued(struct cam_ed *device); 742static __inline int dev_allocq_is_runnable(struct cam_devq *devq); 743 744static __inline int 745xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev) 746{ 747 int retval; 748 749 if (dev->ccbq.devq_openings > 0) { 750 if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) { 751 cam_ccbq_resize(&dev->ccbq, 752 dev->ccbq.dev_openings 753 + dev->ccbq.dev_active); 754 dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED; 755 } 756 /* 757 * The priority of a device waiting for CCB resources 758 * is that of the the highest priority peripheral driver 759 * enqueued. 760 */ 761 retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue, 762 &dev->alloc_ccb_entry.pinfo, 763 CAMQ_GET_HEAD(&dev->drvq)->priority); 764 } else { 765 retval = 0; 766 } 767 768 return (retval); 769} 770 771static __inline int 772xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev) 773{ 774 int retval; 775 776 if (dev->ccbq.dev_openings > 0) { 777 /* 778 * The priority of a device waiting for controller 779 * resources is that of the the highest priority CCB 780 * enqueued. 
781 */ 782 retval = 783 xpt_schedule_dev(&bus->sim->devq->send_queue, 784 &dev->send_ccb_entry.pinfo, 785 CAMQ_GET_HEAD(&dev->ccbq.queue)->priority); 786 } else { 787 retval = 0; 788 } 789 return (retval); 790} 791 792static __inline int 793periph_is_queued(struct cam_periph *periph) 794{ 795 return (periph->pinfo.index != CAM_UNQUEUED_INDEX); 796} 797 798static __inline int 799device_is_alloc_queued(struct cam_ed *device) 800{ 801 return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX); 802} 803 804static __inline int 805device_is_send_queued(struct cam_ed *device) 806{ 807 return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX); 808} 809 810static __inline int 811dev_allocq_is_runnable(struct cam_devq *devq) 812{ 813 /* 814 * Have work to do. 815 * Have space to do more work. 816 * Allowed to do work. 817 */ 818 return ((devq->alloc_queue.qfrozen_cnt == 0) 819 && (devq->alloc_queue.entries > 0) 820 && (devq->alloc_openings > 0)); 821} 822 823static void 824xpt_periph_init() 825{ 826 dev_t dev; 827 828 dev = makedev(XPT_CDEV_MAJOR, 0); 829 cdevsw_add(&dev, &xpt_cdevsw, NULL); 830} 831 832static void 833probe_periph_init() 834{ 835} 836 837 838static void 839xptdone(struct cam_periph *periph, union ccb *done_ccb) 840{ 841 /* Caller will release the CCB */ 842 wakeup(&done_ccb->ccb_h.cbfcnp); 843} 844 845static int 846xptopen(dev_t dev, int flags, int fmt, struct proc *p) 847{ 848 int unit; 849 850 unit = minor(dev) & 0xff; 851 852 /* 853 * Only allow read-write access. 854 */ 855 if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0)) 856 return(EPERM); 857 858 /* 859 * We don't allow nonblocking access. 860 */ 861 if ((flags & O_NONBLOCK) != 0) { 862 printf("xpt%d: can't do nonblocking accesss\n", unit); 863 return(ENODEV); 864 } 865 866 /* 867 * We only have one transport layer right now. If someone accesses 868 * us via something other than minor number 1, point out their 869 * mistake. 
870 */ 871 if (unit != 0) { 872 printf("xptopen: got invalid xpt unit %d\n", unit); 873 return(ENXIO); 874 } 875 876 /* Mark ourselves open */ 877 xsoftc.flags |= XPT_FLAG_OPEN; 878 879 return(0); 880} 881 882static int 883xptclose(dev_t dev, int flag, int fmt, struct proc *p) 884{ 885 int unit; 886 887 unit = minor(dev) & 0xff; 888 889 /* 890 * We only have one transport layer right now. If someone accesses 891 * us via something other than minor number 1, point out their 892 * mistake. 893 */ 894 if (unit != 0) { 895 printf("xptclose: got invalid xpt unit %d\n", unit); 896 return(ENXIO); 897 } 898 899 /* Mark ourselves closed */ 900 xsoftc.flags &= ~XPT_FLAG_OPEN; 901 902 return(0); 903} 904 905static int 906xptioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p) 907{ 908 int unit, error; 909 910 error = 0; 911 unit = minor(dev) & 0xff; 912 913 /* 914 * We only have one transport layer right now. If someone accesses 915 * us via something other than minor number 1, point out their 916 * mistake. 917 */ 918 if (unit != 0) { 919 printf("xptioctl: got invalid xpt unit %d\n", unit); 920 return(ENXIO); 921 } 922 923 switch(cmd) { 924 /* 925 * For the transport layer CAMIOCOMMAND ioctl, we really only want 926 * to accept CCB types that don't quite make sense to send through a 927 * passthrough driver. 928 */ 929 case CAMIOCOMMAND: { 930 union ccb *ccb; 931 union ccb *inccb; 932 933 inccb = (union ccb *)addr; 934 935 switch(inccb->ccb_h.func_code) { 936 case XPT_SCAN_BUS: 937 case XPT_RESET_BUS: 938 if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD) 939 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) { 940 error = EINVAL; 941 break; 942 } 943 /* FALLTHROUGH */ 944 case XPT_SCAN_LUN: 945 case XPT_ENG_INQ: /* XXX not implemented yet */ 946 case XPT_ENG_EXEC: 947 948 ccb = xpt_alloc_ccb(); 949 950 /* 951 * Create a path using the bus, target, and lun the 952 * user passed in. 
953 */ 954 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, 955 inccb->ccb_h.path_id, 956 inccb->ccb_h.target_id, 957 inccb->ccb_h.target_lun) != 958 CAM_REQ_CMP){ 959 error = EINVAL; 960 xpt_free_ccb(ccb); 961 break; 962 } 963 /* Ensure all of our fields are correct */ 964 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 965 inccb->ccb_h.pinfo.priority); 966 xpt_merge_ccb(ccb, inccb); 967 ccb->ccb_h.cbfcnp = xptdone; 968 cam_periph_runccb(ccb, NULL, 0, 0, NULL); 969 bcopy(ccb, inccb, sizeof(union ccb)); 970 xpt_free_path(ccb->ccb_h.path); 971 xpt_free_ccb(ccb); 972 break; 973 974 case XPT_DEBUG: { 975 union ccb ccb; 976 977 /* 978 * This is an immediate CCB, so it's okay to 979 * allocate it on the stack. 980 */ 981 982 /* 983 * Create a path using the bus, target, and lun the 984 * user passed in. 985 */ 986 if (xpt_create_path(&ccb.ccb_h.path, xpt_periph, 987 inccb->ccb_h.path_id, 988 inccb->ccb_h.target_id, 989 inccb->ccb_h.target_lun) != 990 CAM_REQ_CMP){ 991 error = EINVAL; 992 break; 993 } 994 /* Ensure all of our fields are correct */ 995 xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path, 996 inccb->ccb_h.pinfo.priority); 997 xpt_merge_ccb(&ccb, inccb); 998 ccb.ccb_h.cbfcnp = xptdone; 999 xpt_action(&ccb); 1000 bcopy(&ccb, inccb, sizeof(union ccb)); 1001 xpt_free_path(ccb.ccb_h.path); 1002 break; 1003 1004 } 1005 case XPT_DEV_MATCH: { 1006 struct cam_periph_map_info mapinfo; 1007 struct cam_path *old_path; 1008 1009 /* 1010 * We can't deal with physical addresses for this 1011 * type of transaction. 1012 */ 1013 if (inccb->ccb_h.flags & CAM_DATA_PHYS) { 1014 error = EINVAL; 1015 break; 1016 } 1017 1018 /* 1019 * Save this in case the caller had it set to 1020 * something in particular. 1021 */ 1022 old_path = inccb->ccb_h.path; 1023 1024 /* 1025 * We really don't need a path for the matching 1026 * code. The path is needed because of the 1027 * debugging statements in xpt_action(). They 1028 * assume that the CCB has a valid path. 
1029 */ 1030 inccb->ccb_h.path = xpt_periph->path; 1031 1032 bzero(&mapinfo, sizeof(mapinfo)); 1033 1034 /* 1035 * Map the pattern and match buffers into kernel 1036 * virtual address space. 1037 */ 1038 error = cam_periph_mapmem(inccb, &mapinfo); 1039 1040 if (error) { 1041 inccb->ccb_h.path = old_path; 1042 break; 1043 } 1044 1045 /* 1046 * This is an immediate CCB, we can send it on directly. 1047 */ 1048 xpt_action(inccb); 1049 1050 /* 1051 * Map the buffers back into user space. 1052 */ 1053 cam_periph_unmapmem(inccb, &mapinfo); 1054 1055 inccb->ccb_h.path = old_path; 1056 1057 error = 0; 1058 break; 1059 } 1060 default: 1061 error = EINVAL; 1062 break; 1063 } 1064 break; 1065 } 1066 /* 1067 * This is the getpassthru ioctl. It takes a XPT_GDEVLIST ccb as input, 1068 * with the periphal driver name and unit name filled in. The other 1069 * fields don't really matter as input. The passthrough driver name 1070 * ("pass"), and unit number are passed back in the ccb. The current 1071 * device generation number, and the index into the device peripheral 1072 * driver list, and the status are also passed back. Note that 1073 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb, 1074 * we never return a status of CAM_GDEVLIST_LIST_CHANGED. It is 1075 * (or rather should be) impossible for the device peripheral driver 1076 * list to change since we look at the whole thing in one pass, and 1077 * we do it with splcam protection. 1078 * 1079 */ 1080 case CAMGETPASSTHRU: { 1081 union ccb *ccb; 1082 struct cam_periph *periph; 1083 struct periph_driver **p_drv; 1084 char *name; 1085 int unit; 1086 int cur_generation; 1087 int base_periph_found; 1088 int splbreaknum; 1089 int s; 1090 1091 ccb = (union ccb *)addr; 1092 unit = ccb->cgdl.unit_number; 1093 name = ccb->cgdl.periph_name; 1094 /* 1095 * Every 100 devices, we want to drop our spl protection to 1096 * give the software interrupt handler a chance to run. 
1097 * Most systems won't run into this check, but this should 1098 * avoid starvation in the software interrupt handler in 1099 * large systems. 1100 */ 1101 splbreaknum = 100; 1102 1103 ccb = (union ccb *)addr; 1104 1105 base_periph_found = 0; 1106 1107 /* 1108 * Sanity check -- make sure we don't get a null peripheral 1109 * driver name. 1110 */ 1111 if (*ccb->cgdl.periph_name == '\0') { 1112 error = EINVAL; 1113 break; 1114 } 1115 1116 /* Keep the list from changing while we traverse it */ 1117 s = splcam(); 1118ptstartover: 1119 cur_generation = xsoftc.generation; 1120 1121 /* first find our driver in the list of drivers */ 1122 for (p_drv = (struct periph_driver **)periphdriver_set.ls_items; 1123 *p_drv != NULL; p_drv++) 1124 if (strcmp((*p_drv)->driver_name, name) == 0) 1125 break; 1126 1127 if (*p_drv == NULL) { 1128 splx(s); 1129 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1130 ccb->cgdl.status = CAM_GDEVLIST_ERROR; 1131 *ccb->cgdl.periph_name = '\0'; 1132 ccb->cgdl.unit_number = 0; 1133 error = ENOENT; 1134 break; 1135 } 1136 1137 /* 1138 * Run through every peripheral instance of this driver 1139 * and check to see whether it matches the unit passed 1140 * in by the user. If it does, get out of the loops and 1141 * find the passthrough driver associated with that 1142 * peripheral driver. 1143 */ 1144 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL; 1145 periph = TAILQ_NEXT(periph, unit_links)) { 1146 1147 if (periph->unit_number == unit) { 1148 break; 1149 } else if (--splbreaknum == 0) { 1150 splx(s); 1151 s = splcam(); 1152 splbreaknum = 100; 1153 if (cur_generation != xsoftc.generation) 1154 goto ptstartover; 1155 } 1156 } 1157 /* 1158 * If we found the peripheral driver that the user passed 1159 * in, go through all of the peripheral drivers for that 1160 * particular device and look for a passthrough driver. 
1161 */ 1162 if (periph != NULL) { 1163 struct cam_ed *device; 1164 int i; 1165 1166 base_periph_found = 1; 1167 device = periph->path->device; 1168 for (i = 0, periph = device->periphs.slh_first; 1169 periph != NULL; 1170 periph = periph->periph_links.sle_next, i++) { 1171 /* 1172 * Check to see whether we have a 1173 * passthrough device or not. 1174 */ 1175 if (strcmp(periph->periph_name, "pass") == 0) { 1176 /* 1177 * Fill in the getdevlist fields. 1178 */ 1179 strcpy(ccb->cgdl.periph_name, 1180 periph->periph_name); 1181 ccb->cgdl.unit_number = 1182 periph->unit_number; 1183 if (periph->periph_links.sle_next) 1184 ccb->cgdl.status = 1185 CAM_GDEVLIST_MORE_DEVS; 1186 else 1187 ccb->cgdl.status = 1188 CAM_GDEVLIST_LAST_DEVICE; 1189 ccb->cgdl.generation = 1190 device->generation; 1191 ccb->cgdl.index = i; 1192 /* 1193 * Fill in some CCB header fields 1194 * that the user may want. 1195 */ 1196 ccb->ccb_h.path_id = 1197 periph->path->bus->path_id; 1198 ccb->ccb_h.target_id = 1199 periph->path->target->target_id; 1200 ccb->ccb_h.target_lun = 1201 periph->path->device->lun_id; 1202 ccb->ccb_h.status = CAM_REQ_CMP; 1203 break; 1204 } 1205 } 1206 } 1207 1208 /* 1209 * If the periph is null here, one of two things has 1210 * happened. The first possibility is that we couldn't 1211 * find the unit number of the particular peripheral driver 1212 * that the user is asking about. e.g. the user asks for 1213 * the passthrough driver for "da11". We find the list of 1214 * "da" peripherals all right, but there is no unit 11. 1215 * The other possibility is that we went through the list 1216 * of peripheral drivers attached to the device structure, 1217 * but didn't find one with the name "pass". Either way, 1218 * we return ENOENT, since we couldn't find something. 
1219 */ 1220 if (periph == NULL) { 1221 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 1222 ccb->cgdl.status = CAM_GDEVLIST_ERROR; 1223 *ccb->cgdl.periph_name = '\0'; 1224 ccb->cgdl.unit_number = 0; 1225 error = ENOENT; 1226 /* 1227 * It is unfortunate that this is even necessary, 1228 * but there are many, many clueless users out there. 1229 * If this is true, the user is looking for the 1230 * passthrough driver, but doesn't have one in his 1231 * kernel. 1232 */ 1233 if (base_periph_found == 1) { 1234 printf("xptioctl: pass driver is not in the " 1235 "kernel\n"); 1236 printf("xptioctl: put \"device pass0\" in " 1237 "your kernel config file\n"); 1238 } 1239 } 1240 splx(s); 1241 break; 1242 } 1243 default: 1244 error = ENOTTY; 1245 break; 1246 } 1247 1248 return(error); 1249} 1250 1251/* Functions accessed by the peripheral drivers */ 1252static void 1253xpt_init(dummy) 1254 void *dummy; 1255{ 1256 struct cam_sim *xpt_sim; 1257 struct cam_path *path; 1258 struct cam_devq; 1259 cam_status status; 1260 1261 TAILQ_INIT(&xpt_busses); 1262 TAILQ_INIT(&cam_bioq); 1263 TAILQ_INIT(&cam_netq); 1264 SLIST_INIT(&ccb_freeq); 1265 STAILQ_INIT(&highpowerq); 1266 1267 /* 1268 * The xpt layer is, itself, the equivelent of a SIM. 1269 * Allow 16 ccbs in the ccb pool for it. This should 1270 * give decent parallelism when we probe busses and 1271 * perform other XPT functions. 1272 */ 1273 xpt_sim = (struct cam_sim *)malloc(sizeof(*xpt_sim), 1274 M_DEVBUF, M_WAITOK); 1275 xpt_sim->sim_action = xptaction; 1276 xpt_sim->sim_name = "xpt"; 1277 xpt_sim->path_id = CAM_XPT_PATH_ID; 1278 xpt_sim->bus_id = 0; 1279 xpt_sim->max_tagged_dev_openings = 0; 1280 xpt_sim->max_dev_openings = 0; 1281 xpt_sim->devq = cam_simq_alloc(16); 1282 xpt_max_ccbs = 16; 1283 1284 xpt_bus_register(xpt_sim, 0); 1285 1286 /* 1287 * Looking at the XPT from the SIM layer, the XPT is 1288 * the equivelent of a peripheral driver. Allocate 1289 * a peripheral driver entry for us. 
1290 */ 1291 if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID, 1292 CAM_TARGET_WILDCARD, 1293 CAM_LUN_WILDCARD)) != CAM_REQ_CMP) { 1294 printf("xpt_init: xpt_create_path failed with status %#x," 1295 " failing attach\n", status); 1296 return; 1297 } 1298 1299 cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO, 1300 path, NULL, 0, NULL); 1301 xpt_free_path(path); 1302 1303 xpt_sim->softc = xpt_periph; 1304 1305 /* 1306 * Register a callback for when interrupts are enabled. 1307 */ 1308 xpt_config_hook = 1309 (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook), 1310 M_TEMP, M_NOWAIT); 1311 if (xpt_config_hook == NULL) { 1312 printf("xpt_init: Cannot malloc config hook " 1313 "- failing attach\n"); 1314 return; 1315 } 1316 bzero(xpt_config_hook, sizeof(*xpt_config_hook)); 1317 1318 xpt_config_hook->ich_func = xpt_config; 1319 if (config_intrhook_establish(xpt_config_hook) != 0) { 1320 free (xpt_config_hook, M_TEMP); 1321 printf("xpt_init: config_intrhook_establish failed " 1322 "- failing attach\n"); 1323 } 1324 1325 /* Install our software interrupt handlers */ 1326 register_swi(SWI_CAMNET, swi_camnet); 1327 register_swi(SWI_CAMBIO, swi_cambio); 1328} 1329 1330static cam_status 1331xptregister(struct cam_periph *periph, void *arg) 1332{ 1333 if (periph == NULL) { 1334 printf("xptregister: periph was NULL!!\n"); 1335 return(CAM_REQ_CMP_ERR); 1336 } 1337 1338 periph->softc = NULL; 1339 1340 xpt_periph = periph; 1341 1342 return(CAM_REQ_CMP); 1343} 1344 1345int32_t 1346xpt_add_periph(struct cam_periph *periph) 1347{ 1348 struct cam_ed *device; 1349 int32_t status; 1350 struct periph_list *periph_head; 1351 1352 device = periph->path->device; 1353 1354 periph_head = &device->periphs; 1355 1356 status = CAM_REQ_CMP; 1357 1358 if (device != NULL) { 1359 int s; 1360 1361 /* 1362 * Make room for this peripheral 1363 * so it will fit in the queue 1364 * when it's scheduled to run 1365 */ 1366 s = splsoftcam(); 1367 status = 
camq_resize(&device->drvq, 1368 device->drvq.array_size + 1); 1369 1370 device->generation++; 1371 1372 SLIST_INSERT_HEAD(periph_head, periph, periph_links); 1373 1374 splx(s); 1375 } 1376 1377 xsoftc.generation++; 1378 1379 return (status); 1380} 1381 1382void 1383xpt_remove_periph(struct cam_periph *periph) 1384{ 1385 struct cam_ed *device; 1386 1387 device = periph->path->device; 1388 1389 if (device != NULL) { 1390 int s; 1391 struct periph_list *periph_head; 1392 1393 periph_head = &device->periphs; 1394 1395 /* Release the slot for this peripheral */ 1396 s = splsoftcam(); 1397 camq_resize(&device->drvq, device->drvq.array_size - 1); 1398 1399 device->generation++; 1400 1401 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links); 1402 1403 splx(s); 1404 } 1405 1406 xsoftc.generation++; 1407 1408} 1409 1410void 1411xpt_announce_periph(struct cam_periph *periph, char *announce_string) 1412{ 1413 int s; 1414 u_int mb; 1415 struct cam_path *path; 1416 struct ccb_trans_settings cts; 1417 1418 path = periph->path; 1419 /* 1420 * To ensure that this is printed in one piece, 1421 * mask out CAM interrupts. 
1422 */ 1423 s = splsoftcam(); 1424 printf("%s%d at %s%d bus %d target %d lun %d\n", 1425 periph->periph_name, periph->unit_number, 1426 path->bus->sim->sim_name, 1427 path->bus->sim->unit_number, 1428 path->bus->sim->bus_id, 1429 path->target->target_id, 1430 path->device->lun_id); 1431 printf("%s%d: ", periph->periph_name, periph->unit_number); 1432 scsi_print_inquiry(&path->device->inq_data); 1433 if ((bootverbose) 1434 && (path->device->serial_num_len > 0)) { 1435 /* Don't wrap the screen - print only the first 60 chars */ 1436 printf("%s%d: Serial Number %.60s\n", periph->periph_name, 1437 periph->unit_number, path->device->serial_num); 1438 } 1439 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1); 1440 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; 1441 cts.flags = CCB_TRANS_CURRENT_SETTINGS; 1442 xpt_action((union ccb*)&cts); 1443 if (cts.ccb_h.status == CAM_REQ_CMP) { 1444 u_int speed; 1445 u_int freq; 1446 1447 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0 1448 && cts.sync_offset != 0) { 1449 freq = scsi_calc_syncsrate(cts.sync_period); 1450 speed = freq; 1451 } else { 1452 freq = 0; 1453 speed = path->bus->sim->base_transfer_speed; 1454 } 1455 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) 1456 speed *= (0x01 << cts.bus_width); 1457 mb = speed / 1000; 1458 if (mb > 0) 1459 printf("%s%d: %d.%03dMB/s transfers", 1460 periph->periph_name, periph->unit_number, 1461 mb, speed % 1000); 1462 else 1463 printf("%s%d: %dKB/s transfers", periph->periph_name, 1464 periph->unit_number, (speed % 1000) * 1000); 1465 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0 1466 && cts.sync_offset != 0) { 1467 printf(" (%d.%03dMHz, offset %d", freq / 1000, 1468 freq % 1000, cts.sync_offset); 1469 } 1470 if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0 1471 && cts.bus_width > 0) { 1472 if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0 1473 && cts.sync_offset != 0) { 1474 printf(", "); 1475 } else { 1476 printf(" ("); 1477 } 1478 printf("%dbit)", 8 * (0x01 << cts.bus_width)); 
1479 } else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0 1480 && cts.sync_offset != 0) { 1481 printf(")"); 1482 } 1483 1484 if (path->device->inq_flags & SID_CmdQue 1485 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) { 1486 printf(", Tagged Queueing Enabled"); 1487 } 1488 1489 printf("\n"); 1490 } else if (path->device->inq_flags & SID_CmdQue 1491 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) { 1492 printf("%s%d: Tagged Queueing Enabled\n", 1493 periph->periph_name, periph->unit_number); 1494 } 1495 1496 /* 1497 * We only want to print the caller's announce string if they've 1498 * passed one in.. 1499 */ 1500 if (announce_string != NULL) 1501 printf("%s%d: %s\n", periph->periph_name, 1502 periph->unit_number, announce_string); 1503 splx(s); 1504} 1505 1506 1507static dev_match_ret 1508xptbusmatch(struct dev_match_pattern *patterns, int num_patterns, 1509 struct cam_eb *bus) 1510{ 1511 dev_match_ret retval; 1512 int i; 1513 1514 retval = DM_RET_NONE; 1515 1516 /* 1517 * If we aren't given something to match against, that's an error. 1518 */ 1519 if (bus == NULL) 1520 return(DM_RET_ERROR); 1521 1522 /* 1523 * If there are no match entries, then this bus matches no 1524 * matter what. 1525 */ 1526 if ((patterns == NULL) || (num_patterns == 0)) 1527 return(DM_RET_DESCEND | DM_RET_COPY); 1528 1529 for (i = 0; i < num_patterns; i++) { 1530 struct bus_match_pattern *cur_pattern; 1531 1532 /* 1533 * If the pattern in question isn't for a bus node, we 1534 * aren't interested. However, we do indicate to the 1535 * calling routine that we should continue descending the 1536 * tree, since the user wants to match against lower-level 1537 * EDT elements. 1538 */ 1539 if (patterns[i].type != DEV_MATCH_BUS) { 1540 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) 1541 retval |= DM_RET_DESCEND; 1542 continue; 1543 } 1544 1545 cur_pattern = &patterns[i].pattern.bus_pattern; 1546 1547 /* 1548 * If they want to match any bus node, we give them any 1549 * device node. 
1550 */ 1551 if (cur_pattern->flags == BUS_MATCH_ANY) { 1552 /* set the copy flag */ 1553 retval |= DM_RET_COPY; 1554 1555 /* 1556 * If we've already decided on an action, go ahead 1557 * and return. 1558 */ 1559 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE) 1560 return(retval); 1561 } 1562 1563 /* 1564 * Not sure why someone would do this... 1565 */ 1566 if (cur_pattern->flags == BUS_MATCH_NONE) 1567 continue; 1568 1569 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0) 1570 && (cur_pattern->path_id != bus->path_id)) 1571 continue; 1572 1573 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0) 1574 && (cur_pattern->bus_id != bus->sim->bus_id)) 1575 continue; 1576 1577 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0) 1578 && (cur_pattern->unit_number != bus->sim->unit_number)) 1579 continue; 1580 1581 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0) 1582 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name, 1583 DEV_IDLEN) != 0)) 1584 continue; 1585 1586 /* 1587 * If we get to this point, the user definitely wants 1588 * information on this bus. So tell the caller to copy the 1589 * data out. 1590 */ 1591 retval |= DM_RET_COPY; 1592 1593 /* 1594 * If the return action has been set to descend, then we 1595 * know that we've already seen a non-bus matching 1596 * expression, therefore we need to further descend the tree. 1597 * This won't change by continuing around the loop, so we 1598 * go ahead and return. If we haven't seen a non-bus 1599 * matching expression, we keep going around the loop until 1600 * we exhaust the matching expressions. We'll set the stop 1601 * flag once we fall out of the loop. 1602 */ 1603 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) 1604 return(retval); 1605 } 1606 1607 /* 1608 * If the return action hasn't been set to descend yet, that means 1609 * we haven't seen anything other than bus matching patterns. 
So 1610 * tell the caller to stop descending the tree -- the user doesn't 1611 * want to match against lower level tree elements. 1612 */ 1613 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) 1614 retval |= DM_RET_STOP; 1615 1616 return(retval); 1617} 1618 1619static dev_match_ret 1620xptdevicematch(struct dev_match_pattern *patterns, int num_patterns, 1621 struct cam_ed *device) 1622{ 1623 dev_match_ret retval; 1624 int i; 1625 1626 retval = DM_RET_NONE; 1627 1628 /* 1629 * If we aren't given something to match against, that's an error. 1630 */ 1631 if (device == NULL) 1632 return(DM_RET_ERROR); 1633 1634 /* 1635 * If there are no match entries, then this device matches no 1636 * matter what. 1637 */ 1638 if ((patterns == NULL) || (patterns == 0)) 1639 return(DM_RET_DESCEND | DM_RET_COPY); 1640 1641 for (i = 0; i < num_patterns; i++) { 1642 struct device_match_pattern *cur_pattern; 1643 1644 /* 1645 * If the pattern in question isn't for a device node, we 1646 * aren't interested. 1647 */ 1648 if (patterns[i].type != DEV_MATCH_DEVICE) { 1649 if ((patterns[i].type == DEV_MATCH_PERIPH) 1650 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)) 1651 retval |= DM_RET_DESCEND; 1652 continue; 1653 } 1654 1655 cur_pattern = &patterns[i].pattern.device_pattern; 1656 1657 /* 1658 * If they want to match any device node, we give them any 1659 * device node. 1660 */ 1661 if (cur_pattern->flags == DEV_MATCH_ANY) { 1662 /* set the copy flag */ 1663 retval |= DM_RET_COPY; 1664 1665 1666 /* 1667 * If we've already decided on an action, go ahead 1668 * and return. 1669 */ 1670 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE) 1671 return(retval); 1672 } 1673 1674 /* 1675 * Not sure why someone would do this... 
		 */
		if (cur_pattern->flags == DEV_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != device->target->bus->path_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
		 && (cur_pattern->target_id != device->target->target_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
		 && (cur_pattern->target_lun != device->lun_id))
			continue;

		/* Inquiry patterns match against the cached inquiry data. */
		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
		 && (cam_quirkmatch((caddr_t)&device->inq_data,
				    (caddr_t)&cur_pattern->inq_pat,
				    1, sizeof(cur_pattern->inq_pat),
				    scsi_static_inquiry_match) == NULL))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this device.  So tell the caller to copy
		 * the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a peripheral matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a peripheral
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen any peripheral matching patterns.  So tell the
	 * caller to stop descending the tree -- the user doesn't want to
	 * match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}

/*
 * Match a single peripheral against any number of match patterns.
 * Returns a dev_match_ret bitmask: an action code (always STOP unless
 * an error occurs, since peripherals are leaves of the EDT) plus
 * DM_RET_COPY when the peripheral should be copied out to the user.
 */
static dev_match_ret
xptperiphmatch(struct dev_match_pattern *patterns, int num_patterns,
	       struct cam_periph *periph)
{
	dev_match_ret retval;
	int i;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (periph == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this peripheral matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_STOP | DM_RET_COPY);

	/*
	 * There aren't any nodes below a peripheral node, so there's no
	 * reason to descend the tree any further.
	 */
	retval = DM_RET_STOP;

	for (i = 0; i < num_patterns; i++) {
		struct periph_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a peripheral, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_PERIPH)
			continue;

		cur_pattern = &patterns[i].pattern.periph_pattern;

		/*
		 * If they want to match on anything, then we will do so.
		 */
		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * We've already set the return action to stop,
			 * since there are no nodes below peripherals in
			 * the tree.
			 */
			return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == PERIPH_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != periph->path->bus->path_id))
			continue;

		/*
		 * For the target and lun id's, we have to make sure the
		 * target and lun pointers aren't NULL.  The xpt peripheral
		 * has a wildcard target and device.
		 */
		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
		 && ((periph->path->target == NULL)
		 ||(cur_pattern->target_id != periph->path->target->target_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
		 && ((periph->path->device == NULL)
		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != periph->unit_number))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this peripheral.  So tell the caller to
		 * copy the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * The return action has already been set to stop, since
		 * peripherals don't have any nodes below them in the EDT.
		 */
		return(retval);
	}

	/*
	 * If we get to this point, the peripheral that was passed in
	 * doesn't match any of the patterns.
	 */
	return(retval);
}

/*
 * Per-bus callback for the XPT_DEV_MATCH EDT traversal.  arg is the
 * ccb_dev_match being serviced.  Matches the bus against the user's
 * patterns, copies it out if requested, and recurses into the bus's
 * targets unless told to stop.  Returns 0 to abort the traversal (the
 * cdm status has been set) or 1 to continue.
 */
static int
xptedtbusfunc(struct cam_eb *bus, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);

	/*
	 * If we got an error, bail out of the search.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this bus out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		/*
		 * NOTE(review): spaceleft is a signed int compared against
		 * an unsigned sizeof below; it appears it can never go
		 * negative because num_matches only grows when space was
		 * verified -- confirm.
		 */
		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
			    CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;

			cdm->pos.cookie.bus = bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				bus_generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_BUS;
		cdm->matches[j].result.bus_result.path_id = bus->path_id;
		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
		cdm->matches[j].result.bus_result.unit_number =
			bus->sim->unit_number;
		strncpy(cdm->matches[j].result.bus_result.dev_name,
			bus->sim->sim_name, DEV_IDLEN);
	}

	/*
	 * If the user is only interested in busses, there's no
	 * reason to descend to the next level in the tree.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a target generation recorded, check it to
	 * make sure the target list hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (bus == cdm->pos.cookie.bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
	     bus->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	/* Resume from the saved target if we are restarting mid-bus. */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		return(xpttargettraverse(bus,
					(struct cam_et *)cdm->pos.cookie.target,
					 xptedttargetfunc, arg));
	else
		return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
}

/*
 * Per-target callback for the XPT_DEV_MATCH EDT traversal; descends
 * into the target's device list.  Returns 0 to abort, 1 to continue.
 */
static int
xptedttargetfunc(struct cam_et *target, void *arg)
{
	struct ccb_dev_match *cdm;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If there is a device list generation recorded, check it to
	 * make sure the device list hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
	     target->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	/* Resume from the saved device if we are restarting mid-target. */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device != NULL))
		return(xptdevicetraverse(target,
					(struct cam_ed *)cdm->pos.cookie.device,
					 xptedtdevicefunc, arg));
	else
		return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
}

/*
 * Per-device callback for the XPT_DEV_MATCH EDT traversal.  Matches the
 * device against the user's patterns, copies it out if requested, and
 * descends into the device's peripheral list unless told to stop.
 * Returns 0 to abort the traversal (cdm status set), 1 to continue.
 */
static int
xptedtdevicefunc(struct cam_ed *device, void *arg)
{

	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
					device);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this device out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
			    CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
			    CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;

			cdm->pos.cookie.bus = device->target->bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				bus_generation;
			cdm->pos.cookie.target = device->target;
			cdm->pos.generations[CAM_TARGET_GENERATION] =
				device->target->bus->generation;
			cdm->pos.cookie.device = device;
			cdm->pos.generations[CAM_DEV_GENERATION] =
				device->target->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_DEVICE;
		cdm->matches[j].result.device_result.path_id =
			device->target->bus->path_id;
		cdm->matches[j].result.device_result.target_id =
			device->target->target_id;
		cdm->matches[j].result.device_result.target_lun =
			device->lun_id;
		bcopy(&device->inq_data,
		      &cdm->matches[j].result.device_result.inq_data,
		      sizeof(struct scsi_inquiry_data));
	}

	/*
	 * If the user isn't interested in peripherals, don't descend
	 * the tree any further.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a peripheral list generation recorded, make sure
	 * it hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (device->target->bus == cdm->pos.cookie.bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (device->target == cdm->pos.cookie.target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (device == cdm->pos.cookie.device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
	     device->generation)){
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	/* Resume from the saved peripheral if restarting mid-device. */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == device->target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == device->target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		return(xptperiphtraverse(device,
				(struct cam_periph *)cdm->pos.cookie.periph,
				xptedtperiphfunc, arg));
	else
		return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
}

/*
 * Per-peripheral callback for the XPT_DEV_MATCH EDT traversal; this is
 * the leaf level, so it only matches and copies.  Returns 0 to abort
 * (cdm status set), 1 to continue.
 */
static int
xptedtperiphfunc(struct cam_periph *periph, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this peripheral out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
2116 */ 2117 if (spaceleft < sizeof(struct dev_match_result)) { 2118 bzero(&cdm->pos, sizeof(cdm->pos)); 2119 cdm->pos.position_type = 2120 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 2121 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE | 2122 CAM_DEV_POS_PERIPH; 2123 2124 cdm->pos.cookie.bus = periph->path->bus; 2125 cdm->pos.generations[CAM_BUS_GENERATION]= 2126 bus_generation; 2127 cdm->pos.cookie.target = periph->path->target; 2128 cdm->pos.generations[CAM_TARGET_GENERATION] = 2129 periph->path->bus->generation; 2130 cdm->pos.cookie.device = periph->path->device; 2131 cdm->pos.generations[CAM_DEV_GENERATION] = 2132 periph->path->target->generation; 2133 cdm->pos.cookie.periph = periph; 2134 cdm->pos.generations[CAM_PERIPH_GENERATION] = 2135 periph->path->device->generation; 2136 cdm->status = CAM_DEV_MATCH_MORE; 2137 return(0); 2138 } 2139 2140 j = cdm->num_matches; 2141 cdm->num_matches++; 2142 cdm->matches[j].type = DEV_MATCH_PERIPH; 2143 cdm->matches[j].result.periph_result.path_id = 2144 periph->path->bus->path_id; 2145 cdm->matches[j].result.periph_result.target_id = 2146 periph->path->target->target_id; 2147 cdm->matches[j].result.periph_result.target_lun = 2148 periph->path->device->lun_id; 2149 cdm->matches[j].result.periph_result.unit_number = 2150 periph->unit_number; 2151 strncpy(cdm->matches[j].result.periph_result.periph_name, 2152 periph->periph_name, DEV_IDLEN); 2153 } 2154 2155 return(1); 2156} 2157 2158static int 2159xptedtmatch(struct ccb_dev_match *cdm) 2160{ 2161 int ret; 2162 2163 cdm->num_matches = 0; 2164 2165 /* 2166 * Check the bus list generation. If it has changed, the user 2167 * needs to reset everything and start over. 
2168 */ 2169 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2170 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0) 2171 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) { 2172 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 2173 return(0); 2174 } 2175 2176 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2177 && (cdm->pos.cookie.bus != NULL)) 2178 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus, 2179 xptedtbusfunc, cdm); 2180 else 2181 ret = xptbustraverse(NULL, xptedtbusfunc, cdm); 2182 2183 /* 2184 * If we get back 0, that means that we had to stop before fully 2185 * traversing the EDT. It also means that one of the subroutines 2186 * has set the status field to the proper value. If we get back 1, 2187 * we've fully traversed the EDT and copied out any matching entries. 2188 */ 2189 if (ret == 1) 2190 cdm->status = CAM_DEV_MATCH_LAST; 2191 2192 return(ret); 2193} 2194 2195static int 2196xptplistpdrvfunc(struct periph_driver **pdrv, void *arg) 2197{ 2198 struct ccb_dev_match *cdm; 2199 2200 cdm = (struct ccb_dev_match *)arg; 2201 2202 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2203 && (cdm->pos.cookie.pdrv == pdrv) 2204 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2205 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0) 2206 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 2207 (*pdrv)->generation)) { 2208 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 2209 return(0); 2210 } 2211 2212 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2213 && (cdm->pos.cookie.pdrv == pdrv) 2214 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2215 && (cdm->pos.cookie.periph != NULL)) 2216 return(xptpdperiphtraverse(pdrv, 2217 (struct cam_periph *)cdm->pos.cookie.periph, 2218 xptplistperiphfunc, arg)); 2219 else 2220 return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg)); 2221} 2222 2223static int 2224xptplistperiphfunc(struct cam_periph *periph, void *arg) 2225{ 2226 struct ccb_dev_match *cdm; 2227 dev_match_ret retval; 2228 2229 cdm = (struct 
ccb_dev_match *)arg; 2230 2231	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 2232 2233	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 2234		cdm->status = CAM_DEV_MATCH_ERROR; 2235		return(0); 2236	} 2237 2238	/* 2239	 * If the copy flag is set, copy this peripheral out. 2240	 */ 2241	if (retval & DM_RET_COPY) { 2242		int spaceleft, j; 2243 2244		spaceleft = cdm->match_buf_len - (cdm->num_matches * 2245			sizeof(struct dev_match_result)); 2246 2247		/* 2248		 * If we don't have enough space to put in another 2249		 * match result, save our position and tell the 2250		 * user there are more devices to check. 2251		 */ 2252		if (spaceleft < sizeof(struct dev_match_result)) { 2253			struct periph_driver **pdrv; 2254 2255			pdrv = NULL; 2256			bzero(&cdm->pos, sizeof(cdm->pos)); 2257			cdm->pos.position_type = 2258				CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR | 2259				CAM_DEV_POS_PERIPH; 2260 2261			/* 2262			 * This may look a bit non-sensical, but it is 2263			 * actually quite logical. There are very few 2264			 * peripheral drivers, and bloating every peripheral 2265			 * structure with a pointer back to its parent 2266			 * peripheral driver linker set entry would cost 2267			 * more in the long run than doing this quick lookup. 2268			 */ 2269			for (pdrv = 2270			     (struct periph_driver **)periphdriver_set.ls_items; 2271			     *pdrv != NULL; pdrv++) { 2272				if (strcmp((*pdrv)->driver_name, 2273				    periph->periph_name) == 0) 2274					break; 2275			} 2276 2277			/* 2277a
			 * Fix: pdrv is a cursor into the linker set and is
			 * never NULL after the loop above; the scan stops
			 * either on a name match or on the set's NULL
			 * terminator, so "not found" is *pdrv == NULL.
			 */ 2277			if (*pdrv == NULL) { 2278				cdm->status = CAM_DEV_MATCH_ERROR; 2279				return(0); 2280			} 2281 2282			cdm->pos.cookie.pdrv = pdrv; 2283			/* 2284			 * The periph generation slot does double duty, as 2285			 * does the periph pointer slot. They are used for 2286			 * both edt and pdrv lookups and positioning.
2287 */ 2288 cdm->pos.cookie.periph = periph; 2289 cdm->pos.generations[CAM_PERIPH_GENERATION] = 2290 (*pdrv)->generation; 2291 cdm->status = CAM_DEV_MATCH_MORE; 2292 return(0); 2293 } 2294 2295 j = cdm->num_matches; 2296 cdm->num_matches++; 2297 cdm->matches[j].type = DEV_MATCH_PERIPH; 2298 cdm->matches[j].result.periph_result.path_id = 2299 periph->path->bus->path_id; 2300 2301 /* 2302 * The transport layer peripheral doesn't have a target or 2303 * lun. 2304 */ 2305 if (periph->path->target) 2306 cdm->matches[j].result.periph_result.target_id = 2307 periph->path->target->target_id; 2308 else 2309 cdm->matches[j].result.periph_result.target_id = -1; 2310 2311 if (periph->path->device) 2312 cdm->matches[j].result.periph_result.target_lun = 2313 periph->path->device->lun_id; 2314 else 2315 cdm->matches[j].result.periph_result.target_lun = -1; 2316 2317 cdm->matches[j].result.periph_result.unit_number = 2318 periph->unit_number; 2319 strncpy(cdm->matches[j].result.periph_result.periph_name, 2320 periph->periph_name, DEV_IDLEN); 2321 } 2322 2323 return(1); 2324} 2325 2326static int 2327xptperiphlistmatch(struct ccb_dev_match *cdm) 2328{ 2329 int ret; 2330 2331 cdm->num_matches = 0; 2332 2333 /* 2334 * At this point in the edt traversal function, we check the bus 2335 * list generation to make sure that no busses have been added or 2336 * removed since the user last sent a XPT_DEV_MATCH ccb through. 2337 * For the peripheral driver list traversal function, however, we 2338 * don't have to worry about new peripheral driver types coming or 2339 * going; they're in a linker set, and therefore can't change 2340 * without a recompile. 
2341 */ 2342 2343 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2344 && (cdm->pos.cookie.pdrv != NULL)) 2345 ret = xptpdrvtraverse( 2346 (struct periph_driver **)cdm->pos.cookie.pdrv, 2347 xptplistpdrvfunc, cdm); 2348 else 2349 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm); 2350 2351 /* 2352 * If we get back 0, that means that we had to stop before fully 2353 * traversing the peripheral driver tree. It also means that one of 2354 * the subroutines has set the status field to the proper value. If 2355 * we get back 1, we've fully traversed the EDT and copied out any 2356 * matching entries. 2357 */ 2358 if (ret == 1) 2359 cdm->status = CAM_DEV_MATCH_LAST; 2360 2361 return(ret); 2362} 2363 2364static int 2365xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg) 2366{ 2367 struct cam_eb *bus, *next_bus; 2368 int retval; 2369 2370 retval = 1; 2371 2372 for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses)); 2373 bus != NULL; 2374 bus = next_bus) { 2375 next_bus = TAILQ_NEXT(bus, links); 2376 2377 retval = tr_func(bus, arg); 2378 if (retval == 0) 2379 return(retval); 2380 } 2381 2382 return(retval); 2383} 2384 2385static int 2386xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, 2387 xpt_targetfunc_t *tr_func, void *arg) 2388{ 2389 struct cam_et *target, *next_target; 2390 int retval; 2391 2392 retval = 1; 2393 for (target = (start_target ? start_target : 2394 TAILQ_FIRST(&bus->et_entries)); 2395 target != NULL; target = next_target) { 2396 2397 next_target = TAILQ_NEXT(target, links); 2398 2399 retval = tr_func(target, arg); 2400 2401 if (retval == 0) 2402 return(retval); 2403 } 2404 2405 return(retval); 2406} 2407 2408static int 2409xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, 2410 xpt_devicefunc_t *tr_func, void *arg) 2411{ 2412 struct cam_ed *device, *next_device; 2413 int retval; 2414 2415 retval = 1; 2416 for (device = (start_device ? 
start_device : 2417		   TAILQ_FIRST(&target->ed_entries)); 2418	     device != NULL; 2419	     device = next_device) { 2420 2421		/* successor latched before the callback runs */ next_device = TAILQ_NEXT(device, links); 2422 2423		retval = tr_func(device, arg); 2424 2425		if (retval == 0) 2426			return(retval); 2427	} 2428 2429	return(retval); 2430} 2431 /*
 * Walk the peripherals attached to `device', starting at start_periph (or
 * at the head of device->periphs when start_periph is NULL).  Each entry is
 * handed to tr_func; a 0 return stops the walk and is propagated to the
 * caller, otherwise 1 is returned once the list is exhausted.
 */ 2432static int 2433xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, 2434		  xpt_periphfunc_t *tr_func, void *arg) 2435{ 2436	struct cam_periph *periph, *next_periph; 2437	int retval; 2438 2439	retval = 1; 2440 2441	for (periph = (start_periph ? start_periph : 2442		       SLIST_FIRST(&device->periphs)); 2443	     periph != NULL; 2444	     periph = next_periph) { 2445 2446		/* successor latched before the callback runs */ next_periph = SLIST_NEXT(periph, periph_links); 2447 2448		retval = tr_func(periph, arg); 2449		if (retval == 0) 2450			return(retval); 2451	} 2452 2453	return(retval); 2454} 2455 /*
 * Walk the peripheral driver linker set, starting at start_pdrv (or at the
 * beginning of periphdriver_set when start_pdrv is NULL).  Each entry is
 * handed to tr_func; a 0 return from tr_func ends the walk early.
 */ 2456static int 2457xptpdrvtraverse(struct periph_driver **start_pdrv, 2458		xpt_pdrvfunc_t *tr_func, void *arg) 2459{ 2460	struct periph_driver **pdrv; 2461	int retval; 2462 2463	retval = 1; 2464 2465	/* 2466	 * We don't traverse the peripheral driver list like we do the 2467	 * other lists, because it is a linker set, and therefore cannot be 2468	 * changed during runtime.  If the peripheral driver list is ever 2469	 * re-done to be something other than a linker set (i.e. it can 2470	 * change while the system is running), the list traversal should 2471	 * be modified to work like the other traversal functions. 2472	 */ 2473	for (pdrv = (start_pdrv ?
start_pdrv : 2474 (struct periph_driver **)periphdriver_set.ls_items); 2475 *pdrv != NULL; pdrv++) { 2476 retval = tr_func(pdrv, arg); 2477 2478 if (retval == 0) 2479 return(retval); 2480 } 2481 2482 return(retval); 2483} 2484 2485static int 2486xptpdperiphtraverse(struct periph_driver **pdrv, 2487 struct cam_periph *start_periph, 2488 xpt_periphfunc_t *tr_func, void *arg) 2489{ 2490 struct cam_periph *periph, *next_periph; 2491 int retval; 2492 2493 retval = 1; 2494 2495 for (periph = (start_periph ? start_periph : 2496 TAILQ_FIRST(&(*pdrv)->units)); periph != NULL; 2497 periph = next_periph) { 2498 2499 next_periph = TAILQ_NEXT(periph, unit_links); 2500 2501 retval = tr_func(periph, arg); 2502 if (retval == 0) 2503 return(retval); 2504 } 2505 return(retval); 2506} 2507 2508static int 2509xptdefbusfunc(struct cam_eb *bus, void *arg) 2510{ 2511 struct xpt_traverse_config *tr_config; 2512 2513 tr_config = (struct xpt_traverse_config *)arg; 2514 2515 if (tr_config->depth == XPT_DEPTH_BUS) { 2516 xpt_busfunc_t *tr_func; 2517 2518 tr_func = (xpt_busfunc_t *)tr_config->tr_func; 2519 2520 return(tr_func(bus, tr_config->tr_arg)); 2521 } else 2522 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg)); 2523} 2524 2525static int 2526xptdeftargetfunc(struct cam_et *target, void *arg) 2527{ 2528 struct xpt_traverse_config *tr_config; 2529 2530 tr_config = (struct xpt_traverse_config *)arg; 2531 2532 if (tr_config->depth == XPT_DEPTH_TARGET) { 2533 xpt_targetfunc_t *tr_func; 2534 2535 tr_func = (xpt_targetfunc_t *)tr_config->tr_func; 2536 2537 return(tr_func(target, tr_config->tr_arg)); 2538 } else 2539 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg)); 2540} 2541 2542static int 2543xptdefdevicefunc(struct cam_ed *device, void *arg) 2544{ 2545 struct xpt_traverse_config *tr_config; 2546 2547 tr_config = (struct xpt_traverse_config *)arg; 2548 2549 if (tr_config->depth == XPT_DEPTH_DEVICE) { 2550 xpt_devicefunc_t *tr_func; 2551 2552 tr_func = (xpt_devicefunc_t 
*)tr_config->tr_func; 2553 2554 return(tr_func(device, tr_config->tr_arg)); 2555 } else 2556 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg)); 2557} 2558 2559static int 2560xptdefperiphfunc(struct cam_periph *periph, void *arg) 2561{ 2562 struct xpt_traverse_config *tr_config; 2563 xpt_periphfunc_t *tr_func; 2564 2565 tr_config = (struct xpt_traverse_config *)arg; 2566 2567 tr_func = (xpt_periphfunc_t *)tr_config->tr_func; 2568 2569 /* 2570 * Unlike the other default functions, we don't check for depth 2571 * here. The peripheral driver level is the last level in the EDT, 2572 * so if we're here, we should execute the function in question. 2573 */ 2574 return(tr_func(periph, tr_config->tr_arg)); 2575} 2576 2577/* 2578 * Execute the given function for every bus in the EDT. 2579 */ 2580static int 2581xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg) 2582{ 2583 struct xpt_traverse_config tr_config; 2584 2585 tr_config.depth = XPT_DEPTH_BUS; 2586 tr_config.tr_func = tr_func; 2587 tr_config.tr_arg = arg; 2588 2589 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2590} 2591 2592#ifdef notusedyet 2593/* 2594 * Execute the given function for every target in the EDT. 2595 */ 2596static int 2597xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg) 2598{ 2599 struct xpt_traverse_config tr_config; 2600 2601 tr_config.depth = XPT_DEPTH_TARGET; 2602 tr_config.tr_func = tr_func; 2603 tr_config.tr_arg = arg; 2604 2605 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2606} 2607#endif /* notusedyet */ 2608 2609/* 2610 * Execute the given function for every device in the EDT. 
2611 */ 2612static int 2613xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg) 2614{ 2615 struct xpt_traverse_config tr_config; 2616 2617 tr_config.depth = XPT_DEPTH_DEVICE; 2618 tr_config.tr_func = tr_func; 2619 tr_config.tr_arg = arg; 2620 2621 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2622} 2623 2624#ifdef notusedyet 2625/* 2626 * Execute the given function for every peripheral in the EDT. 2627 */ 2628static int 2629xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg) 2630{ 2631 struct xpt_traverse_config tr_config; 2632 2633 tr_config.depth = XPT_DEPTH_PERIPH; 2634 tr_config.tr_func = tr_func; 2635 tr_config.tr_arg = arg; 2636 2637 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2638} 2639#endif /* notusedyet */ 2640 2641static int 2642xptsetasyncfunc(struct cam_ed *device, void *arg) 2643{ 2644 struct cam_path path; 2645 struct ccb_getdev cgd; 2646 struct async_node *cur_entry; 2647 2648 cur_entry = (struct async_node *)arg; 2649 2650 /* 2651 * Don't report unconfigured devices (Wildcard devs, 2652 * devices only for target mode, device instances 2653 * that have been invalidated but are waiting for 2654 * their last reference count to be released). 
2655 */ 2656 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0) 2657 return (1); 2658 2659 xpt_compile_path(&path, 2660 NULL, 2661 device->target->bus->path_id, 2662 device->target->target_id, 2663 device->lun_id); 2664 xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1); 2665 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 2666 xpt_action((union ccb *)&cgd); 2667 cur_entry->callback(cur_entry->callback_arg, 2668 AC_FOUND_DEVICE, 2669 &path, &cgd); 2670 xpt_release_path(&path); 2671 2672 return(1); 2673} 2674 2675static int 2676xptsetasyncbusfunc(struct cam_eb *bus, void *arg) 2677{ 2678 struct cam_path path; 2679 struct ccb_pathinq cpi; 2680 struct async_node *cur_entry; 2681 2682 cur_entry = (struct async_node *)arg; 2683 2684 xpt_compile_path(&path, /*periph*/NULL, 2685 bus->sim->path_id, 2686 CAM_TARGET_WILDCARD, 2687 CAM_LUN_WILDCARD); 2688 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1); 2689 cpi.ccb_h.func_code = XPT_PATH_INQ; 2690 xpt_action((union ccb *)&cpi); 2691 cur_entry->callback(cur_entry->callback_arg, 2692 AC_PATH_REGISTERED, 2693 &path, &cpi); 2694 xpt_release_path(&path); 2695 2696 return(1); 2697} 2698 2699void 2700xpt_action(union ccb *start_ccb) 2701{ 2702 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n")); 2703 2704 start_ccb->ccb_h.status = CAM_REQ_INPROG; 2705 2706 switch (start_ccb->ccb_h.func_code) { 2707 case XPT_SCSI_IO: 2708 { 2709#ifdef CAMDEBUG 2710 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1]; 2711 struct cam_path *path; 2712 2713 path = start_ccb->ccb_h.path; 2714#endif 2715 2716 /* 2717 * For the sake of compatibility with SCSI-1 2718 * devices that may not understand the identify 2719 * message, we include lun information in the 2720 * second byte of all commands. SCSI-1 specifies 2721 * that luns are a 3 bit value and reserves only 3 2722 * bits for lun information in the CDB. Later 2723 * revisions of the SCSI spec allow for more than 8 2724 * luns, but have deprecated lun information in the 2725 * CDB. 
So, if the lun won't fit, we must omit. 2726	 * 2727	 * Also be aware that during initial probing for devices, 2728	 * the inquiry information is unknown but initialized to 0. 2729	 * This means that this code will be exercised while probing 2730	 * devices with an ANSI revision greater than 2. 2731	 */ 2732		if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2 2733		 && start_ccb->ccb_h.target_lun < 8 2734		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) { 2735 2736			start_ccb->csio.cdb_io.cdb_bytes[1] |= 2737				start_ccb->ccb_h.target_lun << 5; 2738		} 2739		start_ccb->csio.scsi_status = SCSI_STATUS_OK; 2740		start_ccb->csio.sense_resid = 0; 2741		start_ccb->csio.resid = 0; 2742		CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n", 2743			  scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0], 2744				       &path->device->inq_data), 2745			  scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes, 2746					  cdb_str, sizeof(cdb_str)))); 2747		/* FALLTHROUGH */ 2748	} 2749	case XPT_TARGET_IO: 2750	case XPT_CONT_TARGET_IO: 2751	case XPT_ENG_EXEC: 2752	{ 2753		struct cam_path *path; 2754		int s; 2755		int runq; 2756 2757		path = start_ccb->ccb_h.path; 2758		s = splsoftcam(); 2759 2760		cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb); 2761		if (path->device->qfrozen_cnt == 0) 2762			runq = xpt_schedule_dev_sendq(path->bus, path->device); 2763		else 2764			runq = 0; 2765		splx(s); 2766		if (runq != 0) 2767			xpt_run_dev_sendq(path->bus); 2768		break; 2769	} 2770	case XPT_SET_TRAN_SETTINGS: 2771	{ 2772		xpt_set_transfer_settings(&start_ccb->cts, 2773					  start_ccb->ccb_h.path->device, 2774					  /*async_update*/FALSE); 2775		break; 2776	} 2777	case XPT_CALC_GEOMETRY: 2778		/* Filter out garbage */ 2779		if (start_ccb->ccg.block_size == 0 2780		 || start_ccb->ccg.volume_size == 0) { 2781			start_ccb->ccg.cylinders = 0; 2782			start_ccb->ccg.heads = 0; 2783			start_ccb->ccg.secs_per_track = 0; 2784			start_ccb->ccb_h.status = CAM_REQ_CMP; 2785			break; 2786		} 2787#ifdef PC98 2788		/* 2789		 * In a PC-98 system, geometry translation depends on 2790		 * the
"real" device geometry obtained from mode page 4. 2791 * SCSI geometry translation is performed in the 2792 * initialization routine of the SCSI BIOS and the result 2793 * stored in host memory. If the translation is available 2794 * in host memory, use it. If not, rely on the default 2795 * translation the device driver performs. 2796 */ 2797 if (scsi_da_bios_params(&start_ccb->ccg) != 0) { 2798 start_ccb->ccb_h.status = CAM_REQ_CMP; 2799 break; 2800 } 2801 /* FALLTHROUGH */ 2802#endif 2803 case XPT_ABORT: 2804 case XPT_RESET_DEV: 2805 case XPT_ACCEPT_TARGET_IO: 2806 case XPT_EN_LUN: 2807 case XPT_IMMED_NOTIFY: 2808 case XPT_NOTIFY_ACK: 2809 case XPT_GET_TRAN_SETTINGS: 2810 case XPT_PATH_INQ: 2811 case XPT_RESET_BUS: 2812 { 2813 struct cam_sim *sim; 2814 2815 sim = start_ccb->ccb_h.path->bus->sim; 2816 (*(sim->sim_action))(sim, start_ccb); 2817 break; 2818 } 2819 case XPT_GDEV_TYPE: 2820 { 2821 int s; 2822 2823 s = splcam(); 2824 if ((start_ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) != 0) { 2825 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2826 } else { 2827 struct ccb_getdev *cgd; 2828 struct cam_et *tar; 2829 struct cam_ed *dev; 2830 2831 cgd = &start_ccb->cgd; 2832 tar = cgd->ccb_h.path->target; 2833 dev = cgd->ccb_h.path->device; 2834 cgd->inq_data = dev->inq_data; 2835 cgd->pd_type = SID_TYPE(&dev->inq_data); 2836 cgd->dev_openings = dev->ccbq.dev_openings; 2837 cgd->dev_active = dev->ccbq.dev_active; 2838 cgd->devq_openings = dev->ccbq.devq_openings; 2839 cgd->devq_queued = dev->ccbq.queue.entries; 2840 cgd->held = dev->ccbq.held; 2841 cgd->maxtags = dev->quirk->maxtags; 2842 cgd->mintags = dev->quirk->mintags; 2843 cgd->ccb_h.status = CAM_REQ_CMP; 2844 cgd->serial_num_len = dev->serial_num_len; 2845 if ((dev->serial_num_len > 0) 2846 && (dev->serial_num != NULL)) 2847 bcopy(dev->serial_num, cgd->serial_num, 2848 dev->serial_num_len); 2849 } 2850 splx(s); 2851 break; 2852 } 2853 case XPT_GDEVLIST: 2854 { 2855 struct cam_periph *nperiph; 2856 
struct periph_list *periph_head; 2857 struct ccb_getdevlist *cgdl; 2858 int i; 2859 int s; 2860 struct cam_ed *device; 2861 int found; 2862 2863 2864 found = 0; 2865 2866 /* 2867 * Don't want anyone mucking with our data. 2868 */ 2869 s = splcam(); 2870 device = start_ccb->ccb_h.path->device; 2871 periph_head = &device->periphs; 2872 cgdl = &start_ccb->cgdl; 2873 2874 /* 2875 * Check and see if the list has changed since the user 2876 * last requested a list member. If so, tell them that the 2877 * list has changed, and therefore they need to start over 2878 * from the beginning. 2879 */ 2880 if ((cgdl->index != 0) && 2881 (cgdl->generation != device->generation)) { 2882 cgdl->status = CAM_GDEVLIST_LIST_CHANGED; 2883 splx(s); 2884 break; 2885 } 2886 2887 /* 2888 * Traverse the list of peripherals and attempt to find 2889 * the requested peripheral. 2890 */ 2891 for (nperiph = periph_head->slh_first, i = 0; 2892 (nperiph != NULL) && (i <= cgdl->index); 2893 nperiph = nperiph->periph_links.sle_next, i++) { 2894 if (i == cgdl->index) { 2895 strncpy(cgdl->periph_name, 2896 nperiph->periph_name, 2897 DEV_IDLEN); 2898 cgdl->unit_number = nperiph->unit_number; 2899 found = 1; 2900 } 2901 } 2902 if (found == 0) { 2903 cgdl->status = CAM_GDEVLIST_ERROR; 2904 splx(s); 2905 break; 2906 } 2907 2908 if (nperiph == NULL) 2909 cgdl->status = CAM_GDEVLIST_LAST_DEVICE; 2910 else 2911 cgdl->status = CAM_GDEVLIST_MORE_DEVS; 2912 2913 cgdl->index++; 2914 cgdl->generation = device->generation; 2915 2916 splx(s); 2917 cgdl->ccb_h.status = CAM_REQ_CMP; 2918 break; 2919 } 2920 case XPT_DEV_MATCH: 2921 { 2922 int s; 2923 dev_pos_type position_type; 2924 struct ccb_dev_match *cdm; 2925 int ret; 2926 2927 cdm = &start_ccb->cdm; 2928 2929 /* 2930 * Prevent EDT changes while we traverse it. 2931 */ 2932 s = splcam(); 2933 /* 2934 * There are two ways of getting at information in the EDT. 2935 * The first way is via the primary EDT tree. 
It starts 2936 * with a list of busses, then a list of targets on a bus, 2937 * then devices/luns on a target, and then peripherals on a 2938 * device/lun. The "other" way is by the peripheral driver 2939 * lists. The peripheral driver lists are organized by 2940 * peripheral driver. (obviously) So it makes sense to 2941 * use the peripheral driver list if the user is looking 2942 * for something like "da1", or all "da" devices. If the 2943 * user is looking for something on a particular bus/target 2944 * or lun, it's generally better to go through the EDT tree. 2945 */ 2946 2947 if (cdm->pos.position_type != CAM_DEV_POS_NONE) 2948 position_type = cdm->pos.position_type; 2949 else { 2950 int i; 2951 2952 position_type = CAM_DEV_POS_NONE; 2953 2954 for (i = 0; i < cdm->num_patterns; i++) { 2955 if ((cdm->patterns[i].type == DEV_MATCH_BUS) 2956 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){ 2957 position_type = CAM_DEV_POS_EDT; 2958 break; 2959 } 2960 } 2961 2962 if (cdm->num_patterns == 0) 2963 position_type = CAM_DEV_POS_EDT; 2964 else if (position_type == CAM_DEV_POS_NONE) 2965 position_type = CAM_DEV_POS_PDRV; 2966 } 2967 2968 switch(position_type & CAM_DEV_POS_TYPEMASK) { 2969 case CAM_DEV_POS_EDT: 2970 ret = xptedtmatch(cdm); 2971 break; 2972 case CAM_DEV_POS_PDRV: 2973 ret = xptperiphlistmatch(cdm); 2974 break; 2975 default: 2976 cdm->status = CAM_DEV_MATCH_ERROR; 2977 break; 2978 } 2979 2980 splx(s); 2981 2982 if (cdm->status == CAM_DEV_MATCH_ERROR) 2983 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2984 else 2985 start_ccb->ccb_h.status = CAM_REQ_CMP; 2986 2987 break; 2988 } 2989 case XPT_SASYNC_CB: 2990 { 2991 struct ccb_setasync *csa; 2992 struct async_node *cur_entry; 2993 struct async_list *async_head; 2994 u_int32_t added; 2995 int s; 2996 2997 csa = &start_ccb->csa; 2998 added = csa->event_enable; 2999 async_head = &csa->ccb_h.path->device->asyncs; 3000 3001 /* 3002 * If there is already an entry for us, simply 3003 * update it. 
3004 */ 3005 s = splcam(); 3006 cur_entry = SLIST_FIRST(async_head); 3007 while (cur_entry != NULL) { 3008 if ((cur_entry->callback_arg == csa->callback_arg) 3009 && (cur_entry->callback == csa->callback)) 3010 break; 3011 cur_entry = SLIST_NEXT(cur_entry, links); 3012 } 3013 3014 if (cur_entry != NULL) { 3015 /* 3016 * If the request has no flags set, 3017 * remove the entry. 3018 */ 3019 added &= ~cur_entry->event_enable; 3020 if (csa->event_enable == 0) { 3021 SLIST_REMOVE(async_head, cur_entry, 3022 async_node, links); 3023 csa->ccb_h.path->device->refcount--; 3024 free(cur_entry, M_DEVBUF); 3025 } else { 3026 cur_entry->event_enable = csa->event_enable; 3027 } 3028 } else { 3029 cur_entry = malloc(sizeof(*cur_entry), M_DEVBUF, 3030 M_NOWAIT); 3031 if (cur_entry == NULL) { 3032 splx(s); 3033 csa->ccb_h.status = CAM_RESRC_UNAVAIL; 3034 break; 3035 } 3036 cur_entry->callback_arg = csa->callback_arg; 3037 cur_entry->callback = csa->callback; 3038 cur_entry->event_enable = csa->event_enable; 3039 SLIST_INSERT_HEAD(async_head, cur_entry, links); 3040 csa->ccb_h.path->device->refcount++; 3041 } 3042 3043 if ((added & AC_FOUND_DEVICE) != 0) { 3044 /* 3045 * Get this peripheral up to date with all 3046 * the currently existing devices. 3047 */ 3048 xpt_for_all_devices(xptsetasyncfunc, cur_entry); 3049 } 3050 if ((added & AC_PATH_REGISTERED) != 0) { 3051 /* 3052 * Get this peripheral up to date with all 3053 * the currently existing busses. 
3054 */ 3055 xpt_for_all_busses(xptsetasyncbusfunc, cur_entry); 3056 } 3057 splx(s); 3058 start_ccb->ccb_h.status = CAM_REQ_CMP; 3059 break; 3060 } 3061 case XPT_REL_SIMQ: 3062 { 3063 struct ccb_relsim *crs; 3064 struct cam_ed *dev; 3065 int s; 3066 3067 crs = &start_ccb->crs; 3068 dev = crs->ccb_h.path->device; 3069 if (dev == NULL) { 3070 3071 crs->ccb_h.status = CAM_DEV_NOT_THERE; 3072 break; 3073 } 3074 3075 s = splcam(); 3076 3077 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) { 3078 3079 if ((dev->inq_data.flags & SID_CmdQue) != 0) { 3080 3081 /* Don't ever go below one opening */ 3082 if (crs->openings > 0) { 3083 xpt_dev_ccbq_resize(crs->ccb_h.path, 3084 crs->openings); 3085 3086 if (bootverbose) { 3087 xpt_print_path(crs->ccb_h.path); 3088 printf("tagged openings " 3089 "now %d\n", 3090 crs->openings); 3091 } 3092 } 3093 } 3094 } 3095 3096 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) { 3097 3098 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 3099 3100 /* 3101 * Just extend the old timeout and decrement 3102 * the freeze count so that a single timeout 3103 * is sufficient for releasing the queue. 3104 */ 3105 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 3106 untimeout(xpt_release_devq_timeout, 3107 dev, dev->c_handle); 3108 } else { 3109 3110 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 3111 } 3112 3113 dev->c_handle = 3114 timeout(xpt_release_devq_timeout, 3115 dev, 3116 (crs->release_timeout * hz) / 1000); 3117 3118 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING; 3119 3120 } 3121 3122 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) { 3123 3124 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) { 3125 /* 3126 * Decrement the freeze count so that a single 3127 * completion is still sufficient to unfreeze 3128 * the queue. 
3129 */ 3130 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 3131 } else { 3132 3133 dev->flags |= CAM_DEV_REL_ON_COMPLETE; 3134 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 3135 } 3136 } 3137 3138 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) { 3139 3140 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 3141 || (dev->ccbq.dev_active == 0)) { 3142 3143 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 3144 } else { 3145 3146 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY; 3147 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 3148 } 3149 } 3150 splx(s); 3151 3152 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) { 3153 3154 xpt_release_devq(crs->ccb_h.path->device, 3155 /*run_queue*/TRUE); 3156 } 3157 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt; 3158 start_ccb->ccb_h.status = CAM_REQ_CMP; 3159 break; 3160 } 3161 case XPT_SCAN_BUS: 3162 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb); 3163 break; 3164 case XPT_SCAN_LUN: 3165 xpt_scan_lun(start_ccb->ccb_h.path->periph, 3166 start_ccb->ccb_h.path, start_ccb->crcn.flags, 3167 start_ccb); 3168 break; 3169 case XPT_DEBUG: { 3170#ifdef CAMDEBUG 3171 int s; 3172 3173 s = splcam(); 3174 cam_dflags = start_ccb->cdbg.flags; 3175 if (cam_dpath != NULL) { 3176 xpt_free_path(cam_dpath); 3177 cam_dpath = NULL; 3178 } 3179 3180 if (cam_dflags != CAM_DEBUG_NONE) { 3181 if (xpt_create_path(&cam_dpath, xpt_periph, 3182 start_ccb->ccb_h.path_id, 3183 start_ccb->ccb_h.target_id, 3184 start_ccb->ccb_h.target_lun) != 3185 CAM_REQ_CMP) { 3186 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 3187 cam_dflags = CAM_DEBUG_NONE; 3188 } else { 3189 start_ccb->ccb_h.status = CAM_REQ_CMP; 3190 xpt_print_path(cam_dpath); 3191 printf("debugging flags now %x\n", cam_dflags); 3192 } 3193 } else { 3194 cam_dpath = NULL; 3195 start_ccb->ccb_h.status = CAM_REQ_CMP; 3196 } 3197 splx(s); 3198#else /* !CAMDEBUG */ 3199 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 3200#endif /* CAMDEBUG */ 3201 break; 3202 } 3203 case XPT_NOOP: 3204 start_ccb->ccb_h.status = 
CAM_REQ_CMP; 3205 break; 3206 default: 3207 case XPT_SDEV_TYPE: 3208 case XPT_TERM_IO: 3209 case XPT_ENG_INQ: 3210 /* XXX Implement */ 3211 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL; 3212 break; 3213 } 3214} 3215 3216void 3217xpt_polled_action(union ccb *start_ccb) 3218{ 3219 int s; 3220 u_int32_t timeout; 3221 struct cam_sim *sim; 3222 struct cam_devq *devq; 3223 struct cam_ed *dev; 3224 3225 timeout = start_ccb->ccb_h.timeout; 3226 sim = start_ccb->ccb_h.path->bus->sim; 3227 devq = sim->devq; 3228 dev = start_ccb->ccb_h.path->device; 3229 3230 s = splcam(); 3231 3232 /* 3233 * Steal an opening so that no other queued requests 3234 * can get it before us while we simulate interrupts. 3235 */ 3236 dev->ccbq.devq_openings--; 3237 dev->ccbq.dev_openings--; 3238 3239 while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) 3240 && (--timeout > 0)) { 3241 DELAY(1000); 3242 (*(sim->sim_poll))(sim); 3243 swi_camnet(); 3244 swi_cambio(); 3245 } 3246 3247 dev->ccbq.devq_openings++; 3248 dev->ccbq.dev_openings++; 3249 3250 if (timeout != 0) { 3251 xpt_action(start_ccb); 3252 while(--timeout > 0) { 3253 (*(sim->sim_poll))(sim); 3254 swi_camnet(); 3255 swi_cambio(); 3256 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK) 3257 != CAM_REQ_INPROG) 3258 break; 3259 DELAY(1000); 3260 } 3261 if (timeout == 0) { 3262 /* 3263 * XXX Is it worth adding a sim_timeout entry 3264 * point so we can attempt recovery? If 3265 * this is only used for dumps, I don't think 3266 * it is. 3267 */ 3268 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT; 3269 } 3270 } else { 3271 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 3272 } 3273 splx(s); 3274} 3275 3276/* 3277 * Schedule a peripheral driver to receive a ccb when it's 3278 * target device has space for more transactions. 
 3279 */ 3280 void 3281 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority) 3282 { 3283 	struct cam_ed *device; 3284 	int s; 3285 	int runq; 3286 3287 	CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n")); 3288 	device = perph->path->device; 3289 	s = splsoftcam(); 3290 	if (periph_is_queued(perph)) { 3291 		/* Simply reorder based on new priority */ 3292 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE, 3293 			  ("   change priority to %d\n", new_priority)); 3294 		if (new_priority < perph->pinfo.priority) { 3295 			camq_change_priority(&device->drvq, 3296 					     perph->pinfo.index, 3297 					     new_priority); 3298 		} 3299 		runq = 0; 3300 	} else { 3301 		/* New entry on the queue */ 3302 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE, 3303 			  ("   added periph to queue\n")); 3304 		perph->pinfo.priority = new_priority; 3305 		perph->pinfo.generation = ++device->drvq.generation; 3306 		camq_insert(&device->drvq, &perph->pinfo); 3307 		runq = xpt_schedule_dev_allocq(perph->path->bus, device); 3308 	} 3309 	splx(s); 3310 	if (runq != 0) { 3311 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE, 3312 			  ("   calling xpt_run_devq\n")); 3313 		xpt_run_dev_allocq(perph->path->bus); 3314 	} 3315} 3316 3317 3318/* 3319 * Schedule a device to run on a given queue. 3320 * If the device was inserted as a new entry on the queue, 3321 * return 1 meaning the device queue should be run. If we 3322 * were already queued, implying someone else has already 3323 * started the queue, return 0 so the caller doesn't attempt 3324 * to run the queue.  Must be run at either splsoftcam 3325 * (or splcam since that encompasses splsoftcam). 3326 */ 3327 static int 3328 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo, 3329 		 u_int32_t new_priority) 3330 { 3331 	int retval; 3332 	u_int32_t old_priority; 3333 3334 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n")); 3335 3336 	old_priority = pinfo->priority; 3337 3338 	/* 3339 	 * Are we already queued?
3340 */ 3341 if (pinfo->index != CAM_UNQUEUED_INDEX) { 3342 /* Simply reorder based on new priority */ 3343 if (new_priority < old_priority) { 3344 camq_change_priority(queue, pinfo->index, 3345 new_priority); 3346 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3347 ("changed priority to %d\n", 3348 new_priority)); 3349 } 3350 retval = 0; 3351 } else { 3352 /* New entry on the queue */ 3353 if (new_priority < old_priority) 3354 pinfo->priority = new_priority; 3355 3356 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3357 ("Inserting onto queue\n")); 3358 pinfo->generation = ++queue->generation; 3359 camq_insert(queue, pinfo); 3360 retval = 1; 3361 } 3362 return (retval); 3363} 3364 3365static void 3366xpt_run_dev_allocq(struct cam_eb *bus) 3367{ 3368 struct cam_devq *devq; 3369 int s; 3370 3371 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n")); 3372 devq = bus->sim->devq; 3373 3374 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3375 (" qfrozen_cnt == 0x%x, entries == %d, " 3376 "openings == %d, active == %d\n", 3377 devq->alloc_queue.qfrozen_cnt, 3378 devq->alloc_queue.entries, 3379 devq->alloc_openings, 3380 devq->alloc_active)); 3381 3382 s = splsoftcam(); 3383 devq->alloc_queue.qfrozen_cnt++; 3384 while ((devq->alloc_queue.entries > 0) 3385 && (devq->alloc_openings > 0) 3386 && (devq->alloc_queue.qfrozen_cnt <= 1)) { 3387 struct cam_ed_qinfo *qinfo; 3388 struct cam_ed *device; 3389 union ccb *work_ccb; 3390 struct cam_periph *drv; 3391 struct camq *drvq; 3392 3393 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue, 3394 CAMQ_HEAD); 3395 device = qinfo->device; 3396 3397 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3398 ("running device %p\n", device)); 3399 3400 drvq = &device->drvq; 3401 3402#ifdef CAMDEBUG 3403 if (drvq->entries <= 0) { 3404 panic("xpt_run_dev_allocq: " 3405 "Device on queue without any work to do"); 3406 } 3407#endif 3408 if ((work_ccb = xpt_get_ccb(device)) != NULL) { 3409 devq->alloc_openings--; 3410 devq->alloc_active++; 3411 drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD); 
3412 splx(s); 3413 xpt_setup_ccb(&work_ccb->ccb_h, drv->path, 3414 drv->pinfo.priority); 3415 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3416 ("calling periph start\n")); 3417 drv->periph_start(drv, work_ccb); 3418 } else { 3419 /* 3420 * Malloc failure in alloc_ccb 3421 */ 3422 /* 3423 * XXX add us to a list to be run from free_ccb 3424 * if we don't have any ccbs active on this 3425 * device queue otherwise we may never get run 3426 * again. 3427 */ 3428 break; 3429 } 3430 3431 /* Raise IPL for possible insertion and test at top of loop */ 3432 s = splsoftcam(); 3433 3434 if (drvq->entries > 0) { 3435 /* We have more work. Attempt to reschedule */ 3436 xpt_schedule_dev_allocq(bus, device); 3437 } 3438 } 3439 devq->alloc_queue.qfrozen_cnt--; 3440 splx(s); 3441} 3442 3443static void 3444xpt_run_dev_sendq(struct cam_eb *bus) 3445{ 3446 struct cam_devq *devq; 3447 int s; 3448 3449 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n")); 3450 3451 devq = bus->sim->devq; 3452 3453 s = splcam(); 3454 devq->send_queue.qfrozen_cnt++; 3455 splx(s); 3456 s = splsoftcam(); 3457 while ((devq->send_queue.entries > 0) 3458 && (devq->send_openings > 0)) { 3459 struct cam_ed_qinfo *qinfo; 3460 struct cam_ed *device; 3461 union ccb *work_ccb; 3462 struct cam_sim *sim; 3463 int ospl; 3464 3465 ospl = splcam(); 3466 if (devq->send_queue.qfrozen_cnt > 1) { 3467 splx(ospl); 3468 break; 3469 } 3470 3471 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue, 3472 CAMQ_HEAD); 3473 device = qinfo->device; 3474 3475 /* 3476 * If the device has been "frozen", don't attempt 3477 * to run it. 
3478 */ 3479 if (device->qfrozen_cnt > 0) { 3480 splx(ospl); 3481 continue; 3482 } 3483 3484 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3485 ("running device %p\n", device)); 3486 3487 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD); 3488 if (work_ccb == NULL) { 3489 printf("device on run queue with no ccbs???"); 3490 splx(ospl); 3491 continue; 3492 } 3493 3494 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) { 3495 3496 if (num_highpower <= 0) { 3497 /* 3498 * We got a high power command, but we 3499 * don't have any available slots. Freeze 3500 * the device queue until we have a slot 3501 * available. 3502 */ 3503 device->qfrozen_cnt++; 3504 STAILQ_INSERT_TAIL(&highpowerq, 3505 &work_ccb->ccb_h, 3506 xpt_links.stqe); 3507 3508 splx(ospl); 3509 continue; 3510 } else { 3511 /* 3512 * Consume a high power slot while 3513 * this ccb runs. 3514 */ 3515 num_highpower--; 3516 } 3517 } 3518 devq->active_dev = device; 3519 cam_ccbq_remove_ccb(&device->ccbq, work_ccb); 3520 3521 cam_ccbq_send_ccb(&device->ccbq, work_ccb); 3522 splx(ospl); 3523 3524 devq->send_openings--; 3525 devq->send_active++; 3526 3527 if (device->ccbq.queue.entries > 0) 3528 xpt_schedule_dev_sendq(bus, device); 3529 3530 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){ 3531 /* 3532 * The client wants to freeze the queue 3533 * after this CCB is sent. 3534 */ 3535 ospl = splcam(); 3536 device->qfrozen_cnt++; 3537 splx(ospl); 3538 } 3539 3540 splx(s); 3541 3542 if ((device->inq_flags & SID_CmdQue) != 0) 3543 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID; 3544 else 3545 /* 3546 * Clear this in case of a retried CCB that failed 3547 * due to a rejected tag. 3548 */ 3549 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; 3550 3551 /* 3552 * Device queues can be shared among multiple sim instances 3553 * that reside on different busses. Use the SIM in the queue 3554 * CCB's path, rather than the one in the bus that was passed 3555 * into this function. 
3556 */ 3557 sim = work_ccb->ccb_h.path->bus->sim; 3558 (*(sim->sim_action))(sim, work_ccb); 3559 3560 ospl = splcam(); 3561 devq->active_dev = NULL; 3562 splx(ospl); 3563 /* Raise IPL for possible insertion and test at top of loop */ 3564 s = splsoftcam(); 3565 } 3566 splx(s); 3567 s = splcam(); 3568 devq->send_queue.qfrozen_cnt--; 3569 splx(s); 3570} 3571 3572/* 3573 * This function merges stuff from the slave ccb into the master ccb, while 3574 * keeping important fields in the master ccb constant. 3575 */ 3576void 3577xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb) 3578{ 3579 /* 3580 * Pull fields that are valid for peripheral drivers to set 3581 * into the master CCB along with the CCB "payload". 3582 */ 3583 master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count; 3584 master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code; 3585 master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout; 3586 master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags; 3587 bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1], 3588 sizeof(union ccb) - sizeof(struct ccb_hdr)); 3589} 3590 3591void 3592xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority) 3593{ 3594 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n")); 3595 ccb_h->pinfo.priority = priority; 3596 ccb_h->path = path; 3597 ccb_h->path_id = path->bus->path_id; 3598 if (path->target) 3599 ccb_h->target_id = path->target->target_id; 3600 else 3601 ccb_h->target_id = CAM_TARGET_WILDCARD; 3602 if (path->device) { 3603 ccb_h->target_lun = path->device->lun_id; 3604 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation; 3605 } else { 3606 ccb_h->target_lun = CAM_TARGET_WILDCARD; 3607 } 3608 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 3609 ccb_h->flags = 0; 3610} 3611 3612/* Path manipulation functions */ 3613cam_status 3614xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph, 3615 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3616{ 3617 struct cam_path 
*path; 3618 cam_status status; 3619 3620 path = (struct cam_path *)malloc(sizeof(*path), M_DEVBUF, M_NOWAIT); 3621 3622 if (path == NULL) { 3623 status = CAM_RESRC_UNAVAIL; 3624 return(status); 3625 } 3626 status = xpt_compile_path(path, perph, path_id, target_id, lun_id); 3627 if (status != CAM_REQ_CMP) { 3628 free(path, M_DEVBUF); 3629 path = NULL; 3630 } 3631 *new_path_ptr = path; 3632 return (status); 3633} 3634 3635static cam_status 3636xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph, 3637 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3638{ 3639 struct cam_eb *bus; 3640 struct cam_et *target; 3641 struct cam_ed *device; 3642 cam_status status; 3643 int s; 3644 3645 status = CAM_REQ_CMP; /* Completed without error */ 3646 target = NULL; /* Wildcarded */ 3647 device = NULL; /* Wildcarded */ 3648 3649 /* 3650 * We will potentially modify the EDT, so block interrupts 3651 * that may attempt to create cam paths. 3652 */ 3653 s = splcam(); 3654 bus = xpt_find_bus(path_id); 3655 if (bus == NULL) { 3656 status = CAM_PATH_INVALID; 3657 } else { 3658 target = xpt_find_target(bus, target_id); 3659 if (target == NULL) { 3660 /* Create one */ 3661 struct cam_et *new_target; 3662 3663 new_target = xpt_alloc_target(bus, target_id); 3664 if (new_target == NULL) { 3665 status = CAM_RESRC_UNAVAIL; 3666 } else { 3667 target = new_target; 3668 } 3669 } 3670 if (target != NULL) { 3671 device = xpt_find_device(target, lun_id); 3672 if (device == NULL) { 3673 /* Create one */ 3674 struct cam_ed *new_device; 3675 3676 new_device = xpt_alloc_device(bus, 3677 target, 3678 lun_id); 3679 if (new_device == NULL) { 3680 status = CAM_RESRC_UNAVAIL; 3681 } else { 3682 device = new_device; 3683 } 3684 } 3685 } 3686 } 3687 splx(s); 3688 3689 /* 3690 * Only touch the user's data if we are successful. 
3691 */ 3692 if (status == CAM_REQ_CMP) { 3693 new_path->periph = perph; 3694 new_path->bus = bus; 3695 new_path->target = target; 3696 new_path->device = device; 3697 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n")); 3698 } else { 3699 if (device != NULL) 3700 xpt_release_device(bus, target, device); 3701 if (target != NULL) 3702 xpt_release_target(bus, target); 3703 if (bus != NULL) 3704 xpt_release_bus(bus); 3705 } 3706 return (status); 3707} 3708 3709static void 3710xpt_release_path(struct cam_path *path) 3711{ 3712 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n")); 3713 if (path->device != NULL) { 3714 xpt_release_device(path->bus, path->target, path->device); 3715 path->device = NULL; 3716 } 3717 if (path->target != NULL) { 3718 xpt_release_target(path->bus, path->target); 3719 path->target = NULL; 3720 } 3721 if (path->bus != NULL) { 3722 xpt_release_bus(path->bus); 3723 path->bus = NULL; 3724 } 3725} 3726 3727void 3728xpt_free_path(struct cam_path *path) 3729{ 3730 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n")); 3731 xpt_release_path(path); 3732 free(path, M_DEVBUF); 3733} 3734 3735 3736/* 3737 * Return -1 for failure, 0 for exact match, 1 for match with wildcards. 
3738 */ 3739int 3740xpt_path_comp(struct cam_path *path1, struct cam_path *path2) 3741{ 3742 int retval = 0; 3743 3744 if (path1->bus != path2->bus) { 3745 if ((path1->bus == NULL) 3746 || (path2->bus == NULL)) 3747 retval = 1; 3748 else 3749 return (-1); 3750 } 3751 if (path1->target != path2->target) { 3752 if ((path1->target == NULL) 3753 || (path2->target == NULL)) 3754 retval = 1; 3755 else 3756 return (-1); 3757 } 3758 if (path1->device != path2->device) { 3759 if ((path1->device == NULL) 3760 || (path2->device == NULL)) 3761 retval = 1; 3762 else 3763 return (-1); 3764 } 3765 return (retval); 3766} 3767 3768void 3769xpt_print_path(struct cam_path *path) 3770{ 3771 if (path == NULL) 3772 printf("(nopath): "); 3773 else { 3774 if (path->periph != NULL) 3775 printf("(%s%d:", path->periph->periph_name, 3776 path->periph->unit_number); 3777 else 3778 printf("(noperiph:"); 3779 3780 if (path->bus != NULL) 3781 printf("%s%d:%d:", path->bus->sim->sim_name, 3782 path->bus->sim->unit_number, 3783 path->bus->sim->bus_id); 3784 else 3785 printf("nobus:"); 3786 3787 if (path->target != NULL) 3788 printf("%d:", path->target->target_id); 3789 else 3790 printf("X:"); 3791 3792 if (path->device != NULL) 3793 printf("%d): ", path->device->lun_id); 3794 else 3795 printf("X): "); 3796 } 3797} 3798 3799path_id_t 3800xpt_path_path_id(struct cam_path *path) 3801{ 3802 return(path->bus->path_id); 3803} 3804 3805target_id_t 3806xpt_path_target_id(struct cam_path *path) 3807{ 3808 if (path->target != NULL) 3809 return (path->target->target_id); 3810 else 3811 return (CAM_TARGET_WILDCARD); 3812} 3813 3814lun_id_t 3815xpt_path_lun_id(struct cam_path *path) 3816{ 3817 if (path->device != NULL) 3818 return (path->device->lun_id); 3819 else 3820 return (CAM_LUN_WILDCARD); 3821} 3822 3823struct cam_sim * 3824xpt_path_sim(struct cam_path *path) 3825{ 3826 return (path->bus->sim); 3827} 3828 3829struct cam_periph* 3830xpt_path_periph(struct cam_path *path) 3831{ 3832 return (path->periph); 
3833} 3834 3835/* 3836 * Release a CAM control block for the caller. Remit the cost of the structure 3837 * to the device referenced by the path. If the this device had no 'credits' 3838 * and peripheral drivers have registered async callbacks for this notification 3839 * call them now. 3840 */ 3841void 3842xpt_release_ccb(union ccb *free_ccb) 3843{ 3844 int s; 3845 struct cam_path *path; 3846 struct cam_ed *device; 3847 struct cam_eb *bus; 3848 3849 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n")); 3850 path = free_ccb->ccb_h.path; 3851 device = path->device; 3852 bus = path->bus; 3853 s = splsoftcam(); 3854 cam_ccbq_release_opening(&device->ccbq); 3855 if (xpt_ccb_count > xpt_max_ccbs) { 3856 xpt_free_ccb(free_ccb); 3857 xpt_ccb_count--; 3858 } else { 3859 SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle); 3860 } 3861 bus->sim->devq->alloc_openings++; 3862 bus->sim->devq->alloc_active--; 3863 /* XXX Turn this into an inline function - xpt_run_device?? */ 3864 if ((device_is_alloc_queued(device) == 0) 3865 && (device->drvq.entries > 0)) { 3866 xpt_schedule_dev_allocq(bus, device); 3867 } 3868 splx(s); 3869 if (dev_allocq_is_runnable(bus->sim->devq)) 3870 xpt_run_dev_allocq(bus); 3871} 3872 3873/* Functions accessed by SIM drivers */ 3874 3875/* 3876 * A sim structure, listing the SIM entry points and instance 3877 * identification info is passed to xpt_bus_register to hook the SIM 3878 * into the CAM framework. xpt_bus_register creates a cam_eb entry 3879 * for this new bus and places it in the array of busses and assigns 3880 * it a path_id. The path_id may be influenced by "hard wiring" 3881 * information specified by the user. Once interrupt services are 3882 * availible, the bus will be probed. 
 */
int32_t
xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
{
	static path_id_t buscount;
	struct cam_eb *new_bus;
	struct ccb_pathinq cpi;
	int s;

	sim->bus_id = bus;
	new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
					  M_DEVBUF, M_NOWAIT);
	if (new_bus == NULL) {
		/* Couldn't satisfy request */
		return (CAM_RESRC_UNAVAIL);
	}

	bzero(new_bus, sizeof(*new_bus));

	/* The "xpt" SIM keeps its preassigned path_id; others are wired. */
	if (strcmp(sim->sim_name, "xpt") != 0) {

		sim->path_id = xptpathid(sim->sim_name, sim->unit_number,
					 sim->bus_id, &buscount);
	}

	new_bus->path_id = sim->path_id;
	new_bus->sim = sim;
	TAILQ_INIT(&new_bus->et_entries);
	new_bus->refcount = 1;	/* Held until a bus_deregister event */
	s = splcam();
	TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
	bus_generation++;
	splx(s);

	/* Notify interested parties */
	if (sim->path_id != CAM_XPT_PATH_ID) {
		struct cam_path path;

		xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
			         CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
		cpi.ccb_h.func_code = XPT_PATH_INQ;
		xpt_action((union ccb *)&cpi);
		xpt_async(AC_PATH_REGISTERED, xpt_periph->path, &cpi);
		xpt_release_path(&path);
	}
	return (CAM_SUCCESS);
}

/*
 * Return the first bus number >= startbus that is not "hard wired" to
 * a SIM in the static configuration table.  Restarts the scan whenever
 * a collision forces startbus forward.
 */
static int
xptnextfreebus(path_id_t startbus)
{
	struct cam_sim_config *sim_conf;

	sim_conf = cam_sinit;
	while (sim_conf->sim_name != NULL) {

		if (IS_SPECIFIED(sim_conf->pathid)
		 && (startbus == sim_conf->pathid)) {
			++startbus;
			/* Start the search over */
			sim_conf = cam_sinit;
		} else {
			sim_conf++;
		}
	}
	return (startbus);
}

/*
 * Determine the path id for a newly registered SIM instance: use a
 * "hard wired" entry from the static configuration table if one matches
 * this SIM name/unit/bus, otherwise hand out the next dynamically
 * assignable id via *nextpath.
 */
static int
xptpathid(const char *sim_name, int sim_unit,
	  int sim_bus, path_id_t *nextpath)
{
	struct cam_sim_config *sim_conf;
	path_id_t pathid;

	pathid = CAM_XPT_PATH_ID;
	for (sim_conf = cam_sinit; sim_conf->sim_name != NULL; sim_conf++) {

		if (!IS_SPECIFIED(sim_conf->pathid))
			continue;

		if (!strcmp(sim_name, sim_conf->sim_name)
		 && (sim_unit == sim_conf->sim_unit)) {

			if (IS_SPECIFIED(sim_conf->sim_bus)) {
				if (sim_bus == sim_conf->sim_bus) {
					pathid = sim_conf->pathid;
					break;
				}
			} else if (sim_bus == 0) {
				/* Unspecified matches bus 0 */
				pathid = sim_conf->pathid;
				break;
			} else {
				printf("Ambiguous scbus configuration for %s%d "
				       "bus %d, cannot wire down.  The kernel "
				       "config entry for scbus%d should "
				       "specify a controller bus.\n"
				       "Scbus will be assigned dynamically.\n",
				       sim_name, sim_unit, sim_bus,
				       sim_conf->pathid);
				break;
			}
		}
	}

	/* No hard wired entry matched; assign dynamically. */
	if (pathid == CAM_XPT_PATH_ID) {
		pathid = xptnextfreebus(*nextpath);
		*nextpath = pathid + 1;
	}
	return (pathid);
}

/*
 * Tear down a previously registered bus.  Not yet implemented; always
 * reports success.
 */
int32_t
xpt_bus_deregister(path_id)
	u_int8_t path_id;
{
	/* XXX */
	return (CAM_SUCCESS);
}

/*
 * Broadcast an asynchronous event to every device matched by 'path'
 * (NULL target/device components act as wildcards), and to clients that
 * registered for all events.  Also performs event specific bookkeeping:
 * freezing the SIM queue after a bus reset, freezing/settling a device
 * queue after a BDR, rescanning after an inquiry change, and so on.
 */
void
xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
{
	struct cam_eb *bus;
	struct cam_et *target, *next_target;
	struct cam_ed *device, *next_device;
	int s;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));

	/*
	 * Most async events come from a CAM interrupt context.  In
	 * a few cases, the error recovery code at the peripheral layer,
	 * which may run from our SWI or a process context, may signal
	 * deferred events with a call to xpt_async. Ensure async
	 * notifications are serialized by blocking cam interrupts.
	 */
	s = splcam();

	bus = path->bus;

	/*
	 * Freeze the SIM queue for SCSI_DELAY ms to
	 * allow the bus to settle.
	 */
	if (async_code == AC_BUS_RESET) {
		struct cam_sim *sim;

		sim = bus->sim;

		/*
		 * If there isn't already another timeout pending, go ahead
		 * and freeze the simq and set the timeout flag.  If there
		 * is another timeout pending, replace it with this
		 * timeout.  There could be two bus reset async broadcasts
		 * sent for some dual-channel controllers.
		 */
		if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) == 0) {
			xpt_freeze_simq(sim, 1);
			sim->flags |= CAM_SIM_REL_TIMEOUT_PENDING;
		} else
			untimeout(xpt_release_simq_timeout, sim, sim->c_handle);

		sim->c_handle = timeout(xpt_release_simq_timeout,
					sim, (SCSI_DELAY * hz) / 1000);
	}

	for (target = TAILQ_FIRST(&bus->et_entries);
	     target != NULL;
	     target = next_target) {

		/*
		 * Grab the next pointer first; event handling below may
		 * remove entries from the list.
		 */
		next_target = TAILQ_NEXT(target, links);

		if (path->target != target
		 && path->target != NULL)
			continue;

		for (device = TAILQ_FIRST(&target->ed_entries);
		     device != NULL;
		     device = next_device) {
			cam_status status;
			struct cam_path newpath;

			next_device = TAILQ_NEXT(device, links);

			if (path->device != device
			 && path->device != NULL)
				continue;

			/*
			 * We need our own path with wildcards expanded to
			 * handle certain types of events.
			 */
			if ((async_code == AC_SENT_BDR)
			 || (async_code == AC_BUS_RESET)
			 || (async_code == AC_INQ_CHANGED))
				status = xpt_compile_path(&newpath, NULL,
							  bus->path_id,
							  target->target_id,
							  device->lun_id);
			else
				status = CAM_REQ_CMP_ERR;

			if (status == CAM_REQ_CMP) {

				/*
				 * Allow transfer negotiation to occur in a
				 * tag free environment.
				 */
				if (async_code == AC_SENT_BDR
				 || async_code == AC_BUS_RESET)
					xpt_toggle_tags(&newpath);

				/*
				 * If we send a BDR, freeze the device queue
				 * for SCSI_DELAY ms to allow it to settle
				 * down.
				 */
				if (async_code == AC_SENT_BDR) {
					xpt_freeze_devq(&newpath, 1);
					/*
					 * Although this looks bad, it
					 * isn't as bad as it seems.  We're
					 * passing in a stack-allocated path
					 * that we then immediately release
					 * after scheduling a timeout to
					 * release the device queue.  So
					 * the path won't be around when
					 * the timeout fires, right?  Right.
					 * But it doesn't matter, since
					 * xpt_release_devq and its timeout
					 * function both take the device as
					 * an argument.  Theoretically, the
					 * device will still be there when
					 * the timeout fires, even though
					 * the path will be gone.
					 */
					cam_release_devq(
						   &newpath,
						   /*relsim_flags*/
						   RELSIM_RELEASE_AFTER_TIMEOUT,
						   /*reduction*/0,
						   /*timeout*/SCSI_DELAY,
						   /*getcount_only*/0);
				} else if (async_code == AC_INQ_CHANGED) {
					/*
					 * We've sent a start unit command, or
					 * something similar to a device that
					 * may have caused its inquiry data to
					 * change. So we re-scan the device to
					 * refresh the inquiry data for it.
					 */
					xpt_scan_lun(newpath.periph, &newpath,
						     CAM_EXPECT_INQ_CHANGE,
						     NULL);
				}
				xpt_release_path(&newpath);
			} else if (async_code == AC_LOST_DEVICE) {
				device->flags |= CAM_DEV_UNCONFIGURED;
			} else if (async_code == AC_TRANSFER_NEG) {
				struct ccb_trans_settings *settings;

				settings =
				    (struct ccb_trans_settings *)async_arg;
				xpt_set_transfer_settings(settings, device,
							  /*async_update*/TRUE);
			}

			xpt_async_bcast(&device->asyncs,
					async_code,
					path,
					async_arg);
		}
	}

	/*
	 * If this wasn't a fully wildcarded async, tell all
	 * clients that want all async events.
	 */
	if (bus != xpt_periph->path->bus)
		xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
				path, async_arg);
	splx(s);
}

/*
 * Invoke every callback on the async list that has this event code
 * enabled in its event mask.
 */
static void
xpt_async_bcast(struct async_list *async_head,
		u_int32_t async_code,
		struct cam_path *path, void *async_arg)
{
	struct async_node *cur_entry;

	cur_entry = SLIST_FIRST(async_head);
	while (cur_entry != NULL) {
		struct async_node *next_entry;
		/*
		 * Grab the next list entry before we call the current
		 * entry's callback.  This is because the callback function
		 * can delete its async callback entry.
		 */
		next_entry = SLIST_NEXT(cur_entry, links);
		if ((cur_entry->event_enable & async_code) != 0)
			cur_entry->callback(cur_entry->callback_arg,
					    async_code, path,
					    async_arg);
		cur_entry = next_entry;
	}
}

/*
 * Freeze the queue of the device referenced by 'path' by 'count'
 * additional levels.  Returns the new freeze count.
 */
u_int32_t
xpt_freeze_devq(struct cam_path *path, u_int count)
{
	int s;
	struct ccb_hdr *ccbh;

	s = splcam();
	path->device->qfrozen_cnt += count;

	/*
	 * Mark the last CCB in the queue as needing
	 * to be requeued if the driver hasn't
	 * changed its state yet.  This fixes a race
	 * where a ccb is just about to be queued to
	 * a controller driver when its interrupt routine
	 * freezes the queue.  To completely close the
	 * hole, controller drivers must check to see
	 * if a ccb's status is still CAM_REQ_INPROG
	 * under spl protection just before they queue
	 * the CCB.  See ahc_action/ahc_freeze_devq for
	 * an example.
	 */
	ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
	if (ccbh && ccbh->status == CAM_REQ_INPROG)
		ccbh->status = CAM_REQUEUE_REQ;
	splx(s);
	return (path->device->qfrozen_cnt);
}

/*
 * Freeze a SIM's send queue by 'count' additional levels, flagging the
 * in-flight CCB of the active device (if any) for requeue.  Returns the
 * new freeze count.
 */
u_int32_t
xpt_freeze_simq(struct cam_sim *sim, u_int count)
{
	sim->devq->send_queue.qfrozen_cnt += count;
	if (sim->devq->active_dev != NULL) {
		struct ccb_hdr *ccbh;

		ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
				  ccb_hdr_tailq);
		if (ccbh && ccbh->status == CAM_REQ_INPROG)
			ccbh->status = CAM_REQUEUE_REQ;
	}
	return (sim->devq->send_queue.qfrozen_cnt);
}

/*
 * timeout(9) wrapper: release one freeze level on the device passed as
 * the timeout argument and run its queue.
 */
static void
xpt_release_devq_timeout(void *arg)
{
	struct cam_ed *device;

	device = (struct cam_ed *)arg;

	xpt_release_devq(device, /*run_queue*/TRUE);
}

/*
 * Drop one freeze level on a device queue.  When the count reaches
 * zero, cancel any pending release timeout, clear the release-on-
 * complete flag, and (if run_queue is set and work is pending) run the
 * send queue.
 */
void
xpt_release_devq(struct cam_ed *dev, int run_queue)
{
	int rundevq;
	int s;

	rundevq = 0;
	s = splcam();
	if (dev->qfrozen_cnt > 0) {

		dev->qfrozen_cnt--;
		if (dev->qfrozen_cnt == 0) {

			/*
			 * No longer need to wait for a successful
			 * command completion.
			 */
			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;

			/*
			 * Remove any timeouts that might be scheduled
			 * to release this queue.
			 */
			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
				untimeout(xpt_release_devq_timeout, dev,
					  dev->c_handle);
				dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
			}

			/*
			 * Now that we are unfrozen schedule the
			 * device so any pending transactions are
			 * run.
			 */
			if ((dev->ccbq.queue.entries > 0)
			 && (xpt_schedule_dev_sendq(dev->target->bus, dev))
			 && (run_queue != 0)) {
				rundevq = 1;
			}
		}
	}
	splx(s);
	if (rundevq != 0)
		xpt_run_dev_sendq(dev->target->bus);
}

/*
 * Drop one freeze level on a SIM's send queue.  When the count reaches
 * zero, cancel any pending release timeout and (if run_queue is set)
 * run the send queue for the SIM's bus.
 */
void
xpt_release_simq(struct cam_sim *sim, int run_queue)
{
	int s;
	struct camq *sendq;

	sendq = &(sim->devq->send_queue);
	s = splcam();
	if (sendq->qfrozen_cnt > 0) {

		sendq->qfrozen_cnt--;
		if (sendq->qfrozen_cnt == 0) {
			struct cam_eb *bus;

			/*
			 * If there is a timeout scheduled to release this
			 * sim queue, remove it.  The queue frozen count is
			 * already at 0.
			 */
			if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
				untimeout(xpt_release_simq_timeout, sim,
					  sim->c_handle);
				sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
			}
			/* xpt_find_bus takes a bus reference; drop it below */
			bus = xpt_find_bus(sim->path_id);
			splx(s);

			if (run_queue) {
				/*
				 * Now that we are unfrozen run the send queue.
				 */
				xpt_run_dev_sendq(bus);
			}
			xpt_release_bus(bus);
		} else
			splx(s);
	} else
		splx(s);
}

/*
 * timeout(9) wrapper: release one freeze level on the SIM queue passed
 * as the timeout argument and run its queue.
 */
static void
xpt_release_simq_timeout(void *arg)
{
	struct cam_sim *sim;

	sim = (struct cam_sim *)arg;
	xpt_release_simq(sim, /* run_queue */ TRUE);
}

/*
 * Completion entry point called by SIMs.  Non-immediate CCB types are
 * queued to the appropriate (bio or net) done queue and the matching
 * software interrupt is requested; everything else is ignored here.
 */
void
xpt_done(union ccb *done_ccb)
{
	int s;

	s = splcam();

	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
	switch (done_ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
	case XPT_ENG_EXEC:
	case XPT_TARGET_IO:
	case XPT_ACCEPT_TARGET_IO:
	case XPT_CONT_TARGET_IO:
	case XPT_IMMED_NOTIFY:
	case XPT_SCAN_BUS:
	case XPT_SCAN_LUN:
	{
		/*
		 * Queue up the request for handling by our SWI handler
		 * any of the "non-immediate" type of ccbs.
		 */
		switch (done_ccb->ccb_h.path->periph->type) {
		case CAM_PERIPH_BIO:
			TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
					  sim_links.tqe);
			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
			setsoftcambio();
			break;
		case CAM_PERIPH_NET:
			TAILQ_INSERT_TAIL(&cam_netq, &done_ccb->ccb_h,
					  sim_links.tqe);
			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
			setsoftcamnet();
			break;
		}
		break;
	}
	default:
		break;
	}
	splx(s);
}

/*
 * Allocate a CCB from the kernel allocator; may sleep (M_WAITOK).
 */
union ccb *
xpt_alloc_ccb()
{
	union ccb *new_ccb;

	new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_WAITOK);
	return (new_ccb);
}

/*
 * Return a CCB allocated by xpt_alloc_ccb to the kernel allocator.
 */
void
xpt_free_ccb(union ccb *free_ccb)
{
	free(free_ccb, M_DEVBUF);
}



/* Private XPT functions */

/*
 * Get a CAM control block for the caller.  Charge the structure to the device
 * referenced by the path.  If the this device has no 'credits' then the
 * device already has the maximum number of outstanding operations under way
 * and we return NULL.
If we don't have sufficient resources to allocate more
 * ccbs, we also return NULL.
 */
static union ccb *
xpt_get_ccb(struct cam_ed *device)
{
	union ccb *new_ccb;
	int s;

	s = splsoftcam();
	/* Refill the free list from malloc if it is empty. */
	if ((new_ccb = (union ccb *)ccb_freeq.slh_first) == NULL) {
		new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_NOWAIT);
		if (new_ccb == NULL) {
			splx(s);
			return (NULL);
		}
		callout_handle_init(&new_ccb->ccb_h.timeout_ch);
		SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
				  xpt_links.sle);
		xpt_ccb_count++;
	}
	cam_ccbq_take_opening(&device->ccbq);
	SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
	splx(s);
	return (new_ccb);
}

/*
 * Drop a reference on a bus; free it once it is unreferenced and has no
 * remaining target entries.
 */
static void
xpt_release_bus(struct cam_eb *bus)
{
	int s;

	s = splcam();
	if ((--bus->refcount == 0)
	 && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
		TAILQ_REMOVE(&xpt_busses, bus, links);
		bus_generation++;
		splx(s);
		free(bus, M_DEVBUF);
	} else
		splx(s);
}

/*
 * Allocate a new target entry for 'target_id' and insert it, sorted by
 * target id, into the bus's target list.  Returns NULL on allocation
 * failure.  The new target starts with one reference and holds a
 * reference on its parent bus.
 */
static struct cam_et *
xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
{
	struct cam_et *target;

	target = (struct cam_et *)malloc(sizeof(*target), M_DEVBUF, M_NOWAIT);
	if (target != NULL) {
		struct cam_et *cur_target;

		target->bus = bus;
		target->target_id = target_id;
		target->refcount = 1;
		/*
		 * Hold a reference to our parent bus so it
		 * will not go away before we do.
		 */
		bus->refcount++;
		TAILQ_INIT(&target->ed_entries);

		/* Insertion sort into our bus's target list */
		cur_target = TAILQ_FIRST(&bus->et_entries);
		while (cur_target != NULL && cur_target->target_id < target_id)
			cur_target = TAILQ_NEXT(cur_target, links);

		if (cur_target != NULL) {
			TAILQ_INSERT_BEFORE(cur_target, target, links);
		} else {
			TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
		}
		bus->generation++;
	}
	return (target);
}

/*
 * Drop a reference on a target; free it (and release its bus reference)
 * once it is unreferenced and has no remaining device entries.
 */
static void
xpt_release_target(struct cam_eb *bus, struct cam_et *target)
{
	int s;

	s = splcam();
	if ((--target->refcount == 0)
	 && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
		TAILQ_REMOVE(&bus->et_entries, target, links);
		bus->generation++;
		splx(s);
		free(target, M_DEVBUF);
		xpt_release_bus(bus);
	} else
		splx(s);
}

/*
 * Allocate a new device entry for 'lun_id', grow the bus's device queue
 * to make room for it, initialize its driver and ccb queues, and insert
 * it, sorted by lun, into the target's device list.  Returns NULL on
 * any allocation failure.  The new device starts with one reference and
 * holds a reference on its parent target.
 */
static struct cam_ed *
xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
{
	struct cam_ed *device;
	struct cam_devq *devq;
	cam_status status;

	/* Make space for us in the device queue on our bus */
	devq = bus->sim->devq;
	status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);

	if (status != CAM_REQ_CMP) {
		device = NULL;
	} else {
		device = (struct cam_ed *)malloc(sizeof(*device),
						 M_DEVBUF, M_NOWAIT);
	}

	if (device != NULL) {
		struct cam_ed *cur_device;

		bzero(device, sizeof(*device));

		SLIST_INIT(&device->asyncs);
		SLIST_INIT(&device->periphs);
		callout_handle_init(&device->c_handle);
		device->refcount = 1;
		device->flags |= CAM_DEV_UNCONFIGURED;

		cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
		device->alloc_ccb_entry.device = device;
		cam_init_pinfo(&device->send_ccb_entry.pinfo);
		device->send_ccb_entry.device = device;

		device->target = target;
		/*
		 * Hold a reference to our parent target so it
		 * will not go away before we do.
		 */
		target->refcount++;

		device->lun_id = lun_id;

		/* Initialize our queues */
		if (camq_init(&device->drvq, 0) != 0) {
			free(device, M_DEVBUF);
			return (NULL);
		}

		if (cam_ccbq_init(&device->ccbq,
				  bus->sim->max_dev_openings) != 0) {
			camq_fini(&device->drvq);
			free(device, M_DEVBUF);
			return (NULL);
		}
		/*
		 * XXX should be limited by number of CCBs this bus can
		 * do.
		 */
		xpt_max_ccbs += device->ccbq.devq_openings;
		/* Insertion sort into our target's device list */
		cur_device = TAILQ_FIRST(&target->ed_entries);
		while (cur_device != NULL && cur_device->lun_id < lun_id)
			cur_device = TAILQ_NEXT(cur_device, links);
		if (cur_device != NULL) {
			TAILQ_INSERT_BEFORE(cur_device, device, links);
		} else {
			TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
		}
		target->generation++;
	}
	return (device);
}

/*
 * Drop a reference on a device; once it is unreferenced and marked
 * unconfigured, remove it from its target, give back its slot in the
 * bus's device queue, and free it.
 */
static void
xpt_release_device(struct cam_eb *bus, struct cam_et *target,
		   struct cam_ed *device)
{
	int s;

	s = splcam();
	if ((--device->refcount == 0)
	 && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
		struct cam_devq *devq;

		if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
		 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
			panic("Removing device while still queued for ccbs");
		TAILQ_REMOVE(&target->ed_entries, device,links);
		target->generation++;
		xpt_max_ccbs -= device->ccbq.devq_openings;
		/* Release our slot in the devq */
		devq = bus->sim->devq;
		cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
		splx(s);
		free(device, M_DEVBUF);
	} else
		splx(s);
}

/*
 * Resize a device's ccb queue to 'newopenings' openings, adjusting the
 * global ccb limit by the difference.  If the queue shrank, flag the
 * device so the resize can complete once outstanding commands drain.
 */
static u_int32_t
xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
{
	int s;
	int diff;
	int result;
	struct cam_ed *dev;

	dev = path->device;
	s = splsoftcam();

	diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
	result = cam_ccbq_resize(&dev->ccbq, newopenings);
	if (result == CAM_REQ_CMP && (diff < 0)) {
		dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
	}
	/* Adjust the global limit */
	xpt_max_ccbs += diff;
	splx(s);
	return (result);
}

/*
 * Look up a bus by path id; takes a reference on the bus when found.
 * Returns NULL when no bus matches.
 */
static struct cam_eb *
xpt_find_bus(path_id_t path_id)
{
	struct cam_eb *bus;

	for (bus = TAILQ_FIRST(&xpt_busses);
	     bus != NULL;
	     bus = TAILQ_NEXT(bus, links)) {
		if (bus->path_id == path_id) {
			bus->refcount++;
			break;
		}
	}
	return (bus);
}

/*
 * Look up a target on a bus by target id; takes a reference on the
 * target when found.  Returns NULL when no target matches.
 */
static struct cam_et *
xpt_find_target(struct cam_eb *bus, target_id_t target_id)
{
	struct cam_et *target;

	for (target = TAILQ_FIRST(&bus->et_entries);
	     target != NULL;
	     target = TAILQ_NEXT(target, links)) {
		if (target->target_id == target_id) {
			target->refcount++;
			break;
		}
	}
	return (target);
}

/*
 * Look up a device on a target by lun id; takes a reference on the
 * device when found.  Returns NULL when no device matches.
 */
static struct cam_ed *
xpt_find_device(struct cam_et *target, lun_id_t lun_id)
{
	struct cam_ed *device;

	for (device = TAILQ_FIRST(&target->ed_entries);
	     device != NULL;
	     device = TAILQ_NEXT(device, links)) {
		if (device->lun_id == lun_id) {
			device->refcount++;
			break;
		}
	}
	return (device);
}

/* Per-scan state shared by the XPT_SCAN_BUS completion chain. */
typedef struct {
	union	ccb *request_ccb;	/* original XPT_SCAN_BUS request */
	struct	ccb_pathinq *cpi;	/* cached path inquiry results */
	int	pending_count;		/* luns still being probed */
} xpt_scan_bus_info;

/*
 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
 * As the scan progresses, xpt_scan_bus is used as the
 * callback on completion function.
4684 */ 4685static void 4686xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb) 4687{ 4688 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, 4689 ("xpt_scan_bus\n")); 4690 switch (request_ccb->ccb_h.func_code) { 4691 case XPT_SCAN_BUS: 4692 { 4693 xpt_scan_bus_info *scan_info; 4694 union ccb *work_ccb; 4695 struct cam_path *path; 4696 u_int i; 4697 u_int max_target; 4698 u_int initiator_id; 4699 4700 /* Find out the characteristics of the bus */ 4701 work_ccb = xpt_alloc_ccb(); 4702 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path, 4703 request_ccb->ccb_h.pinfo.priority); 4704 work_ccb->ccb_h.func_code = XPT_PATH_INQ; 4705 xpt_action(work_ccb); 4706 if (work_ccb->ccb_h.status != CAM_REQ_CMP) { 4707 request_ccb->ccb_h.status = work_ccb->ccb_h.status; 4708 xpt_free_ccb(work_ccb); 4709 xpt_done(request_ccb); 4710 return; 4711 } 4712 4713 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) { 4714 /* 4715 * Can't scan the bus on an adapter that 4716 * cannot perform the initiator role. 4717 */ 4718 request_ccb->ccb_h.status = CAM_REQ_CMP; 4719 xpt_free_ccb(work_ccb); 4720 xpt_done(request_ccb); 4721 return; 4722 } 4723 4724 /* Save some state for use while we probe for devices */ 4725 scan_info = (xpt_scan_bus_info *) 4726 malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_WAITOK); 4727 scan_info->request_ccb = request_ccb; 4728 scan_info->cpi = &work_ccb->cpi; 4729 4730 /* Cache on our stack so we can work asynchronously */ 4731 max_target = scan_info->cpi->max_target; 4732 initiator_id = scan_info->cpi->initiator_id; 4733 4734 /* 4735 * Don't count the initiator if the 4736 * initiator is addressable. 
4737 */ 4738 scan_info->pending_count = max_target + 1; 4739 if (initiator_id <= max_target) 4740 scan_info->pending_count--; 4741 4742 for (i = 0; i <= max_target; i++) { 4743 cam_status status; 4744 if (i == initiator_id) 4745 continue; 4746 4747 status = xpt_create_path(&path, xpt_periph, 4748 request_ccb->ccb_h.path_id, 4749 i, 0); 4750 if (status != CAM_REQ_CMP) { 4751 printf("xpt_scan_bus: xpt_create_path failed" 4752 " with status %#x, bus scan halted\n", 4753 status); 4754 break; 4755 } 4756 work_ccb = xpt_alloc_ccb(); 4757 xpt_setup_ccb(&work_ccb->ccb_h, path, 4758 request_ccb->ccb_h.pinfo.priority); 4759 work_ccb->ccb_h.func_code = XPT_SCAN_LUN; 4760 work_ccb->ccb_h.cbfcnp = xpt_scan_bus; 4761 work_ccb->ccb_h.ppriv_ptr0 = scan_info; 4762 work_ccb->crcn.flags = request_ccb->crcn.flags; 4763#if 0 4764 printf("xpt_scan_bus: probing %d:%d:%d\n", 4765 request_ccb->ccb_h.path_id, i, 0); 4766#endif 4767 xpt_action(work_ccb); 4768 } 4769 break; 4770 } 4771 case XPT_SCAN_LUN: 4772 { 4773 xpt_scan_bus_info *scan_info; 4774 path_id_t path_id; 4775 target_id_t target_id; 4776 lun_id_t lun_id; 4777 4778 /* Reuse the same CCB to query if a device was really found */ 4779 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0; 4780 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path, 4781 request_ccb->ccb_h.pinfo.priority); 4782 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE; 4783 4784 path_id = request_ccb->ccb_h.path_id; 4785 target_id = request_ccb->ccb_h.target_id; 4786 lun_id = request_ccb->ccb_h.target_lun; 4787 xpt_action(request_ccb); 4788 4789#if 0 4790 printf("xpt_scan_bus: got back probe from %d:%d:%d\n", 4791 path_id, target_id, lun_id); 4792#endif 4793 4794 if (request_ccb->ccb_h.status != CAM_REQ_CMP) { 4795 struct cam_ed *device; 4796 struct cam_et *target; 4797 int s; 4798 4799 /* 4800 * If we already probed lun 0 successfully, or 4801 * we have additional configured luns on this 4802 * target that might have "gone away", go onto 4803 * the 
next lun. 4804 */ 4805 target = request_ccb->ccb_h.path->target; 4806 s = splcam(); 4807 device = TAILQ_FIRST(&target->ed_entries); 4808 if (device != NULL) 4809 device = TAILQ_NEXT(device, links); 4810 splx(s); 4811 4812 if ((lun_id != 0) || (device != NULL)) { 4813 /* Try the next lun */ 4814 lun_id++; 4815 } 4816 } else { 4817 struct cam_ed *device; 4818 4819 device = request_ccb->ccb_h.path->device; 4820 4821 if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) { 4822 /* Try the next lun */ 4823 lun_id++; 4824 } 4825 } 4826 4827 xpt_free_path(request_ccb->ccb_h.path); 4828 4829 /* Check Bounds */ 4830 if ((lun_id == request_ccb->ccb_h.target_lun) 4831 || lun_id > scan_info->cpi->max_lun) { 4832 /* We're done */ 4833 4834 xpt_free_ccb(request_ccb); 4835 scan_info->pending_count--; 4836 if (scan_info->pending_count == 0) { 4837 xpt_free_ccb((union ccb *)scan_info->cpi); 4838 request_ccb = scan_info->request_ccb; 4839 free(scan_info, M_TEMP); 4840 request_ccb->ccb_h.status = CAM_REQ_CMP; 4841 xpt_done(request_ccb); 4842 } 4843 } else { 4844 /* Try the next device */ 4845 struct cam_path *path; 4846 cam_status status; 4847 4848 path = request_ccb->ccb_h.path; 4849 status = xpt_create_path(&path, xpt_periph, 4850 path_id, target_id, lun_id); 4851 if (status != CAM_REQ_CMP) { 4852 printf("xpt_scan_bus: xpt_create_path failed " 4853 "with status %#x, halting LUN scan\n", 4854 status); 4855 xpt_free_ccb(request_ccb); 4856 scan_info->pending_count--; 4857 if (scan_info->pending_count == 0) { 4858 xpt_free_ccb( 4859 (union ccb *)scan_info->cpi); 4860 request_ccb = scan_info->request_ccb; 4861 free(scan_info, M_TEMP); 4862 request_ccb->ccb_h.status = CAM_REQ_CMP; 4863 xpt_done(request_ccb); 4864 break; 4865 } 4866 } 4867 xpt_setup_ccb(&request_ccb->ccb_h, path, 4868 request_ccb->ccb_h.pinfo.priority); 4869 request_ccb->ccb_h.func_code = XPT_SCAN_LUN; 4870 request_ccb->ccb_h.cbfcnp = xpt_scan_bus; 4871 request_ccb->ccb_h.ppriv_ptr0 = scan_info; 4872 request_ccb->crcn.flags 
= 4873 scan_info->request_ccb->crcn.flags; 4874#if 0 4875 xpt_print_path(path); 4876 printf("xpt_scan bus probing\n"); 4877#endif 4878 xpt_action(request_ccb); 4879 } 4880 break; 4881 } 4882 default: 4883 break; 4884 } 4885} 4886 4887typedef enum { 4888 PROBE_TUR, 4889 PROBE_INQUIRY, 4890 PROBE_MODE_SENSE, 4891 PROBE_SERIAL_NUM, 4892 PROBE_TUR_FOR_NEGOTIATION 4893} probe_action; 4894 4895typedef enum { 4896 PROBE_INQUIRY_CKSUM = 0x01, 4897 PROBE_SERIAL_CKSUM = 0x02, 4898 PROBE_NO_ANNOUNCE = 0x04 4899} probe_flags; 4900 4901typedef struct { 4902 TAILQ_HEAD(, ccb_hdr) request_ccbs; 4903 probe_action action; 4904 union ccb saved_ccb; 4905 probe_flags flags; 4906 MD5_CTX context; 4907 u_int8_t digest[16]; 4908} probe_softc; 4909 4910static void 4911xpt_scan_lun(struct cam_periph *periph, struct cam_path *path, 4912 cam_flags flags, union ccb *request_ccb) 4913{ 4914 struct ccb_pathinq cpi; 4915 cam_status status; 4916 struct cam_path *new_path; 4917 struct cam_periph *old_periph; 4918 int s; 4919 4920 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, 4921 ("xpt_scan_lun\n")); 4922 4923 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1); 4924 cpi.ccb_h.func_code = XPT_PATH_INQ; 4925 xpt_action((union ccb *)&cpi); 4926 4927 if (cpi.ccb_h.status != CAM_REQ_CMP) { 4928 if (request_ccb != NULL) { 4929 request_ccb->ccb_h.status = cpi.ccb_h.status; 4930 xpt_done(request_ccb); 4931 } 4932 return; 4933 } 4934 4935 if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) { 4936 /* 4937 * Can't scan the bus on an adapter that 4938 * cannot perform the initiator role. 
	 */
	if (request_ccb != NULL) {
	    request_ccb->ccb_h.status = CAM_REQ_CMP;
	    xpt_done(request_ccb);
	}
	return;
    }

    if (request_ccb == NULL) {
	/*
	 * Internally generated scan: allocate our own CCB and path.
	 * Both are freed by xptscandone() when the scan completes.
	 */
	request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT);
	if (request_ccb == NULL) {
	    xpt_print_path(path);
	    printf("xpt_scan_lun: can't allocate CCB, can't "
		   "continue\n");
	    return;
	}
	new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT);
	if (new_path == NULL) {
	    xpt_print_path(path);
	    printf("xpt_scan_lun: can't allocate path, can't "
		   "continue\n");
	    free(request_ccb, M_TEMP);
	    return;
	}
	status = xpt_compile_path(new_path, xpt_periph,
				  path->bus->path_id,
				  path->target->target_id,
				  path->device->lun_id);

	if (status != CAM_REQ_CMP) {
	    xpt_print_path(path);
	    printf("xpt_scan_lun: can't compile path, can't "
		   "continue\n");
	    free(request_ccb, M_TEMP);
	    free(new_path, M_TEMP);
	    return;
	}
	xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
	request_ccb->ccb_h.cbfcnp = xptscandone;
	request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
	request_ccb->crcn.flags = flags;
    }

    s = splsoftcam();
    if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
	/* A probe of this path is already running; queue behind it. */
	probe_softc *softc;

	softc = (probe_softc *)old_periph->softc;
	TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
			  periph_links.tqe);
    } else {
	status = cam_periph_alloc(proberegister, NULL, probecleanup,
				  probestart, "probe",
				  CAM_PERIPH_BIO,
				  request_ccb->ccb_h.path, NULL, 0,
				  request_ccb);

	if (status != CAM_REQ_CMP) {
	    xpt_print_path(path);
	    printf("xpt_scan_lun: cam_alloc_periph returned an "
		   "error, can't continue probe\n");
	    request_ccb->ccb_h.status = status;
	    xpt_done(request_ccb);
	}
    }
    splx(s);
}

/*
 * Completion handler for internally generated scans: releases the path
 * and CCB that xpt_scan_lun() allocated from M_TEMP.
 */
static void
xptscandone(struct cam_periph *periph, union ccb *done_ccb)
{
    xpt_release_path(done_ccb->ccb_h.path);
    free(done_ccb->ccb_h.path, M_TEMP);
    free(done_ccb, M_TEMP);
}

/*
 * Probe periph registration callback (passed to cam_periph_alloc()).
 * arg is the XPT_SCAN_LUN CCB that triggered the probe; it becomes the
 * first entry on the softc's request queue, then the probe is scheduled.
 * Returns CAM_REQ_CMP_ERR on bad arguments or softc allocation failure.
 */
static cam_status
proberegister(struct cam_periph *periph, void *arg)
{
    struct ccb_getdev *cgd;
    probe_softc *softc;
    union ccb *ccb;

    cgd = (struct ccb_getdev *)arg;
    if (periph == NULL) {
	printf("proberegister: periph was NULL!!\n");
	return(CAM_REQ_CMP_ERR);
    }

    if (cgd == NULL) {
	printf("proberegister: no getdev CCB, can't register device\n");
	return(CAM_REQ_CMP_ERR);
    }

    softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT);

    if (softc == NULL) {
	printf("proberegister: Unable to probe new device. "
	       "Unable to allocate softc\n");
	return(CAM_REQ_CMP_ERR);
    }
    ccb = (union ccb *)cgd;
    TAILQ_INIT(&softc->request_ccbs);
    TAILQ_INSERT_TAIL(&softc->request_ccbs, &ccb->ccb_h, periph_links.tqe);
    softc->flags = 0;
    periph->softc = softc;
    cam_periph_acquire(periph);
    probeschedule(periph);
    return(CAM_REQ_CMP);
}

/*
 * Pick the first probe action for the request at the head of the queue
 * and schedule the probe periph to run at that request's priority.
 */
static void
probeschedule(struct cam_periph *periph)
{
    union ccb *ccb;
    probe_softc *softc;

    softc = (probe_softc *)periph->softc;
    ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);

    /*
     * If a device has gone away and another device, or the same one,
     * is back in the same place, it should have a unit attention
     * condition pending.  It will not report the unit attention in
     * response to an inquiry, which may leave invalid transfer
     * negotiations in effect.  The TUR will reveal the unit attention
     * condition.  Only send the TUR for lun 0, since some devices
     * will get confused by commands other than inquiry to non-existent
     * luns.  If you think a device has gone away start your scan from
     * lun 0.  This will insure that any bogus transfer settings are
     * invalidated.
     */
    if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
     && (ccb->ccb_h.target_lun == 0))
	softc->action = PROBE_TUR;
    else
	softc->action = PROBE_INQUIRY;

    if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
	softc->flags |= PROBE_NO_ANNOUNCE;
    else
	softc->flags &= ~PROBE_NO_ANNOUNCE;

    xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
}

/*
 * Build and issue the SCSI command for the current probe state.
 * Called by the XPT when the probe periph gets a CCB; probedone()
 * handles the completion and advances softc->action.
 */
static void
probestart(struct cam_periph *periph, union ccb *start_ccb)
{
    /* Probe the device that our peripheral driver points to */
    struct ccb_scsiio *csio;
    probe_softc *softc;

    CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));

    softc = (probe_softc *)periph->softc;
    csio = &start_ccb->csio;

    switch (softc->action) {
    case PROBE_TUR:
    case PROBE_TUR_FOR_NEGOTIATION:
    {
	scsi_test_unit_ready(csio,
			     /*retries*/4,
			     probedone,
			     MSG_SIMPLE_Q_TAG,
			     SSD_FULL_SIZE,
			     /*timeout*/60000);
	break;
    }
    case PROBE_INQUIRY:
    {
	struct scsi_inquiry_data *inq_buf;

	inq_buf = &periph->path->device->inq_data;
	/*
	 * If the device is currently configured, we calculate an
	 * MD5 checksum of the inquiry data, and if the serial number
	 * length is greater than 0, add the serial number data
	 * into the checksum as well.  Once the inquiry and the
	 * serial number check finish, we attempt to figure out
	 * whether we still have the same device.
	 */
	if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {

	    MD5Init(&softc->context);
	    MD5Update(&softc->context, (unsigned char *)inq_buf,
		      sizeof(struct scsi_inquiry_data));
	    softc->flags |= PROBE_INQUIRY_CKSUM;
	    if (periph->path->device->serial_num_len > 0) {
		MD5Update(&softc->context,
			  periph->path->device->serial_num,
			  periph->path->device->serial_num_len);
		softc->flags |= PROBE_SERIAL_CKSUM;
	    }
	    MD5Final(softc->digest, &softc->context);
	}

	scsi_inquiry(csio,
		     /*retries*/4,
		     probedone,
		     MSG_SIMPLE_Q_TAG,
		     (u_int8_t *)inq_buf,
		     sizeof(*inq_buf),
		     /*evpd*/FALSE,
		     /*page_code*/0,
		     SSD_MIN_SIZE,
		     /*timeout*/60 * 1000);
	break;
    }
    case PROBE_MODE_SENSE:
    {
	void *mode_buf;
	int mode_buf_len;

	/* Fetch the control mode page to learn the device's queueing
	 * behavior (freed in probedone's PROBE_MODE_SENSE case). */
	mode_buf_len = sizeof(struct scsi_mode_header_6)
		     + sizeof(struct scsi_mode_blk_desc)
		     + sizeof(struct scsi_control_page);
	mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT);
	if (mode_buf != NULL) {
	    scsi_mode_sense(csio,
			    /*retries*/4,
			    probedone,
			    MSG_SIMPLE_Q_TAG,
			    /*dbd*/FALSE,
			    SMS_PAGE_CTRL_CURRENT,
			    SMS_CONTROL_MODE_PAGE,
			    mode_buf,
			    mode_buf_len,
			    SSD_FULL_SIZE,
			    /*timeout*/60000);
	    break;
	}
	xpt_print_path(periph->path);
	printf("Unable to mode sense control page - malloc failure\n");
	softc->action = PROBE_SERIAL_NUM;
	/* FALLTHROUGH */
    }
    case PROBE_SERIAL_NUM:
    {
	struct scsi_vpd_unit_serial_number *serial_buf;
	struct cam_ed* device;

	serial_buf = NULL;
	device = periph->path->device;
	device->serial_num = NULL;
	device->serial_num_len = 0;

	if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0)
	    serial_buf = (struct scsi_vpd_unit_serial_number *)
		malloc(sizeof(*serial_buf), M_TEMP, M_NOWAIT);

	if (serial_buf != NULL) {
	    bzero(serial_buf, sizeof(*serial_buf));
	    scsi_inquiry(csio,
			 /*retries*/4,
			 probedone,
			 MSG_SIMPLE_Q_TAG,
			 (u_int8_t *)serial_buf,
			 sizeof(*serial_buf),
			 /*evpd*/TRUE,
			 SVPD_UNIT_SERIAL_NUMBER,
			 SSD_MIN_SIZE,
			 /*timeout*/60 * 1000);
	    break;
	}
	/*
	 * We'll have to do without, let our probedone
	 * routine finish up for us.
	 */
	start_ccb->csio.data_ptr = NULL;
	probedone(periph, start_ccb);
	return;
    }
    }
    xpt_action(start_ccb);
}

/*
 * Completion handler for all probe CCBs.  Advances the probe state
 * machine; when a request finishes (break out of the switch), the head
 * request CCB is completed and the next one, if any, is scheduled.
 */
static void
probedone(struct cam_periph *periph, union ccb *done_ccb)
{
    probe_softc *softc;
    struct cam_path *path;
    u_int32_t  priority;

    CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));

    softc = (probe_softc *)periph->softc;
    path = done_ccb->ccb_h.path;
    priority = done_ccb->ccb_h.pinfo.priority;

    switch (softc->action) {
    case PROBE_TUR:
    {
	/* The TUR only exists to flush a pending unit attention; any
	 * non-retryable error is ignored and we move on to INQUIRY. */
	if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {

	    if (cam_periph_error(done_ccb, 0,
				 SF_NO_PRINT, NULL) == ERESTART)
		return;
	    else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
		/* Don't wedge the queue */
		xpt_release_devq(done_ccb->ccb_h.path->device,
				 /*run_queue*/TRUE);
	}
	softc->action = PROBE_INQUIRY;
	xpt_release_ccb(done_ccb);
	xpt_schedule(periph, priority);
	return;
    }
    case PROBE_INQUIRY:
    {
	if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
	    struct scsi_inquiry_data *inq_buf;
	    u_int8_t periph_qual;
	    u_int8_t periph_dtype;

	    inq_buf = &path->device->inq_data;

	    periph_qual = SID_QUAL(inq_buf);
	    periph_dtype = SID_TYPE(inq_buf);
	    if (periph_dtype != T_NODEVICE) {
		switch(periph_qual) {
		case SID_QUAL_LU_CONNECTED:
		{
		    xpt_find_quirk(path->device);

		    /* Only bother with MODE SENSE if the device
		     * claims command queueing support. */
		    if ((inq_buf->flags & SID_CmdQue) != 0)
			softc->action =
			    PROBE_MODE_SENSE;
		    else
			softc->action =
			    PROBE_SERIAL_NUM;

		    path->device->flags &=
			~CAM_DEV_UNCONFIGURED;

		    xpt_release_ccb(done_ccb);
		    xpt_schedule(periph, priority);
		    return;
		}
		default:
		    break;
		}
	    }
	} else if (cam_periph_error(done_ccb, 0,
				    done_ccb->ccb_h.target_lun > 0
				    ? SF_RETRY_UA|SF_QUIET_IR
				    : SF_RETRY_UA,
				    &softc->saved_ccb) == ERESTART) {
	    return;
	} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
	    /* Don't wedge the queue */
	    xpt_release_devq(done_ccb->ccb_h.path->device,
			     /*run_queue*/TRUE);
	}
	/*
	 * If we get to this point, we got an error status back
	 * from the inquiry and the error status doesn't require
	 * automatically retrying the command.  Therefore, the
	 * inquiry failed.  If we had inquiry information before
	 * for this device, but this latest inquiry command failed,
	 * the device has probably gone away.  If this device isn't
	 * already marked unconfigured, notify the peripheral
	 * drivers that this device is no more.
	 */
	if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
	    /* Send the async notification.
	     */
	    xpt_async(AC_LOST_DEVICE, path, NULL);

	xpt_release_ccb(done_ccb);
	break;
    }
    case PROBE_MODE_SENSE:
    {
	struct ccb_scsiio *csio;
	struct scsi_mode_header_6 *mode_hdr;

	csio = &done_ccb->csio;
	mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
	    struct scsi_control_page *page;
	    u_int8_t *offset;

	    /* The control page follows the header and block descriptor. */
	    offset = ((u_int8_t *)&mode_hdr[1])
		+ mode_hdr->blk_desc_len;
	    page = (struct scsi_control_page *)offset;
	    path->device->queue_flags = page->queue_flags;
	} else if (cam_periph_error(done_ccb, 0,
				    SF_RETRY_UA|SF_NO_PRINT,
				    &softc->saved_ccb) == ERESTART) {
	    return;
	} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
	    /* Don't wedge the queue */
	    xpt_release_devq(done_ccb->ccb_h.path->device,
			     /*run_queue*/TRUE);
	}
	xpt_release_ccb(done_ccb);
	free(mode_hdr, M_TEMP);	/* allocated in probestart() */
	softc->action = PROBE_SERIAL_NUM;
	xpt_schedule(periph, priority);
	return;
    }
    case PROBE_SERIAL_NUM:
    {
	struct ccb_scsiio *csio;
	struct scsi_vpd_unit_serial_number *serial_buf;
	u_int32_t  priority;	/* NOTE(review): shadows the function-level
				 * priority; assigned the same value below */
	int changed;
	int have_serialnum;

	changed = 1;
	have_serialnum = 0;
	csio = &done_ccb->csio;
	priority = done_ccb->ccb_h.pinfo.priority;
	serial_buf =
	    (struct scsi_vpd_unit_serial_number *)csio->data_ptr;

	/* Clean up from previous instance of this device */
	if (path->device->serial_num != NULL) {
	    free(path->device->serial_num, M_DEVBUF);
	    path->device->serial_num = NULL;
	    path->device->serial_num_len = 0;
	}

	if (serial_buf == NULL) {
	    /*
	     * Don't process the command as it was never sent
	     */
	} else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
		&& (serial_buf->length > 0)) {

	    have_serialnum = 1;
	    /* +1 for the NUL terminator added below. */
	    path->device->serial_num =
		(u_int8_t *)malloc((serial_buf->length + 1),
				   M_DEVBUF, M_NOWAIT);
	    if (path->device->serial_num != NULL) {
		bcopy(serial_buf->serial_num,
		      path->device->serial_num,
		      serial_buf->length);
		path->device->serial_num_len =
		    serial_buf->length;
		path->device->serial_num[serial_buf->length]
		    = '\0';
	    }
	} else if (cam_periph_error(done_ccb, 0,
				    SF_RETRY_UA|SF_NO_PRINT,
				    &softc->saved_ccb) == ERESTART) {
	    return;
	} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
	    /* Don't wedge the queue */
	    xpt_release_devq(done_ccb->ccb_h.path->device,
			     /*run_queue*/TRUE);
	}

	/*
	 * Let's see if we have seen this device before.
	 */
	if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
	    MD5_CTX context;
	    u_int8_t digest[16];

	    MD5Init(&context);

	    MD5Update(&context,
		      (unsigned char *)&path->device->inq_data,
		      sizeof(struct scsi_inquiry_data));

	    if (have_serialnum)
		MD5Update(&context, serial_buf->serial_num,
			  serial_buf->length);

	    MD5Final(digest, &context);
	    if (bcmp(softc->digest, digest, 16) == 0)
		changed = 0;

	    /*
	     * XXX Do we need to do a TUR in order to ensure
	     *     that the device really hasn't changed???
	     */
	    if ((changed != 0)
	     && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
		xpt_async(AC_LOST_DEVICE, path, NULL);
	}
	if (serial_buf != NULL)
	    free(serial_buf, M_TEMP);

	if (changed != 0) {
	    /*
	     * Now that we have all the necessary
	     * information to safely perform transfer
	     * negotiations... Controllers don't perform
	     * any negotiation or tagged queuing until
	     * after the first XPT_SET_TRAN_SETTINGS ccb is
	     * received.  So, on a new device, just retreive
	     * the user settings, and set them as the current
	     * settings to set the device up.
	     */
	    done_ccb->ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	    done_ccb->cts.flags = CCB_TRANS_USER_SETTINGS;
	    xpt_action(done_ccb);
	    done_ccb->ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
	    done_ccb->cts.flags &= ~CCB_TRANS_USER_SETTINGS;
	    done_ccb->cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
	    xpt_action(done_ccb);
	    xpt_release_ccb(done_ccb);

	    /*
	     * Perform a TUR to allow the controller to
	     * perform any necessary transfer negotiation.
	     */
	    softc->action = PROBE_TUR_FOR_NEGOTIATION;
	    xpt_schedule(periph, priority);
	    return;
	}
	xpt_release_ccb(done_ccb);
	break;
    }
    case PROBE_TUR_FOR_NEGOTIATION:
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
	    /* Don't wedge the queue */
	    xpt_release_devq(done_ccb->ccb_h.path->device,
			     /*run_queue*/TRUE);
	}

	path->device->flags &= ~CAM_DEV_UNCONFIGURED;

	if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
	    /* Inform the XPT that a new device has been found */
	    done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
	    xpt_action(done_ccb);

	    xpt_async(AC_FOUND_DEVICE, xpt_periph->path, done_ccb);
	}
	xpt_release_ccb(done_ccb);
	break;
    }
    /*
     * This request is finished: complete its CCB and either tear the
     * probe periph down or start on the next queued request.
     */
    done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
    TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
    done_ccb->ccb_h.status = CAM_REQ_CMP;
    xpt_done(done_ccb);
    if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
	cam_periph_invalidate(periph);
	cam_periph_release(periph);
    } else {
	probeschedule(periph);
    }
}

/*
 * Probe periph destructor: frees the softc allocated in proberegister().
 */
static void
probecleanup(struct cam_periph *periph)
{
    free(periph->softc, M_TEMP);
}

/*
 * Match the device's inquiry data against the static quirk table and
 * attach the matching entry.  The table is expected to end with a
 * wildcard entry, so failure to match is a panic.
 */
static void
xpt_find_quirk(struct cam_ed *device)
{
    caddr_t	match;

    match = cam_quirkmatch((caddr_t)&device->inq_data,
			   (caddr_t)xpt_quirk_table,
			   sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
			   sizeof(*xpt_quirk_table),
			   scsi_inquiry_match);

    if (match == NULL)
	panic("xpt_find_quirk: device didn't match wildcard entry!!");

    device->quirk = (struct xpt_quirk_entry *)match;
}

/*
 * Validate a transfer-settings request against what the controller and
 * device report they can do, manage tagged-queueing on/off transitions
 * (including the freeze/resize dance needed when turning tags off), and
 * hand the CCB to the SIM.  When async_update is TRUE the capability
 * checking and the SIM call are skipped.
 */
static void
xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
			  int async_update)
{
    struct cam_sim *sim;
    int qfrozen;

    sim = cts->ccb_h.path->bus->sim;
    if (async_update == FALSE) {
	struct scsi_inquiry_data *inq_data;
	struct ccb_pathinq cpi;

	if (device == NULL) {
	    cts->ccb_h.status = CAM_PATH_INVALID;
	    xpt_done((union ccb *)cts);
	    return;
	}

	/*
	 * Perform sanity checking against what the
	 * controller and device can do.
	 */
	xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	inq_data = &device->inq_data;
	if ((inq_data->flags & SID_Sync) == 0
	 || (cpi.hba_inquiry & PI_SDTR_ABLE) == 0) {
	    /* Force async */
	    cts->sync_period = 0;
	    cts->sync_offset = 0;
	}

	/* Clamp the requested bus width to what both sides support. */
	switch (cts->bus_width) {
	case MSG_EXT_WDTR_BUS_32_BIT:
	    if ((inq_data->flags & SID_WBus32) != 0
	      && (cpi.hba_inquiry & PI_WIDE_32) != 0)
		break;
	    /* Fall Through to 16-bit */
	case MSG_EXT_WDTR_BUS_16_BIT:
	    if ((inq_data->flags & SID_WBus16) != 0
	      && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
		cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
		break;
	    }
	    /* Fall Through to 8-bit */
	default: /* New bus width?? */
	case MSG_EXT_WDTR_BUS_8_BIT:
	    /* All targets can do this */
	    cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
	    break;
	}

	if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) {
	    /*
	     * Can't tag queue without disconnection.
	     */
	    cts->flags &= ~CCB_TRANS_TAG_ENB;
	    cts->valid |= CCB_TRANS_TQ_VALID;
	}

	if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
	 || (inq_data->flags & SID_CmdQue) == 0
	 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
	 || (device->quirk->mintags == 0)) {
	    /*
	     * Can't tag on hardware that doesn't support,
	     * doesn't have it enabled, or has broken tag support.
	     */
	    cts->flags &= ~CCB_TRANS_TAG_ENB;
	}
    }

    qfrozen = FALSE;
    if ((cts->valid & CCB_TRANS_TQ_VALID) != 0
     && (async_update == FALSE)) {
	int device_tagenb;

	/*
	 * If we are transitioning from tags to no-tags or
	 * vice-versa, we need to carefully freeze and restart
	 * the queue so that we don't overlap tagged and non-tagged
	 * commands.  We also temporarily stop tags if there is
	 * a change in transfer negotiation settings to allow
	 * "tag-less" negotiation.
	 */
	if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
	 || (device->inq_flags & SID_CmdQue) != 0)
	    device_tagenb = TRUE;
	else
	    device_tagenb = FALSE;

	if (((cts->flags & CCB_TRANS_TAG_ENB) != 0
	  && device_tagenb == FALSE)
	 || ((cts->flags & CCB_TRANS_TAG_ENB) == 0
	  && device_tagenb == TRUE)) {

	    if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
		/*
		 * Delay change to use tags until after a
		 * few commands have gone to this device so
		 * the controller has time to perform transfer
		 * negotiations without tagged messages getting
		 * in the way.
		 */
		device->tag_delay_count = CAM_TAG_DELAY_COUNT;
		device->flags |= CAM_DEV_TAG_AFTER_COUNT;
	    } else {
		/* Turning tags off: freeze, drop the openings back to
		 * the untagged limit, and release the queue below. */
		xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
		qfrozen = TRUE;
		device->inq_flags &= ~SID_CmdQue;
		xpt_dev_ccbq_resize(cts->ccb_h.path,
				    sim->max_dev_openings);
		device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
		device->tag_delay_count = 0;
	    }
	}
    }

    if (async_update == FALSE) {
	/*
	 * If we are currently performing tagged transactions to
	 * this device and want to change its negotiation parameters,
	 * go non-tagged for a bit to give the controller a chance to
	 * negotiate unhampered by tag messages.
	 */
	if ((device->inq_flags & SID_CmdQue) != 0
	 && (cts->flags & (CCB_TRANS_SYNC_RATE_VALID|
			   CCB_TRANS_SYNC_OFFSET_VALID|
			   CCB_TRANS_BUS_WIDTH_VALID)) != 0)
	    xpt_toggle_tags(cts->ccb_h.path);

	(*(sim->sim_action))(sim, (union ccb *)cts);
    }

    if (qfrozen) {
	struct ccb_relsim crs;

	/* Release the devq frozen above once it empties. */
	xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
		      /*priority*/1);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
	crs.openings
	    = crs.release_timeout
	    = crs.qfrozen_cnt
	    = 0;
	xpt_action((union ccb *)&crs);
    }
}

static void
xpt_toggle_tags(struct cam_path *path)
{
    /*
     * Give controllers a chance to renegotiate
     * before starting tag operations.  We
     * "toggle" tagged queuing off then on
     * which causes the tag enable command delay
     * counter to come into effect.
     */
    if ((path->device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
     || (path->device->inq_flags & SID_CmdQue) != 0) {
	struct ccb_trans_settings cts;

	xpt_setup_ccb(&cts.ccb_h, path, 1);
	cts.flags = 0;
	cts.valid = CCB_TRANS_TQ_VALID;
	xpt_set_transfer_settings(&cts, path->device,
				  /*async_update*/TRUE);
	cts.flags = CCB_TRANS_TAG_ENB;
	xpt_set_transfer_settings(&cts, path->device,
				  /*async_update*/TRUE);
    }
}

/*
 * Turn tagged queueing back on for a device: clear the delay flag,
 * grow the device's CCB queue to the tagged limit, and release the
 * frozen devq once it drains.
 */
static void
xpt_start_tags(struct cam_path *path)
{
    struct ccb_relsim crs;
    struct cam_ed *device;
    struct cam_sim *sim;
    int    newopenings;

    device = path->device;
    sim = path->bus->sim;
    device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
    xpt_freeze_devq(path, /*count*/1);
    device->inq_flags |= SID_CmdQue;
    newopenings = min(device->quirk->maxtags, sim->max_tagged_dev_openings);
    xpt_dev_ccbq_resize(path, newopenings);
    xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
    crs.ccb_h.func_code = XPT_REL_SIMQ;
    crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
    crs.openings
	= crs.release_timeout
	= crs.qfrozen_cnt
	= 0;
    xpt_action((union ccb *)&crs);
}

/* Number of real busses still awaiting initial configuration. */
static int busses_to_config;

/*
 * xpt_for_all_busses() callback: count every bus except the xpt
 * pseudo-bus so xpt_config() knows how many scans to wait for.
 */
static int
xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
{
    if (bus->path_id != CAM_XPT_PATH_ID)
	busses_to_config++;

    return(1);
}

/*
 * xpt_for_all_busses() callback: kick off configuration of one bus.
 * Issues a path inquiry and then a bus reset (real or simulated,
 * depending on PIM_NOBUSRESET); xpt_finishconfig() continues with the
 * bus scan from the reset completion.
 */
static int
xptconfigfunc(struct cam_eb *bus, void *arg)
{
    struct cam_path *path;
    union ccb *work_ccb;

    if (bus->path_id != CAM_XPT_PATH_ID) {
	cam_status status;

	work_ccb = xpt_alloc_ccb();
	if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){
	    printf("xptconfigfunc: xpt_create_path failed with "
		   "status %#x for bus %d\n", status, bus->path_id);
	    printf("xptconfigfunc: halting bus configuration\n");
	    xpt_free_ccb(work_ccb);
	    busses_to_config--;
	    xpt_finishconfig(xpt_periph, NULL);
	    return(0);
	}
	xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
	work_ccb->ccb_h.func_code = XPT_PATH_INQ;
	xpt_action(work_ccb);
	if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
	    printf("xptconfigfunc: CPI failed on bus %d "
		   "with status %d\n", bus->path_id,
		   work_ccb->ccb_h.status);
	    xpt_finishconfig(xpt_periph, work_ccb);
	    return(1);
	}

	if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0) {
	    xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
	    work_ccb->ccb_h.func_code = XPT_RESET_BUS;
	    work_ccb->ccb_h.cbfcnp = NULL;
	    CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
		      ("Resetting Bus\n"));
	    xpt_action(work_ccb);
	    xpt_finishconfig(xpt_periph, work_ccb);
	} else {
	    /* Act as though we performed a successful BUS RESET */
	    work_ccb->ccb_h.func_code = XPT_RESET_BUS;
	    xpt_finishconfig(xpt_periph, work_ccb);
	}
    }

    return(1);
}

/*
 * Boot-time entry point for device discovery, run once interrupts are
 * enabled.  Sets up optional CAM debugging, counts the busses to be
 * configured, and starts configuration on each.
 */
static void
xpt_config(void *arg)
{
    /* Now that interrupts are enabled, go find our devices */

#ifdef CAMDEBUG
    /* Setup debugging flags and path */
#ifdef CAM_DEBUG_FLAGS
    cam_dflags = CAM_DEBUG_FLAGS;
#else /* !CAM_DEBUG_FLAGS */
    cam_dflags = CAM_DEBUG_NONE;
#endif /* CAM_DEBUG_FLAGS */
#ifdef CAM_DEBUG_BUS
    if (cam_dflags != CAM_DEBUG_NONE) {
	if (xpt_create_path(&cam_dpath, xpt_periph,
			    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
			    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
	    printf("xpt_config: xpt_create_path() failed for debug"
		   " target %d:%d:%d, debugging disabled\n",
		   CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
	    cam_dflags = CAM_DEBUG_NONE;
	}
    } else
	cam_dpath = NULL;
#else /* !CAM_DEBUG_BUS */
    cam_dpath = NULL;
#endif /* CAM_DEBUG_BUS */
#endif /* CAMDEBUG */

    /*
     * Scan all installed busses.
     */
    xpt_for_all_busses(xptconfigbuscountfunc, NULL);

    if (busses_to_config == 0) {
	/* Call manually because we don't have any busses */
	xpt_finishconfig(xpt_periph, NULL);
    } else {
	if (SCSI_DELAY >= 2000) {
	    printf("Waiting %d seconds for SCSI "
		   "devices to settle\n", SCSI_DELAY/1000);
	}
	xpt_for_all_busses(xptconfigfunc, NULL);
    }
}

/*
 * If the given device only has one peripheral attached to it, and if that
 * peripheral is the passthrough driver, announce it.  This insures that the
 * user sees some sort of announcement for every peripheral in their system.
 */
static int
xptpassannouncefunc(struct cam_ed *device, void *arg)
{
    struct cam_periph *periph;
    int i;

    /* Count the periphs attached to this device. */
    for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
	 periph = SLIST_NEXT(periph, periph_links), i++);

    periph = SLIST_FIRST(&device->periphs);
    if ((i == 1)
     && (strncmp(periph->periph_name, "pass", 4) == 0))
	xpt_announce_periph(periph, NULL);

    return(1);
}

/*
 * Per-bus configuration continuation.  For a completed bus reset it
 * launches the bus scan; for a completed (or failed) scan it retires
 * the bus.  Once every bus is done, the peripheral drivers are
 * registered (continues past this view).
 */
static void
xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
{
    struct periph_driver **p_drv;
    int i;

    if (done_ccb != NULL) {
	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
		  ("xpt_finishconfig\n"));
	switch(done_ccb->ccb_h.func_code) {
	case XPT_RESET_BUS:
	    if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
		/* Reset succeeded; reuse the CCB to scan the bus. */
		done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
		done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
		xpt_action(done_ccb);
		return;
	    }
	    /* FALLTHROUGH */
	case XPT_SCAN_BUS:
	default:
	    xpt_free_path(done_ccb->ccb_h.path);
	    busses_to_config--;
	    break;
	}
    }

    if (busses_to_config == 0) {
	/* Register all the peripheral drivers */
	/* XXX This will have to change when we have loadable modules */
	p_drv = (struct periph_driver **)periphdriver_set.ls_items;
5869 for (i = 0; p_drv[i] != NULL; i++) { 5870 (*p_drv[i]->init)(); 5871 } 5872 5873 /* 5874 * Check for devices with no "standard" peripheral driver 5875 * attached. For any devices like that, announce the 5876 * passthrough driver so the user will see something. 5877 */ 5878 xpt_for_all_devices(xptpassannouncefunc, NULL); 5879 5880 /* Release our hook so that the boot can continue. */ 5881 config_intrhook_disestablish(xpt_config_hook); 5882 free(xpt_config_hook, M_TEMP); 5883 xpt_config_hook = NULL; 5884 } 5885 if (done_ccb != NULL) 5886 xpt_free_ccb(done_ccb); 5887} 5888 5889static void 5890xptaction(struct cam_sim *sim, union ccb *work_ccb) 5891{ 5892 CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n")); 5893 5894 switch (work_ccb->ccb_h.func_code) { 5895 /* Common cases first */ 5896 case XPT_PATH_INQ: /* Path routing inquiry */ 5897 { 5898 struct ccb_pathinq *cpi; 5899 5900 cpi = &work_ccb->cpi; 5901 cpi->version_num = 1; /* XXX??? */ 5902 cpi->hba_inquiry = 0; 5903 cpi->target_sprt = 0; 5904 cpi->hba_misc = 0; 5905 cpi->hba_eng_cnt = 0; 5906 cpi->max_target = 0; 5907 cpi->max_lun = 0; 5908 cpi->initiator_id = 0; 5909 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 5910 strncpy(cpi->hba_vid, "", HBA_IDLEN); 5911 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN); 5912 cpi->unit_number = sim->unit_number; 5913 cpi->bus_id = sim->bus_id; 5914 cpi->ccb_h.status = CAM_REQ_CMP; 5915 xpt_done(work_ccb); 5916 break; 5917 } 5918 default: 5919 work_ccb->ccb_h.status = CAM_REQ_INVALID; 5920 xpt_done(work_ccb); 5921 break; 5922 } 5923} 5924 5925/* 5926 * Should only be called by the machine interrupt dispatch routines, 5927 * so put these prototypes here instead of in the header. 
 */

/* Software interrupt handler: drain the network-class CCB queue. */
static void
swi_camnet(void)
{
	camisr(&cam_netq);
}

/* Software interrupt handler: drain the disk/bio-class CCB queue. */
static void
swi_cambio(void)
{
	camisr(&cam_bioq);
}

/*
 * Drain one completed-CCB queue at software interrupt time: dequeue
 * each finished CCB, do the per-device/SIM accounting, release frozen
 * queues where requested, restart the device send queue if work is
 * pending, and finally invoke the CCB's peripheral completion callback.
 * The queue itself is only manipulated at splcam; the per-CCB work runs
 * with the IPL dropped.
 */
static void
camisr(cam_isrq_t *queue)
{
	int	s;
	struct	ccb_hdr *ccb_h;

	s = splcam();
	while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
		int	runq;

		TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
		splx(s);

		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
			  ("camisr"));

		runq = FALSE;

		if (ccb_h->flags & CAM_HIGH_POWER) {
			struct highpowerlist	*hphead;
			struct cam_ed		*device;
			union ccb		*send_ccb;

			/*
			 * NOTE(review): num_highpower and highpowerq are
			 * updated here after the splx(s) above — verify that
			 * they are protected by some other means.
			 */
			hphead = &highpowerq;

			send_ccb = (union ccb *)STAILQ_FIRST(hphead);

			/*
			 * Increment the count since this command is done.
			 */
			num_highpower++;

			/*
			 * Any high powered commands queued up?
			 */
			if (send_ccb != NULL) {
				/*
				 * NOTE(review): 'device' is assigned but
				 * never used below (dead store).
				 */
				device = send_ccb->ccb_h.path->device;

				STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);

				xpt_release_devq(send_ccb->ccb_h.path->device,
						 TRUE);
			}
		}
		/*
		 * Skip the device/SIM accounting for CCB types that were
		 * never counted against the device's send queue.
		 */
		if ((ccb_h->func_code != XPT_ACCEPT_TARGET_IO)
		 && (ccb_h->func_code != XPT_IMMED_NOTIFY)
		 && (ccb_h->func_code != XPT_SCAN_LUN)
		 && (ccb_h->func_code != XPT_SCAN_BUS)) {
			struct cam_ed *dev;

			dev = ccb_h->path->device;

			s = splcam();
			cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);

			/* Give back the opening this CCB consumed. */
			ccb_h->path->bus->sim->devq->send_active--;
			ccb_h->path->bus->sim->devq->send_openings++;
			splx(s);

			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
			 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
			  && (dev->ccbq.dev_active == 0))) {

				xpt_release_devq(ccb_h->path->device,
						 /*run_queue*/TRUE);
			}

			/* Delayed tag start: enable once enough CCBs done. */
			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
			 && (--dev->tag_delay_count == 0))
				xpt_start_tags(ccb_h->path);

			if ((dev->ccbq.queue.entries > 0)
			 && (dev->qfrozen_cnt == 0)
			 && (device_is_send_queued(dev) == 0)) {
				runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
							      dev);
			}
		}

		if (ccb_h->status & CAM_RELEASE_SIMQ) {
			xpt_release_simq(ccb_h->path->bus->sim,
					 /*run_queue*/TRUE);
		} else if ((ccb_h->flags & CAM_DEV_QFRZDIS)
			&& (ccb_h->status & CAM_DEV_QFRZN)) {
			xpt_release_devq(ccb_h->path->device,
					 /*run_queue*/TRUE);
			ccb_h->status &= ~CAM_DEV_QFRZN;
		} else if (runq) {
			xpt_run_dev_sendq(ccb_h->path->bus);
		}

		/* Call the peripheral driver's callback */
		(*ccb_h->cbfcnp)(ccb_h->path->periph,
				 (union ccb *)ccb_h);

		/* Raise IPL for while test */
		s = splcam();
	}
	splx(s);
}